Code example #1
    def forward(self, wrf):
        # Normalize each WRF variable group over its spatial dims (the shape
        # of a single channel slice) before the per-group convolutions.
        wrf_qice = wrf[:, 3:7]
        wrf_qice = torch.layer_norm(wrf_qice,
                                    normalized_shape=tuple(wrf_qice[0, 0].shape),
                                    eps=1e-30)
        wrf_qsnow = wrf[:, 12:16]
        wrf_qsnow = torch.layer_norm(wrf_qsnow,
                                     normalized_shape=tuple(wrf_qsnow[0, 0].shape),
                                     eps=1e-30)
        wrf_qgroup = wrf[:, 21:25]
        wrf_qgroup = torch.layer_norm(wrf_qgroup,
                                      normalized_shape=tuple(wrf_qgroup[0, 0].shape),
                                      eps=1e-30)
        wrf_w = wrf[:, 27:28]
        wrf_rain = wrf[:, 28:29]

        wrf_qice = self.conv2d_qice(wrf_qice)
        wrf_qsnow = self.conv2d_qsnow(wrf_qsnow)
        wrf_qgroup = self.conv2d_qgroup(wrf_qgroup)
        wrf_w = self.conv2d_w(wrf_w)
        wrf_rain = self.conv2d_rain(wrf_rain)

        wrf_enc = torch.cat([wrf_qice, wrf_qsnow, wrf_qgroup, wrf_w, wrf_rain],
                            dim=1)
        wrf_enc = self.layernorm(wrf_enc)
        wrf_enc = self.encoder(wrf_enc)
        return wrf_enc
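
For reference: with normalized_shape=tuple(x[0, 0].shape), torch.layer_norm standardizes each (sample, channel) slice over the spatial dims alone. A minimal sketch of that behavior, with illustrative tensor sizes:

import torch

x = torch.randn(2, 4, 8, 8)  # illustrative (batch, channels, H, W)
y = torch.layer_norm(x, tuple(x[0, 0].shape))
# Each (sample, channel) slice now has ~zero mean and unit variance:
print(y[0, 0].mean().item(), y[0, 0].var(unbiased=False).item())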
Code example #2
File: test_model8a.py  Project: amimai/NNets
    def forward(self, x, h):
        bs = x.shape[0]

        out = self.lin_in(x)  # input -> hidden-dim encoder
        out = out.transpose(0, 1)  # (batch, seq, dim) -> (seq, batch, dim) for MHA
        # Three residual self-attention passes sharing the same mha1 weights.
        # Note: normalized_shape=tmp.shape normalizes over every dim, batch included.
        tmp, attn = self.mha1(out, out, out)
        out = torch.layer_norm(torch.add(tmp, out), tmp.shape)
        tmp, attn = self.mha1(out, out, out)
        out = torch.layer_norm(torch.add(tmp, out), tmp.shape)
        tmp, attn = self.mha1(out, out, out)
        out = torch.layer_norm(torch.add(tmp, out), tmp.shape)
        out = out.transpose(0, 1)
        out = self.fc1(out.reshape(bs, self.backwards * self.hidden_dim))
        #out = self.drop(out)
        out = self.r1_dnet(out)
        out = self.fc3(out)
        return out, h
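
Passing tmp.shape as normalized_shape folds the sequence and batch dims into the statistics. The more common transformer convention normalizes only the feature dim; a minimal sketch of that variant (hidden_dim, head count, and shapes are illustrative):

import torch
import torch.nn as nn

hidden_dim = 64  # illustrative size
mha = nn.MultiheadAttention(hidden_dim, num_heads=4)
norm = nn.LayerNorm(hidden_dim)  # statistics over the feature dim only

out = torch.randn(10, 2, hidden_dim)  # (seq, batch, hidden)
tmp, _ = mha(out, out, out)
out = norm(tmp + out)  # per-token residual + LayerNorm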
Code example #3
    def forward(self, x):
        N = x.size()[0]
        C = x.size()[1]

        decoded = self.decoded(x)
        encoded = self.encoded(
            self.relu(torch.layer_norm(decoded, decoded.size()[1:])))
        # softmax needs an explicit dim; dim=1 preserves the old implicit
        # default for 4D tensors.
        encoded = nn.functional.softmax(encoded, dim=1)
        cnn = x * encoded  # gate the input with the attention weights
        return cnn
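
Here normalized_shape=decoded.size()[1:] standardizes each sample over all non-batch dims; torch.layer_norm is the same kernel that nn.functional.layer_norm dispatches to. A quick check with illustrative sizes:

import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 5, 5)  # illustrative (N, C, H, W)
a = torch.layer_norm(x, x.size()[1:])
b = F.layer_norm(x, x.size()[1:])
print(torch.allclose(a, b))  # True: both normalize over (C, H, W) per sample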
Code example #4
File: convlstm.py  Project: zhwzhong/deep-video-mvs
    def forward(self, input_tensor, cur_state, previous_pose, current_pose,
                estimated_current_depth, camera_matrix):
        h_cur, c_cur = cur_state

        if previous_pose is not None:
            transformation = torch.bmm(torch.inverse(previous_pose),
                                       current_pose)

            non_valid = estimated_current_depth <= 0.01
            h_cur = warp_frame_depth(image_src=h_cur,
                                     depth_dst=estimated_current_depth,
                                     src_trans_dst=transformation,
                                     camera_matrix=camera_matrix,
                                     normalize_points=False,
                                     sampling_mode='bilinear')
            b, c, h, w = h_cur.size()
            non_valid = torch.cat([non_valid] * c, dim=1)  # broadcast mask to all channels
            h_cur.data[non_valid] = 0.0  # zero invalid pixels in place (bypasses autograd)

        combined = torch.cat([input_tensor, h_cur],
                             dim=1)  # concatenate along channel axis
        combined_conv = self.conv(combined)
        cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv,
                                             self.hidden_dim,
                                             dim=1)

        b, c, h, w = h_cur.size()
        i = torch.sigmoid(cc_i)
        f = torch.sigmoid(cc_f)
        o = torch.sigmoid(cc_o)

        cc_g = torch.layer_norm(cc_g, [h, w])  # normalize the candidate over space
        g = self.activation_function(cc_g)

        c_next = f * c_cur + i * g
        c_next = torch.layer_norm(c_next, [h, w])  # keep the cell state well-scaled
        h_next = o * self.activation_function(c_next)

        return h_next, c_next
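
The .data assignment above zeroes invalid pixels by mutating the tensor outside autograd. A non-mutating alternative, sketched assuming non_valid is the (b, 1, h, w) boolean mask computed before the torch.cat broadcast:

import torch

def zero_invalid(h_cur, non_valid):
    # masked_fill broadcasts the (b, 1, h, w) mask across channels and
    # returns a new tensor, so gradients remain well-defined.
    return h_cur.masked_fill(non_valid, 0.0)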
Code example #5
def pt_layer_norm(a, shape, normalized_axis, weight, bias):
    # Run torch.layer_norm on an array that exposes .asnumpy() (e.g. an
    # MXNet NDArray) and return the result as a NumPy array.
    a_np = a.asnumpy()
    weight_pt = bias_pt = None
    if weight is not None:  # truth-testing an array is ambiguous; compare to None
        weight_pt = torch.from_numpy(weight.asnumpy())
    if bias is not None:
        bias_pt = torch.from_numpy(bias.asnumpy())
    a_pt = torch.from_numpy(a_np)
    # Translate axis indices into the sizes torch expects for normalized_shape.
    pt_normalized_axis = [shape[axis] for axis in normalized_axis]
    a_out = torch.layer_norm(a_pt, pt_normalized_axis, weight_pt, bias_pt,
                             eps=CustomLayerNormUtils.EPSILON_FLOAT,
                             cudnn_enable=False)
    return a_out.numpy()
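
A usage sketch, assuming MXNet-style inputs (the framework is inferred from .asnumpy()); sizes and axes here are illustrative:

import mxnet as mx

a = mx.nd.random.normal(shape=(2, 3, 4))
# Normalize over the last two axes; weight and bias omitted for brevity.
out = pt_layer_norm(a, shape=(2, 3, 4), normalized_axis=[1, 2],
                    weight=None, bias=None)
print(out.shape)  # (2, 3, 4), now a NumPy array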
Code example #6
    def forward(self, block_input):
        N = block_input.size()[0]
        C = block_input.size()[1]

        attention = self.attention(block_input)

        # Explicit dim required: dim=1 matches the legacy implicit choice
        # for 4D inputs.
        block_input = nn.functional.softmax(block_input, dim=1)

        block_input_flattened = torch.reshape(block_input, [N, C, -1])
        attention = torch.squeeze(attention, dim=3)
        attention_flattened = torch.reshape(attention, [N, -1])

        # Attention-weighted sum over the flattened spatial positions.
        c11 = torch.einsum('bcf,bf->bc', block_input_flattened,
                           attention_flattened)
        c11 = torch.reshape(c11, (N, C, 1, 1))

        c12 = self.c12(c11)

        c15 = self.c15(self.relu(torch.layer_norm(c12, c12.size()[1:])))
        cnn = torch.add(block_input, c15)
        return cnn
Code example #7
    def forward(self,
                inputs,
                sample_m,
                sentence_ms,
                sen_mask,
                using_GPU=False):
        """
        为了方便 输入的数据使用pad过的,每个batch下sentence_num和sentence_len保持一致
        输入数据全部为tensor数据
        :param sen_mask: mask the padding sentence(int(0)). shape(batch_size, sentence_num)
        :param inputs: shape(batch_size, sentence_num, sentence_len, embedding_dim)
        :param sample_m: shape(batch_size, metaphor_dim)
        :param sentence_ms: shape(batch_size, sentence_num, metaphor_dim)
        :return: label distribution shape(batch_size, num_class)
        """
        batch_size, sen_num, sen_len, embedding_dim = inputs.size()

        cnn_re1 = self.cnn1(
            inputs.view(-1, sen_len, embedding_dim).permute(0, 2, 1))
        cnn_re2 = self.cnn2(
            inputs.view(-1, sen_len, embedding_dim).permute(0, 2, 1))
        # cnn_re: shape(batch_size*sen_num, cnn_dim, cnn_len)
        # print('cnn_re', cnn_re1.size())

        # Note: despite the max_p_* names, these are average-pooling layers
        # that collapse the temporal axis of each CNN output.
        max_p_m1 = nn.AvgPool1d(
            cnn_re1.size(-1)).cuda() if using_GPU else nn.AvgPool1d(
                cnn_re1.size(-1))
        max_p_m2 = nn.AvgPool1d(
            cnn_re2.size(-1)).cuda() if using_GPU else nn.AvgPool1d(
                cnn_re2.size(-1))
        mp_re1 = max_p_m1(cnn_re1).squeeze(-1).view(batch_size, sen_num, -1)
        mp_re2 = max_p_m2(cnn_re2).squeeze(-1).view(batch_size, sen_num, -1)
        # mp_re: shape(batch_size, sen_num, cnn_dim)
        # print('mp_re', mp_re1.size())

        temp_re = torch.cat([mp_re1, mp_re2], dim=-1)
        # temp_re: shape(batch_size, sen_num, cnn_dim*2)
        # print('temp_re', temp_re.size())
        temp_re = self.dropout_cnn(temp_re)
        cnn_re = temp_re

        if self.using_RNN:
            temp_re, _ = self.rnn(temp_re)
            # temp_re: shape(batch_size, sen_num, rnn_dim*2)
            # print('temp_re', temp_re.size())
            temp_re = self.dropout_cnn(temp_re)

            # To use the residual + Norm structure, make sure cnn_dim == rnn_dim
            temp_re = temp_re + cnn_re
            temp_re = temp_re.permute(1, 0, 2)
            temp_re = torch.layer_norm(temp_re, temp_re.size()[1:])
            temp_re = temp_re.permute(1, 0, 2)

        batch_query = self.pro_query(sample_m).unsqueeze(1)
        # batch_query: shape(batch_size, 1, deep)
        # print('batch_query', batch_query.size())
        batch_query = torch.tanh(batch_query)

        batch_keys = self.pro_key(sentence_ms).permute(0, 2, 1)
        # batch_keys: shape(batch_size, deep, sen_num)
        # print('batch_keys', batch_keys.size())
        batch_keys = torch.tanh(batch_keys)

        # batch_values = temp_re
        # batch_values = self.pro_value(temp_re)
        batch_values = torch.cat([self.pro_value(temp_re), sentence_ms],
                                 dim=-1)
        batch_values = self.rnn_mlp(batch_values)

        batch_logits = torch.matmul(batch_query, batch_keys)
        # batch_logits = torch.nn.functional.layer_norm(batch_logits, normalized_shape=batch_logits.size()[:])
        batch_logits = batch_logits + sen_mask.unsqueeze(1)

        batch_weights = nn.functional.softmax(batch_logits, dim=-1)
        # batch_weights: shape(batch_size, 1, sen_num)
        # print('batch_weights', batch_weights.size())

        result = torch.matmul(batch_weights, batch_values).squeeze(1)
        # result: shape(batch_size, rnn/cnn_dim)
        # print('result:', result.size())

        # Without attention:
        # std = int(temp_re.size(-1)/2)
        # result = torch.cat([temp_re[:, -1, :std], temp_re[:, 0, std:]], dim=-1).squeeze(1)

        result = self.out_dropout(result)
        out = self.out_label(result)
        # out: shape(batch_size, num_class)
        output = nn.functional.log_softmax(out, dim=-1)
        return output
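
The attention core above reduces to a masked dot product between one query per sample and per-sentence keys. A standalone sketch of just that step, with hypothetical sizes and names:

import torch
import torch.nn.functional as F

batch_size, sen_num, deep, value_dim = 2, 5, 8, 16  # illustrative sizes
query = torch.randn(batch_size, 1, deep)         # one query per sample
keys = torch.randn(batch_size, deep, sen_num)    # one key per sentence
values = torch.randn(batch_size, sen_num, value_dim)
sen_mask = torch.zeros(batch_size, sen_num)      # large negatives here would hide padding

logits = torch.matmul(query, keys) + sen_mask.unsqueeze(1)
weights = F.softmax(logits, dim=-1)              # (batch_size, 1, sen_num)
result = torch.matmul(weights, values).squeeze(1)  # (batch_size, value_dim)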