Example #1
import numpy as np

import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore.common import dtype as mstype
from mindspore.ops import operations as P


class BiLSTM(nn.Cell):
    def __init__(self, config, is_training=True):
        super(BiLSTM, self).__init__()
        self.is_training = is_training
        self.batch_size = config.batch_size * config.rnn_batch_size
        print("batch size is {} ".format(self.batch_size))
        self.input_size = config.input_size
        self.hidden_size = config.hidden_size
        self.num_step = config.num_step
        self.reshape = P.Reshape()
        self.cast = P.Cast()
        k = (1 / self.hidden_size) ** 0.5  # uniform init bound, 1 / sqrt(hidden_size)
        self.rnn1 = P.DynamicRNN(forget_bias=0.0)    # forward direction
        self.rnn_bw = P.DynamicRNN(forget_bias=0.0)  # backward direction
        self.w1 = Parameter(np.random.uniform(
            -k, k, (self.input_size + self.hidden_size, 4 * self.hidden_size)).astype(np.float32), name="w1")
        self.w1_bw = Parameter(np.random.uniform(
            -k, k, (self.input_size + self.hidden_size, 4 * self.hidden_size)).astype(np.float32), name="w1_bw")

        self.b1 = Parameter(np.random.uniform(-k, k, (4 * self.hidden_size)).astype(np.float32), name="b1")
        self.b1_bw = Parameter(np.random.uniform(-k, k, (4 * self.hidden_size)).astype(np.float32), name="b1_bw")

        # zero initial hidden/cell states for both directions: [1, batch, hidden]
        self.h1 = Tensor(np.zeros(shape=(1, self.batch_size, self.hidden_size)).astype(np.float32))
        self.h1_bw = Tensor(np.zeros(shape=(1, self.batch_size, self.hidden_size)).astype(np.float32))
        self.c1 = Tensor(np.zeros(shape=(1, self.batch_size, self.hidden_size)).astype(np.float32))
        self.c1_bw = Tensor(np.zeros(shape=(1, self.batch_size, self.hidden_size)).astype(np.float32))
        self.reverse_seq = P.ReverseV2(axis=[0])  # flips the time axis for the backward pass
        self.concat = P.Concat()
        self.transpose = P.Transpose()
        self.concat1 = P.Concat(axis=2)  # joins the two directions on the feature axis
        # nn.Dropout takes keep_prob in MindSpore 1.x, so 0.7 keeps 70% of activations
        self.dropout = nn.Dropout(keep_prob=0.7)
        self.use_dropout = config.use_dropout
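The listing stops at the constructor. A construct method for this class would plausibly run the forward DynamicRNN on the input, the backward one on the time-reversed input, and concatenate the two output sequences. The sketch below is an assumption, not the original author's code: it presumes Ascend's P.DynamicRNN signature (x, w, b, seq_length, init_h, init_c) with the per-step hidden-state sequence as the first of its outputs, and that operands must be cast to float16.

    # Hypothetical construct() for the BiLSTM above (a sketch, not the original code).
    def construct(self, x):
        # Ascend's DynamicRNN expects float16 operands
        x = self.cast(x, mstype.float16)
        w1 = self.cast(self.w1, mstype.float16)
        b1 = self.cast(self.b1, mstype.float16)
        w1_bw = self.cast(self.w1_bw, mstype.float16)
        b1_bw = self.cast(self.b1_bw, mstype.float16)
        h1 = self.cast(self.h1, mstype.float16)
        c1 = self.cast(self.c1, mstype.float16)
        h1_bw = self.cast(self.h1_bw, mstype.float16)
        c1_bw = self.cast(self.c1_bw, mstype.float16)
        # forward pass over [T, N, input_size]
        y_fw, _, _, _, _, _, _, _ = self.rnn1(x, w1, b1, None, h1, c1)
        # backward pass: reverse time, run, reverse back
        y_bw, _, _, _, _, _, _, _ = self.rnn_bw(
            self.reverse_seq(x), w1_bw, b1_bw, None, h1_bw, c1_bw)
        y_bw = self.reverse_seq(y_bw)
        out = self.concat1((y_fw, y_bw))  # [T, N, 2 * hidden]
        if self.use_dropout and self.is_training:
            out = self.dropout(out)
        return out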
Example #2
import mindspore.nn as nn
from mindspore.common import dtype as mstype
from mindspore.ops import operations as P

# GNMTConfig and DynamicRNNNet are project-local definitions from the same
# model and are not shown in this snippet.


class GNMTEncoder(nn.Cell):
    def __init__(self,
                 config: GNMTConfig,
                 is_training: bool,
                 compute_type=mstype.float32):
        super(GNMTEncoder, self).__init__()
        self.input_mask_from_dataset = config.input_mask_from_dataset
        self.max_positions = config.seq_length
        self.attn_embed_dim = config.hidden_size

        self.num_layers = config.num_hidden_layers
        self.hidden_dropout_prob = config.hidden_dropout_prob
        self.vocab_size = config.vocab_size
        self.seq_length = config.seq_length
        self.batch_size = config.batch_size
        self.word_embed_dim = config.hidden_size

        self.transpose = P.Transpose()
        self.transpose_orders = (1, 0, 2)
        self.reshape = P.Reshape()
        self.concat = P.Concat(axis=-1)
        encoder_layers = []
        for i in range(0, self.num_layers + 1):
            if i == 2:
                # layer 2 consumes the concatenation of the forward and
                # backward layers' outputs, so its input width is doubled
                scaler = 2
            else:
                # every other layer consumes a single-direction output
                scaler = 1
            layer = DynamicRNNNet(seq_length=self.seq_length,
                                  batchsize=self.batch_size,
                                  word_embed_dim=scaler * self.word_embed_dim,
                                  hidden_size=self.word_embed_dim)
            encoder_layers.append(layer)
        self.encoder_layers = nn.CellList(encoder_layers)
        self.reverse_v2 = P.ReverseV2(axis=[0])  # flips the time axis for the backward layer
        self.dropout = nn.Dropout(keep_prob=1.0 - config.hidden_dropout_prob)
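Only the constructor is shown here as well. Based on the ops it defines, the layers are presumably wired as in GNMT: layer 0 runs forward, layer 1 runs over the time-reversed input, their outputs are concatenated, and the remaining layers are stacked with residual connections. The sketch below rests on those assumptions, plus the assumption that DynamicRNNNet returns its layer's output sequence; none of this is the original construct.

    # Hypothetical construct() wiring (an assumption based on the ops above).
    def construct(self, inputs):
        # [N, T, D] -> [T, N, D]: the DynamicRNN layers are time-major
        inputs = self.transpose(inputs, self.transpose_orders)
        inputs = self.dropout(inputs)  # dropout on the input, as keep_prob above suggests
        fwd = self.encoder_layers[0](inputs)
        bwd = self.encoder_layers[1](self.reverse_v2(inputs))
        bwd = self.reverse_v2(bwd)
        out = self.encoder_layers[2](self.concat((fwd, bwd)))  # [T, N, 2D] -> [T, N, D]
        # remaining layers with residual connections, as in GNMT
        for i in range(3, self.num_layers + 1):
            out = self.encoder_layers[i](out) + out
        return self.transpose(out, self.transpose_orders)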
Example #3
import mindspore.nn as nn
from mindspore.ops import operations as F  # presumably the ops alias used in the original module


class MaxPool2dPytorch(nn.Cell):
    def __init__(self, kernel_size=1, stride=1, pad_mode="valid"):
        super(MaxPool2dPytorch, self).__init__()
        self.maxpool = nn.MaxPool2d(kernel_size=kernel_size,
                                    stride=stride,
                                    pad_mode=pad_mode)
        self.reverse = F.ReverseV2(axis=[2, 3])  # flips H and W (NCHW layout)
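The forward pass is not included; the ReverseV2 op suggests the wrapper reverses the spatial axes before and after pooling, so that a "valid" window discards edge pixels on the same side PyTorch's MaxPool2d does. A minimal sketch of that assumed construct:

    # Assumed construct(): reverse H/W, pool, reverse back.
    def construct(self, x):
        x = self.reverse(x)
        x = self.maxpool(x)
        x = self.reverse(x)
        return x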
Example #4
import mindspore.nn as nn
from mindspore.ops import operations as P

class LSTM_Ascend(nn.Cell):
    def __init__(self, bidirectional=False):
        super(LSTM_Ascend, self).__init__()
        self.bidirectional = bidirectional
        self.dynamic_rnn = P.DynamicRNN(forget_bias=0.0)
        self.reverseV2 = P.ReverseV2(axis=[0])  # flips the time axis
        self.concat = P.Concat(2)  # joins directions on the feature axis
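As with the other examples, construct is omitted. When bidirectional is set, the cell would presumably run DynamicRNN once on the input and once on the time-reversed input, then concatenate on the feature axis. A sketch under the same DynamicRNN-signature assumption as above; the parameters w_fw, b_fw, w_bw, b_bw, h0, c0 are hypothetical names for externally supplied weights and initial states:

    # Hypothetical construct(); parameter names are illustrative only.
    def construct(self, x, h0, c0, w_fw, b_fw, w_bw=None, b_bw=None):
        y_fw, _, _, _, _, _, _, _ = self.dynamic_rnn(x, w_fw, b_fw, None, h0, c0)
        if not self.bidirectional:
            return y_fw
        y_bw, _, _, _, _, _, _, _ = self.dynamic_rnn(
            self.reverseV2(x), w_bw, b_bw, None, h0, c0)
        y_bw = self.reverseV2(y_bw)
        return self.concat((y_fw, y_bw))  # [T, N, 2 * hidden]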