Example #1
    def forward_rnn_encode_proj(self, X):
        # Reset rnn state
        self.reset_rnn_state()
        # Get input shape
        in_size, batch_size, in_dim = X.shape
        # The raw input sequence feeds the first rnn layer
        enc_states = X
        # For each rnn layer
        for currL in range(len(self.rnn_enc)):
            # For each time step
            for i in range(in_size):
                # Forward-direction rnn on the current layer's input
                temp_f = F.expand_dims(
                    F.dropout(self[self.rnn_enc[currL]](enc_states[i]),
                              ratio=self.cfg["dropout"]["rnn"]), 0)
                # If bi-directional, feed the sequence in reverse time order
                if self.bi_rnn:
                    temp_r = F.expand_dims(
                        F.dropout(self[self.rnn_rev_enc[currL]](
                            enc_states[-1 - i]),
                                  ratio=self.cfg["dropout"]["rnn"]), 0)

                if i > 0:
                    h_fwd = F.concat((h_fwd, temp_f), axis=0)
                    if self.bi_rnn:
                        h_rev = F.concat((h_rev, temp_r), axis=0)
                else:
                    h_fwd = temp_f
                    if self.bi_rnn:
                        h_rev = temp_r
            # End of current rnn layer: the reverse states were collected in
            # reverse time order, so flip them back with F.flipud before
            # concatenating with the forward states along the feature axis
            if self.bi_rnn:
                h_rev = F.flipud(h_rev)
                rnn_states = F.concat((h_fwd, h_rev), axis=2)
            else:
                rnn_states = h_fwd
            """
            Apply linear projection
            """
            # print(f"Applying rnn {currL}")
            if currL < (len(self.rnn_enc) - 1):
                # print(f"Applying linear linear_proj {currL}")
                for i in range(0, in_size):
                    currH = F.relu(self[f"enc_proj{currL}_bn"](
                        self[f"enc_proj{currL}"](rnn_states[i])))
                    if i > 0:
                        enc_states = F.concat(
                            (enc_states, F.expand_dims(currH, 0)), axis=0)
                    else:
                        enc_states = F.expand_dims(currH, 0)
                # end for all hidden states
        # end all layers

        # Move the batch dimension to the front: (time, batch, feat) -> (batch, time, feat)
        self.enc_states = F.swapaxes(enc_states, 0, 1)
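
Note: h_rev above is collected in reverse time order, so F.flipud realigns it with h_fwd before the two are concatenated along the feature axis. A minimal sketch of that step, with made-up shapes that are not part of the class above:

    import numpy
    import chainer.functions as F

    T, B, H = 4, 2, 3   # time steps, batch size, hidden units (illustrative only)
    h_fwd = numpy.random.randn(T, B, H).astype(numpy.float32)
    h_rev = numpy.random.randn(T, B, H).astype(numpy.float32)  # reverse time order
    # Flip h_rev along the time axis, then concatenate along the feature axis
    rnn_states = F.concat((h_fwd, F.flipud(h_rev)), axis=2)
    print(rnn_states.shape)  # (4, 2, 6)
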
Example #2
    def forward_rnn_encode(self, X):
        # Reset rnn state
        self.reset_rnn_state()
        # Get input shape
        in_size, batch_size, in_dim = X.shape
        # For each time step
        for i in range(in_size):
            # Store all hidden states
            if i > 0:
                h_fwd = F.concat(
                    (h_fwd, F.expand_dims(self.feed_rnn(X[i], self.rnn_enc),
                                          0)),
                    axis=0)
                if self.bi_rnn:
                    # The reverse rnn consumes the input in reverse time order
                    h_rev = F.concat(
                        (h_rev,
                         F.expand_dims(self.feed_rnn(X[-1 - i],
                                                     self.rnn_rev_enc), 0)),
                        axis=0)
            else:
                h_fwd = F.expand_dims(self.feed_rnn(X[i], self.rnn_enc), 0)
                if self.bi_rnn:
                    # First step of the reverse rnn gets the last time step
                    h_rev = F.expand_dims(
                        self.feed_rnn(X[-1 - i], self.rnn_rev_enc), 0)
        """
        Concatenate fwd and rev RNN hidden states
        Flip reverse RNN hidden state order
        """
        if self.bi_rnn:
            h_rev = F.flipud(h_rev)
            rnn_states = F.concat((h_fwd, h_rev), axis=2)
        else:
            rnn_states = h_fwd
        """
        Check if linear projection layer required
        """
        self.enc_states = rnn_states

        # Move the batch dimension to the front: (time, batch, feat) -> (batch, time, feat)
        self.enc_states = F.swapaxes(self.enc_states, 0, 1)
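
Note: the final F.swapaxes call in both encoders moves the batch dimension to the front. A small shape check, with made-up sizes for illustration:

    import numpy
    import chainer.functions as F

    T, B, H = 5, 3, 8   # time steps, batch size, feature size (illustrative only)
    enc_states = numpy.zeros((T, B, H), dtype=numpy.float32)
    # Swap the time and batch axes: (T, B, H) -> (B, T, H)
    print(F.swapaxes(enc_states, 0, 1).shape)  # (3, 5, 8)
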
Example #3
    def forward(self, inputs, device):
        # Unpack the single input array and flip it along the first axis
        x, = inputs
        return functions.flipud(x),
Example #4
    def f(x):
        # Flip x along the first axis, then square elementwise
        y = functions.flipud(x)
        return y * y
Example #5
    def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        y = functions.flipud(x)

        # The forward output should match numpy.flipud applied to the raw input
        testing.assert_allclose(y.data, numpy.flipud(self.x))
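
Note: as a self-contained reference (not part of the test class above), chainer.functions.flipud reverses only the first axis and agrees with numpy.flipud:

    import numpy
    import chainer
    from chainer import functions

    x = numpy.arange(6, dtype=numpy.float32).reshape(3, 2)
    y = functions.flipud(chainer.Variable(x))
    # Rows come back in reverse order; columns are untouched
    assert numpy.array_equal(y.data, numpy.flipud(x))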