def variable_recurrent(self, x, h, seq_length, w_ih, w_hh, b_ih, b_hh):
    '''recurrent steps with sequence length'''
    # Number of time steps is the leading dimension of the input.
    max_t = x.shape[0]
    state_t = h
    if self.is_lstm:
        hidden_size = h[0].shape[-1]
        zero_output = P.ZerosLike()(state_t[0])
    else:
        hidden_size = h.shape[-1]
        zero_output = P.ZerosLike()(state_t)
    # Broadcast seq_length to (batch, hidden) so it can gate every hidden unit.
    # The float32 round-trip is kept because BroadcastTo is applied on a float
    # tensor here; the comparison below needs it back as int32.
    seq_length = P.Cast()(seq_length, mindspore.float32)
    seq_length = P.BroadcastTo((hidden_size, -1))(seq_length)
    seq_length = P.Cast()(seq_length, mindspore.int32)
    seq_length = P.Transpose()(seq_length, (1, 0))

    outputs = []
    for t in range(max_t):
        # Slice one step and drop the time axis before feeding the cell.
        x_t = P.Squeeze(0)(x[t:t + 1:1])
        h_t = self.cell(x_t, state_t, w_ih, w_hh, b_ih, b_hh)
        # True while t is still inside the sample's real sequence.
        still_valid = seq_length > t
        if self.is_lstm:
            # Freeze the state once a sample's sequence has ended; emit zeros
            # for its output from then on.
            new_h = P.Select()(still_valid, h_t[0], state_t[0])
            new_c = P.Select()(still_valid, h_t[1], state_t[1])
            output = P.Select()(still_valid, h_t[0], zero_output)
            state_t = (new_h, new_c)
        else:
            state_t = P.Select()(still_valid, h_t, state_t)
            output = P.Select()(still_valid, h_t, zero_output)
        outputs.append(output)
    return P.Stack()(outputs), state_t
def variable_recurrent(self, x, h, seq_length):
    """Unroll the cell over time, masking steps beyond each sample's length."""
    state_t = h
    if self.is_lstm:
        hidden_size = h[0].shape[-1]
        zero_output = P.ZerosLike()(state_t[0])
    else:
        hidden_size = h.shape[-1]
        zero_output = P.ZerosLike()(state_t)
    # Expand seq_length to (batch, hidden) so it gates every hidden unit.
    seq_length = P.BroadcastTo((hidden_size, -1))(seq_length)
    seq_length = P.Transpose()(seq_length, (1, 0))

    outputs = []
    t = 0
    while t < x.shape[0]:
        h_t = self.cell(x[t], state_t)
        # True while t is still inside the sample's real sequence.
        active = seq_length > t
        if self.is_lstm:
            # Keep the old state (and emit zeros) once a sequence has ended.
            kept_h = P.Select()(active, h_t[0], state_t[0])
            kept_c = P.Select()(active, h_t[1], state_t[1])
            output = P.Select()(active, h_t[0], zero_output)
            state_t = (kept_h, kept_c)
        else:
            state_t = P.Select()(active, h_t, state_t)
            output = P.Select()(active, h_t, zero_output)
        outputs.append(output)
        t += 1
    return P.Stack()(outputs), state_t
def __init__(self, args, embedding_table=None):
    """Build the news encoder: category/sub-category projections plus
    CNN-and-attention encoders for the title and the abstract.

    Args:
        args: configuration namespace (dims, filter counts, dropout, phase...).
        embedding_table: optional pretrained word-embedding weights.
    """
    super(NewsEncoder, self).__init__()

    # Category and sub-category branches: embedding -> ReLU dense projection.
    self.category_embedding = nn.Embedding(args.n_categories,
                                           args.category_embedding_dim)
    self.category_dense = nn.Dense(args.category_embedding_dim,
                                   args.n_filters,
                                   has_bias=True,
                                   activation="relu")
    self.sub_category_embedding = nn.Embedding(args.n_sub_categories,
                                               args.category_embedding_dim)
    self.subcategory_dense = nn.Dense(args.category_embedding_dim,
                                      args.n_filters,
                                      has_bias=True,
                                      activation="relu")

    # Word embedding, optionally initialised from a pretrained table.
    if embedding_table is None:
        word_embedding = [nn.Embedding(args.n_words, args.word_embedding_dim)]
    else:
        word_embedding = [nn.Embedding(args.n_words,
                                       args.word_embedding_dim,
                                       embedding_table=embedding_table)]

    def text_cnn():
        # Identical Conv1d+ReLU stack used by both the title and abstract paths.
        return [nn.Conv1d(args.word_embedding_dim,
                          args.n_filters,
                          kernel_size=args.window_size,
                          pad_mode='same',
                          has_bias=True),
                nn.ReLU()]

    title_CNN = text_cnn()
    abstract_CNN = text_cnn()

    # Dropout only participates during training.
    if args.phase == "train":
        word_embedding.append(nn.Dropout(keep_prob=(1 - args.dropout_ratio)))
        title_CNN.append(nn.Dropout(keep_prob=(1 - args.dropout_ratio)))
        abstract_CNN.append(nn.Dropout(keep_prob=(1 - args.dropout_ratio)))

    self.word_embedding = nn.SequentialCell(word_embedding)
    self.title_CNN = nn.SequentialCell(title_CNN)
    self.abstract_CNN = nn.SequentialCell(abstract_CNN)

    # Additive attention pools title, abstract, and the final feature set.
    self.title_attention = Attention(args.query_vector_dim, args.n_filters)
    self.abstract_attention = Attention(args.query_vector_dim, args.n_filters)
    self.total_attention = Attention(args.query_vector_dim, args.n_filters)

    self.pack = ops.Stack(axis=1)
    self.title_shape = (-1, args.n_words_title)
    self.abstract_shape = (-1, args.n_words_abstract)
def recurrent(self, x, h):
    """Unroll the cell over the full time axis (no sequence-length masking)."""
    outputs = []
    for step in range(x.shape[0]):
        h = self.cell(x[step], h)
        # An LSTM cell returns (h, c); only the hidden state is emitted.
        outputs.append(h[0] if self.is_lstm else h)
    return P.Stack()(outputs), h
def _stacked_bi_dynamic_rnn(self, x, xr, h, seq_length):
    """stacked bidirectional dynamic_rnn"""
    # x is the input in forward time order, xr the same input reversed.
    fwd_in = x
    bwd_in = xr
    h_states = ()
    c_states = ()
    layer_outputs = ()
    for i, (f_cell, b_cell) in enumerate(
            zip(self.forward_layers, self.backward_layers)):
        # States for layer i live at slots 2i (forward) and 2i+1 (backward).
        idx = i * 2
        fwd_state = (h[0][idx], h[1][idx])
        bwd_state = (h[0][idx + 1], h[1][idx + 1])
        out_f, new_f = f_cell(fwd_in, fwd_state, seq_length)
        out_b, new_b = b_cell(bwd_in, bwd_state, seq_length)
        # Flip the backward output back into forward time order before
        # concatenating with the forward output.
        if seq_length is None:
            out_b = P.ReverseV2([0])(out_b)
        else:
            out_b = P.ReverseSequence(0, 1)(out_b, seq_length)
        layer_outputs += (P.Concat(2)((out_f, out_b)),)
        # Each direction feeds its own (un-reversed) output to the next layer.
        fwd_in = out_f
        bwd_in = out_b
        h_states += (P.Concat(1)((new_f[0], new_b[0])),)
        c_states += (P.Concat(1)((new_f[1], new_b[1])),)
    h_n = P.Stack(0)(h_states)
    c_n = P.Stack(0)(c_states)
    outputs = self.dropout(P.Stack(0)(layer_outputs))
    return outputs, (h_n, c_n)
def __init__(self, in_channel, n_class=2, feature_scale=2,
             use_deconv=True, use_bn=True, use_ds=True):
    """Build a UNet++ (nested U-Net).

    Args:
        in_channel: number of input image channels.
        n_class: number of segmentation classes.
        feature_scale: divisor applied to the base channel widths.
        use_deconv: use transposed convolution for upsampling.
        use_bn: enable batch normalisation in the conv blocks.
        use_ds: enable deep supervision (multiple output heads).
    """
    super(NestedUNet, self).__init__()
    self.in_channel = in_channel
    self.n_class = n_class
    self.feature_scale = feature_scale
    self.use_deconv = use_deconv
    self.use_bn = use_bn
    self.use_ds = use_ds

    # Base widths scaled down by feature_scale.
    base_filters = [64, 128, 256, 512, 1024]
    filters = [int(width / self.feature_scale) for width in base_filters]

    # Encoder (down-sampling) path: conv block then 2x2 max-pool.
    self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode="same")
    self.conv00 = UnetConv2d(self.in_channel, filters[0], self.use_bn)
    self.conv10 = UnetConv2d(filters[0], filters[1], self.use_bn)
    self.conv20 = UnetConv2d(filters[1], filters[2], self.use_bn)
    self.conv30 = UnetConv2d(filters[2], filters[3], self.use_bn)
    self.conv40 = UnetConv2d(filters[3], filters[4], self.use_bn)

    # Nested decoder: up_concatXY fuses Y skip connections at depth X.
    self.up_concat01 = UnetUp(filters[1], filters[0], self.use_deconv, 2)
    self.up_concat11 = UnetUp(filters[2], filters[1], self.use_deconv, 2)
    self.up_concat21 = UnetUp(filters[3], filters[2], self.use_deconv, 2)
    self.up_concat31 = UnetUp(filters[4], filters[3], self.use_deconv, 2)

    self.up_concat02 = UnetUp(filters[1], filters[0], self.use_deconv, 3)
    self.up_concat12 = UnetUp(filters[2], filters[1], self.use_deconv, 3)
    self.up_concat22 = UnetUp(filters[3], filters[2], self.use_deconv, 3)

    self.up_concat03 = UnetUp(filters[1], filters[0], self.use_deconv, 4)
    self.up_concat13 = UnetUp(filters[2], filters[1], self.use_deconv, 4)

    self.up_concat04 = UnetUp(filters[1], filters[0], self.use_deconv, 5)

    # Final 1x1 convolutions — one head per supervision level.
    self.final1 = nn.Conv2d(filters[0], n_class, 1)
    self.final2 = nn.Conv2d(filters[0], n_class, 1)
    self.final3 = nn.Conv2d(filters[0], n_class, 1)
    self.final4 = nn.Conv2d(filters[0], n_class, 1)
    self.stack = P.Stack(axis=0)
def recurrent(self, x, h_0, w_ih, w_hh, b_ih, b_hh):
    '''recurrent steps without sequence length'''
    h = h_0
    outputs = []
    for step in range(x.shape[0]):
        # Slice one time step and drop the leading time axis.
        x_t = P.Squeeze(0)(x[step:step + 1:1])
        h = self.cell(x_t, h, w_ih, w_hh, b_ih, b_hh)
        # An LSTM cell returns (h, c); only the hidden state is recorded.
        outputs.append(h[0] if self.is_lstm else h)
    return P.Stack()(outputs), h