Example #1
 def _stacked_lstm_impl2(self, dim):
     rnn_cells = [layers.LSTMCell(dim) for _ in range(self.num_layers)]
     stacked_lstm = layers.StackedRNNCells(rnn_cells)
     lstm_layer = layers.RNN(stacked_lstm, return_sequences=True)
     if self.bidirectional:
         lstm_layer = layers.Bidirectional(lstm_layer)
     return [lstm_layer]
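A minimal smoke test of this pattern (layer sizes and input shape are assumptions for illustration): stack two LSTM cells, wrap them in a single layers.RNN, and run it over a dummy batch.

    import tensorflow as tf
    from tensorflow.keras import layers

    cells = [layers.LSTMCell(32) for _ in range(2)]   # two stacked 32-unit cells
    rnn = layers.RNN(layers.StackedRNNCells(cells), return_sequences=True)
    out = rnn(tf.zeros((4, 10, 8)))                   # batch=4, timesteps=10, features=8
    print(out.shape)                                  # (4, 10, 32): top-layer h at every step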
Example #2
    def __init__(
        self,
        rnn_units: int,
        max_length: int,
        vocab_size: int,
        embedding_units: int,
        attention_units: int,
        num_decoder_cells: int = 2,
        dropout_prob: float = 0.0,
    ) -> None:

        super().__init__()
        self.vocab_size = vocab_size
        self.max_length = max_length

        self.embed = layers.Dense(embedding_units, use_bias=False)
        # Embedding(input_dim, output_dim): vocab indices map to embedding vectors,
        # so the two arguments were swapped in the original call
        self.embed_tgt = layers.Embedding(self.vocab_size + 1, embedding_units)

        self.lstm_cells = layers.StackedRNNCells([
            layers.LSTMCell(rnn_units, implementation=1)
            for _ in range(num_decoder_cells)
        ])
        self.attention_module = AttentionModule(attention_units)
        self.output_dense = layers.Dense(self.vocab_size + 1, use_bias=True)
        self.dropout = layers.Dropout(dropout_prob)
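Because the stacked cells are kept as a cell object here (not wrapped in layers.RNN), decoding can advance one symbol at a time. A hypothetical single step, assuming a batch size B, an embedded symbol x_t, and the AttentionModule call signature shown below:

    states = self.lstm_cells.get_initial_state(batch_size=B, dtype=tf.float32)
    hidden_t, states = self.lstm_cells(x_t, states)       # one step through all LSTM layers
    glimpse = self.attention_module(features, hidden_t)   # assumed AttentionModule signature
    logits_t = self.output_dense(self.dropout(tf.concat([hidden_t, glimpse], axis=-1)))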
Example #3
 def __init__(self, config):
     # Call the Keras base constructor before assigning any attributes;
     # setting self.* on a Layer before super().__init__() raises an error.
     super(TrajectoryEncoder, self).__init__(name="trajectory_encoder")
     # xy encoder: [N,T1,h_dim]
     self.stack_rnn_size = config.stack_rnn_size
     # Linear embedding of the observed positions (for each x,y)
     self.traj_xy_emb_enc = layers.Dense(config.emb_size,
         activation=config.activation_func,
         use_bias=True,
         name='position_embedding')
     # LSTM cell, including dropout, with a stacked configuration.
     # Output is composed of:
     # - the sequence of h's along time, from the highest level only: h1,h2,...
     # - last pair of states (h,c) for the first layer
     # - last pair of states (h,c) for the second layer
     # - ... and so on
     self.lstm_cells = [layers.LSTMCell(config.enc_hidden_size,
             name   = 'trajectory_encoder_cell_%d' % i,  # unique name per stacked cell
             dropout= config.dropout_rate,
             recurrent_dropout=config.dropout_rate) for i in range(self.stack_rnn_size)]
     self.lstm_cell = layers.StackedRNNCells(self.lstm_cells)
     # Recurrent neural network using the previous cell
     # Initial state is zero; We return the full sequence of h's and the pair of last states
     self.lstm      = layers.RNN(self.lstm_cell,
             name   = 'trajectory_encoder_rnn',
             return_sequences= True,
             return_state    = True)
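A hypothetical call of this encoder (obs_traj is an assumed name for the batch of observed positions, shaped [N, T1, 2]); with return_sequences=True and return_state=True, the RNN returns the top layer's h-sequence first, then the final (h, c) pair of each stacked cell:

    results = self.lstm(self.traj_xy_emb_enc(obs_traj))
    h_seq = results[0]               # [N, T1, enc_hidden_size], highest level only
    states_per_layer = results[1:]   # one final (h, c) pair per stacked LSTMCell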
Example #4
 def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, dropout=0):
     super(Seq2SeqAttentionDecoder, self).__init__()
     # Use the constructor arguments rather than the hard-coded 8 and 0.1
     self.attention = AdditiveAttention(num_hiddens=num_hiddens, dropout=dropout)
     self.embed = layers.Embedding(input_dim=vocab_size, output_dim=embed_size)
     self.rnn = layers.RNN(
         layers.StackedRNNCells([layers.GRUCell(units=num_hiddens, dropout=dropout) for _ in range(num_layers)])
         , return_state=True
         , return_sequences=True
     )
     self.dense = layers.Dense(units=vocab_size)
Example #5
    def __init__(
        self,
        rnn_units: int,
        max_length: int,
        vocab_size: int,
        embedding_units: int,
        attention_units: int,
        num_decoder_layers: int = 2,
        input_shape: Optional[List[Tuple[Optional[int]]]] = None,
    ) -> None:

        super().__init__()
        self.vocab_size = vocab_size
        self.lstm_decoder = layers.StackedRNNCells(
            [layers.LSTMCell(rnn_units, implementation=1) for _ in range(num_decoder_layers)]
        )
        self.embed = layers.Dense(embedding_units, use_bias=False, input_shape=(None, self.vocab_size + 1))
        self.attention_module = AttentionModule(attention_units)
        self.output_dense = layers.Dense(vocab_size + 1, use_bias=True, input_shape=(None, 2 * rnn_units))
        self.max_length = max_length

        # Initialize kernels
        if input_shape is not None:
            self.attention_module.call(layers.Input(input_shape[0][1:]), layers.Input((1, 1, rnn_units)))
Example #6
    def __init__(self, x_rhus=(256, 256), tr_shape=(20, 80),
                 mu_nl=None, logvar_nl=None, name="decoder", **kwargs):

        super(Decoder, self).__init__(name=name, **kwargs)

        # x
        self.tr_shape = tr_shape

        # RNN specs
        self.x_rhus = x_rhus

        self.cells_x = [layers.LSTMCell(rhu) for rhu in self.x_rhus]
        self.cell_x = layers.StackedRNNCells(self.cells_x)
        #init_state = cell.get_initial_state(batch_size=bs, dtype=x.dtype)
        self.fullRNN_x = layers.RNN(self.cell_x, return_sequences=True, time_major=False)

        # fully connected layers for computing mu and logvar
        self.xmu_fclayer = layers.Dense(
            tr_shape[1], activation=mu_nl, use_bias=True,
            kernel_initializer='glorot_uniform', bias_initializer='zeros')

        self.xlogvar_fclayer = layers.Dense(
            tr_shape[1], activation=logvar_nl, use_bias=True,
            kernel_initializer='glorot_uniform', bias_initializer='zeros')
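A sketch of how these pieces would typically be combined in call() (the reparameterization step is an assumption suggested by the mu/logvar naming, not code from the source; z_seq is an assumed input name):

    h = self.fullRNN_x(z_seq)               # [B, T, x_rhus[-1]]
    mu = self.xmu_fclayer(h)                # [B, T, tr_shape[1]]
    logvar = self.xlogvar_fclayer(h)        # [B, T, tr_shape[1]]
    eps = tf.random.normal(tf.shape(mu))
    x = mu + tf.exp(0.5 * logvar) * eps     # sample x ~ N(mu, exp(logvar))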
Example #7
  def __init__(self, source_dict_total_words, source_embedding_size, encoder_num_layers, encoder_rnn_size,
              target_dict_total_words, target_embedding_size, decoder_rnn_size, start_token, batch_size,
              end_token, target_size):
      super(seq2seq, self).__init__()
      '''
      Encoder parameters:
      --source_dict_total_words: total number of words in the source dictionary
      --source_embedding_size: embedding dimension for the source
      --encoder_num_layers: number of stacked RNN cells in the encoder
      --encoder_rnn_size: number of hidden units per RNN cell in the encoder
      Decoder parameters:
      --target_dict_total_words: total number of words in the target dictionary
      --target_embedding_size: embedding dimension for the target
      --decoder_num_layers: number of stacked RNN cells in the decoder
      --decoder_rnn_size: number of hidden units per RNN cell in the decoder
      --target_size: sentence length in the target
      Other parameters:
      --start_token: numeric id of the decoder start marker <GO> in the target dictionary
      --end_token: numeric id of the decoder end marker <EOS> in the target dictionary
      --batch_size: batch size of the data
      '''
      
      self.source_dict_total_words = source_dict_total_words
      self.source_embedding_size = source_embedding_size
      self.encoder_num_layers = encoder_num_layers
      self.encoder_rnn_size = encoder_rnn_size
      self.target_dict_total_words = target_dict_total_words
      self.target_embedding_size = target_embedding_size
      self.decoder_rnn_size = decoder_rnn_size
      self.start_token = start_token
      self.batch_size = batch_size
      self.end_token = end_token
      self.target_size = target_size
      
      self.cross_entropy = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
      self.optimizer = optimizers.Adam(learning_rate=1e-2)
      self.seq_len = tf.fill([self.batch_size], self.target_size - 1)
      
      #######################Encoder##################################
      #1.embedding
      self.encoder_embedding = layers.Embedding(self.source_dict_total_words, self.source_embedding_size,
                                                embeddings_initializer = tf.initializers.RandomNormal(0., 0.1))
      #2. single- or multi-layer RNN
      self.encoder_rnn_cells = [layers.LSTMCell(self.encoder_rnn_size, dropout = 0.5) for _ in range(self.encoder_num_layers)]
      self.encoder_stacked_lstm = layers.StackedRNNCells(self.encoder_rnn_cells)
      self.encoder_rnn = layers.RNN(self.encoder_stacked_lstm, return_state = True, return_sequences = True)
      #######################Decoder##################################  
      #1.embedding
      self.decoder_embedding = layers.Embedding(self.target_dict_total_words, self.target_embedding_size,
                                                embeddings_initializer = tf.initializers.RandomNormal(0., 0.1))
      #2. build the RNN cell for the decoder
      self.decoder_rnn_cell = tf.keras.layers.LSTMCell(self.decoder_rnn_size)
      #3. build the dense (projection) layer for the decoder
      self.decoder_dense_layer = layers.Dense(self.target_dict_total_words,
                                              kernel_initializer = tf.compat.v1.truncated_normal_initializer(mean = 0.0, stddev = 0.1))
      #4. training decoder
      self.decoder_sampler = tfa.seq2seq.TrainingSampler()
      self.training_decoder = tfa.seq2seq.BasicDecoder(cell = self.decoder_rnn_cell, sampler = self.decoder_sampler,
                                                       output_layer = self.decoder_dense_layer)
      #5. prediction (inference) decoder
      self.sampler = tfa.seq2seq.GreedyEmbeddingSampler()
      self.predicting_decoder = tfa.seq2seq.BasicDecoder(cell = self.decoder_rnn_cell, sampler = self.sampler,
                                                         output_layer = self.decoder_dense_layer)
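A hypothetical teacher-forced forward pass wiring these pieces together (source_ids and target_in_ids are assumed names; tfa.seq2seq decoders return a BasicDecoderOutput whose rnn_output field holds the logits):

    # Encoder: embed the source batch and run the stacked-LSTM RNN.
    enc_emb = self.encoder_embedding(source_ids)                # [B, T_src, emb]
    enc_results = self.encoder_rnn(enc_emb)
    enc_outputs, enc_states = enc_results[0], enc_results[1:]   # h sequence + final states

    # Decoder (training): step through the target with TrainingSampler.
    dec_emb = self.decoder_embedding(target_in_ids)             # [B, T_tgt, emb]
    dec_init = self.decoder_rnn_cell.get_initial_state(
        batch_size=self.batch_size, dtype=tf.float32)
    outputs, _, _ = self.training_decoder(
        dec_emb, initial_state=dec_init, sequence_length=self.seq_len, training=True)
    logits = outputs.rnn_output                                 # [B, T, vocab]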