Example #1
 def exe_decoder(self, dec, enc_output, attention=None):
     params = copy.deepcopy(self.e_params)
     if "gru" in self.e_params["fw_cell"]:
         params["fw_cell"] = "gru_block"
     elif "rnn" in self.e_params["fw_cell"]:
         params["fw_cell"] = "rnn"
     else:
         params["fw_cell"] = "lstm_block"
     with tf.variable_scope("decoder",
                            initializer=self.initializer,
                            reuse=tf.AUTO_REUSE):
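         # grid-shaped input goes through the CNN decoder below; otherwise dec
         # is averaged over its third axis and fed to the plain RNN decoder.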
         if self.dtype == "grid":
             outputs, _ = rnn_utils.execute_decoder_cnn(
                 dec, enc_output, self.decoder_length, params, attention,
                 self.use_cnn, self.use_gen_cnn, self.mtype,
                 self.use_batch_norm, self.dropout)
             outputs = tf.stack(outputs, axis=1)
         else:
             if "gru" in self.e_params["fw_cell"] or self.e_params[
                     "fw_cell"] == "rnn":
                 enc_output = tf.squeeze(enc_output[0], 0)
             dec_data = tf.transpose(dec, [0, 2, 1, 3])
             dec_data = tf.reduce_mean(dec_data, axis=1)
             outputs = rnn_utils.execute_decoder(dec_data, enc_output,
                                                 self.decoder_length,
                                                 params, attention,
                                                 self.dropout_placeholder)
             outputs = tf.reshape(tf.stack(outputs, axis=1),
                                  shape=(pr.batch_size,
                                         self.decoder_length))
     return outputs
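
The cell-selection preamble above (shared, with small variations, by the later examples) can be run on its own. The sketch below is an illustrative, standalone version with a made-up e_params dictionary; it is not code from the project.

 import copy

 # Illustrative encoder configuration; only "fw_cell" drives the remapping.
 e_params = {"fw_cell": "gru_cell", "fw_cell_size": 128}

 # Work on a deep copy so the shared encoder configuration is never mutated.
 params = copy.deepcopy(e_params)
 if "gru" in e_params["fw_cell"]:
     params["fw_cell"] = "gru_block"   # GRU block-cell variant for the decoder
 elif "rnn" in e_params["fw_cell"]:
     params["fw_cell"] = "rnn"         # plain RNN cell
 else:
     params["fw_cell"] = "lstm_block"  # default: LSTM block cell

 print(params["fw_cell"])  # -> gru_block
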
Example #2
 def exe_decoder(self, dec_hidden_vectors, fn_state):
     with tf.variable_scope("decoder",
                            initializer=self.initializer,
                            reuse=tf.AUTO_REUSE):
         params = deepcopy(self.e_params)
         if "gru" in self.e_params["fw_cell"]:
             params["fw_cell"] = "gru_block"
         else:
             params["fw_cell"] = "lstm_block"
         # concatenate with z noise
         dec_hidden_vectors = tf.concat([dec_hidden_vectors, self.z],
                                        axis=1)
         dec_hidden_vectors = tf.layers.dense(dec_hidden_vectors,
                                              self.rnn_hidden_units,
                                              name="generation_hidden_seed",
                                              activation=tf.nn.tanh)
         outputs = rnn_utils.execute_decoder_cnn(
             None, fn_state, self.decoder_length, params,
             dec_hidden_vectors, self.use_cnn, self.use_gen_cnn, self.mtype,
             self.use_batch_norm, self.dropout)
         outputs = tf.stack(outputs, axis=1)
         outputs = tf.reshape(outputs, [
             pr.batch_size, self.decoder_length, pr.grid_size * pr.grid_size
         ])
     return outputs
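
The "seed from noise" step in Example #2 can also be viewed in isolation. The sketch below assumes TensorFlow 1.x (tf.placeholder and tf.layers.dense); the batch size, hidden size, and the z placeholder are illustrative stand-ins for self.rnn_hidden_units and self.z.

 import tensorflow as tf  # TensorFlow 1.x API assumed

 batch_size, rnn_hidden_units, z_dim = 32, 128, 64
 dec_hidden_vectors = tf.placeholder(tf.float32, [batch_size, rnn_hidden_units])
 z = tf.placeholder(tf.float32, [batch_size, z_dim])  # sampled noise vector

 # Append the noise to the decoder hidden vector, then project back to the
 # RNN hidden size with a tanh layer so it can seed the generator.
 seed = tf.concat([dec_hidden_vectors, z], axis=1)
 seed = tf.layers.dense(seed, rnn_hidden_units,
                        name="generation_hidden_seed",
                        activation=tf.nn.tanh)
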
Example #3
 def exe_decoder_critic(self, dec, enc_output, attention=None):
     with tf.variable_scope("decoder",
                            initializer=self.initializer,
                            reuse=tf.AUTO_REUSE):
         params = copy.deepcopy(self.e_params)
         params["fw_cell"] = "gru_block"
         outputs, _ = rnn_utils.execute_decoder_cnn(
             dec, enc_output, self.decoder_length, params, attention,
             self.use_cnn, self.use_gen_cnn, self.mtype,
             self.use_batch_norm, self.dropout)
         # batch_size x decoder_length x grid_size x grid_size
         outputs = tf.stack(outputs, axis=1)
         # batch_size x decoder_length
     return outputs
Example #4
 def exe_decoder(self, dec_hidden_vectors, fn_state):
     with tf.variable_scope("decoder", initializer=self.initializer, reuse=tf.AUTO_REUSE):
         params = deepcopy(self.e_params)
         if "gru" in self.e_params["fw_cell"]:
             params["fw_cell"] = "gru_block"
         else:
             params["fw_cell"] = "lstm_block"
         dctype = 5
         if self.grid_size == 32:
             dctype = 7
         outputs = rnn_utils.execute_decoder_cnn(
             None, fn_state, self.decoder_length, params, dec_hidden_vectors,
             self.use_cnn, self.use_gen_cnn, self.mtype, self.use_batch_norm,
             self.dropout, dctype=dctype, offset=self.offset)
         classes = None
         outputs = tf.stack(outputs, axis=1)
         outputs = tf.reshape(outputs, [
             self.batch_size, self.decoder_length - self.offset,
             self.grid_size * self.grid_size
         ])
     return outputs, classes
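
Relative to Example #2, this variant picks the CNN decoder type via dctype (7 when grid_size is 32, 5 otherwise), forwards self.offset into the decoder, and reshapes the stacked result to decoder_length - offset steps, so the decoder is presumably expected to emit correspondingly fewer steps; the second return value, classes, is always None here.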
Example #5
 def exe_decoder(self, forecast_outputs, fn_state, weather_hidden_vector,
                 enc_inputs):
     last_enc_timestep = enc_inputs[:, :, :, 0]
     with tf.variable_scope("decoder",
                            initializer=self.initializer,
                            reuse=tf.AUTO_REUSE):
         params = deepcopy(self.e_params)
         if "gru" in self.e_params["fw_cell"]:
             params["fw_cell"] = "gru_block"
         else:
             params["fw_cell"] = "lstm_block"
         outputs, classes = rnn_utils.execute_decoder_cnn(
             forecast_outputs, fn_state, self.decoder_length, params,
             weather_hidden_vector, True, self.use_gen_cnn, self.mtype, False,
             self.dropout, dctype=5, classify=bool(self.num_class),
             init_input=last_enc_timestep, grid_input=False)
         outputs = tf.stack(outputs, axis=1)
         if classes:
             classes = tf.stack(classes, axis=1)
     return outputs, classes
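
This last variant feeds per-step forecast_outputs and weather_hidden_vector to the CNN decoder instead of a single hidden seed, initializes it from index 0 of the last axis of enc_inputs (the slice named last_enc_timestep), and requests per-step class outputs whenever self.num_class is non-zero; when class predictions are produced, they are stacked along the time axis and returned alongside outputs.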