# Split the graph's global variables into encoder vs. decoder groups:
# everything before the first variable whose name contains
# "transformer/decoding" is treated as an encoder variable.
# NOTE(review): this relies on variable-creation order in the global
# collection (encoder vars created before decoder vars) — confirm upstream.
while all_vars[decoder_var_start_id].name.find(
        "transformer/decoding") == -1:
    decoder_var_start_id += 1
encoder_variables = all_vars[:decoder_var_start_id]
decoder_variables = all_vars[decoder_var_start_id:]

# Run the custom-op encoder over `from_tensor`, then restore the
# [batch, seq, hidden] shape expected by the decoding op.
op_encoder_result = op_encoder(inputs=from_tensor,
                               encoder_args=encoder_args,
                               encoder_vars=encoder_variables,
                               attention_mask=attention_mask)
op_encoder_result = tf.reshape(
    op_encoder_result, [batch_size, max_seq_len, encoder_hidden_dim])

# Build the custom-op decoding graph on top of the op-encoder output.
finalized_op_output_ids, finalized_op_sequence_lengths, op_output_ids, \
    op_parent_ids, op_sequence_lengths = op_decoding(op_encoder_result,
                                                     memory_sequence_length,
                                                     embedding_table,
                                                     decoder_variables,
                                                     decoding_args)

# Session config: grow GPU memory on demand instead of grabbing it all.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.tables_initializer())
    # Evaluate the reference TF decoding outputs (tensors built elsewhere)...
    finalized_tf_output_ids_result, tf_output_ids_result, tf_parent_ids_result, \
        tf_sequence_lengths_result = sess.run(
            [finalized_tf_output_ids, tf_output_ids,
             tf_parent_ids, tf_sequence_lengths])
    # ...and the custom-op decoding outputs, presumably for cross-checking
    # against the TF reference — TODO confirm how results are compared.
    finalized_op_output_ids_result, op_output_ids_result, op_parent_ids_result, \
        op_sequence_lengths_result = sess.run(
            [finalized_op_output_ids, op_output_ids,
             op_parent_ids, op_sequence_lengths])
# NOTE(review): continuation of a call whose opening line is outside this
# chunk — the leading arguments are preserved verbatim.
        batch_size, max_seq_len, memory_hidden_dim, tf_datatype)

# Reference TF-graph beam-search decoding over `memory`.
# The literal 0 and the initializer ranges are passed straight through to
# tf_decoding; their semantics are defined by that helper — confirm there.
finalized_tf_output_ids, finalized_tf_sequence_lengths, tf_output_ids, \
    tf_parent_ids, tf_sequence_lengths = tf_decoding(memory,
                                                     memory_sequence_length,
                                                     embedding_table,
                                                     decoding_args,
                                                     0,
                                                     kernel_initializer_range,
                                                     bias_initializer_range)

# Custom-op decoding reuses every global variable created by tf_decoding
# above, so both paths share identical weights.
all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
finalized_op_output_ids, finalized_op_sequence_lengths, op_output_ids, \
    op_parent_ids, op_sequence_lengths = op_decoding(memory,
                                                     memory_sequence_length,
                                                     embedding_table,
                                                     all_vars,
                                                     decoding_args)

# Session config: grow GPU memory on demand; optionally enable XLA JIT.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
if use_XLA == 1:
    config.graph_options.optimizer_options.global_jit_level = \
        tf.OptimizerOptions.ON_1
with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.tables_initializer())
    # Only evaluate the TF reference path when cross-checking is requested.
    if args.cross_check == 1:
        finalized_tf_output_ids_result, tf_output_ids_result, tf_parent_ids_result, \
            tf_sequence_lengths_result = sess.run(
                [finalized_tf_output_ids, tf_output_ids,
                 tf_parent_ids, tf_sequence_lengths])
## end of tf decoding ## ## op decoding ## all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) decoder_var_start_id = 0 while all_vars[decoder_var_start_id].name.find( "transformer/decoding") == -1: decoder_var_start_id += 1 encoder_variables = all_vars[:decoder_var_start_id] decoder_variables = all_vars[decoder_var_start_id:] finalized_op_output_ids, finalized_op_sequence_lengths, op_output_ids, \ op_parent_ids, op_sequence_lengths = op_decoding(tf_encoder_result, memory_sequence_length, target_inputter.embedding, decoder_variables, # first one is embedding table decoding_args) op_target_ids = finalized_op_output_ids op_target_length = finalized_op_sequence_lengths op_target_tokens = target_vocab_rev.lookup(tf.cast(op_target_ids, tf.int64)) ## end of op decoding opennmt_target_ids = tf.cast(opennmt_target_ids, tf.int32) tf_target_ids = tf.cast(tf_target_ids, tf.int32) op_target_ids = tf.cast(op_target_ids, tf.int32) opennmt_target_length = tf.minimum(opennmt_target_length + 1,