Example #1
# The excerpt begins mid-statement; the checkpoint manager creation is
# reconstructed from the standard tf.train.CheckpointManager signature and
# the checkpoint_manager / checkpoint_directory variables used below.
checkpoint_manager = tf.train.CheckpointManager(checkpoint,
                                                checkpoint_directory,
                                                max_to_keep=5)
status = checkpoint.restore(checkpoint_manager.latest_checkpoint)
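
# The restore status is not checked in the original example. If a sanity
# check is wanted, the load-status object returned by
# tf.train.Checkpoint.restore() exposes assert_existing_objects_matched(),
# which can be called once the model's variables have been built:
#   status.assert_existing_objects_matched()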

# Output file ###############################################################
results_file = checkpoint_directory + "/results_evaluate.txt"
predictions_file = checkpoint_directory + "/predictions_evaluate.txt"
image2seq.set_predictions_file(predictions_file)
attention_file = checkpoint_directory

#############################################################################
# Pre-processing                                                            #
#############################################################################
# STEP 1: Pre-process token #################################################
list_image_paths, list_processed_matrix_seqs = \
  token_preprocessing(images_seqs_csv,
                      batch_size=batch_size,
                      skip_padding=True)

len_list_image_paths = len(list_image_paths)

# Only the matrix shapes are used directly; the first return value is kept
# just for the length consistency checks below.
_, list_matrix_shapes = \
  detected_values_preprocessing(train_info_csv,
                                batch_size=batch_size)

logging.info("PREPROCESSING - Step 1 - Token preprocessing")
tf.compat.v1.debugging.assert_equal(len(_), len_list_image_paths)
tf.compat.v1.debugging.assert_equal(len(list_matrix_shapes),
                                    len(list_processed_matrix_seqs))
tf.compat.v1.debugging.assert_equal(len(_), len(list_matrix_shapes))

# STEP 2: Train-validation split ############################################
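# The excerpt ends at this header. A minimal sketch of the split step,
# assuming a plain index-based 90/10 split (the ratio and the train_*/val_*
# names below are illustrative, not taken from the original source):
#
#   split_index = int(0.9 * len_list_image_paths)
#   train_image_paths = list_image_paths[:split_index]
#   val_image_paths = list_image_paths[split_index:]
#   train_matrix_seqs = list_processed_matrix_seqs[:split_index]
#   val_matrix_seqs = list_processed_matrix_seqs[split_index:]
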
Example #2
# status = checkpoint.restore(checkpoint_manager.latest_checkpoint)

# Output file ###############################################################
results_file = "image2seq/checkpoints/train/{}_{}_{}{}/results.txt"\
                .format(image2seq.get_model_name(), date, hour, minute)
predictions_file = "image2seq/checkpoints/train/{}_{}_{}{}/predictions.txt"\
                .format(image2seq.get_model_name(), date, hour, minute)
image2seq.set_predictions_file(predictions_file)
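
# Note: the formatted run directory must exist before results.txt and
# predictions.txt can be written; a minimal guard (not part of the original
# example) would be:
#   import os
#   os.makedirs(os.path.dirname(results_file), exist_ok=True)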

#############################################################################
# Pre-processing                                                            #
#############################################################################
# STEP 1: Pre-process token #################################################
list_image_paths, list_processed_matrix_seqs = \
  token_preprocessing(images_seqs_csv,
                      batch_size=batch_size,
                      skip_padding=True,
                      parallel_caption=True)

len_list_image_paths = len(list_image_paths)

_, list_matrix_shapes = \
  matrix_shape_preprocessing(train_info_csv,
                             batch_size=batch_size)

logging.info("PREPROCESSING - Step 1 - Token preprocessing")
tf.compat.v1.debugging.assert_equal(len(_), len_list_image_paths)
tf.compat.v1.debugging.assert_equal(len(list_matrix_shapes),
                                    len(list_processed_matrix_seqs))
tf.compat.v1.debugging.assert_equal(len(_), len(list_matrix_shapes))

# STEP 2: Train-validation split ############################################