def main():
    """Entry point: sample a batch of results and render word-level attention.

    Parses CLI options, applies the (optionally overridden) configuration,
    restricts visible GPUs, draws a random batch from the results file and
    visualizes it on a square grid.
    """
    parser = ArgumentParser("Start the model using the configuration.ini")
    parser.add_argument("-r", "--result_file", help="Path to the human readable results file", required=True)
    parser.add_argument("-m", "--model_path", help="", required=True)
    parser.add_argument("-b", "--batch_size", default=9, type=int, help="")
    parser.add_argument("-c", "--configuration",
                        help="Determine a specific configuration to use. If not specified, the default is used.")
    run_opts = parser.parse_args()

    if run_opts.configuration:
        config = Configuration(run_opts.configuration)
    else:
        config = Configuration()

    # Restrict the visible GPUs before any framework initializes CUDA.
    import os
    os.environ["CUDA_VISIBLE_DEVICES"] = str(config.getGpuDevices())

    results = load_json_from(run_opts.result_file)

    batch_size = run_opts.batch_size
    # The visualization lays the samples out on a rows x cols square grid, so
    # the batch size must be a perfect square. Previously a non-square value
    # was silently truncated via astype("uint8"), which dropped samples from
    # the rendered grid; fail loudly instead.
    grid_side = int(np.sqrt(batch_size))
    if grid_side * grid_side != batch_size:
        raise ValueError(
            "batch_size must be a perfect square (e.g. 4, 9, 16), got {}".format(batch_size))

    samples = np.random.choice(results, batch_size, replace=False)
    visualize_image_attention_with_config(run_opts.model_path, "word", samples,
                                          grid_side, grid_side, config)
def load_prepared_questions_json_from(directory_path_or_file, split_name=None, flat=True):
    """Load the prepared-questions JSON.

    @param split_name: when given looks for the sub-directory or file in the flat directory
    @param flat: when True looks for a file in the given directory, otherwise looks into the sub-directory
    """
    filename = DEFAULT_PREPARED_QUESTIONS_FILE_NAME
    if split_name:
        if flat:
            # Split-specific file living directly in the top-level directory.
            filename = DEFAULT_PREPARED_QUESTIONS_SPLIT_FILE_NAME_PATTERN.format(split_name)
        else:
            # Descend into the per-split sub-directory instead.
            directory_path_or_file = directory_path_or_file + "/" + split_name
    return load_json_from(directory_path_or_file, filename)
def load_questions_json_from(directory_path_or_file, corpus_type, split_name=None, flat=False):
    """Load the source questions JSON for the given corpus type.

    @param split_name: when given looks for the sub-directory or file in the flat directory
    @param flat: when True looks for a file in the given directory, otherwise looks into the sub-directory
    """
    filename = DEFAULT_QUESTION_FILE_NAME_PATTERN.format(corpus_type)
    if split_name:
        if flat:
            # Source question files cannot sit next to the top-level directory.
            raise Exception("Not supported to have source question files on the same level as the top dataset directory")
        # Descend into the per-split sub-directory.
        directory_path_or_file = directory_path_or_file + "/" + split_name
    return load_json_from(directory_path_or_file, filename)
def load_labels_json_from(directory_path_or_file, split_name=None, flat=True):
    """Load the labels JSON.

    @param split_name: when given looks for the sub-directory or file in the flat directory
    @param flat: when True looks for a file in the given directory, otherwise looks into the sub-directory
    """
    # NOTE: the old docstring advertised a "force" parameter that never
    # existed in the signature; that stale documentation has been removed.
    lookup_filename = DEFAULT_LABELS_FILE_NAME
    if split_name and not flat:
        # Descend into the per-split sub-directory.
        directory_path_or_file = "/".join([directory_path_or_file, split_name])
    if split_name and flat:
        # Split-specific labels file living directly in the top-level directory.
        lookup_filename = "vqa1_labels_{}.json".format(split_name)
    return load_json_from(directory_path_or_file, lookup_filename)
def load_answers_by_question_from(directory_path_or_file, split_name=None, flat=True):
    """Load the answers-by-question JSON.

    @param split_name: when given looks for the sub-directory or file in the flat directory
    @param flat: when True looks for a file in the given directory, otherwise looks into the sub-directory
    """
    if not split_name:
        target_name = DEFAULT_ANSWERS_FILE_NAME
    elif flat:
        # Split-specific answers file living directly in the top-level directory.
        target_name = "vqa1_answers_{}.json".format(split_name)
    else:
        # Descend into the per-split sub-directory; keep the default file name.
        target_name = DEFAULT_ANSWERS_FILE_NAME
        directory_path_or_file = directory_path_or_file + "/" + split_name
    return load_json_from(directory_path_or_file, target_name)
def load_vocabulary_file_from(directory_path_or_file, split_name=None, flat=True):
    """Load a tokenizer from the vocabulary JSON file.

    @param split_name: when given looks for the sub-directory or file in the flat directory
    @param flat: when True looks for a file in the given directory, otherwise looks into the sub-directory
    @return: the tokenizer restored via tokenizer_from_json
    """
    lookup_filename = DEFAULT_VOCABULARY_FILE_NAME
    if split_name and not flat:
        # Descend into the per-split sub-directory.
        directory_path_or_file = "/".join([directory_path_or_file, split_name])
    if split_name and flat:
        # Split-specific vocabulary file living directly in the top-level directory.
        lookup_filename = "vqa1_vocabulary_{}.json".format(split_name)
    tokenizer_config = load_json_from(directory_path_or_file, lookup_filename)
    # load_json_from yields a parsed object, but tokenizer_from_json expects a
    # JSON *string*, so the config is re-serialized before handing it over.
    tokenizer = tokenizer_from_json(json.dumps(tokenizer_config))
    return tokenizer