    def __init__(self, cfg, unique_id, apply_threshold, is_bert):
        self.dataset_cfg = get_dataset_metadata_cfg()

        self.workspace_dir = os.path.join(cfg["workspace"]["directory"],
                                          cfg["dataset"]["name"])

        self.tokenized_descriptions_file_path = os.path.join(
            self.workspace_dir, tokenized_descriptions)
        self.word_dictionary_file_path = os.path.join(self.workspace_dir,
                                                      word_dictionary)
        self.vector_encoding_file_path = os.path.join(self.workspace_dir,
                                                      vector_encoding)
        self.processed_descriptions_file_path = os.path.join(
            self.workspace_dir, index_replaced_file)
        self.index_file = self.dataset_cfg["data"]["googlecc"]["index_file"]
        self.dataset_path = self.dataset_cfg["data"]["googlecc"][
            "dataset_path"]
        self.cfg = cfg
        self.unique_id = unique_id
        self.is_bert = is_bert

        # Prepare the dataset: process the original descriptions into index-replaced descriptions
        self.prepare_dataset()
        self.train_test_split()

        if is_bert:
            BERT_MODEL_HUB = "https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1"
            with tf.Graph().as_default():
                bert_module = hub.Module(BERT_MODEL_HUB)
                tokenization_info = bert_module(signature="tokenization_info",
                                                as_dict=True)
                with tf.compat.v1.Session() as sess:
                    # vocab_file and do_lower_case are graph tensors; run them
                    # in the session to obtain the concrete path and flag.
                    vocab_file, do_lower_case = sess.run(
                        [tokenization_info["vocab_file"],
                         tokenization_info["do_lower_case"]])
                    print("vocab", vocab_file)
            tokenizer = tokenization.FullTokenizer(
                vocab_file=vocab_file, do_lower_case=do_lower_case)
            tokenize_descriptions_bert(self.processed_descriptions_file_path,
                                       self.tokenized_descriptions_file_path,
                                       tokenizer)
            vector_encode_descriptions_bert(
                self.tokenized_descriptions_file_path,
                self.vector_encoding_file_path, tokenizer)
        else:
            if apply_threshold:
                tokenize_descriptions_with_threshold(
                    self.processed_descriptions_file_path,
                    self.tokenized_descriptions_file_path)
            else:
                tokenize_descriptions(self.processed_descriptions_file_path,
                                      self.tokenized_descriptions_file_path)

            create_word_map(self.tokenized_descriptions_file_path,
                            self.word_dictionary_file_path)
            word_map = read_word_dictionary(self.word_dictionary_file_path)
            vector_encode_descriptions(self.tokenized_descriptions_file_path,
                                       self.vector_encoding_file_path,
                                       word_map)
Example 2
def retrieve_data_list_file(cfg, run_type):
    dataset_cfg = get_dataset_metadata_cfg()

    if run_type == "train":
        data_list = dataset_cfg["data"]["flickr"]["train_images_file"]
    elif run_type == "validation":
        data_list = dataset_cfg["data"]["flickr"]["validation_images_file"]
    else:
        data_list = dataset_cfg["data"]["flickr"]["test_images_file"]

    return data_list
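A brief usage sketch; cfg is the loaded configuration dictionary (note that the function does not actually read it).

train_list = retrieve_data_list_file(cfg, "train")
validation_list = retrieve_data_list_file(cfg, "validation")
test_list = retrieve_data_list_file(cfg, "test")  # any other value selects the test split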
Example 3
def image_generator(cfg, data_list):
    dataset_metadata = get_dataset_metadata_cfg()
    with open(data_list, 'r') as file:
        for line in file:
            # Each line holds an image filename relative to the dataset path.
            image = cv2.imread(
                os.path.join(
                    dataset_metadata["data"]["flickr"]["dataset_path"],
                    line.strip()))
            # Resize to 299x299, scale pixels to [0, 1] and add a batch axis.
            image = np.expand_dims(np.asarray(cv2.resize(image,
                                                         (299, 299))) / 255.0,
                                   axis=0)
            yield image
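A sketch of how the generator could feed a 299x299 image encoder. InceptionV3 matches the input size used above, but the choice of encoder and the use of retrieve_data_list_file here are assumptions, not part of the original code.

import numpy as np
from tensorflow.keras.applications import InceptionV3

# Global-average-pooled features: one 2048-d vector per image.
encoder = InceptionV3(weights="imagenet", include_top=False, pooling="avg")

data_list = retrieve_data_list_file(cfg, "train")  # one image filename per line

# Each yielded batch has shape (1, 299, 299, 3); the generator already
# scales pixels to [0, 1].
features = np.vstack([encoder.predict(batch)
                      for batch in image_generator(cfg, data_list)])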
Example 4
    def __init__(self, cfg):
        dataset_cfg = get_dataset_metadata_cfg()

        self.workspace_dir = cfg["workspace"]["directory"]
        self.tokenized_descriptions_file_path = os.path.join(
            self.workspace_dir, tokenized_descriptions)
        self.word_dictionary_file_path = os.path.join(self.workspace_dir,
                                                      word_dictionary)
        self.vector_encoding_file_path = os.path.join(self.workspace_dir,
                                                      vector_encoding)
        self.cfg = cfg

        tokenize_descriptions(dataset_cfg["data"]["flickr"]["descriptions"],
                              self.tokenized_descriptions_file_path)
        create_word_map(self.tokenized_descriptions_file_path,
                        self.word_dictionary_file_path)

        word_map = read_word_dictionary(self.word_dictionary_file_path)
        vector_encode_descriptions(self.tokenized_descriptions_file_path,
                                   self.vector_encoding_file_path, word_map)
Example 5
    def __init__(self, cfg, unique_id, shape):
        self.dataset_cfg = get_dataset_metadata_cfg()

        self.workspace_dir = os.path.join(cfg["workspace"]["directory"],
                                          cfg["dataset"]["name"])

        self.tokenized_descriptions_file_path = os.path.join(
            self.workspace_dir, tokenized_descriptions)
        self.word_dictionary_file_path = os.path.join(self.workspace_dir,
                                                      word_dictionary)
        self.vector_encoding_file_path = os.path.join(self.workspace_dir,
                                                      vector_encoding)
        self.processed_descriptions_file_path = os.path.join(
            self.workspace_dir, index_replaced_file)
        self.index_file = self.dataset_cfg["data"]["googlecc"]["index_file"]
        self.dataset_path = self.dataset_cfg["data"]["googlecc"][
            "dataset_path"]
        self.apply_dataset_threshold = cfg["dataset"]["is_threshold"]
        self.cfg = cfg
        self.shape = shape
        self.unique_id = unique_id

        # Prepare the dataset: process the original descriptions into index-replaced descriptions
        self.prepare_dataset()
        self.train_test_split()

        if self.apply_dataset_threshold:
            tokenize_descriptions_with_threshold(
                self.processed_descriptions_file_path,
                self.tokenized_descriptions_file_path)
        else:
            tokenize_descriptions(self.processed_descriptions_file_path,
                                  self.tokenized_descriptions_file_path)

        create_word_map(self.tokenized_descriptions_file_path,
                        self.word_dictionary_file_path)
        word_map = read_word_dictionary(self.word_dictionary_file_path)
        vector_encode_descriptions(self.tokenized_descriptions_file_path,
                                   self.vector_encoding_file_path, word_map)
    clear_session()
    parser = argparse.ArgumentParser(description="config")
    parser.add_argument(
        "--config",
        nargs="?",
        type=str,
        default="../configs/resnet50_lstm_with_threshold.yaml",
        help="Configuration file to use",
    )

    args = parser.parse_args()

    with open(args.config) as fp:
        cfg = yaml.safe_load(fp)

    dataset_cfg = get_dataset_metadata_cfg()
    model_workspace_dir = os.path.join(cfg["workspace"]["directory"],
                                       cfg["dataset"]["name"],
                                       cfg["model"]["arch"])
    utils.make_directories(model_workspace_dir)

    img_model = ResNet50(weights='imagenet')

    dataset_preprocessor = PreProcessing(cfg, "resnet50", False, False)
    dataset_preprocessor.run_one_time_encoding(img_model)

    # Load train, validation sets from the pre-processor
    training_generator, validation_generator, test_generator = dataset_preprocessor.get_keras_generators(
        "resnet50")

    MAX_LEN = 40