def download_model(self, model_path):
    """
    Download a model blob and write it to the same path locally.

    :param model_path: Blob name within the container; also used as the
        local destination path (parent dirs are created if missing).
    :return: None.
    """
    print("Downloading", model_path)
    client = self.blob_service_client.get_blob_client(
        container=self.container_name, blob=model_path)
    # Make sure the local parent directory exists before opening the file.
    create_dirs_if_not_found(os.path.dirname(model_path))
    with open(model_path, "wb+") as out_file:
        out_file.write(client.download_blob().readall())
    def __init__(self, feature_name, tag=None, repeats=3):
        """
        Set up a grid-search run: derive a tagged save directory under
        ROOT_DIR, create it, and record the log path.

        :param feature_name: Name of the feature set being searched over.
        :param tag: Optional run tag; defaults to a fresh timestamp string.
        :param repeats: Number of repeats per configuration.
        """
        self.feature_name = feature_name
        run_tag = create_timestamp_str() if tag is None else tag
        self.grid_search_tag = "grid_search_" + run_tag
        self.save_dir = os.path.join(ROOT_DIR, self.grid_search_tag)
        self.log_path = os.path.join(self.save_dir, "log.txt")
        self.repeats = repeats

        create_dirs_if_not_found(self.save_dir)
        print("Running", self.grid_search_tag)
 def _save_model(self, model: Model, save_dir: str, tag=None) -> str:
     """
     Save a model.
     :param model: Model to save.
     :param save_dir: Path of save directory.
     :param tag: Optional file tag; defaults to a timestamp string.
     :return: Generated filename.
     """
     file_tag = create_timestamp_str() if tag is None else tag
     create_dirs_if_not_found(save_dir)
     file_name = f"{self.feature_name}_{model.name}_{file_tag}.pth"
     model.save(os.path.join(save_dir, file_name))
     return file_name
    def extract(self, dataset_type: DatasetType) -> None:
        """
        Extract the features from an image dataset if not found.
        :param dataset_type: Dataset to use (training, test etc.)
        :return: None.
        """
        features_dir = self.get_features_dir(dataset_type)
        image_dataset = self.image_datasets.get_dataset(dataset_type)

        # Skip extraction when the features dir already holds one entry
        # per image (short-circuit avoids listdir on a missing dir).
        up_to_date = os.path.exists(features_dir) and len(
            os.listdir(features_dir)) == len(image_dataset)
        if up_to_date:
            return

        # CUDA setup
        device = "cuda:0" if torch.cuda.is_available() else "cpu"
        print("Running feature extraction using", device)

        create_dirs_if_not_found(features_dir)
        if dataset_type is DatasetType.Competition:
            self._run_unlabelled_extraction(dataset_type, device)
        else:
            self._run_labelled_extraction(dataset_type, device)
def augment_all_in_dir(class_dir, num_outputs):
    """
    Augment images from one class directory until its output directory
    contains ``num_outputs`` files.

    Repeatedly cycles over the input images in random order, applying
    ``augment_image`` and saving each result with an incrementing prefix
    so repeated passes over the same source file don't collide.

    :param class_dir: Name of the class sub-directory (under ``base_dir``).
    :param num_outputs: Target number of files in the output directory.
    :return: None.
    """
    input_dir_path = base_dir + "/" + class_dir
    output_dir_path = output_dir + "/" + class_dir
    create_dirs_if_not_found(output_dir_path)
    file_names = os.listdir(input_dir_path)
    if not file_names:
        # Guard: with no input images the while-loop below could never
        # make progress and would spin forever.
        print("No input images found for", class_dir)
        return
    aug_idx = 1
    with tqdm(total=num_outputs, desc="Augmenting " + str(class_dir)) as p_bar:
        while len(os.listdir(output_dir_path)) < num_outputs:
            random.shuffle(file_names)
            for file_name in file_names:
                input_path = input_dir_path + "/" + file_name
                output_path = output_dir_path + "/" + str(
                    aug_idx) + "_" + file_name
                # Context manager closes the source file handle; the
                # original leaked one open handle per augmented image.
                with Image.open(input_path) as img:
                    transformed_img = augment_image(img)
                    transformed_img.save(output_path)
                # Update the bar before the exit check so the final
                # image is counted (original undercounted by one).
                p_bar.update(1)
                if len(os.listdir(output_dir_path)) >= num_outputs:
                    return
            aug_idx += 1
def _setup_output_dirs() -> None:
    """
    Create output dirs.

    Makes one sub-directory per indexed class under each of the train,
    validation and test roots, plus the (flat) competition output dir.
    :return: None.
    """
    print("Creating output dirs")
    split_roots = (TRAIN_OUTPUT_PATH, VALIDATION_OUTPUT_PATH, TEST_OUTPUT_PATH)
    for class_name in utils.get_indexed_class_names():
        for root in split_roots:
            utils.create_dirs_if_not_found(os.path.join(root, class_name))
    utils.create_dirs_if_not_found(COMPETITION_OUTPUT_PATH)