Example 1
def delete_not_recognized_images(folder_path):
    """
    WARNING: DESTRUCTIVE FUNCTION. Use with care.
    Given a folder path, check whether each image has a recognized marker (i.e. a paired .ROI.json file). If not, the
    image is deleted. This is mainly used to clean up the raw data input stream so that only images with paired JSON
    files are sent on for machine learning training.

    :param folder_path: path to the folder on which to carry out this operation.
    :return:
    """
    import imghdr
    import os
    from tqdm import tqdm
    from PythonUtils.folder import recursive_list

    file_list = recursive_list(folder_path)
    # Format names as reported by imghdr.what(): it returns 'jpeg' and 'tiff', never 'jpg' or 'tif'.
    accepted_extensions = ('png', 'jpeg', 'gif', 'tiff', 'bmp')

    for file in tqdm(file_list):
        # Detect the actual image format from the file header.
        image_format = imghdr.what(file)

        # Skip files whose detected format is not an accepted image type.
        if image_format not in accepted_extensions:
            continue  # skip file if image format is not compatible.

        # Check whether the paired JSON file exists; delete the image if it does not.
        anticipated_JSON = file + ".ROI.json"
        if not os.path.exists(anticipated_JSON):
            os.remove(file)
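A minimal usage sketch for the cleanup function above; the module name dataset_cleanup and the folder path are hypothetical and not part of the original code.

from dataset_cleanup import delete_not_recognized_images  # hypothetical module

# DESTRUCTIVE: removes every image under raw_input/ that lacks a paired .ROI.json file.
delete_not_recognized_images("raw_input/")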
Example 2
def download_files(self):
    """
    Return a list of all files currently present in the download folder.
    :return: list of file paths found under self.download.
    """
    from PythonUtils.folder import recursive_list
    file_list = recursive_list(self.download)
    return file_list
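A hedged usage sketch; the Downloader class and its constructor below are hypothetical stand-ins for whatever class this method belongs to.

downloader = Downloader(download="downloads/")  # hypothetical class holding self.download

# List everything currently sitting in the download folder.
for path in downloader.download_files():
    print(path)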
Example 3
def folder(input_folder_path, out_path, aug_seg, iterations):
    """
    Duplicate the entire folder X iterations before augmenting the entire folder, using the augmentation sequence provided over the number of times requests.
    It does a batch augmentation process per 1000 images because memory cannot load that many more at the same time.
    :param input_folder_path:
    :param out_path:
    :param aug_seg:
    :param iterations:
    :return:
    """

    import copy
    from tqdm import trange
    from PythonUtils.folder import recursive_list
    # duplicates_into_folders, from_filelist, filelist_delete, save_images and logger
    # are project-level helpers assumed to be imported at module scope.

    input_files = recursive_list(input_folder_path)

    # Duplicate the folder `iterations` times.
    input_augment_files = duplicates_into_folders(input_files, out_path,
                                                  iterations)

    logger.info("Augmenting files from folder: " + out_path)

    # Decide if to do batch augmentation or all augmentation.
    with trange(len(input_augment_files)) as pbar:

        while len(input_augment_files) != 0:
            # Fewer than 1000 files remain: process everything left in one final batch.
            if len(input_augment_files) < 1000:
                # Assign all remaining items to the processing batch.
                processing_data = copy.deepcopy(input_augment_files)
                # Empty the list to trigger the exit condition.
                input_augment_files.clear()

            # Otherwise, process 1000 images at a time: load, augment, write out.
            else:
                # Transfer the head of the list to the processing batch.
                processing_data = input_augment_files[0:1000]

                # Truncate the original list by the same amount.
                del input_augment_files[0:1000]

            # COMMON PORTION. The branches above only decide what goes into processing_data
            # and what remains in input_augment_files.
            # Load the batch of images into one large ndarray from the TEMP folder.
            images_ndarray = from_filelist(processing_data)

            # Now that all images are in memory, time to delete all these "source" input files.
            filelist_delete(processing_data)

            # Augment the in-memory batch of images.
            images_augmented = aug_seg.augment_images(images_ndarray)

            logger.info("Saving augmented images to: " + out_path)

            # Save the augmented images out to path.
            save_images(images_augmented, out_path)

            # Update the progress bar by the number of images just processed.
            pbar.update(len(processing_data))
        pbar.close()
        logger.info("All images augmented.")
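A minimal usage sketch for the batch augmentation routine above. The folder paths are hypothetical, and imgaug is used only as one example of an object exposing augment_images(); any augmentation sequence with that method should work.

import imgaug.augmenters as iaa

# Flip half of the images horizontally and add a light Gaussian blur.
aug_seq = iaa.Sequential([
    iaa.Fliplr(0.5),
    iaa.GaussianBlur(sigma=(0.0, 1.0)),
])

# Duplicate the source folder three times, then augment every duplicated image in batches of 1000.
folder("raw_images/", "augmented_images/", aug_seq, iterations=3)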
Example 4
def crop_folder(image_folder, output_folder, width, height, iterations):
    """
    crop the background images and generate the cropped version of them that are only 500x500
    :param image_folder: folder contain downloads.
    :param output_folder: folder where all the output will be dumped into
    :param width: width of the area will be cropped out.
    :param height: height of the area will be cropped out.
    :return:
    """
    # Change into the directory
    # List all the relevant folders.
    files = recursive_list(image_folder)

    # Make DIR if it does not already exist.
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    crop_filelist(files, output_folder, width, height, iterations)
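A short usage sketch; the module name image_cropping and both folder paths below are hypothetical.

from image_cropping import crop_folder  # hypothetical module

# Produce 500x500 crops of every downloaded background image; the iteration
# count is forwarded unchanged to crop_filelist.
crop_folder("downloads/backgrounds/", "cropped/backgrounds/", 500, 500, iterations=5)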
Example 5
def generate_csv(folder_path, csv_path):
    """
    Given a path to a folder with images, and their JSON files, output a CSV of:
     1) all the valid images where markers were detected.
     2) all the JSON files with .ROI.json is present.
     3) parse the JSON and file path into the CSV.
    :param folder_path:
    :param csv_path:
    :return:
    """
    from PythonUtils.folder import recursive_list
    file_list = recursive_list(folder_path)
    accepted_extensions = ('png', 'jpg', 'gif', 'jpeg', 'tif', 'bmp')

    # Open files for writing.
    with open(csv_path, "w", newline='') as csv_file:

        # Open file
        csv_writer = csv.DictWriter(
            csv_file, fieldnames=["r1", "r2", "r0", "t1", "t2", "t0", "file"])
        csv_writer.writeheader()

        for file in file_list:
            # Detect the actual image format from the file header.
            image_format = imghdr.what(file)

            # Skip files whose detected format is not an accepted image type.
            if image_format not in accepted_extensions:
                continue  # skip file if image format is not compatible.

            # Skip files that do not have a paired .ROI.json file.
            anticipated_JSON = file + ".ROI.json"
            if not os.path.exists(anticipated_JSON):
                continue  # skip file if no JSON

            # Load the JSON and pull out the relative pose block.
            json_dictionary = read_json(anticipated_JSON)
            pose = json_dictionary.get("RelativePose")
            if pose is None:
                continue  # skip file if the JSON has no RelativePose block.
            pose['file'] = file

            # Write to CSV
            csv_writer.writerow(pose)

    return csv_path
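A brief usage sketch; the module name and paths below are hypothetical.

from dataset_csv import generate_csv  # hypothetical module

# Collect the pose annotations of every image that has a .ROI.json file into one CSV.
csv_file = generate_csv("raw_input/", "raw_input/poses.csv")
print("Wrote", csv_file)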
Example 6
from PythonUtils.folder import recursive_list
from PIL import Image
from tqdm import tqdm
files = recursive_list(
    r"C:\Yang\Dropbox\Machine_Learning\orientation_validation")
for file in tqdm(files):
    try:
        # Skip the ROI annotation files; only image files should be cropped.
        if "ROI" in file:
            continue
        im = Image.open(file)
        width, height = im.size  # Get dimensions
        new_width = 480
        # Centre the 480-pixel-wide crop horizontally; keep the full height.
        left = (width - new_width) // 2
        right = left + new_width
        new_im = im.crop((left, 0, right, height))
        # Overwrite the original file with the cropped BMP.
        new_im.save(file, "BMP")
    except Exception:
        # Skip files that PIL cannot open or crop (e.g. non-image files).
        continue
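The same centre-crop step, packaged as a small reusable helper for clarity; center_crop_width is a sketch and not part of the original script.

from PIL import Image


def center_crop_width(image_path, new_width=480, output_format="BMP"):
    """Crop an image to new_width pixels horizontally, centred, keeping the full height, and overwrite it in place."""
    im = Image.open(image_path)
    width, height = im.size
    left = (width - new_width) // 2
    cropped = im.crop((left, 0, left + new_width, height))
    cropped.save(image_path, output_format)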