def main():
    # Validate inputs up front; early returns keep the happy path flat
    if not os.path.isfile(INPUT_FILE):
        print('Input image not found:', INPUT_FILE)
        return
    if not os.path.isfile(MODEL_FILE):
        print('Model not found:', MODEL_FILE)
        return

    print('Load model...', MODEL_FILE)
    model = models.UNet(n_channels=1, n_classes=1)
    checkpoint = torch.load(pathlib.Path(MODEL_FILE))
    model.load_state_dict(checkpoint)
    model.to(device)
    model.eval()

    print('Load image...', INPUT_FILE)
    img, h, w = image.load_image(INPUT_FILE)

    print('Prediction...')
    output_image = predict_image(model, img)

    print('Resize mask to original size...')
    mask_image = cv2.resize(output_image, (w, h))  # cv2.resize takes (width, height)
    cv2.imwrite(OUTPUT_MASK, mask_image)

    print('Cut it out...')
    warped = image.extract_idcard(cv2.imread(INPUT_FILE), mask_image)
    cv2.imwrite(OUTPUT_FILE, warped)

    print('Done.')
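
The snippet assumes a predict_image helper that is not shown. A minimal sketch of what it might look like, assuming the UNet emits a single-channel logit map and img is a (1, H, W) float tensor; the real helper may differ:

import numpy as np
import torch

def predict_image(model, img):
    # Add a batch axis and run a forward pass without tracking gradients
    with torch.no_grad():
        logits = model(img.unsqueeze(0).to(device))
    # Threshold the sigmoid output into a 0/255 uint8 mask for cv2.imwrite
    mask = (torch.sigmoid(logits)[0, 0] > 0.5).cpu().numpy()
    return (mask * 255).astype(np.uint8)
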
def load_image_train(image_file, height, width, resize_ratio_before_crop):
    input_image, real_image = vertical_split(load_image(image_file))
    input_image, real_image = random_jitter(
        input_image, real_image, height, width, resize_ratio_before_crop)
    input_image = normalize_image(input_image)
    real_image = normalize_image(real_image)

    return input_image, real_image


def load_image_test(image_file, height, width):
    input_image, real_image = vertical_split(load_image(image_file))
    input_image = resize_image(input_image, height, width)
    real_image = resize_image(real_image, height, width)
    input_image = normalize_image(input_image)
    real_image = normalize_image(real_image)

    return input_image, real_image
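
A hypothetical sketch of how these two loaders would typically be wired into a tf.data pipeline; the file patterns, size constants, and jitter ratio are placeholders, not values from the original project:

import tensorflow as tf

IMG_HEIGHT, IMG_WIDTH = 256, 256  # placeholder sizes

train_ds = tf.data.Dataset.list_files('train/*.jpg')
train_ds = train_ds.map(
    lambda f: load_image_train(f, IMG_HEIGHT, IMG_WIDTH, 1.12),
    num_parallel_calls=tf.data.AUTOTUNE).shuffle(400).batch(1)

test_ds = tf.data.Dataset.list_files('test/*.jpg')
test_ds = test_ds.map(
    lambda f: load_image_test(f, IMG_HEIGHT, IMG_WIDTH)).batch(1)
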
Example #4
def main():
    # Rebuild the model from its architecture file and trained weights
    model = load_model('model.json')
    model.load_weights('weights.hdf5')
    names = load_list('names.json')
    print('Enter the file name (*.jpg)')
    while True:
        values = input('>> ').rstrip()
        if not os.path.isfile(values):
            print('File does not exist')
            continue
        image = load_image(name=values, size=(64, 64))
        prediction = model.predict(image)
        display(names, prediction)
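
The load_image helper used here is not shown. A minimal sketch under the assumption that it reads the file with OpenCV, resizes to the model input, scales to [0, 1], and adds a batch axis; the real helper may differ:

import cv2
import numpy as np

def load_image(name, size=(64, 64)):
    img = cv2.imread(name)
    img = cv2.resize(img, size)  # size is (width, height)
    return np.expand_dims(img.astype(np.float32) / 255.0, axis=0)
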
Example #5
    def __getitem__(self, index):
        # Load the two consecutive frames of this sample
        m_inputs = []
        for k in range(2):
            name = '{0}_im{1}.png'.format(self.data[index], k + 1)
            m_inputs.append(
                load_image(os.path.join(self.data_path, name),
                           size=self.input_size,
                           channel_first=True))

        # Build a multi-scale pyramid from the first frame
        i_inputs = []
        for input_scale in self.input_scales:
            i_inputs.append(
                resize_image(m_inputs[0],
                             size=int(self.input_size * input_scale),
                             channel_first=True))

        inputs = (i_inputs, m_inputs)
        # The target is the scaled residual between the two frames
        targets = (resize_image(m_inputs[1], size=self.target_size, channel_first=True)
                   - resize_image(m_inputs[0], size=self.target_size, channel_first=True))

        return inputs, targets * 128.
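
Assuming this __getitem__ belongs to a standard PyTorch Dataset (the framework is not shown in the snippet), iteration could look like the following; the dataset construction and batch size are placeholders:

from torch.utils.data import DataLoader

loader = DataLoader(dataset, batch_size=8, shuffle=True)
for (pyramid, frame_pair), residual in loader:
    # residual is (frame2 - frame1) * 128 at target_size
    ...
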
Example #6
def montgomery(shuffle=True,
               size=(256, 256),
               trans_range=10.0,
               rot_range=np.pi / 24,
               scale_range=0.1,
               shear_range=0.05,
               lens_range=0.1):
    # If no data folder is found, download the data
    # TODO: Modularize things
    if not os.path.isdir(os.path.join(*[os.getcwd(), 'data', 'montgomery'])):
        data_root_orig = tf.keras.utils.get_file(
            'montgomery.zip',
            'http://openi.nlm.nih.gov/imgs/collections/NLM-MontgomeryCXRSet.zip',
            extract=True,
            cache_subdir=os.path.join(*[os.getcwd(), 'data']))
        # Remove the zip file
        try:
            os.remove(os.path.join(*['data', 'montgomery.zip']))
        except OSError as e:
            print('dataset.montgomery: ', e)
        # Clean up unnecessary stuff that comes with the ZIP file
        try:
            shutil.rmtree(os.path.join(*['data', '__MACOSX']))
        except OSError as e:
            print('dataset.montgomery: ', e)
        # Rename folders to a consistent snake_case layout
        renames = [
            (['data', 'MontgomerySet'], ['data', 'montgomery']),
            (['data', 'montgomery', 'ClinicalReadings'],
             ['data', 'montgomery', 'clinical_readings']),
            (['data', 'montgomery', 'CXR_png'], ['data', 'montgomery', 'images']),
            (['data', 'montgomery', 'ManualMask'], ['data', 'montgomery', 'masks']),
            # The original dataset appears to have left and right confused,
            # so the left/right mask folders are deliberately swapped here
            (['data', 'montgomery', 'masks', 'leftMask'],
             ['data', 'montgomery', 'masks', 'right']),
            (['data', 'montgomery', 'masks', 'rightMask'],
             ['data', 'montgomery', 'masks', 'left']),
        ]
        for src, dst in renames:
            try:
                from_dir, to_dir = os.path.join(*src), os.path.join(*dst)
                os.mkdir(to_dir)
                for file in os.listdir(from_dir):
                    shutil.move(os.path.join(from_dir, file), to_dir)
                os.rmdir(from_dir)
            except OSError as e:
                print('dataset.montgomery:', e)
        try:
            os.rename(
                os.path.join(*['data', 'montgomery',
                               'NLM-MontgomeryCXRSet-ReadMe.pdf']),
                os.path.join(*['data', 'montgomery', 'README.pdf']))
        except OSError as e:
            print('dataset.montgomery:', e)
        # Clean up .DS_Store and Thumbs.db leftovers; each removal gets its
        # own try so one missing file does not skip the rest
        for junk in [['data', 'montgomery', '.DS_Store'],
                     ['data', 'montgomery', 'masks', '.DS_Store'],
                     ['data', 'montgomery', 'images', 'Thumbs.db'],
                     ['data', 'montgomery', 'masks', 'right', 'Thumbs.db']]:
            try:
                os.remove(os.path.join(*junk))
            except OSError as e:
                print('dataset.montgomery:', e)

    data_root = pathlib.Path('./data/montgomery')
    # Collect image and mask paths as strings; sort so the three lists
    # stay aligned image-to-mask
    image_paths = sorted(str(path) for path in data_root.glob('images/*.png'))
    mask_left_paths = sorted(
        str(path) for path in data_root.glob('masks/left/*.png'))
    mask_right_paths = sorted(
        str(path) for path in data_root.glob('masks/right/*.png'))
    count = len(image_paths)

    # Shuffle the three lists together so image/mask triplets stay paired
    if shuffle:
        temp = list(zip(image_paths, mask_left_paths, mask_right_paths))
        random.shuffle(temp)
        # zip(*...) yields tuples, so convert back to lists
        image_paths, mask_left_paths, mask_right_paths = (
            list(t) for t in zip(*temp))

    # Convert paths to tf.data.Dataset format
    image_path_ds = tf.data.Dataset.from_tensor_slices(image_paths)
    mask_left_path_ds = tf.data.Dataset.from_tensor_slices(mask_left_paths)
    mask_right_path_ds = tf.data.Dataset.from_tensor_slices(mask_right_paths)

    # Map path datasets to image datasets
    image_ds = image_path_ds.map(
        lambda x: load_image(x, channels=1, size=size),
        num_parallel_calls=AUTOTUNE)
    mask_left_ds = mask_left_path_ds.map(
        lambda x: load_image(x, channels=1, size=size),
        num_parallel_calls=AUTOTUNE)
    mask_right_ds = mask_right_path_ds.map(
        lambda x: load_image(x, channels=1, size=size),
        num_parallel_calls=AUTOTUNE)

    # Merge left and right masks
    mask_ds = tf.data.Dataset.zip((mask_left_ds, mask_right_ds))
    mask_ds = mask_ds.map(_mask_merge, num_parallel_calls=AUTOTUNE)

    # TODO: Random Noise

    # (image, mask) pair
    ds = tf.data.Dataset.zip((image_ds, mask_ds))

    # Apply a random geometric transformation to each (image, mask) pair
    ds = ds.map(
        lambda x, y: random_transform(x, y,
                                      trans_range=trans_range,
                                      rot_range=rot_range,
                                      scale_range=scale_range,
                                      shear_range=shear_range,
                                      lens_range=lens_range),
        num_parallel_calls=AUTOTUNE)

    return ds, count
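
The helpers load_image, _mask_merge, and random_transform are defined elsewhere in the project. A hypothetical sketch of _mask_merge, assuming a simple element-wise union of the two lung masks, followed by an illustrative call to montgomery; the split and batch sizes are placeholders:

def _mask_merge(left, right):
    # Union of the left and right lung masks, both valued in [0, 1]
    return tf.maximum(left, right)

ds, count = montgomery(shuffle=True, size=(256, 256))
val_ds = ds.take(count // 5).batch(8)
train_ds = ds.skip(count // 5).batch(8).prefetch(AUTOTUNE)
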
Example #7
def load_image_test(image_file, label_file, height, width):
    input_image, real_image = load_image(image_file), load_image(label_file)
    input_image, real_image = normalize_image(input_image), normalize_image(real_image)

    return input_image, real_image
Example #8
def load_image_train(image_file, label_file, height, width, resize_ratio_before_crop):
    input_image, real_image = load_image(image_file), load_image(label_file)
    input_image, real_image = normalize_image(input_image), normalize_image(real_image)

    return input_image, real_image
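
Note that these paired-file variants normalize without resizing, so the height, width, and resize_ratio_before_crop arguments are unused as written. A hypothetical sketch of wiring them into a tf.data pipeline; the zipped file datasets and size constants are placeholders:

import tensorflow as tf

paired_files = tf.data.Dataset.zip((image_files_ds, label_files_ds))
train_ds = paired_files.map(
    lambda img_f, lbl_f: load_image_train(img_f, lbl_f, IMG_HEIGHT,
                                          IMG_WIDTH, 1.12),
    num_parallel_calls=tf.data.AUTOTUNE).shuffle(400).batch(1)
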
Example #9
def main(args):
    """Puts it all together."""
    # Start measuring time
    tic = time.perf_counter()
    # Load project configurations
    cfg = load_configs()
    # Load the network
    net = cv2.dnn.readNetFromTensorflow(cfg['net']['model_file'],
                                        cfg['net']['cfg_file'])
    # Input and load image
    input_file = args.input
    img_steps = None  # populated only when an image file is processed

    try:
        # If file is a compatible video file
        if is_video(input_file):
            # Process video
            process_video(input_file, args, cfg, net)

        # If file is a compatible image file
        elif is_image(input_file):
            # Load image
            input_img = load_image(input_file)
            # Process image
            img_steps = process_image(input_img, cfg, net)
            # Save final image to specified output filename
            out_filename = os.path.join(args.output, cfg['image']['output'])
            # Check for --show-detections flag
            output_img = check_if_adding_bboxes(args, img_steps)
            # Save image
            img_saved = save_image(out_filename, output_img)

        # If input_file is a dir
        elif is_directory(input_file):
            # For each file in the dir
            for file in os.listdir(input_file):
                # Join input dir and file name
                file = os.path.join(input_file, file)
                # If file is a compatible video file
                if is_video(file):
                    # Process video
                    process_video(file, args, cfg, net)
                # If file is a compatible image file
                elif is_image(file):
                    # Load image
                    input_img = load_image(file)
                    # Process image
                    img_steps = process_image(input_img, cfg, net)
                    # Save final image to specified output filename
                    out_filename = os.path.join(args.output, cfg['image']['output'])
                    # Check for --show-detections flag
                    output_img = check_if_adding_bboxes(args, img_steps)
                    # Save image
                    img_saved = save_image(out_filename, output_img)

    except ValueError:
        print('Input must be a valid image, video, or directory.')

    # Save processing steps (only available when an image was processed)
    if args.save_steps and img_steps is not None:
        # Set image output height
        output_height = cfg['image']['img_steps_height']
        # Set output filename
        steps_filename = os.path.join(args.output, cfg['image']['output_steps'])
        # Save file
        save_steps(steps_filename, img_steps, output_height)

    # End measuring time
    toc = time.perf_counter()
    print(f"Operation ran in {toc - tic:0.4f} seconds")
Example #10
    def _load_image(self, path):
        im = load_image(path)
        # cv2.resize expects (width, height)
        im = cv2.resize(im, (self.shape[0], self.shape[1]))
        im = np.divide(im, 255)  # scale pixel values to [0, 1]
        return im
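
The enclosing class is not shown; a purely hypothetical host, assuming shape holds the target (width, height, channels) and paths is a list of image files:

import numpy as np

class ImageBatchLoader:
    def __init__(self, paths, shape=(224, 224, 3)):
        self.paths = paths
        self.shape = shape

    def load_batch(self, indices):
        # Stack the normalized images into one (N, H, W, C) array
        return np.stack([self._load_image(self.paths[i]) for i in indices])
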