Example No. 1
def preprocess_frames(img, out_path, idx):
    # Skip anything that is not a valid image array
    if img is None or not isinstance(img, (np.ndarray, np.memmap)):
        return None

    img_enh = utl.enhance_contrast_image(img, clip_limit=3.5, tile_size=12)
    mask, circle = utl.get_retina_mask(img_enh)
    if circle[2] == 0:  # radius 0 means no lens circle was detected
        return None

    img = cv2.bitwise_and(img, mask)
    img = utl.crop_to_circle(img, circle)
    if img is not None and img.size != 0:
        cv2.imwrite(join(out_path, SUBFOLDER_PROCESSED, f'{idx}.jpg'), img)
    return img
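
The `utl.enhance_contrast_image` helper is not part of this excerpt; its `clip_limit` and `tile_size` arguments suggest CLAHE. A minimal sketch of such a helper, assuming a CLAHE pass on the L channel of a LAB conversion (an illustration, not the original utility), might look like this:

import cv2
import numpy as np


def enhance_contrast_image_sketch(img: np.ndarray, clip_limit: float = 3.5, tile_size: int = 12) -> np.ndarray:
    # Hypothetical CLAHE-based contrast enhancement (assumed behaviour of utl.enhance_contrast_image).
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    l_ch, a_ch, b_ch = cv2.split(lab)
    clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=(tile_size, tile_size))
    l_eq = clahe.apply(l_ch)
    return cv2.cvtColor(cv2.merge((l_eq, a_ch, b_ch)), cv2.COLOR_LAB2BGR)
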
Example No. 2
def preprocess_images(image_path, output_path, min_radius=300):
    img = cv2.imread(image_path)
    # Skip files that could not be read as an image
    if img is None or not isinstance(img, (np.ndarray, np.memmap)):
        return

    img_enh = utl.enhance_contrast_image(img, clip_limit=3.5, tile_size=12)
    mask, circle = utl.get_retina_mask(img_enh, min_radius=min_radius)
    if circle[2] == 0:
        return

    img = cv2.bitwise_and(img, mask)
    img = utl.crop_to_circle(img, circle)

    if img is not None and img.size != 0:
        filepath = join(output_path, os.path.splitext(os.path.basename(image_path))[0]) + '.jpg'
        cv2.imwrite(filepath, img)
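
`preprocess_images` handles one file at a time; a small driver like the following (the folder layout and extensions are assumptions) applies it to every image in a directory:

import os
from os.path import join


def preprocess_folder(input_dir, output_dir, min_radius=300):
    # Hypothetical batch driver around preprocess_images above.
    os.makedirs(output_dir, exist_ok=True)
    for name in sorted(os.listdir(input_dir)):
        if name.lower().endswith(('.jpg', '.jpeg', '.png')):
            preprocess_images(join(input_dir, name), output_dir, min_radius=min_radius)
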
Example No. 3
def run(input_path, model_path, process):
    # Loading image
    img = cv2.imread(input_path)
    # Bail out if the input could not be read as an image
    if img is None or not isinstance(img, (np.ndarray, np.memmap)):
        print(f'Invalid input image: {input_path}')
        return
    if process:
        img_enh = enhance_contrast_image(img, clip_limit=3.5, tile_size=12)
        mask, circle = get_retina_mask(img_enh)
        if circle[2] == 0:
            print('Could not detect retinoscope lens.')
            return
        img = cv2.bitwise_and(img, mask)
        img = crop_to_circle(img, circle)
    print('Loaded image successfully...')

    # Necessary image augmentations
    aug_pipeline = alb.Compose([
        alb.Resize(425, 425, always_apply=True, p=1.0),
        alb.CenterCrop(399, 399, always_apply=True, p=1.0),
        alb.Normalize(always_apply=True, p=1.0),
        ToTensorV2(always_apply=True, p=1.0)
    ], p=1.0)
    img_tensor = aug_pipeline(image=img)['image']

    # Loading model
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = models.alexnet()
    model.classifier[-1] = nn.Linear(model.classifier[-1].in_features, 2)
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.to(device)
    model.eval()
    print('Loaded model successfully...')

    # Prediction
    id_to_class = {0: 'no_retinopathy', 1: 'retinopathy'}
    with torch.no_grad():
        outputs = model(img_tensor.unsqueeze(0).to(device))
    _, prediction = torch.max(outputs, 1)
    prediction = prediction[0].item()
    print()
    print(
        f'Prediction for image {os.path.basename(input_path)} with the model {os.path.basename(model_path)}:'
    )
    print(f'------ {id_to_class[prediction]} ------')
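
`get_retina_mask` comes from the author's utilities and is not shown here; the `circle[2] == 0` checks imply it returns a mask plus an `(x, y, r)` triple with `r == 0` when no lens is found. One plausible implementation, sketched purely as an assumption, detects the circular lens with a Hough transform:

import cv2
import numpy as np


def get_retina_mask_sketch(img, min_radius=300):
    # Hypothetical lens detection via cv2.HoughCircles; returns (mask, (x, y, r)).
    gray = cv2.medianBlur(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), 5)
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp=1, minDist=gray.shape[0],
                               param1=100, param2=30, minRadius=min_radius, maxRadius=0)
    mask = np.zeros_like(img)
    if circles is None:
        return mask, (0, 0, 0)  # r == 0 signals "no lens detected" to the caller
    x, y, r = np.round(circles[0, 0]).astype(int)
    cv2.circle(mask, (x, y), r, (255, 255, 255), thickness=-1)
    return mask, (x, y, r)
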
Example No. 4
def run() -> None:
    images = load_images(
        '/data/simon/ownCloud/Data/Reflection Dataset/raw_images/',
        img_type='png')
    #images.extend(load_images('./C001R/', img_type='png'))

    images = [enhance_contrast_image(img, clip_limit=4) for img in images]

    circles = []
    for i, img in enumerate(images):
        mask, circle = get_retina_mask(img)
        images[i] = cv2.bitwise_and(img, mask)
        circles.append(circle)
    print(circles)

    images = normalize_and_register_retians(images, circles)
    show_image_row(images[::5], name='Training data', time=10000)

    perform_pca(images)

    time_wrap.print_prof_data()
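
`perform_pca` is likewise not shown. A minimal sketch with scikit-learn, assuming the registered images are flattened to grayscale vectors of equal size first (an assumption about the original), could be:

import cv2
import numpy as np
from sklearn.decomposition import PCA


def perform_pca_sketch(images, n_components=10):
    # Hypothetical PCA over flattened grayscale retina images of identical size.
    flat = np.stack([
        cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).astype(np.float32).ravel()
        for img in images
    ])
    pca = PCA(n_components=n_components)
    pca.fit(flat)
    print('Explained variance ratio:', pca.explained_variance_ratio_)
    return pca
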
Example No. 5
def run(input_path, output_path, model_path):
    unglarer: ug.GlareRemover = ug.GlareRemover(model_path=model_path,
                                                masked_class=3)

    images = load_images(
        '/data/simon/ownCloud/Data/Reflection Dataset/raw_images/',
        img_type='png')
    images2 = load_images('./C001R_Cut/', img_type='png')

    images_subset = list(images)
    images_subset.extend(images2)
    images_subset = [
        enhance_contrast_image(img, clip_limit=3.5, tile_size=12)
        for img in images_subset
    ]
    images_subset = [
        cv2.bitwise_and(img, get_retina_mask(img)[0])
        for img in images_subset
    ]
    unglarer.set_training_data(images_subset)

    #unglarer.show_training_data()
    #unglarer.train()
    unglarer.show_means()

    masked_images = []
    files = os.listdir(input_path)
    test_images = [cv2.imread(join(input_path, f)) for f in files]
    for img in test_images:
        mask = unglarer.get_glare_mask(img, show_mask=False, joined_prob=False)
        percentage = unglarer.get_glare_percentage(img, mask)
        masked_images.append(
            (cv2.bitwise_and(img, cv2.cvtColor(mask, code=cv2.COLOR_GRAY2BGR)),
             percentage))

    #[show_image(img, name=f'Masked images - {percentage:.2f}%') for img, percentage in masked_images]
    # Write out each masked image; list entries are (image, glare percentage) tuples
    for name, (masked, _) in zip(files, masked_images):
        cv2.imwrite(join(output_path, name), masked)
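
The `ug.GlareRemover` API (trained model, `get_glare_mask`, `get_glare_percentage`) is external to this excerpt. As a rough, assumed baseline, glare can be approximated by thresholding near-saturated pixels and reporting the flagged share of the frame:

import cv2
import numpy as np


def glare_mask_baseline(img, thresh=240):
    # Hypothetical baseline glare mask: flag near-saturated pixels (the original uses a trained model).
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)
    return mask


def glare_percentage(mask: np.ndarray) -> float:
    # Share of flagged pixels over the whole mask, in percent.
    return 100.0 * cv2.countNonZero(mask) / mask.size if mask.size else 0.0
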
Example No. 6
def run():
    image = load_image('/data/simon/Anomaly Dataset/raw_images/SNAP_00035.png')
    image2 = load_image('./C001R_Cut/C001R04.jpg')
    show_image_row([image, image2])

    image, image2 = enhance_contrast_image(image), enhance_contrast_image(image2)
    show_image_row([image, image2])

    image_mask, _ = get_retina_mask(image)  # get_retina_mask returns (mask, circle)
    image = cv2.bitwise_and(image, image_mask)

    image, image2 = cv2.medianBlur(image, 5), cv2.medianBlur(image2, 5)
    show_image_row(
        [image, image2,
         cv2.cvtColor(image[:, :, 1], cv2.COLOR_GRAY2BGR)])

    cluster_image(image2[:, :, 1])
    #visualize_color_space(image)
    #cluster_image(image2)

    ft, ft2 = get_features(image), get_features(image2)
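
`get_features` is undefined in this excerpt; a sketch using ORB keypoint descriptors (an assumption about what kind of features are meant) would be:

import cv2


def get_features_sketch(img):
    # Hypothetical feature extraction: ORB keypoints and descriptors on the grayscale image.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    orb = cv2.ORB_create(nfeatures=500)
    keypoints, descriptors = orb.detectAndCompute(gray, None)
    return keypoints, descriptors
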