def run():
    """Demo: visualise channels, CLAHE variants and green-channel histograms."""
    img = load_image(path='./C001R_Cut/C001R04.jpg')
    show_image(img)
    show_channels(img, show_hsv_channels=False)

    # CLAHE on a Gaussian-blurred copy, at three different tile sizes.
    blurred = cv2.GaussianBlur(img, (7, 7), 0)
    image_cl_blur_1 = enhance_contrast_image(blurred, clip_limit=3, tile_size=8)
    image_cl_blur_2 = enhance_contrast_image(blurred, clip_limit=3, tile_size=16)
    image_cl_blur_3 = enhance_contrast_image(blurred, clip_limit=3, tile_size=24)

    # Compare histograms of the raw vs. CLAHE-equalised green channel.
    green = img[:, :, 1]
    plot_historgram_one_channel(green)
    plot_historgram_one_channel(clahe_green_channel(img, clip_limit=5.0))

    show_image_row([green, clahe_green_channel(img, clip_limit=5.0)])
def extract_feature_vector(X: np.ndarray,
                           bin_size: int = 16,
                           haralick_dist: int = 4) -> np.ndarray:
    """Build a combined texture + colour feature vector for one image.

    Concatenates per-channel Haralick texture statistics (mean and
    peak-to-peak over the directions, 14 features each) with a coarse
    HSV colour histogram computed on a contrast-enhanced copy.

    :param X: input image array (BGR order assumed, as loaded by OpenCV).
    :param bin_size: currently unused; kept for interface compatibility.
    :param haralick_dist: pixel distance for the co-occurrence matrix.
    :return: 1-D array, Haralick features followed by histogram bins.
    """
    features = []
    for channel in cv2.split(X):
        # mean + ptp of the 14 Haralick features for this channel
        texture_feat: np.ndarray = mt.features.haralick(
            channel,
            compute_14th_feature=True,
            distance=haralick_dist,
            return_mean_ptp=True)
        features.append(texture_feat)

    # Colour histogram (8x3x3 HSV bins) on a CLAHE-enhanced copy.
    img = utl.enhance_contrast_image(X, clip_limit=4.0, tile_size=12)
    hist = cv2.calcHist([cv2.cvtColor(img, cv2.COLOR_BGR2HSV)], [0, 1, 2],
                        None, [8, 3, 3],
                        [0, 180, 0, 256, 0, 256])
    print(
        f'FEAT> Length haralick features {len(np.hstack(features))}, Length histogram features {len(hist.flatten())}'
    )
    return np.hstack([np.hstack(features), hist.flatten()])
Beispiel #3
0
    def apply(self, img, **params):
        """Inpaint over-bright pixels of the contrast-enhanced green channel."""
        green = utl.enhance_contrast_image(img)[:, :, 1]

        # Threshold mask, slightly dilated so the inpainting also covers
        # the halo around each bright spot.
        bright = img_as_ubyte(green > self.thresh)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        bright = cv2.dilate(bright, kernel)

        return cv2.inpaint(img, bright, 3, cv2.INPAINT_TELEA)
def run():
    """Demo: contrast-enhance a folder of images and detect circles in each."""
    images = load_images(path='/home/simon/Videos/Anomaly Dataset/raw_images',
                         img_type='png')
    # Boost local contrast before circle detection.
    images = [enhance_contrast_image(img, clip_limit=4) for img in images]
    show_image_row(images, 'Contrast')
    for img in images:
        detect_circles(img)
Beispiel #5
0
def preprocess_frames(img, out_path, idx):
    """Mask a video frame to its retina circle, crop it and save it as JPEG.

    :param img: frame as a numpy array (BGR assumed); anything else is rejected.
    :param out_path: base output directory; file goes into SUBFOLDER_PROCESSED.
    :param idx: frame index used as the output file name.
    :return: the cropped frame, or None when the input is invalid or no
        lens circle was found.
    """
    # Reject anything that is not an in-memory image array (covers None too).
    if not isinstance(img, (np.ndarray, np.memmap)):
        return None

    # Detect the circular retina area on a contrast-enhanced copy.
    img_enh = utl.enhance_contrast_image(img, clip_limit=3.5, tile_size=12)
    mask, circle = utl.get_retina_mask(img_enh)
    if circle[2] == 0:  # radius 0 signals that no circle was detected
        return None

    img = cv2.bitwise_and(img, mask)
    img = utl.crop_to_circle(img, circle)
    if img is not None and img.size != 0:
        cv2.imwrite(join(out_path, SUBFOLDER_PROCESSED, f'{idx}.jpg'), img)
    return img
Beispiel #6
0
def preprocess_images(image_path, output_path, min_radius=300):
    """Crop one fundus photograph to its retina circle and save it as JPEG.

    :param image_path: path of the source image.
    :param output_path: directory the processed image is written into
        (same base file name, '.jpg' extension).
    :param min_radius: minimum acceptable radius of the detected circle.
    """
    img = cv2.imread(image_path)
    # cv2.imread returns None on failure; reject that and any non-array result.
    if not isinstance(img, (np.ndarray, np.memmap)):
        return

    # Detect the circular retina area on a contrast-enhanced copy.
    img_enh = utl.enhance_contrast_image(img, clip_limit=3.5, tile_size=12)
    mask, circle = utl.get_retina_mask(img_enh, min_radius=min_radius)
    if circle[2] == 0:  # radius 0 signals that no circle was detected
        return

    img = cv2.bitwise_and(img, mask)
    img = utl.crop_to_circle(img, circle)

    if img is not None and img.size != 0:
        base = os.path.splitext(os.path.basename(image_path))[0]
        filepath = join(output_path, base) + '.jpg'
        cv2.imwrite(filepath, img)
def run(input_path, model_path, process):
    """Classify a single fundus image as retinopathy / no_retinopathy.

    :param input_path: path of the image to classify.
    :param model_path: path of a saved AlexNet state dict with a 2-class head.
    :param process: when truthy, mask and crop the image to the detected
        retinoscope lens circle before classification.
    """
    # Loading image
    img = cv2.imread(input_path)
    # cv2.imread returns None on failure; reject that and any non-array result.
    if not isinstance(img, (np.ndarray, np.memmap)):
        print('Invalid input image: ', input_path)
        return
    if process:
        img_enh = enhance_contrast_image(img, clip_limit=3.5, tile_size=12)
        mask, circle = get_retina_mask(img_enh)
        if circle[2] == 0:  # radius 0 signals that no lens was detected
            print('Could not detect retinoscope lens.')
            return
        img = cv2.bitwise_and(img, mask)
        img = crop_to_circle(img, circle)
    print('Loaded image successfully...')

    # Necessary image augmentations (presumably mirrors the training
    # pipeline — confirm against the training script)
    aug_pipeline = alb.Compose([
        alb.Resize(425, 425, always_apply=True, p=1.0),
        alb.CenterCrop(399, 399, always_apply=True, p=1.0),
        alb.Normalize(always_apply=True, p=1.0),
        ToTensorV2(always_apply=True, p=1.0)
    ],
                               p=1.0)
    img_tensor = aug_pipeline(image=img)['image']

    # Loading model: AlexNet with its last layer replaced by a 2-class head
    device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
    model = models.alexnet()
    model.classifier[-1] = nn.Linear(model.classifier[-1].in_features, 2)
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.eval()
    print('Loaded model successfully...')

    # Prediction
    id_to_class = {0: 'no_retinopathy', 1: 'retinopathy'}
    with torch.no_grad():  # inference only — no gradients needed
        outputs = model(img_tensor.unsqueeze(0))
    _, prediction = torch.max(outputs, 1)
    prediction = prediction[0].item()
    print()
    print(
        f'Prediction for image {os.path.basename(input_path)} with the model {os.path.basename(model_path)}:'
    )
    print(f'------ {id_to_class[prediction]} ------')
    def extract_single_feature_vector(self, x, distance, size, limit):
        """Compute the texture + colour feature vector for a single image.

        :param x: input image (BGR order assumed — verify against callers).
        :param distance: co-occurrence distance for the Haralick features.
        :param size: HSV histogram bin counts, one per channel (e.g. [8, 3, 3]).
        :param limit: CLAHE clip limit applied before the histogram.
        :return: 1-D feature vector — Haralick features (skipped when
            ``self.only_histogram``) followed by the histogram bins raised
            to the power 0.25.
        """
        features = []
        if not self.only_histogram:
            # Texture statistics per LAB channel: mean and peak-to-peak of
            # the 14 Haralick features.
            channels = cv2.split(cv2.cvtColor(x, cv2.COLOR_BGR2LAB))
            for c in channels:  # haarlick features
                try:
                    texture_feat: np.ndarray = mt.features.haralick(
                        c,
                        compute_14th_feature=True,
                        distance=distance,
                        return_mean_ptp=True)
                except ValueError:
                    # haralick can raise on degenerate channels; substitute
                    # zeros so the feature vector keeps a fixed length.
                    texture_feat = np.zeros(28, dtype=np.float32)
                features.append(texture_feat)

        img = utl.enhance_contrast_image(x, clip_limit=limit, tile_size=12)
        hist = cv2.calcHist([cv2.cvtColor(img, cv2.COLOR_BGR2HSV)], [0, 1, 2],
                            None, size,
                            [0, 180, 0, 256, 0, 256])  # Histogram features
        # Fourth root compresses the dynamic range of the histogram counts.
        feat = np.hstack([np.hstack(features), hist.flatten()**0.25])
        return feat
Beispiel #9
0
def run() -> None:
    """Demo: mask, register and PCA-decompose the reflection dataset."""
    images = load_images(
        '/data/simon/ownCloud/Data/Reflection Dataset/raw_images/',
        img_type='png')

    images = [enhance_contrast_image(img, clip_limit=4) for img in images]

    # Mask every image to its retina circle, remembering each circle
    # for the registration step below.
    circles = []
    masked = []
    for img in images:
        mask, circle = get_retina_mask(img)
        circles.append(circle)
        masked.append(cv2.bitwise_and(img, mask))
    images = masked
    print(circles)

    images = normalize_and_register_retians(images, circles)
    show_image_row(images[::5], name='Training data', time=10000)

    perform_pca(images)

    time_wrap.print_prof_data()
Beispiel #10
0
def run(input_path, output_path, model_path):
    """Build a glare model from training images and write glare-masked copies
    of every image in ``input_path`` into ``output_path``.

    :param input_path: directory of test images to mask.
    :param output_path: directory the masked images are written into
        (same file names as the inputs).
    :param model_path: path of the GlareRemover model.
    """
    unglarer: ug.GlareRemover = ug.GlareRemover(model_path=model_path,
                                                masked_class=3)

    # Training data: both datasets, contrast-enhanced and masked to the retina.
    images = load_images(
        '/data/simon/ownCloud/Data/Reflection Dataset/raw_images/',
        img_type='png')
    images2 = load_images('./C001R_Cut/', img_type='png')

    images_subset = list(images)
    images_subset.extend(images2)
    images_subset = [
        enhance_contrast_image(img, clip_limit=3.5, tile_size=12)
        for img in images_subset
    ]
    images_subset = [
        cv2.bitwise_and(img,
                        get_retina_mask(img)[0]) for img in images_subset
    ]
    unglarer.set_training_data(images_subset)

    #unglarer.show_training_data()
    #unglarer.train()
    unglarer.show_means()

    # Mask each test image with its predicted glare mask.
    masked_images = []
    files = os.listdir(input_path)
    test_images = [cv2.imread(join(input_path, f)) for f in files]
    for img in test_images:
        mask = unglarer.get_glare_mask(img, show_mask=False, joined_prob=False)
        percentage = unglarer.get_glare_percentage(img, mask)
        masked_images.append(
            (cv2.bitwise_and(img, cv2.cvtColor(mask, code=cv2.COLOR_GRAY2BGR)),
             percentage))

    # BUGFIX: masked_images holds (image, percentage) tuples — write the
    # image, not the tuple, to disk.
    for name, (masked_img, _percentage) in zip(files, masked_images):
        cv2.imwrite(join(output_path, name), masked_img)
Beispiel #11
0
 def __call__(self, image):
     """With probability ``self.prob`` apply random-strength CLAHE enhancement."""
     if np.random.rand() >= self.prob:
         return image
     # Swap the first and third channels around the enhancement call —
     # presumably an RGB<->BGR conversion for the helper; confirm against utl.
     swapped = image[:, :, [2, 1, 0]]
     enhanced = utl.enhance_contrast_image(
         swapped, clip_limit=np.random.randint(2, 5))
     return enhanced[:, :, [2, 1, 0]]