Example 1
def preprocessing(path, down_thresh, up_thresh, left_thresh, right_thresh, gamma):

    import cv2 as cv
    from imadjust import imadjust

    # ################################################# READ IMAGE ################################################# #
    img_gray = cv.imread(path, 0)

    # ######################################## PREPARATION FOR SEGMENTATION ############################################## #

    # FILTERING ____________________________________________________________________________________________________________
    img_gray_filtr = cv.medianBlur(img_gray, 5)

    # HISTOGRAM EQUALIZATION _______________________________________________________________________________________________
    clahe = cv.createCLAHE(clipLimit=1., tileGridSize=(3, 3))
    img_gray_equal = clahe.apply(img_gray_filtr)

    # NONLINEAR BRIGHTNESS ADJUSTMENT ______________________________________________________________________________________
    img_gamma = imadjust(img_gray_equal, down_thresh, up_thresh, 10, 215, gamma)

    # ########################################## END OF PREPARATION FOR SEGMENTATION ##################################### #
    return img_gamma
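
A minimal usage sketch for the function above; the file name, threshold values, and gamma are placeholders, and whether the thresholds are normalized fractions or 8-bit levels depends on the local imadjust module, which is not shown here.

if __name__ == '__main__':
    import cv2 as cv

    # Hypothetical call: the path and the threshold/gamma values below are
    # placeholders, not taken from the original example.
    result = preprocessing('sample_scan.png', 0.1, 0.9, 0.0, 0.0, 0.8)

    cv.imshow('Preprocessed image', result)
    cv.waitKey(0)
    cv.destroyAllWindows()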
Example 2
                if eyes[b][2] > eyes[maxi][2]:
                    maxi = b

            # crop the eye region
            bbox_eye = eyes[maxi]
            eye = face[bbox_eye[1]:bbox_eye[1] + bbox_eye[3],
                       bbox_eye[0]:bbox_eye[0] + bbox_eye[2]]
            eyepic = facepic[bbox_eye[1]:bbox_eye[1] + bbox_eye[3],
                             bbox_eye[0]:bbox_eye[0] + bbox_eye[2]]

            # flat-field correction to even out shading
            flatfielde = ff.gammaCorrection(
                eye, 0.7)  # value 0.73 is analogous to alpha=60 in MATLAB

            # contrast adjustment - analogous to MATLAB's imadjust function
            adjust_e = ij.imadjust(flatfielde, (0.3, 0.5))  # 0.5 - 0.7

            double_e = i2d.im2double(adjust_e)

            # segmentation parameters
            alto, ancho = double_e.shape[:2]
            mid = round(ancho / 2)

            sec_R = [0, 0, mid, alto]
            sec_L = [mid + 1, 0, ancho, alto]

            # split into the right and left eye halves

            Reye = double_e[sec_R[1]:sec_R[1] + sec_R[3],
                            sec_R[0]:sec_R[0] + sec_R[2]]
            Leye = double_e[sec_L[1]:sec_L[1] + sec_L[3],
                            sec_L[0]:sec_L[0] + sec_L[2]]
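
The imflatfield.gammaCorrection helper imported as ff in this excerpt is not included. A common way to implement such a gamma correction in OpenCV is a 256-entry lookup table; the sketch below assumes that formulation, so it may differ from the project's actual module.

import numpy as np
import cv2 as cv


def gammaCorrection(src, gamma):
    # Assumed LUT-based gamma correction: map each 8-bit intensity v to
    # 255 * (v / 255) ** (1 / gamma). Whether the project applies gamma or its
    # inverse here is an assumption, not something the excerpt above confirms.
    inv_gamma = 1.0 / gamma
    table = np.array([255.0 * (i / 255.0) ** inv_gamma for i in range(256)],
                     dtype=np.uint8)
    return cv.LUT(src, table)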
Example 3
import cv2 as cv
import imflatfield as ff
import imadjust as ij

path = r'C:\Users\Ryzen\Documents\GitHub\Proyecto\Frame1.jpg'

img = cv.imread(path)
img_original = img[:, :, 2]  # take the red channel (OpenCV loads images as BGR)

gamma = 0.724

res = ff.gammaCorrection(img_original, gamma)
m_e = ij.imadjust(res, (0.18, 0.32))

cv.imshow("Gamma correction", res)
cv.imshow("Imadjust", m_e)

cv.waitKey()
cv.destroyAllWindows()
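
Examples 2 and 3 call a local imadjust module with a (low, high) tuple. That module is not included here; the sketch below is one plausible, MATLAB-style implementation of that signature, assuming the limits are given as fractions of the full 8-bit range.

import numpy as np


def imadjust(img, in_range=(0.0, 1.0)):
    # Assumed behaviour: linearly stretch intensities between the (low, high)
    # limits (given as fractions of 255) to the full 0-255 range, clipping
    # anything that falls outside the limits.
    low = in_range[0] * 255.0
    high = in_range[1] * 255.0
    out = (img.astype(np.float32) - low) / (high - low) * 255.0
    return np.clip(out, 0, 255).astype(np.uint8)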
Example 4
# This section takes an RGB image as an input and prepares it for
# detection by removing the background.

(y, x, clrs) = im.shape  # Get size of image
img_hou = np.copy(im[0:y // p2crop, 0:x])  # Crop image for circle detection

# Determine ideal limits for brightness/contrast adjustment
if lim_type == 1:
    lims = stretchlim(im)
    lims_hou = stretchlim(img_hou)
elif lim_type == 2:
    lims = stretchlim2(im)
    lims_hou = stretchlim2(img_hou)

# Adjust the brightness/contrast of the RGB image based on limits
img2 = np.copy(imadjust(im, lims))
img2_hou = np.copy(imadjust(img_hou, lims_hou))

# Remove Background from adjusted brightness/contrast image
img_remove_hou = np.copy(back_ground_remove(img2_hou))
img_remove = np.copy(back_ground_remove(img2))

# Convert the image to binary
img_seg_hou, img_gray_hou = image_segmentation(img_remove_hou)
img_seg, img_gray = image_segmentation_length(img_remove)

# Edge detection
# NOTE: This is not actually in use. cv2.HoughCircles applies Canny edge
#       detection internally, so passing an already edge-detected image to it
#       does not produce good results.
img_edge = cv2.Canny(img_seg_hou, canny_param[0], canny_param[1])
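
The stretchlim and stretchlim2 helpers used above are local modules that are not shown. As a rough reference, a MATLAB-style stretchlim returns per-channel limits at fixed percentiles; the sketch below assumes the 1st/99th percentiles and limits normalized to [0, 1], which may differ from the project's versions.

import numpy as np


def stretchlim(img, tol=(0.01, 0.99)):
    # Assumed MATLAB-style stretchlim: per-channel lower/upper limits taken at
    # the given percentiles of the intensity distribution, returned as
    # fractions of the 8-bit range.
    pixels = img.reshape(-1, img.shape[2]) if img.ndim == 3 else img.reshape(-1, 1)
    lims = np.percentile(pixels, [tol[0] * 100.0, tol[1] * 100.0], axis=0)
    return lims / 255.0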
Example 5
#!/usr/bin/python

import cv2
import numpy as np
import sys
import matplotlib.pyplot as plt
import math

from imadjust import imadjust
from stretchlim import stretchlim
from back_ground_remove import back_ground_remove
from image_segmentation_length import image_segmentation_length

# Read image
im = cv2.imread('pipewrench.jpeg', 1)

lims = stretchlim(im)
img2 = imadjust(im, lims)
img_remove = back_ground_remove(img2)

cv2.imshow('imadjust output', img2)
cv2.imshow('background removal output', img_remove)

img_seg = image_segmentation_length(img_remove)
cv2.imshow('Image Segmentation Output', img_seg)
cv2.waitKey(0)
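
Examples 4 and 5 pass the limits returned by stretchlim straight into imadjust. That imadjust module is also not shown; the sketch below assumes it takes a (2, channels) array of normalized limits, as produced by the stretchlim sketch after Example 4, and stretches each channel to the full 8-bit range.

import numpy as np


def imadjust(img, lims):
    # Assumed behaviour for the lims-based signature: lims[0, c] and lims[1, c]
    # are the lower/upper limits of channel c as fractions of 255; each channel
    # is stretched to 0-255 with clipping. A sketch, not the project's code.
    img = img.astype(np.float32) / 255.0
    out = np.empty_like(img)
    for c in range(img.shape[2]):
        low, high = lims[0, c], lims[1, c]
        out[:, :, c] = np.clip((img[:, :, c] - low) / max(high - low, 1e-6), 0.0, 1.0)
    return (out * 255.0).astype(np.uint8)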