def equalize_hist(img):
    if len(img.shape) > 2 and img.shape[2] > 1:
        img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
        img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])
        return cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
    else:
        return cv2.equalizeHist(img)

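# A small usage sketch for equalize_hist above, assuming a file named
# 'input.jpg' (a hypothetical path, not taken from the source):
import cv2

color = cv2.imread('input.jpg', cv2.IMREAD_COLOR)     # 3-channel BGR image
gray = cv2.imread('input.jpg', cv2.IMREAD_GRAYSCALE)  # single-channel image
out_color = equalize_hist(color)  # equalizes only the Y (luma) channel
out_gray = equalize_hist(gray)    # falls back to plain cv2.equalizeHist
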
def hisEqulColor(img):
    if len(img.shape) == 2:
        # Grayscale input: equalize directly
        # (the original called an undefined hisEqul() helper here)
        return cv2.equalizeHist(img)
    # Color input: equalize only the luma (Y) channel in YCrCb space
    ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
    channels = list(cv2.split(ycrcb))
    cv2.equalizeHist(channels[0], channels[0])
    cv2.merge(channels, ycrcb)
    cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR, img)
    return img

def base():
    if request.method == 'GET':
        return "<h1>Crop AI</h1>"
    if request.method == 'POST':
        if 'InputImg' not in request.files:
            print("No file part")
            return redirect(request.url)
        file = request.files['InputImg']
        if file.filename == '':
            print('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filestr = file.read()
            # np.frombuffer replaces the deprecated np.fromstring
            img = cv2.imdecode(np.frombuffer(filestr, np.uint8), cv2.IMREAD_COLOR)
            img = cv2.resize(img, (96, 96), interpolation=cv2.INTER_AREA)
            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            # find the green color
            mask_green = cv2.inRange(hsv, (36, 0, 0), (86, 255, 255))
            # find the brown color
            mask_brown = cv2.inRange(hsv, (8, 60, 20), (145, 255, 255))
            # find the yellow color in the leaf
            mask_yellow = cv2.inRange(hsv, (5, 42, 143), (145, 255, 255))
            # find the black color in the leaf
            mask_black = cv2.inRange(hsv, (100, 100, 100), (127, 127, 127))
            # keep pixels matching any of the four colors (green, brown, yellow or black)
            mask = cv2.bitwise_or(mask_green, mask_brown)
            mask = cv2.bitwise_or(mask, mask_yellow)
            mask = cv2.bitwise_or(mask, mask_black)
            # Bitwise-AND mask and original image
            res = cv2.bitwise_and(img, img, mask=mask)
            # Gaussian blur with 3x3 kernel
            blur_img = cv2.GaussianBlur(res, (3, 3), 0)
            # Histogram equalization per channel; merging as (R, G, B)
            # also converts the image from BGR to RGB order for the model
            B, G, R = cv2.split(blur_img)
            output_R = cv2.equalizeHist(R)
            output_G = cv2.equalizeHist(G)
            output_B = cv2.equalizeHist(B)
            img = cv2.merge((output_R, output_G, output_B))
            img = img / 255
            img_array = np.expand_dims(img, axis=0)
            output = label_dictionary[model.predict(img_array)[0].argmax()]
            return output

def adapt_hist_equilization(img):
    # Note: despite the name, this applies global (not adaptive/CLAHE)
    # histogram equalization to each channel independently.
    # img = cv2.imread("test_001.jpg")
    # cv2.split on a BGR image returns channels in B, G, R order
    B, G, R = cv2.split(img)
    output1_B = cv2.equalizeHist(B)
    output1_G = cv2.equalizeHist(G)
    output1_R = cv2.equalizeHist(R)
    equ = cv2.merge((output1_B, output1_G, output1_R))
    res = np.hstack((img, equ))  # stacking images side-by-side
    return res

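# The function name above suggests adaptive equalization, but it applies the
# global method. For comparison, a CLAHE-based sketch; clipLimit and
# tileGridSize are common defaults, not values from the source:
import cv2

def clahe_equalization(img):
    # Contrast Limited Adaptive Histogram Equalization, per channel
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    B, G, R = cv2.split(img)
    return cv2.merge((clahe.apply(B), clahe.apply(G), clahe.apply(R)))
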
def preprocess_ir_image(self, image: np.ndarray) -> np.ndarray:
    if self.preprocessing_method == self.NONE:
        channels = [image] * 3
    elif self.preprocessing_method == self.INVERT_EQUALIZE:
        channels = [cv2.equalizeHist(np.invert(image))] * 3
    elif self.preprocessing_method == self.THREE_CHANNEL_NONE_INVERT_EQUALIZE:
        channels = [image, np.invert(image), cv2.equalizeHist(image)]
    elif self.preprocessing_method == self.INVERT:
        channels = [np.invert(image)] * 3
    elif self.preprocessing_method == self.EQUALIZE:
        channels = [cv2.equalizeHist(image)] * 3
    else:
        raise ValueError("Unknown preprocessing type")
    return np.stack(channels, axis=2)

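# preprocess_ir_image references class constants that are not shown in the
# source; a minimal hypothetical sketch of the enclosing class, with constant
# names matching the branches above:
class IRPreprocessor:
    NONE = "none"
    INVERT = "invert"
    EQUALIZE = "equalize"
    INVERT_EQUALIZE = "invert_equalize"
    THREE_CHANNEL_NONE_INVERT_EQUALIZE = "three_channel_none_invert_equalize"

    def __init__(self, preprocessing_method=NONE):
        self.preprocessing_method = preprocessing_method
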
def detectAndDisplay(frame):
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    frame_gray = cv2.equalizeHist(frame_gray)
    # Detect faces: returns a list of bounding rectangles over the image
    faces = face_cascade.detectMultiScale(frame_gray)
    for (x, y, w, h) in faces:
        center = (x + w // 2, y + h // 2)
        # Pink ellipses around each face
        frame = cv2.ellipse(frame, center, (w // 2, h // 2), 0, 0, 360, (255, 0, 255), 4)
        faceROI = frame_gray[y:y + h, x:x + w]
        # Detect eyes within each face
        eyes = eyes_cascade.detectMultiScale(faceROI)
        for (x2, y2, w2, h2) in eyes:
            eye_center = (x + x2 + w2 // 2, y + y2 + h2 // 2)
            radius = int(round((w2 + h2) * 0.25))
            # Blue circles around each eye
            frame = cv2.circle(frame, eye_center, radius, (255, 0, 0), 4)
    # Flip the frame horizontally, purely for aesthetics =)
    frame = cv2.flip(frame, 1)
    cv2.imshow('Face and Eye Detection', frame)
    return frame

def detect(filename, s, cascade_file="../lbpcascade_animeface.xml"):
    if not os.path.isfile(cascade_file):
        raise RuntimeError("%s: not found" % cascade_file)
    try:
        print('trying image {}'.format(s))
        cascade = cv2.CascadeClassifier(cascade_file)
        image = cv2.imread(filename, cv2.IMREAD_COLOR)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        faces = cascade.detectMultiScale(gray,
                                         # detector options
                                         scaleFactor=1.1,
                                         minNeighbors=5,
                                         minSize=(24, 24))
        for (x, y, w, h) in faces:
            # cv2.rectangle(image, (x - w // 4, y - w // 4), (x + 5 * w // 4, y + 5 * h // 4), (0, 0, 255), 2)
            # Note: negative start indices wrap around in NumPy, so faces near
            # the top-left border can produce empty or wrong crops
            out = image[y - 21:y + 107, x - 21:x + 107]
            # out = image[y - 42:y + 214, x - 42:x + 214]
            # cv2.imshow("AnimeFaceDetect", image)
            # cv2.waitKey(0)
            cv2.imwrite("cropped/{}.jpg".format(s), out)
            print('success!')
    except Exception:
        print('one pic error!')

def demo_thersholding(img, threshold=None, show=True, thresh_type=cv2.THRESH_BINARY_INV):
    plt.figure(654)
    hist, bins = np.histogram(img.ravel(), 256, [0, 256])
    # Default threshold: the most frequent intensity (histogram peak)
    threshold = threshold if threshold is not None else bins[hist.argmax()]
    # threshold = threshold if threshold is not None else np.median(img)
    # aimg = cv2.cvtColor(img / 255, cv2.COLOR_BGR2GRAY)  # or convert
    equ = cv2.equalizeHist(img.astype(np.uint8))
    threshold, equ_threshed = cv2.threshold(equ, threshold, equ.max(), thresh_type)
    if show:
        # Show histogram
        cdf = hist.cumsum()
        cdf_normalized = cdf * hist.max() / cdf.max()
        plt.plot(cdf_normalized, color='b')
        plt.hist(img.flatten(), 256, [0, 256], color='r')
        plt.xlim([0, 256])
        plt.legend(('cdf', 'histogram'), loc='upper left')
        plt.show()
        # Compare images (np.float is removed in NumPy 1.24+; use np.float64)
        res = np.hstack((img, equ.astype(np.float64)))  # stacking images side-by-side
        threshold, ret = cv2.threshold(res, threshold, res.max(), thresh_type)
        ret = ret / 255.
        cv2.imshow('try...', ret)
        cv2.waitKey(0)
    return equ_threshed

def main(img_path):
    """Using OpenCV's histogram equalization method equalizeHist, write a
    program that improves the contrast of an input image. Visually inspect
    the output image and comment how histogram equalization changes its
    contrast. Compute the histograms of the input and equalized images and
    comment how the histogram has been stretched out more widely and
    uniformly.

    :param img_path: the path to an image
    :type img_path: str
    """
    # load in the image
    image = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    # show the image
    utils.show('original', image)
    # look at the histogram of the original image
    utils.show_hist_gray(image)
    # equalise the histogram
    equalised = cv2.equalizeHist(image)
    # show the new image
    utils.show('equalised', equalised)
    # look at the histogram now
    utils.show_hist_gray(equalised)

def _preprocess(self, warped_img):
    '''
    Preprocess the warped and rotated image.
    @warped_img: np.array, it should be the output of self._polar_warp_and_rotate().
    @return: (s_mask, output_img), saturation mask and image after preprocessing.
    '''
    warped_img = cv.GaussianBlur(warped_img, (3, 3), 1.5)
    hsv = cv.cvtColor(warped_img, cv.COLOR_BGR2HSV)
    warped_img = cv.cvtColor(warped_img, cv.COLOR_BGR2GRAY)
    warped_img = cv.equalizeHist(warped_img)  # Enhance contrast
    _, s, _ = cv.split(hsv)
    _, s = cv.threshold(s, 0, 255, cv.THRESH_OTSU)
    s = cv.morphologyEx(s, cv.MORPH_ERODE, np.ones((5, 5)))
    # Note: the three-value return is the OpenCV 3.x signature;
    # OpenCV 4.x returns only (contours, hierarchy)
    _, contours, _ = cv.findContours(s, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=lambda ctr: cv.contourArea(ctr))  # Sort to choose the largest area
    mask = cv.drawContours(np.zeros(warped_img.shape, np.uint8), contours,
                           len(contours) - 1, (255, 255, 255), thickness=1)
    box = cv.boundingRect(get_points(mask))  # Bounding box of the largest area
    mask = cv.rectangle(mask, (box[0], box[1]), (box[0] + box[2], box[1] + box[3]),
                        (255, 255, 255), cv.FILLED)  # Fill the area that is to be removed
    mask = cv.bitwise_not(mask)  # Keep the region where teeth can exist
    return mask, warped_img

def equalize(img: np.ndarray) -> np.ndarray:
    """
    Args:
        img: image as numpy array

    Returns:
        image
    """
    if is_colored(img):
        equ_b = cv2.equalizeHist(get_color(img, "b"))
        equ_g = cv2.equalizeHist(get_color(img, "g"))
        equ_r = cv2.equalizeHist(get_color(img, "r"))
        return cv2.merge((equ_b, equ_g, equ_r))
    else:
        return cv2.equalizeHist(img)

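# is_colored and get_color are not defined in the source; a minimal sketch of
# what they are assumed to do, using OpenCV's BGR channel order (hypothetical
# implementations, names preserved from the call sites above):
import numpy as np

def is_colored(img: np.ndarray) -> bool:
    # True when the array has a channel axis with more than one channel
    return img.ndim == 3 and img.shape[2] > 1

def get_color(img: np.ndarray, channel: str) -> np.ndarray:
    # Map a channel letter ('b', 'g' or 'r') to its index in BGR layout
    return img[:, :, "bgr".index(channel)]
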
def select_image():
    # grab a reference to the image panels
    global panelA, panelB
    # open a file chooser dialog and allow the user to select an input image
    path = filedialog.askopenfilename()
    path_out = '/home/anibe/Desktop/augustine/'
    img_path_in = path
    img_path_out = path_out + 'filtered.png'
    # ensure a file path was selected
    if len(path) > 0:
        img = cv2.imread(img_path_in)[:, :, 0]
        homo_filter = HomomorphicFilter(a=0.75, b=1.25)
        img_filtered = homo_filter.filter(I=img, filter_params=[30, 2])
        cv2.imwrite(img_path_out, img_filtered)
        # HISTOGRAM EQUALIZATION APPLIED HERE
        # (cv2.equalizeHist returns a new image; the original code discarded
        # the result, so the equalization had no effect)
        img = cv2.equalizeHist(img)
        # convert the images to PIL format...
        image = Image.fromarray(img)
        edged = Image.fromarray(img_filtered)
        # ...and then to ImageTk format
        image = ImageTk.PhotoImage(image)
        edged = ImageTk.PhotoImage(edged)
        # if the panels are None, initialize them
        if panelA is None or panelB is None:
            # the first panel will store our original image
            panelA = Label(image=image)
            panelA.image = image
            panelA.pack(side="left", padx=10, pady=10)
            # while the second panel will store the edge map
            panelB = Label(image=edged)
            panelB.image = edged
            panelB.pack(side="right", padx=10, pady=10)
        # otherwise, update the image panels
        else:
            # update the panels
            panelA.configure(image=image)
            panelB.configure(image=edged)
            panelA.image = image
            panelB.image = edged

def equalizeHistogram(image):
    __img = cv.imread(image)
    __img = cv.cvtColor(__img, cv.COLOR_BGR2GRAY)
    __img = cv.equalizeHist(__img)
    __img = cv.cvtColor(__img, cv.COLOR_GRAY2BGR)
    return __img

def segmentationBinarizationHist(image):
    __img = cv.imread(image)
    __img = cv.cvtColor(__img, cv.COLOR_BGR2GRAY)
    __img = cv.equalizeHist(__img)
    ret, __img = cv.threshold(__img, 128, 255, cv.THRESH_BINARY)
    __img = cv.cvtColor(__img, cv.COLOR_GRAY2BGR)
    return __img

def equalizeHist(self, path, file_name):
    try:
        img = cv2.imread(path + file_name)
        if session['rg_value'] == 1:
            img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
            # equalize the histogram of the Y channel
            img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])
            # convert the YUV image back to BGR format
            equ = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
        else:
            # cv2.imread returns BGR, so convert with BGR2GRAY
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            equ = cv2.equalizeHist(img)
        f_name = str(randint(1000000000, 9999999999)) + session['org_name']
        session['equalize_file_name'] = f_name
        cv2.imwrite(path + f_name, equ)
        return f_name
    except Exception:
        return None

def equalise(path):
    """
    CV2 histogram equalise function.

    :param path: path to the input image
    :return: None; displays the original and equalised images via handler.plotFigs
    """
    img = cv.imread(path, 0)  # read as grayscale
    equ = cv.equalizeHist(img)
    handler.plotFigs([img, equ])

def histograma():
    image = grayScale()
    equ = cv2.equalizeHist(image)
    res = np.hstack((image, equ))
    plt.hist(image.ravel(), 256, [0, 256])
    plt.hist(res.ravel(), 256, [0, 256])
    plt.ion()
    plt.show()

def data2img(data, normalization=1, histogram=0):
    # normalization, type casting (uint8), gray2rgb
    if normalization == 1:
        # NORM_MINMAX rescales intensities to the [0, 255] range
        data = cv2.normalize(src=data, dst=None, alpha=0, beta=255,
                             norm_type=cv2.NORM_MINMAX)
        data = np.uint8(data)
    if histogram == 1:
        data = cv2.equalizeHist(data)
    if len(data.shape) == 2:
        # replicate the single channel to get a 3-channel image
        data = np.stack((data,) * 3, axis=2)
    return data

def _get_saturation_edge(self, img):
    '''
    Get image edges for saturation channel in HSV model.
    @img: np.array, input image.
    @return: np.array, edge image.
    '''
    hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
    _, s, _ = cv.split(hsv)
    s = cv.equalizeHist(s)
    s = cv.GaussianBlur(s, self._gassian_kernel_size, self._gassian_sigma)
    s = cv.Canny(s, max(self._canny_param), min(self._canny_param))
    return s

def histogram(ImgNo, defThr=127):
    img = cv2.imread(impDef.select_img(ImgNo))
    img_grayScale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Compute the histogram with NumPy
    hist, bins = np.histogram(img_grayScale.ravel(), 256, [0, 256])

    # Flatten to 1-D and accumulate the counts: the cumulative distribution function (CDF)
    cdf = hist.cumsum()
    # Mask out the zero entries of the 1-D cdf array, i.e. ignore bins with count 0.
    # If a 1-D array a is [1, 0, 0, 2], np.ma.masked_equal(a, 0) is [1, --, --, 2];
    # masked elements are displayed as '--'.
    cdf_m = np.ma.masked_equal(cdf, 0)

    # The histogram equalization equation expressed in code
    cdf_m = (cdf_m - cdf_m.min()) * 255 / (cdf_m.max() - cdf_m.min())
    # Fill the masked entries of cdf_m with 0 and return a plain 1-D array,
    # restoring the entries masked above
    cdf = np.ma.filled(cdf_m, 0).astype('uint8')

    # Apply the equalization lookup table to the original image to build img2
    img2 = cdf[img_grayScale]

    cv2.imshow('Gray Scale', img_grayScale)
    cv2.imshow('Histogram Equalization', img2)
    # cv2.imwrite('img/Histogram_equal.jpg', img2)
    # npImage = np.hstack((img_grayScale, img2))  # stack img_grayScale and img2 horizontally
    # cv2.imshow('numpy Histogram Equalization', npImage)

    # The OpenCV way: returns the same result as the NumPy implementation above.
    # The NumPy approach can be extended to color images, whereas
    # cv2.equalizeHist() only accepts (and returns) grayscale images.
    equ = cv2.equalizeHist(img_grayScale)
    res = np.hstack((img_grayScale, equ))  # stack img_grayScale and equ horizontally
    cv2.imshow('OpenCV Equalizer', res)
    # cv2.imshow('openCV Equalizer', equ)

    impDef.close_window()

def apply_histogram(img_n):
    img_n = np.array(img_n)
    newImg = []
    for i in img_n:
        img = cv2.normalize(src=i, dst=None, alpha=0, beta=80,
                            norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
        equ = cv2.equalizeHist(img)
        # res = np.hstack((img, adjusted, equ))
        newImg.append(equ.tolist())
    return newImg

def pre_dispose(self):
    # Gray-world white balance followed by V-channel equalization;
    # computes the average color (HSV/BGR) over one face
    img = cv2.GaussianBlur(self.frame, (7, 7), 0)
    b, g, r = cv2.split(img)
    avgb = cv2.mean(b)[0]
    avgg = cv2.mean(g)[0]
    avgr = cv2.mean(r)[0]
    k = (avgb + avgg + avgr) / 3
    kb = k / avgb
    kg = k / avgg
    kr = k / avgr
    # Scale each channel by its gray-world gain
    # (cv2.addWeighted needs an array for src2, so use a zero image)
    zeros = np.zeros_like(b)
    b = cv2.addWeighted(src1=b, alpha=kb, src2=zeros, beta=0, gamma=0)
    g = cv2.addWeighted(src1=g, alpha=kg, src2=zeros, beta=0, gamma=0)
    r = cv2.addWeighted(src1=r, alpha=kr, src2=zeros, beta=0, gamma=0)
    img = cv2.merge([b, g, r])
    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(img_hsv)
    v = cv2.equalizeHist(v)
    img_hsv = cv2.merge([h, s, v])
    self.img_hsv = img_hsv
    img = cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR)
    self.img_bgr = img
    self.frame = img

def save3dBrain(fl, flOut):
    curDir = '/home/erika/NormalizacaoCerebro/I36464/'
    fileIn = curDir + fl
    fileNii = nib.load(fileIn)
    img_n = fileNii.get_fdata()
    newImg = []
    for i in img_n:
        img = cv2.normalize(src=i, dst=None, alpha=0, beta=80,
                            norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
        equ = cv2.equalizeHist(img)
        # res = np.hstack((img, adjusted, equ))
        newImg.append(equ.tolist())
    newFile = nib.Nifti1Image(np.array(newImg), fileNii.affine)
    nib.save(newFile, '/home/erika/Desktop/Resultados tcc/' + flOut)

def msi_to_rgb(in_file, out_file):
    ds = gdal.Open(in_file)
    rgb = read_as_rgb(ds)
    # create_output_image(ds, out_file, rgb)
    x = np.dstack(rgb).astype(np.uint8)

    # Equalize only the V channel in HSV space to boost contrast
    # without shifting hue
    H, S, V = cv2.split(cv2.cvtColor(x, cv2.COLOR_RGB2HSV))
    eq_V = cv2.equalizeHist(V)
    img_output = cv2.cvtColor(cv2.merge([H, S, eq_V]), cv2.COLOR_HSV2RGB)

    # Alternative: equalize the Y channel in YUV space
    # img_yuv = cv2.cvtColor(x, cv2.COLOR_RGB2YUV)
    # img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])
    # img_output = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2RGB)

    r = img_output[:, :, 0]
    g = img_output[:, :, 1]
    b = img_output[:, :, 2]
    zzz = [r, g, b]
    create_output_image(ds, out_file, zzz)
    del ds

def preprocessing(img):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = img.astype(np.uint8)
    img = cv2.equalizeHist(img)
    return img

def stereo_sgbm(self):
    voice = Voice()
    voice.completed.set()
    Left_Stereo_Map = (config.left_map1, config.left_map2)
    Right_Stereo_Map = (config.right_map1, config.right_map2)
    cv.namedWindow("Two Camera")
    # cv.namedWindow('depth')
    # cv.createTrackbar("windowsize", "depth", 1, 25, lambda x: None)
    # cv.createTrackbar("max disp", "depth", 247, 256, lambda x: None)
    leftCam = cv.VideoCapture(self.left_camera_id + cv.CAP_DSHOW)
    rightCam = cv.VideoCapture(self.right_camera_id + cv.CAP_DSHOW)
    leftCam.set(cv.CAP_PROP_FRAME_HEIGHT, self.frameHeight)
    leftCam.set(cv.CAP_PROP_FRAME_WIDTH, self.frameWidth)
    rightCam.set(cv.CAP_PROP_FRAME_HEIGHT, self.frameHeight)
    rightCam.set(cv.CAP_PROP_FRAME_WIDTH, self.frameWidth)
    # leftCam.set(cv.CAP_PROP_FPS, 3)
    # rightCam.set(cv.CAP_PROP_FPS, 3)
    # leftCam.set(cv.CAP_PROP_BUFFERSIZE, 3)
    _, fl = leftCam.read()
    # window_size = cv.getTrackbarPos("windowsize", "depth")
    window_size = 1
    # max_disp = cv.getTrackbarPos("max disp", "depth")
    max_disp = 247
    min_disp = 0
    num_disp = max_disp - min_disp
    # P1/P2 smoothness penalties; note len(fl.shape) is the number of array
    # dimensions (3 for a color frame), which here happens to coincide with
    # the channel count used in the usual SGBM formula
    p1_var = 8 * len(fl.shape) * window_size * window_size
    p2_var = 32 * len(fl.shape) * window_size * window_size
    stereo = cv.StereoSGBM_create(minDisparity=min_disp,
                                  numDisparities=num_disp,
                                  blockSize=window_size,
                                  uniquenessRatio=10,
                                  speckleWindowSize=100,
                                  speckleRange=1,
                                  disp12MaxDiff=10,
                                  P1=p1_var,
                                  P2=p2_var)
    # Used for the filtered image:
    # create another matcher for the right image this time
    stereoR = cv.ximgproc.createRightMatcher(stereo)
    # WLS filter parameters
    lmbda = 80000
    sigma = 2.0
    wls_filter = cv.ximgproc.createDisparityWLSFilter(matcher_left=stereo)
    wls_filter.setLambda(lmbda)
    wls_filter.setSigmaColor(sigma)
    shot = True
    if not (leftCam.isOpened() and rightCam.isOpened()):
        exit(1)
    while True:
        retvalOfRight, rightFrame = rightCam.read()
        retvalOfLeft, leftFrame = leftCam.read()
        if not (retvalOfRight and retvalOfLeft):
            print("read fail")
            break
        key = cv.waitKey(1)
        twoFrame = cv.hconcat([rightFrame, leftFrame])
        cv.imshow("Two Camera", twoFrame)
        if key & 0xFF == ord('q'):
            print("finished")
            break
        # elif key & 0xFF == ord('s'):
        elif shot:
            frameL = leftFrame
            frameR = rightFrame
            shot = False
        else:
            time.sleep(0.1)
            shot = True
            continue
        remapped_left_side = cv.remap(frameL, Left_Stereo_Map[0], Left_Stereo_Map[1],
                                      cv.INTER_LANCZOS4, cv.BORDER_CONSTANT, 0)
        remapped_right_side = cv.remap(frameR, Right_Stereo_Map[0], Right_Stereo_Map[1],
                                       cv.INTER_LANCZOS4, cv.BORDER_CONSTANT, 0)
        grayR = cv.cvtColor(remapped_right_side, cv.COLOR_BGR2GRAY)
        grayL = cv.cvtColor(remapped_left_side, cv.COLOR_BGR2GRAY)
        grayR = cv.equalizeHist(grayR)
        grayL = cv.equalizeHist(grayL)
        # cv.imshow('grayR', grayR)
        # cv.imshow('grayL', grayL)
        disp = stereo.compute(grayL, grayR)
        dispL = np.int16(disp)
        dispR = stereoR.compute(grayR, grayL)
        dispR = np.int16(dispR)
        # cv.imshow('dispR', dispR)
        filteredImg = wls_filter.filter(dispL, grayL, None, dispR)
        # cv.imshow('filteredImg', filteredImg)
        filteredImg = cv.normalize(src=filteredImg, dst=filteredImg,
                                   alpha=255, beta=0, norm_type=cv.NORM_MINMAX)
        filteredImg = np.uint8(filteredImg)
        # contours, hierarchy = cv.findContours(filteredImg, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
        # print(len(contours[0]))
        # cv.drawContours(grayL, [cnt], 0, (0, 255, 0), 3)
        # cv.drawContours(filteredImg, contours, 1, (0, 0, 255), 20)
        self.filter_disp = ((filteredImg.astype(np.float32) / 16) - min_disp) / num_disp
        # filt_Color = cv.applyColorMap(filteredImg, cv.COLORMAP_RAINBOW)
        # cv.imshow('depth', filt_Color)
        cv.imshow('depth', filteredImg)
        left = cv.line(remapped_left_side, (0, 0), (0, self.frameHeight), (0, 0, 0), 1)
        self.detect()
        if len(self.sentences) == 1:
            if voice.completed.is_set():
                voice.completed.clear()
                voice.say(self.sentences.pop(len(self.sentences) - 1))
        cv.imshow('calc', left)
    rightCam.release()
    leftCam.release()
    cv.destroyAllWindows()
    voice.terminate()

# -*- coding: utf-8 -*-
from cv2 import cv2
import numpy as np
from matplotlib import pyplot as plt

filename = 'snapshot/b8f3506c-3f65-11eb-9798-16f63a1aa8c9.jpg'
# img = cv2.imread('images/watershed.jpg')
img = cv2.imread(filename)

# Convert to a binary image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (3, 3), 0)
gray = cv2.equalizeHist(gray)
mean = np.average(gray)
ret, thresh1 = cv2.threshold(gray, mean, 255, cv2.THRESH_BINARY_INV)
ret, thresh2 = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)
# thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, 4)
thresh = np.bitwise_or(thresh1, thresh2)

# Remove noise and holes with morphological opening/closing
kernel = np.ones((3, 3), np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)

# Dilate to obtain the sure background
sure_bg = cv2.dilate(opening, kernel, iterations=2)

# Applying a distance transform yields a skeleton-like image measured from
# the center, i.e. an image that fades out away from the center.
# Thresholding that result identifies the sure foreground.
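# The script stops at the comment above. A minimal continuation following the
# standard OpenCV watershed tutorial that the comment describes (the 0.7
# scale factor is the tutorial's usual value, an assumption here):
dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)
sure_fg = np.uint8(sure_fg)
# Pixels that are neither sure background nor sure foreground
unknown = cv2.subtract(sure_bg, sure_fg)
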
def equalizer(image):
    if len(image.shape) == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    eq = cv2.equalizeHist(image)
    return eq

def rmReflection(self, bhImage, gsInvertedImg):
    # Add the black-hat image to the inverted grayscale image, then
    # median-blur and equalize to suppress specular reflections
    img = cv2.add(gsInvertedImg, bhImage)
    return cv2.equalizeHist(cv2.medianBlur(img, 5))

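# A hypothetical sketch of how rmReflection's inputs might be produced; the
# parameter names suggest a black-hat image and an inverted grayscale image,
# but the file name, kernel size, and pipeline are assumptions, not from the
# source:
import cv2

gray = cv2.imread('iris.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input
gsInvertedImg = cv2.bitwise_not(gray)  # inverted grayscale image
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
bhImage = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, kernel)  # black-hat image
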
right_eye_image_resized = imutils.resize(right_eye_image, width=200)

# Finding centre of left and right eye and
# drawing it on their images
M_left = cv2.moments(leftEye)
cX_left = int(M_left["m10"] / M_left["m00"])
cY_left = int(M_left["m01"] / M_left["m00"])
M_right = cv2.moments(rightEye)
cX_right = int(M_right["m10"] / M_right["m00"])
cY_right = int(M_right["m01"] / M_right["m00"])
cv2.circle(frame, (cX_left, cY_left), 1, (0, 0, 255), -1)
cv2.circle(frame, (cX_right, cY_right), 1, (0, 0, 255), -1)

# Finding Hough circles in left and right eye images
gray_left = cv2.cvtColor(left_eye_image, cv2.COLOR_BGR2GRAY)
equ_left = cv2.equalizeHist(gray_left)
equ_left_resized = cv2.resize(equ_left, (24, 8), interpolation=cv2.INTER_AREA)
gray_right = cv2.cvtColor(right_eye_image, cv2.COLOR_BGR2GRAY)
equ_right = cv2.equalizeHist(gray_right)
equ_right_resized = cv2.resize(equ_right, (24, 8), interpolation=cv2.INTER_AREA)
# circles_left = cv2.HoughCircles(equ_left, cv2.HOUGH_GRADIENT, 1, gray_left.shape[0]/8,
#                                 param1=250, param2=15,
#                                 minRadius=gray_left.shape[1]/8,
#                                 maxRadius=gray_left.shape[0]/3)
circles_left = cv2.HoughCircles(equ_left, cv2.HOUGH_GRADIENT, 1, 20,
                                param1=50, param2=30, minRadius=0, maxRadius=0)