import cv2 as cv
import numpy as np


def watershed_demo(image):
    # Edge-preserving smoothing, then grayscale and Otsu binarization
    blurred = cv.pyrMeanShiftFiltering(image, 10, 100)
    gray = cv.cvtColor(blurred, cv.COLOR_BGR2GRAY)
    ret, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
    cv.imshow("binary", binary)

    # Morphological opening to remove noise; dilation of the binary image gives the sure background
    kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
    mb = cv.morphologyEx(binary, cv.MORPH_OPEN, kernel, iterations=2)
    sure_bg = cv.dilate(binary, kernel, iterations=3)
    cv.imshow("mor", sure_bg)

    # Distance transform; threshold it to obtain the sure foreground
    dist = cv.distanceTransform(mb, cv.DIST_L2, 3)
    dist_output = cv.normalize(dist, None, 0, 1.0, cv.NORM_MINMAX)
    cv.imshow("dist", dist_output * 50)
    ret, surface = cv.threshold(dist, dist.max() * 0.6, 255, cv.THRESH_BINARY)
    cv.imshow("interface", surface)

    # Unknown region = sure background minus sure foreground
    surface_fg = np.uint8(surface)
    unknown = cv.subtract(sure_bg, surface_fg)

    # Label the sure-foreground components and run the watershed;
    # boundary pixels (-1) are painted red
    ret, markers = cv.connectedComponents(surface_fg)
    print(ret)
    markers += 1
    markers[unknown == 255] = 0
    markers = cv.watershed(image, markers=markers)
    image[markers == -1] = [0, 0, 255]
    cv.imshow("result", image)
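# A minimal usage sketch for watershed_demo above, not part of the original
# snippet: it assumes a readable BGR test image at the hypothetical path
# "coins.jpg" and that the HighGUI windows opened inside the function are
# dismissed with a key press.
src = cv.imread("coins.jpg")  # hypothetical input path
if src is None:
    raise FileNotFoundError("coins.jpg not found")
watershed_demo(src)           # opens several debug windows
cv.waitKey(0)                 # keep the windows open until a key is pressed
cv.destroyAllWindows()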
import cv2
import numpy as np
import matplotlib.pyplot as plt


def GetBrain(image):
    # Binarize the slice and label its connected components
    ret, thresh = cv2.threshold(image, 70, 255, cv2.THRESH_BINARY)
    ret, markers = cv2.connectedComponents(thresh)
    try:
        # Area of every non-background label (label 0 is the background)
        marker_area = [np.sum(markers == m) for m in range(1, np.max(markers) + 1)]
        largest_component = np.argmax(marker_area) + 1
        brain_mask = markers == largest_component
        plt.imshow(brain_mask)
        plt.show()
        # Keep only the largest component (the brain); zero out everything else
        brain_out = image.copy()
        brain_out[~brain_mask] = 0
        return brain_out
    except ValueError:
        # No foreground component found: return the input unchanged
        return image
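# A hedged usage sketch for GetBrain above: "slice.png" is an assumed
# single-channel MRI slice; the function returns the slice with everything
# outside its largest connected component set to zero.
mri = cv2.imread('slice.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input file
brain = GetBrain(mri)
cv2.imwrite('slice_brain.png', brain)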
def watershed(image, image_color):
    print('watershed', image.shape)
    cv2.imshow('Image', image)

    # Iterative segmentation produces the gradient image; invert it
    gradiente = segmentar_iterativo(image)
    cv2.imshow('Gradiente', gradiente)
    gradiente_inverso = image_not(gradiente)
    cv2.imshow('Gradiente inverso', gradiente_inverso)

    # Opening to remove noise, then a strong erosion to obtain the sure foreground
    kernel = np.ones((3, 3), np.uint8)
    thresh = gradiente_inverso
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=3)
    gradiente_erode = cv2.erode(opening, kernel, iterations=13)
    cv2.imshow('Gradiente erode', gradiente_erode)
    gradiente_erode = np.uint8(gradiente_erode)

    # Unknown region = inverted gradient minus eroded foreground
    unknown = cv2.subtract(gradiente_inverso, gradiente_erode)
    cv2.imshow('gradiente_inverso - gradiente_erode', unknown)

    # gradiente_inverso is the image of the dam boundaries
    ret, markers = cv2.connectedComponents(gradiente_erode)
    markers = markers + 1
    markers[unknown == 255] = 0
    # markers[markers >= 1] = 250
    cv2.imshow('markers', markers)

    markers = cv2.watershed(image_color, markers)
    image_color[markers == -1] = [255, 255, 0]
    cv2.imshow('Resultado - Watershed', image_color)
    cv2.waitKey(0)
import cv2
import numpy as np


def s2(img):  # watershed segmentation
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    # Noise removal
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)

    # Find the sure background area
    sure_bg = cv2.dilate(opening, kernel, iterations=3)

    # Find the sure foreground area
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)

    # Find the unknown region (the borders)
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)

    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)
    # Add 1 to every label so the sure background is 1 instead of 0
    markers = markers + 1
    # Mark the unknown region with zero
    markers[unknown == 255] = 0

    markers = cv2.watershed(img, markers)
    img[markers == -1] = [255, 0, 0]
    return img
def _image_swtcc(self, swt_mat):
    """
    Find connected components of the SWT image; each connected
    component region is filled with a unique label value.

    parameters
    --------------------------------------
    swt_mat : np.ndarray, required
        SWT transform of the image.

    returns
    --------------------------------------
    tuple - (num_labels, labelmask)
        Number of labels found and an np.ndarray of the connected components.
    """
    threshmask = swt_mat.copy().astype(np.int16)
    # Set the maximum value (diagonal of the image :: maximum stroke width) to 0
    threshmask[threshmask == np.max(threshmask)] = 0
    # Binarize and convert to uint8, as required by cv2.connectedComponents
    threshmask[threshmask > 0] = 1
    threshmask = threshmask.astype(np.uint8)

    num_labels, labelmask = cv2.connectedComponents(threshmask, connectivity=8)
    return num_labels, labelmask
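# A small, assumed exercise of _image_swtcc above on a synthetic "SWT" matrix
# (shapes and values are made up for illustration). The method never touches
# self, so it is called here with None in place of an instance.
import numpy as np
import cv2

swt = np.full((64, 64), 90, dtype=np.int16)  # 90: stand-in for the diagonal / max stroke width
swt[10:20, 10:40] = 3                        # one stroke-like region
swt[40:50, 5:25] = 5                         # a second, disjoint region
num_labels, labelmask = _image_swtcc(None, swt)
print(num_labels)                            # expected: 3 (background + 2 regions)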
def watershed(self, _img=None):
    # Grayscale and binary conversion
    _img = self.img if _img is None else _img
    _gray = cv2.cvtColor(_img, cv2.COLOR_BGR2GRAY)
    _, _binary = cv2.threshold(_gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)

    # Morphological operations
    # Kernel for the morphological operations
    _kernel = np.ones((3, 3), np.uint8)
    # Opening removes noise (e.g. salt-and-pepper noise)
    _opening = cv2.morphologyEx(_binary, cv2.MORPH_OPEN, _kernel, iterations=2)

    # If the background and foreground could be marked by hand, the segmentation
    # would work well; given the amount of data, the program finds them mechanically.
    # Find the region that is certainly background: dilation enlarges the object regions
    _sure_bg = cv2.dilate(_opening, _kernel, iterations=3)
    # cv_show(_sure_bg)

    # Distance transform on the opened image, then threshold it to get the foreground.
    # The first argument must be a single-channel binary image; the second is the distance type.
    # It computes, for each 255 pixel, the distance to the nearest 0 pixel.
    # DIST_L2 is the Euclidean distance and produces fractional values;
    # DIST_L1 is the city-block (Manhattan) distance and produces integers.
    _dist_transform = cv2.distanceTransform(_opening, cv2.DIST_L1, 5)
    # cv_show(_dist_transform)

    # Threshold the distance transform to keep points that are very likely foreground
    _, _sure_fg = cv2.threshold(_dist_transform, 0.5 * _dist_transform.max(), 255, cv2.THRESH_BINARY)
    # Convert the type; cv2.subtract below needs matching uint8 inputs
    _sure_fg = np.uint8(_sure_fg)
    # cv_show(_sure_fg)

    # The unknown region is handed to the bottom-up flooding algorithm
    _unknown = cv2.subtract(_sure_bg, _sure_fg)
    # cv_show(_unknown)

    _, _markers = cv2.connectedComponents(_sure_fg)
    _markers = _markers + 1
    _markers[_unknown == 255] = 0

    _img1 = _img.copy()
    _markers = cv2.watershed(_img1, _markers)

    # Color each labelled region; afterwards part of the image could be set to black
    # based on the result
    def random_color(a: int):
        return np.random.randint(0, 255, (a, 3))

    _markers_label = np.unique(_markers)
    _colors = random_color(_markers_label.size)
    for _mark, _color in zip(_markers_label, _colors):
        _img1[_markers == _mark] = _color

    # Display
    cv_show(_img1)
def watershed(image, image_color):
    # Inverted gradient from the iterative segmentation
    gradiente = segmentar_iterativo(image)
    gradiente_inverso = image_not(gradiente)

    # Opening plus a strong erosion gives the sure foreground
    kernel = np.ones((3, 3), np.uint8)
    thresh = gradiente_inverso
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=3)
    gradiente_erode = cv2.erode(opening, kernel, iterations=13)
    gradiente_erode = np.uint8(gradiente_erode)

    # Unknown region and marker labelling
    unknown = cv2.subtract(gradiente_inverso, gradiente_erode)
    ret, markers = cv2.connectedComponents(gradiente_erode)
    markers = markers + 1
    markers[unknown == 255] = 0

    # Watershed; boundary pixels (-1) are painted cyan
    markers = cv2.watershed(image_color, markers)
    image_color[markers == -1] = [255, 255, 0]
    return image_color
import cv2 as cv
import numpy as np


def watershedAlgorithm(image):
    img = cv.imread(image)
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    ret, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU)

    # noise removal
    kernel = np.ones((5, 5), np.uint8)
    opening = cv.morphologyEx(thresh, cv.MORPH_OPEN, kernel, iterations=2)

    # sure background area
    sure_bg = cv.dilate(opening, kernel, iterations=3)

    # Finding sure foreground area
    dist_transform = cv.distanceTransform(opening, cv.DIST_L2, 5)
    ret, sure_fg = cv.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv.subtract(sure_bg, sure_fg)

    # Marker labelling
    ret, markers = cv.connectedComponents(sure_fg)
    # Add one to all labels so that sure background is not 0, but 1
    markers = markers + 1
    # Now, mark the region of unknown with zero
    markers[unknown == 255] = 0

    markers = cv.watershed(img, markers)
    img[markers == -1] = [255, 0, 0]
    return img
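# A hedged usage sketch for watershedAlgorithm above: "cells.png" is an assumed
# example filename; the segmented result, with boundaries drawn in [255, 0, 0]
# (BGR blue), is simply written back to disk.
segmented = watershedAlgorithm("cells.png")   # hypothetical input file
cv.imwrite("cells_watershed.png", segmented)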
def watershed(img, img_gray):
    # Threshold relative to the mean intensity (earlier variants kept for reference)
    # mean = np.average(img_gray)
    # _, thresh1 = cv2.threshold(img_gray, mean, 255, cv2.THRESH_BINARY_INV)
    # _, thresh2 = cv2.threshold(img_gray, 200, 255, cv2.THRESH_BINARY)
    # thresh = np.bitwise_or(thresh1, thresh2)
    _, thresh = cv2.threshold(img_gray, np.average(img_gray) - 40, 255, cv2.THRESH_BINARY_INV)

    # Opening to remove noise, dilation for the sure background
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
    sure_bg = cv2.dilate(opening, kernel, iterations=2)

    # Distance transform and threshold for the sure foreground
    dist_transform = cv2.distanceTransform(sure_bg, cv2.DIST_L2, 5)
    _, sure_fg = cv2.threshold(dist_transform, 0.5 * dist_transform.max(), 255, 0)
    sure_fg = np.uint8(sure_fg)

    # Unknown region = sure background minus sure foreground
    unknown = cv2.subtract(sure_bg, sure_fg)

    # Label the sure foreground and run the watershed
    ret, markers = cv2.connectedComponents(sure_fg)
    markers = markers + 1
    markers[unknown == 255] = 0
    markers = cv2.watershed(img, markers)

    # This helper reports the distance map back to the caller
    return dist_transform
# `thresh` is the binary image produced by an earlier thresholding step
kernel = np.ones((3, 3), np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
sure_bg = cv2.dilate(opening, kernel, iterations=3)
cv2.imshow('Image2 - sure_bg', sure_bg)
# cv2.imshow('Image2 - opening', opening)

dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)
cv2.imshow('Image3 - sure_fg', sure_fg)
cv2.imshow('Image3 - dist_transform', dist_transform)

sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)
cv2.imshow('Image4', unknown)

ret, markers = cv2.connectedComponents(sure_fg)
# markers = markers + 1
markers[unknown == 255] = 0
markers[markers >= 1] = 255
# Convert the int32 label image to uint8 so imshow displays the white markers correctly
cv2.imshow('Image5', np.uint8(markers))
def display_selected_microtubule(self, frame):
    # Threshold, filter, and clean up the frame
    frame = np.array(frame)
    frame = frame.astype(np.uint8)
    frame = cv2.adaptiveThreshold(frame, frame.max(), cv2.ADAPTIVE_THRESH_MEAN_C,
                                  cv2.THRESH_BINARY, 9, -1)
    frame = np.array(frame)
    frame = frame.astype(np.uint8)
    frame = cv2.bilateralFilter(frame, 11, 70, 70)
    kernel = np.ones((3, 3), np.uint8)
    frame = cv2.erode(frame, kernel, iterations=1)
    frame = cv2.dilate(frame, kernel, iterations=2)
    frame = cv2.morphologyEx(frame, cv2.MORPH_OPEN, kernel)
    frame = cv2.morphologyEx(frame, cv2.MORPH_OPEN, kernel)
    frame = frame.astype(np.uint8)

    # Get the line ends
    self.x0 = self.microtubule_ends[0][0]
    self.y0 = self.microtubule_ends[0][1]
    self.x1 = self.microtubule_ends[1][0]
    self.y1 = self.microtubule_ends[1][1]

    frame = np.transpose(frame)

    # Get all connected components of the frame
    _, label = cv2.connectedComponents(frame, connectivity=8)

    # If we have already analyzed the first frame, get the slope and intercept from the microtubule
    m = (self.y1 - self.y0) / ((self.x1 - self.x0) + 1e-9)
    if m == 0:
        m += 1e-9
    c = self.y0 - (m * self.x0)
    if self.vid.frame_counter > 0:
        m, c = self.microtubule.getLineVals()

    # Sample the label image in small windows along the line between the two ends
    maxRange = 2
    x_range = np.arange(np.min([self.x0, self.x1]) + maxRange,
                        np.max([self.x0, self.x1]) - maxRange, 1)
    if len(x_range) == 0:
        x_range = [self.x0, self.x1]
    component_values = [
        label[x - maxRange:x + maxRange,
              math.floor(m * x + c) - maxRange:math.floor(m * x + c) + maxRange].max()
        for x in x_range
    ]

    # The most common label along the line is taken as the selected microtubule
    data = Counter(component_values)
    componentNumber = data.most_common(1)[0][0]

    # Set everything that is not this component number to be the background
    label[label != componentNumber] = 0
    return label
def binary_captchar(pathname):
    '''Decode the captcha in the image at the given path.'''
    img = cv.imread(pathname, 0)
    ret, dst = cv.threshold(img, 10, 255, cv.THRESH_BINARY)  # binarize
    for i in range(img.shape[0]):  # invert
        for j in range(img.shape[1]):
            dst[i, j] = 255 - dst[i, j]

    ret, labels = cv.connectedComponents(dst)  # connected components
    unique, counts = np.unique(labels, return_counts=True)
    labels_counts = dict(zip(unique, counts))  # size of each component

    ret, bimg = cv.threshold(img, 127, 255, cv.THRESH_BINARY)  # binarize the base image
    for i in range(img.shape[0]):  # denoise: keep only medium-sized components
        for j in range(img.shape[1]):
            if labels_counts[labels[i, j]] < 10 or labels_counts[labels[i, j]] > 80:
                bimg[i, j] = 0
            else:
                bimg[i, j] = 255

    contours, hierarchy = cv.findContours(bimg, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    rects = [cv.minAreaRect(cnt) for cnt in contours]  # minimum-area box for each contour
    boxes = [np.intp(cv.boxPoints(box)) for box in rects]  # box corner points

    leftset = set()
    for box in boxes:
        leftset.add(min([i[0] for i in box]))
    left_list = sorted(leftset)  # sort the detected letter regions from left to right

    captcha = ''
    for left in left_list:
        for box in boxes:
            Xs = [i[0] for i in box]
            Ys = [i[1] for i in box]
            x1 = min(Xs)
            if x1 != left:
                continue
            x2 = max(Xs)
            y1 = min(Ys)
            y2 = max(Ys)
            height = y2 - y1
            width = x2 - x1
            crop = bimg[y1 - 1:y1 + height + 2, x1 - 1:x1 + width + 2]  # cropped character patch

            tem_folder = os.path.join(os.path.abspath('.'), 'split')
            isExists = os.path.exists(tem_folder)
            if not isExists:
                os.makedirs(tem_folder)
            split_name = os.path.join(os.path.abspath('.'), 'split', str(time.time()) + '.png')
            # print(split_name)
            cv.imwrite(split_name, crop)
            split_img = cv.imread(split_name)
            '''
            if split_img is None:
                print('can\'t load split_img')
                sys.exit()
            '''
            # The crop has to be written to disk and read back before recognition
            captcha += get_captcha(split_img)
            os.remove(split_name)  # delete the temporary file
    return captcha
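# A minimal, hypothetical driver for binary_captchar above. It assumes a captcha
# image at "captcha.png" and that get_captcha (the per-character recognizer used
# inside binary_captchar) is available in the same module.
text = binary_captchar('captcha.png')  # hypothetical captcha file
print('recognized captcha:', text)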