def contours_detector(frame, typeBinariz=0, binarizParams=(9, 2.5),
                      minSizeObj=(24, 24), maxSizeObj=(144, 144),
                      aspectRatioObjInterval=(0.6, 1.3),
                      compactObjInterval=(0.01, 0.2)):
    # cut left and right parts
    fr_cut_main = cv.GaussianBlur(frame, (5, 5), 0)
    fr_cut_left = frproc.cut_frame(fr_cut_main, (0, 0),
                                   (int(0.3 * fr_cut_main.shape[1]), fr_cut_main.shape[0]))
    fr_cut_right = frproc.cut_frame(fr_cut_main, (int(0.7 * fr_cut_main.shape[1]), 0),
                                    (fr_cut_main.shape[1], fr_cut_main.shape[0]))

    # equalize the histogram of the left and right parts of the frame
    fr_cut_left = cv.createCLAHE(2.0, (5, 5)).apply(fr_cut_left)
    fr_cut_left = cv.GaussianBlur(fr_cut_left, (5, 5), 0)
    fr_cut_right = cv.createCLAHE(2.0, (5, 5)).apply(fr_cut_right)
    fr_cut_right = cv.GaussianBlur(fr_cut_right, (5, 5), 0)

    # binarization type
    if typeBinariz == 0:
        tr_left = cv.adaptiveThreshold(fr_cut_left, 255, cv.ADAPTIVE_THRESH_MEAN_C,
                                       cv.THRESH_BINARY_INV, binarizParams[0], binarizParams[1])
        tr_main = cv.adaptiveThreshold(fr_cut_main, 255, cv.ADAPTIVE_THRESH_MEAN_C,
                                       cv.THRESH_BINARY_INV, binarizParams[0], binarizParams[1])
        tr_right = cv.adaptiveThreshold(fr_cut_right, 255, cv.ADAPTIVE_THRESH_MEAN_C,
                                        cv.THRESH_BINARY_INV, binarizParams[0], binarizParams[1])
    elif typeBinariz == 1:
        tr_left = cv.Canny(fr_cut_left, binarizParams[0], binarizParams[1])
        tr_main = cv.Canny(fr_cut_main, binarizParams[0], binarizParams[1])
        tr_right = cv.Canny(fr_cut_right, binarizParams[0], binarizParams[1])
    elif typeBinariz == 2:
        tr_left = imu.auto_canny(fr_cut_left)
        tr_main = imu.auto_canny(fr_cut_main)
        tr_right = imu.auto_canny(fr_cut_right)
    else:
        return None  # unknown binarization type

    # paste the processed side parts back into the full frame
    tr_main[0:fr_cut_main.shape[0], 0:int(0.3 * fr_cut_main.shape[1])] = tr_left
    tr_main[0:fr_cut_main.shape[0], int(0.7 * fr_cut_main.shape[1]):fr_cut_main.shape[1]] = tr_right
    cv.imshow("Test contours", tr_main)

    # 3-value unpacking assumes the OpenCV 3.x findContours signature;
    # OpenCV 2.x/4.x return only (contours, hierarchy)
    _, contours, _ = cv.findContours(tr_main, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)

    # filter contours by size, aspect ratio, and compactness
    contours = [c for c in contours
                if maxSizeObj[0] >= cv.boundingRect(c)[2] >= minSizeObj[0]  # width
                and maxSizeObj[1] >= cv.boundingRect(c)[3] >= minSizeObj[1]  # height
                and (aspectRatioObjInterval[0]
                     <= float(cv.boundingRect(c)[2]) / cv.boundingRect(c)[3]
                     <= aspectRatioObjInterval[1])  # aspect ratio
                and (cv.arcLength(c, True) != 0
                     and compactObjInterval[0]
                     <= float(cv.contourArea(c)) / (cv.arcLength(c, True) ** 2)
                     <= compactObjInterval[1])]  # compactness
    rects_signs = [cv.boundingRect(c) for c in contours]
    return rects_signs
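# Usage sketch for contours_detector (assumptions: cv is cv2, imu is imutils,
# frproc.cut_frame crops a sub-image, and the input is a grayscale frame,
# since CLAHE and adaptiveThreshold expect a single-channel 8-bit image).
import cv2 as cv

cap = cv.VideoCapture(0)
ok, frame = cap.read()
if ok:
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    rects = contours_detector(gray, typeBinariz=2)  # auto-Canny branch
    for (x, y, w, h) in rects or []:
        cv.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
cap.release()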
def verif():
    with picamera.PiCamera() as camera:
        camera.resolution = (2592, 1944)  # camera resolution (max: (2592, 1944); min: (64, 64))
        camera.framerate = 15  # camera frame rate (set to 15 for the maximum resolution)
        camera.exposure_mode = 'auto'  # camera exposure mode (default: 'auto')
        camera.start_preview()  # open the camera preview (not needed here, but handy for testing)
        sleep(2)  # let the camera settle for 2 s (brightness adjustment, etc.)
        camera.capture('/home/pi/varroasVerif.jpg')  # capture the image to its storage path
        camera.stop_preview()  # close the preview
        print('Verification done')
        sleep(2)
    image = cv2.imread('varroasVerif.jpg')
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # convert the image to grayscale
    flougaussien = cv2.bilateralFilter(gray, 6, 157, 157)  # smooth the image (edge-preserving pixel averaging)
    edge = imutils.auto_canny(flougaussien)  # detect the edges
    (cnts, _) = cv2.findContours(edge, cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)  # collect the contours found with the chosen retrieval mode
    compteurVerif = 0  # initialize the counter
    for c in cnts:
        if 120 > cv2.contourArea(c) > 85:  # keep contours whose area falls in the expected varroa range
            compteurVerif += 1
    return compteurVerif
def preprocessing(self):
    # top band: threshold + morphology
    img1 = self.img[0:400, :]
    _, img1 = cv2.threshold(img1, 25, 255, cv2.THRESH_TOZERO)
    _, img1 = cv2.threshold(img1, 25, 255, cv2.THRESH_BINARY)
    kernel = np.ones((9, 9), np.uint8)
    img1 = cv2.morphologyEx(img1, cv2.MORPH_CLOSE, kernel)
    # a bare (7, 7) tuple is not a valid structuring element; build an explicit kernel
    img1 = cv2.erode(img1, np.ones((7, 7), np.uint8), iterations=1)
    img1 = cv2.morphologyEx(img1, cv2.MORPH_OPEN, kernel)

    # middle band: blur + binary threshold
    img2 = self.img[400:500, :]
    img2 = cv2.GaussianBlur(img2, (5, 5), np.sqrt(16))
    _, img2 = cv2.threshold(img2, 1, 255, cv2.THRESH_BINARY)

    # bottom band: threshold + morphology
    img3 = self.img[500:, :]
    kernel1 = np.ones((5, 5), np.uint8)
    _, img3 = cv2.threshold(img3, 50, 255, cv2.THRESH_BINARY)
    img3 = cv2.morphologyEx(img3, cv2.MORPH_CLOSE, kernel1)
    img3 = cv2.erode(img3, np.ones((7, 7), np.uint8), iterations=3)
    img3 = cv2.morphologyEx(img3, cv2.MORPH_OPEN, kernel1)

    # stitch the three bands back together, then extract and thicken edges
    hold = np.zeros((self.img.shape[0], self.img.shape[1]), dtype=np.uint8)
    hold[0:400, :] = img1
    hold[400:500, :] = img2
    hold[500:, :] = img3
    hold = imu.auto_canny(hold)
    hold = cv2.dilate(hold, np.ones((3, 3), np.uint8), iterations=3)
    return hold
def scan(img):
    # preprocess image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.bilateralFilter(gray, FILTER[0], FILTER[1], FILTER[2])
    ret, gray = cv2.threshold(gray, THRESHOLD[0], THRESHOLD[1], 0)
    edges = imutils.auto_canny(gray)

    # extract contours (3-value unpacking assumes the OpenCV 3.x findContours signature)
    _, cnts, _ = cv2.findContours(edges.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cnts = [c for c in cnts if cv2.contourArea(c) >= MIN_CARD_AREA]

    card, c = None, None
    if cnts:
        # get the largest contour
        c = sorted(cnts, key=cv2.contourArea, reverse=True)[0]

        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.05 * peri, True)
        pts = np.float32(approx)
        x, y, w, h = cv2.boundingRect(c)

        # Find the center point of the card by averaging the x and y of the four corners.
        # average = np.sum(pts, axis=0) / len(pts)
        # cent_x = int(average[0][0])
        # cent_y = int(average[0][1])
        # center = [cent_x, cent_y]

        # Warp the card into a 200x300 flattened image using a perspective transform
        card = util.flattener(img, pts, w, h)
        card = util.cv2_to_pil(card).rotate(ROTATION)
    return card, c, gray, edges
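# Usage sketch for scan (assumptions: FILTER, THRESHOLD, MIN_CARD_AREA, and
# ROTATION are module-level constants, and util.flattener / util.cv2_to_pil
# are the project's own helpers; the values below are illustrative only).
import cv2

FILTER = (11, 17, 17)    # bilateral filter: diameter, sigmaColor, sigmaSpace
THRESHOLD = (127, 255)   # binary threshold: cutoff, max value
MIN_CARD_AREA = 25000    # ignore contours smaller than a card
ROTATION = 0             # degrees to rotate the flattened card

img = cv2.imread("card.jpg")  # hypothetical input file
card, contour, gray, edges = scan(img)
if card is not None:
    card.save("card_flat.png")  # PIL image returned by util.cv2_to_pil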
def read_test(image_path):
    resp = urllib.request.urlopen(image_path)
    image = np.asarray(bytearray(resp.read()), dtype="uint8")
    image = cv2.imdecode(image, cv2.IMREAD_COLOR)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    edged = imutils.auto_canny(gray)

    # Find contours in the edge map, keeping only the largest one,
    # which is presumed to be the nail.
    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    c = max(cnts, key=cv2.contourArea)

    # --- Extract the nail and resize it to a canonical width and height ---
    (x, y, w, h) = cv2.boundingRect(c)
    nail = gray[y:y + h, x:x + w]
    nail = cv2.resize(nail, (200, 100))
    fd = hog(nail, orientations=9, pixels_per_cell=(10, 10), cells_per_block=(2, 2),
             transform_sqrt=True, block_norm="L1")
    return fd.reshape(1, -1)
def process_markers(img):
    hi, wi, _ = img.shape
    blur = cv2.medianBlur(img, 31)
    edge = imutils.auto_canny(blur)
    edge = cv2.dilate(edge, np.ones((5, 5)), iterations=2)
    _, contours, hierarchy = cv2.findContours(np.copy(edge), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    img_tmp = np.copy(img)
    cv2.drawContours(img_tmp, contours, -1, (100, 0, 0), 2)
    cv2.drawContours(edge, contours, -1, 100, 2)
    cnt = 0
    roi_ptr = []
    padding = 10
    for i in range(len(hierarchy[0])):
        if hierarchy[0][i][3] == -1:  # keep only top-level contours
            x, y, w, h = cv2.boundingRect(contours[i])
            if 2500 < w * h < 5e4 and h < 200 and w < 200:
                roi_ptr.append((max(x - padding, 0), max(0, y - padding),
                                min(w + 2 * padding, wi), min(hi, h + 2 * padding)))
                cnt += 1
                # fixed: the original used y + w for the bottom edge; height is h
                cv2.rectangle(img_tmp, (x - padding, y - padding),
                              (x + w + padding, y + h + padding), (0, 0, 255), 2)
    # for x, y, w, h in roi_ptr:
    #     print(find_markers(img[y:y + h, x:x + w]))
    if len(roi_ptr) == 0:
        return None, None, None
    x, y, w, h = roi_ptr[0]
    color, shape = find_markers(img[y:y + h, x:x + w])  # fixed: crop rows with h, not w
    return color, shape, len(roi_ptr)
def LabelDetector(self, AreaValue=2000, extendHeight=10, extendWight=10):
    th = cv2.adaptiveThreshold(self.img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY_INV, 11, 2)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    mol = cv2.morphologyEx(th, cv2.MORPH_OPEN, kernel, iterations=1)
    mol = cv2.morphologyEx(mol, cv2.MORPH_CLOSE, kernel, iterations=7)
    edge = imu.auto_canny(mol)
    edge = cv2.morphologyEx(edge, cv2.MORPH_DILATE, kernel)
    cnts = cv2.findContours(edge, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imu.grab_contours(cnts)
    if len(cnts) != 0:
        (cnts, _) = contours.sort_contours(cnts)
        # return the first (left-most) contour that is large enough
        for cnt in cnts:
            area = cv2.contourArea(cnt)
            if area > AreaValue:
                box = cv2.minAreaRect(cnt)
                box = cv2.boxPoints(box)
                box = perspective.order_points(box)
                box = np.array(box, dtype="int")
                [tl, bl, br, tr] = self.cal_bouding_box(box)
                maxWidth, maxHeight = self.getOjectSize(tl, bl, br, tr)
                ratio = round(maxWidth / maxHeight, 4)
                return box, ratio, edge, area
def auto_canny(self, sigma=0.33):
    """Apply imutils.auto_canny, which picks Canny thresholds around the median pixel intensity."""
    self.mat = imutils.auto_canny(
        image=self.mat,
        sigma=sigma,
    )
    return self
def get_contours(self, base_img):
    """Find the four corners of the answer sheet."""
    # enhance the image: combine horizontal and vertical Sobel gradients
    h = cv2.Sobel(base_img, cv2.CV_32F, 0, 1, -1)
    v = cv2.Sobel(base_img, cv2.CV_32F, 1, 0, -1)
    img = cv2.add(h, v)
    img = cv2.convertScaleAbs(img)
    img = cv2.GaussianBlur(img, (3, 3), 0)
    ret, img = cv2.threshold(img, 120, 255, cv2.THRESH_BINARY)
    kernel = np.ones((3, 3), np.uint8)
    # dilate
    img = cv2.dilate(img, kernel, iterations=2)
    img = auto_canny(img)

    # get the largest contour and its perimeter
    cnt, cnt_perimeter = self.get_max_area_cnt(img)
    base_img_perimeter = (base_img.shape[0] + base_img.shape[1]) * 2

    # threshold on the ratio of the sheet's perimeter to the whole image's perimeter
    CNT_PERIMETER_THRESHOLD = 0.35
    if not cnt_perimeter > CNT_PERIMETER_THRESHOLD * base_img_perimeter:
        logger.error("[get_contours] failed to locate the answer sheet, please upload the image again")
        raise ImageException("Failed to locate the answer sheet, please upload the image again")

    # 10% (0.1) precision, ignoring fine detail
    epsilon = 0.1 * cv2.arcLength(cnt, True)
    # approximate the polygon's vertices and check that there are exactly four
    poly_node_list = cv2.approxPolyDP(cnt, epsilon, True)
    if not poly_node_list.shape[0] == 4:
        logger.error("[get_contours] unsupported answer sheet, please upload the image again")
        raise ImageException("Unsupported answer sheet, please upload the image again")
    return poly_node_list
def canny(mat):
    img = imutils.auto_canny(image=mat, sigma=0.3)
    img = add_frame_labels(
        frame=img,
        labels=[f"canny cost: {canny.cost:6.3f}s"],
        color=colors.get("WHITE"),
    )
    return img
def load_template(image, load_type):
    try:
        # note: cv2.imread returns None for a missing file rather than raising,
        # so a bad name surfaces as an error in the auto_canny step below
        template = cv2.imread(f"{directory}/data/templates/{image}", load_type)
        template = imutils.auto_canny(template)
    except cv2.error:
        print("Input one of the names in the 'data/templates' folder in order to detect")
        return None
    return template
def canny(self, visualization=False):
    from imutils import auto_canny

    # By convention, the input image before processing is named src (source)
    # and the target result image is named dst (destination).
    src = self.get_image()
    dst = auto_canny(src)
    if visualization:
        cv2.imshow("Canny", dst)
        cv2.waitKey(1)
    return dst
def extract_hog_histogram(image, bins=(8, 8, 8)):
    # note: the bins parameter is unused here; it looks left over from a
    # color-histogram variant of this function
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    edged = imutils.auto_canny(gray)
    # `normalise` is the old scikit-image spelling; recent versions use
    # transform_sqrt / block_norm instead
    H = feature.hog(edged, orientations=9, pixels_per_cell=(10, 10),
                    cells_per_block=(2, 2), normalise=True)
    return H
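# Usage sketch for extract_hog_histogram (assumptions: cv2, imutils, and
# skimage.feature are imported at module level as in the function above).
import cv2

image = cv2.imread("sample.jpg")        # hypothetical input file
descriptor = extract_hog_histogram(image)
print(descriptor.shape)                 # 1-D HOG feature vector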
def canny_edge_detection(frame: np.ndarray, sigma=33) -> np.ndarray:
    """
    Automatic edge detection from the imutils library.

    :param frame: A frame.
    :param sigma: A magic number. Shouldn't be changed without testing.
        (Note: imutils' default is 0.33; with sigma=33 the computed
        thresholds saturate to the full 0-255 range.)
    :return: A black and white image of the edges in the frame.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return imutils.auto_canny(gray, sigma=sigma)
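# Usage sketch for canny_edge_detection (assumption: frames come from an
# OpenCV capture device; any BGR image works).
import cv2

cap = cv2.VideoCapture(0)
ok, frame = cap.read()
if ok:
    edges = canny_edge_detection(frame)
    cv2.imshow("Edges", edges)
    cv2.waitKey(0)
cap.release()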
def _reset(self):
    self.cost_matrix = None
    self.contours = []
    self.paths = []
    self.count = 0      # number of closed nodes
    self.max_level = 0  # maximum level of nodes, used when displaying the path tree
    self.seed = None
    self.pq = None
    self.pixel_map = None
    self.edges = imutils.auto_canny(self.img)
def grab_screen():
    try:
        screenshot(f"{directory}/data/screenshots/screen_shot.JPG")
        image = cv2.imread(f"{directory}/data/screenshots/screen_shot.JPG")
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = imutils.auto_canny(image)
        return image
    except cv2.error:
        print("Make sure you have 'data/screenshots' folder")
        return None
def get_err(frame):
    # frame[-speed_patch.shape[0]:, -speed_patch.shape[1]:] = speed_patch
    frame[-250:, :250] = map_patch
    frame = frame[int(frame.shape[0] / 1.6):-130, 400:-400]
    h, w, _ = frame.shape
    frame = cv2.GaussianBlur(frame, initial_blur_kernel, 0)
    frame = imutils.auto_canny(frame)
    frame = er_cycle(frame, 5)
    # cv2.imshow("w", frame)

    def foo(r):
        # mean column index of the edge pixels in one row (0 if the row is empty)
        tmp = np.where(r > 0)
        if tmp[0].shape[0] == 0:
            return 0
        return int(np.mean(tmp))

    centers = np.apply_along_axis(foo, 1, frame)
    new_frame = np.copy(frame)
    new_frame.fill(0)
    for y in range(centers.shape[0]):
        if centers[y] != 0:
            cv2.circle(new_frame, (centers[y], y), 1, 255, -1)
    kernel = np.ones((5, 5), np.uint8)
    new_frame = cv2.dilate(new_frame, kernel, iterations=1)
    new_frame = cv2.erode(new_frame, kernel, iterations=1)
    frame = new_frame

    points = list(zip(*np.where(frame > 0)[::-1]))
    # print(len(points))
    if len(points) < 2:
        err = 0
        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
    else:
        # fit a line through the lane points and measure its horizontal
        # offset from the frame center at the bottom row
        [vx, vy, x, y] = cv2.fitLine(np.array(points), cv2.DIST_WELSCH, 0, 0.01, 0.01)
        m = 100
        x0, y0 = int(x[0] - m * vx[0]), int(y[0] - m * vy[0])
        x1, y1 = int(x[0] + m * vx[0]), int(y[0] + m * vy[0])
        err_y = h - 1
        if y1 == y0:
            err_x = x1
        else:
            err_x = x1 + (err_y - y1) * (x0 - x1) / (y0 - y1)
        err = err_x - w // 2
        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        frame = cv2.line(frame, (x0, y0), (x1, y1), (255, 0, 0), 5)
    # `key` and `out` are module-level globals (current keypress and video writer)
    cv2.putText(frame, 'key: %s' % key, (10, h - 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
    cv2.putText(frame, 'err: %d' % err, (10, h - 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
    out.write(frame)
    return err
def Contours(gray_image, print_true=True, test=False):
    # temp = cv2.Canny(gray_image, 50, 100)
    temp = imutils.auto_canny(gray_image)
    if test:
        cv2.imshow("Canny A", temp)
    else:
        temp = cv2.dilate(temp, None, iterations=1)
        temp = cv2.erode(temp, None, iterations=1)
    if test:
        cv2.imshow("Canny B", temp)
    if print_true:
        cv2.imshow("EdgeDETECT", temp)
    cnts = cv2.findContours(temp.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    (cnts, _) = contours.sort_contours(cnts)
    return cnts
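# Usage sketch for Contours (assumptions: `contours` is imutils.contours,
# imported as in the sorting example later in this collection, and the input
# is a grayscale image).
import cv2

gray = cv2.imread("shapes.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input
cnts = Contours(gray, print_true=False)
print("found", len(cnts), "contours, sorted left-to-right")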
def process_image(raw, mark_image=False):
    cannyd = imutils.auto_canny(raw)
    height, width = cannyd.shape
    marker_width = coords['width']
    marker_halfwidth = int(marker_width * .5)
    font = cv2.FONT_HERSHEY_SIMPLEX
    fontScale = .5
    color = (255, 0, 0)
    thickness = 2
    spaces_left = len(coords['coords'])
    for coord in coords['coords']:
        # clamp the marker box to the image bounds
        x = int(coord['x']) - marker_width
        y = int(coord['y']) - marker_halfwidth
        x_max = x + marker_width
        y_max = y + marker_width
        if x_max > width:
            x_max = width
        if y_max > height:
            y_max = height
        if y < 0:
            y = 0
        if x < 0:
            x = 0
        # a space counts as occupied when enough edge pixels fall inside its box
        box = cannyd[y:y_max, x:x_max]
        n_white_pix = np.sum(box == 255)
        percent_white = round(n_white_pix / marker_halfwidth ** 2, 2)
        if percent_white > .1:
            spaces_left -= 1
        if mark_image:
            start_point = (x, y)
            end_point = (x_max, y_max)
            cannyd = cv2.rectangle(cannyd, start_point, end_point, (255, 0, 0), 2)
            cannyd = cv2.putText(cannyd, str(percent_white), (x, y - marker_halfwidth),
                                 font, fontScale, color, thickness, cv2.LINE_AA)
    return cannyd, spaces_left
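# Usage sketch for process_image (assumption: the module-level `coords` dict
# holds marker positions; the structure below is inferred from the accesses
# coords['width'] and coord['x'] / coord['y'] above and is illustrative only).
import cv2

coords = {"width": 40, "coords": [{"x": 120, "y": 80}, {"x": 220, "y": 80}]}
raw = cv2.imread("lot.jpg", cv2.IMREAD_GRAYSCALE)  # hypothetical input
marked, spaces_left = process_image(raw, mark_image=True)
print("free spaces:", spaces_left)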
def auto_canny(img_path):
    # note: this helper shadows imutils.auto_canny by name
    img = cv2.imread(img_path, 0)
    img_canny = imutils.auto_canny(img)
    img_skeleton = imutils.skeletonize(img, size=(3, 3))

    plt.subplot(131)
    plt.imshow(imutils.opencv2matplotlib(img_canny))
    plt.axis('off')
    plt.subplot(132)
    plt.imshow(imutils.opencv2matplotlib(img))
    plt.axis('off')
    plt.subplot(133)
    plt.imshow(imutils.opencv2matplotlib(img_skeleton))
    plt.axis('off')
    plt.show()
def apply_filter(img, select="Gray"):
    filters = {}
    output = img.copy()
    filters["Original"] = output
    filters["Blur"] = cv2.GaussianBlur(output, (3, 3), 0)
    filters["Gray"] = cv2.cvtColor(filters["Blur"], cv2.COLOR_BGR2GRAY)
    filters["Thresh"] = cv2.threshold(filters["Gray"], 113, 255, cv2.THRESH_BINARY)[1]  # 60
    filters["Thresh-Adaptive"] = cv2.adaptiveThreshold(filters["Gray"], 255,
                                                       cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                                       cv2.THRESH_BINARY, 11, 2)
    filters["Canny"] = cv2.Canny(filters["Blur"], 100, 200)
    filters["Canny-Auto"] = imutils.auto_canny(filters["Blur"])
    return filters[select]
def lane_detection_pipeline(img, gaussian_kernel_size=3, rho=1, theta=np.pi / 180.0,
                            threshold=40, min_line_len=40, max_line_gap=80):
    result = grayscale(img)
    result = gaussian_blur(result, gaussian_kernel_size)
    result = auto_canny(result)
    result = region_of_interest(result, create_region_of_interest(img.shape))
    result = cv2.equalizeHist(result)
    lines_image = hough_lines(result, rho, theta, threshold, min_line_len, max_line_gap)
    return weighted_img(lines_image, img)
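# Usage sketch for lane_detection_pipeline (assumptions: grayscale,
# gaussian_blur, region_of_interest, create_region_of_interest, hough_lines,
# and weighted_img are helpers defined alongside the pipeline).
import cv2

img = cv2.imread("road.jpg")              # hypothetical road frame
annotated = lane_detection_pipeline(img)  # Hough defaults as tuned above
cv2.imwrite("road_lanes.jpg", annotated)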
def extract_image_features(self, data):
    # Please do not modify the header above

    # extract a feature vector from each image
    feature_data = []
    for img in data:
        # gray = color.rgb2gray(img)
        edged = imutils.auto_canny(img)
        # `visualise` is the old scikit-image spelling (later renamed `visualize`)
        (H, hogImage) = feature.hog(edged, orientations=9, pixels_per_cell=(90, 90),
                                    cells_per_block=(2, 2), transform_sqrt=True,
                                    visualise=True)
        # viewer = ImageViewer(hogImage)
        # viewer.show()
        feature_data.append(H)
    return feature_data
def _create_edge_map(self, sigma=0.33, dilate_kernel_size=7):
    """Convert the image into a dilated edge map.

    :param sigma: value passed to imutils.auto_canny
    :param dilate_kernel_size: kernel size (diameter) used during the dilation step
    :return: numpy array/OpenCV gray image containing the dilated edge map
    """
    edge_map = imutils.auto_canny(self._background, sigma=sigma)
    if dilate_kernel_size > 2:
        # structuring elements need an odd size
        if dilate_kernel_size % 2 == 0:
            dilate_kernel_size -= 1
        kernel = cv2.getStructuringElement(
            cv2.MORPH_ELLIPSE, (dilate_kernel_size, dilate_kernel_size))
        edge_map = cv2.dilate(edge_map, kernel)
    return edge_map
def displayFilters(self, select="Gray"):
    filters = {}
    output = self.frame
    filters["Original"] = output
    filters["Blur"] = cv2.GaussianBlur(output, (7, 7), 0)
    filters["Gray"] = cv2.cvtColor(filters["Blur"], cv2.COLOR_BGR2GRAY)
    filters["Thresh"] = cv2.threshold(filters["Gray"], 145, 255, cv2.THRESH_BINARY)[1]  # 60
    filters["Thresh-Adaptive"] = cv2.adaptiveThreshold(filters["Gray"], 255,
                                                       cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                                       cv2.THRESH_BINARY, 5, 1)
    # filters["Thresh-Adaptive"] = cv2.dilate(filters["Thresh-Adaptive"], None, iterations=0)
    filters["Canny"] = cv2.Canny(filters["Blur"], 100, 200)
    filters["Canny-Auto"] = imutils.auto_canny(filters["Blur"])
    cv2.imshow("Camera view:", filters[select])
def canny_edge(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    cv2.imshow("Gray", gray)
    cv2.imshow("Blurred", blurred)

    # compute "wide", "mid-range", and "tight" thresholds for the edges
    wide = cv2.Canny(blurred, 10, 200)
    mid = cv2.Canny(blurred, 30, 150)
    tight = cv2.Canny(blurred, 245, 250)
    auto_range = imutils.auto_canny(blurred)
    cv2.imshow("Wide Edge Map", wide)
    cv2.imshow("Mid Edge Map", mid)
    cv2.imshow("Tight Edge Map", tight)
    cv2.imshow("Auto canny", auto_range)
def trainData():
    print('training Started...')
    data = []
    labels = []
    count = 0
    dataFile = open('data.txt', 'w')
    labelFile = open('labels.txt', 'w')
    for car in vehicleList[:40000]:
        # extract the make of the car
        make = car.get('carType')
        imagePath = car.get('imagePath')

        # load the image, convert it to grayscale, and detect edges
        image = cv2.imread(imagePath)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        edged = imutils.auto_canny(gray)

        # find contours in the edge map, keeping only the largest one,
        # which is presumed to be the car logo
        cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]  # fixed: is_cv2 takes no contour argument
        c = max(cnts, key=cv2.contourArea)

        # extract the logo of the car and resize it to a canonical width and height
        (x, y, w, h) = cv2.boundingRect(c)
        logo = gray[y:y + h, x:x + w]
        logo = cv2.resize(logo, (200, 100))

        # extract a Histogram of Oriented Gradients from the logo
        H = feature.hog(logo, orientations=9, pixels_per_cell=(10, 10),
                        cells_per_block=(2, 2), transform_sqrt=True, block_norm="L1")

        # update the data and labels
        data.append(H)
        dataFile.write(str(H))
        dataFile.write("\n")
        labels.append(make)
        labelFile.write(str(make))
        labelFile.write("\n")
        count += 1
    return data, labels
def Predict(image, model):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    edge = imutils.auto_canny(gray)  # note: computed but unused below
    resized = cv2.resize(gray, image_resize)
    H, hogImage = feature.hog(resized, orientations=9, pixels_per_cell=ppc,
                              cells_per_block=cpb, transform_sqrt=True,
                              block_norm="L1", visualize=True)
    pred = model.predict(H.reshape(1, -1))[0]
    hogImage = exposure.rescale_intensity(hogImage, out_range=(0, 255))
    hogImage = hogImage.astype("uint8")
    # cv2.imshow("Hog Image No. {}".format(i + 1), hogImage)
    cv2.putText(image, pred.title(), (10, 35), cv2.FONT_HERSHEY_SIMPLEX,
                1.0, (110, 34, 59), 3)
    cv2.imshow("Test Image No. {}".format(i + 1), image)  # relies on a global loop index i
def edges(self, frame=None, **params):
    rs = None
    image = self.frame(frame, **params)
    if image is not None:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (3, 3), 0)
        # apply Canny edge detection using a wide threshold, a tight
        # threshold, and an automatically determined threshold
        # edged = cv2.Canny(blurred, 10, 200)
        # edged = cv2.Canny(blurred, 225, 250)
        edged = imutils.auto_canny(blurred)
        rs = {
            'frame': image,
            'iframe': gray,
            'oframe': edged,
        }
    return rs
def Edge_Canny_auto(self):
    # read the image
    im = cv2.imread(self.Image)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (3, 3), 0)

    # apply Canny edge detection using a wide threshold, a tight
    # threshold, and an automatically determined threshold
    wide = cv2.Canny(blurred, 10, 200)
    tight = cv2.Canny(blurred, 225, 250)
    auto = imutils.auto_canny(blurred)

    # show the images
    cv2.imshow("Original", im)
    cv2.imshow("Wide", wide)
    cv2.imshow("Tight", tight)
    cv2.imshow("Auto", auto)
    cv2.waitKey(0)
def approx_realworld():
    image = cv2.imread("../img/receipt.png")
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    edged = imutils.auto_canny(gray, 3.33)  # note: imutils' default sigma is 0.33
    cv2.imshow("Original", image)
    cv2.imshow("Edge", edged)

    # 3-value unpacking assumes the OpenCV 3.x findContours signature
    (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:7]
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.01 * peri, True)
        print("original: {}, approx: {}".format(len(c), len(approx)))
        if len(approx) == 4:
            cv2.drawContours(image, [approx], -1, (0, 255, 0), 3)
    cv2.imshow("Image", image)
cv2.imshow("Skeleton", skeleton) cv2.waitKey(0) cv2.destroyAllWindows() # 5. MATPLOTLIB # INCORRECT: show the image without converting color spaces plt.figure("Incorrect") plt.imshow(cactus) # CORRECT: convert color spaces before using plt.imshow plt.figure("Correct") plt.imshow(imutils.opencv2matplotlib(cactus)) plt.show() # 6. URL TO IMAGE # load an image from a URL, convert it to OpenCV, format, and # display it url = "http://pyimagesearch.com/static/pyimagesearch_logo_github.png" logo = imutils.url_to_image(url) cv2.imshow("URL to Image", logo) cv2.waitKey(0) cv2.destroyAllWindows() # 7. AUTO CANNY # convert the logo to grayscale and automatically detect edges gray = cv2.cvtColor(logo, cv2.COLOR_BGR2GRAY) edgeMap = imutils.auto_canny(gray) cv2.imshow("Original", logo) cv2.imshow("Automatic Edge Map", edgeMap) cv2.waitKey(0)
# note: scipy.ndimage.imread is deprecated; cv2.imread or imageio work the same way here
image = ndimage.imread("..\\Exemple-annonces\\ordi7.jpg")
plt.imshow(image)
plt.show()

hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
lower = np.array([115, 135, 135])  # HSV lower bound
upper = np.array([125, 255, 255])  # HSV upper bound
red = cv2.inRange(hsv, lower, upper)

gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
plt.imshow(gray, cmap='gray')
plt.show()

edged = imutils.auto_canny(red)
plt.imshow(edged, cmap='gray')
plt.show()

# 3-value unpacking assumes the OpenCV 3.x findContours signature
im2, contours, hierarchy = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours.sort(key=cv2.contourArea, reverse=True)
area = [cv2.contourArea(contours[i]) for i in range(len(contours))]

(x, y, w, h) = cv2.boundingRect(contours[1])
logo = gray[y:y + h, x:x + w]
plt.imshow(logo)
plt.show()

H = feature.hog(logo, orientations=9, pixels_per_cell=(10, 10),
                cells_per_block=(2, 2), transform_sqrt=True, block_norm="L1")
# USAGE
# BE SURE TO INSTALL 'imutils' PRIOR TO EXECUTING THIS COMMAND
# python sorting_contours.py

# import the necessary packages
from imutils import contours
import imutils
import cv2

# load the shapes image, clone it, convert it to grayscale, and
# detect edges in the image
image = cv2.imread("../demo_images/shapes.png")
orig = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
edged = imutils.auto_canny(gray)

# find contours in the edge map
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# loop over the (unsorted) contours and label them
for (i, c) in enumerate(cnts):
    orig = contours.label_contour(orig, c, i, color=(240, 0, 159))

# show the original image
cv2.imshow("Original", orig)

# loop over the sorting methods
for method in ("left-to-right", "right-to-left", "top-to-bottom", "bottom-to-top"):
    # sort the contours