def detect_lines(self, img_grey, img_orig):
    """
    Detect lines within the image. To switch between standard and
    probabilistic Hough transform, use cv.CV_HOUGH_STANDARD or
    cv.CV_HOUGH_PROBABILISTIC.
    """
    # Set transform method ('standard', 'probabilistic')
    transform_method = 'probabilistic'

    # Clear out our storage
    cv.cvClearMemStorage(self.lines_storage)

    sz = cv.cvSize(img_grey.width & -2, img_grey.height & -2)
    img_dst_color = cv.cvCreateImage(cv.cvGetSize(img_orig), 8, 3)
    tgrey = cv.cvCreateImage(sz, 8, 1)
    cv.cvCanny(tgrey, img_grey, 50, 200, 3)

    if transform_method == 'standard':
        lines = cv.cvHoughLines2(img_grey, self.lines_storage,
                                 cv.CV_HOUGH_STANDARD,
                                 1, cv.CV_PI / 180, 100, 0, 0)
    else:
        lines = cv.cvHoughLines2(img_grey, self.lines_storage,
                                 cv.CV_HOUGH_PROBABILISTIC,
                                 1, cv.CV_PI / 180, 50, 50, 10)

    return lines
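For reference, a minimal usage sketch (not part of the original snippet) showing how the result of the probabilistic transform could be drawn. It assumes the old SWIG-style cv/highgui modules and a hypothetical `detector` instance of the class above; each element of the returned sequence is a pair of CvPoint endpoints.

# Hypothetical usage sketch (assumed names, not from the original source):
# draw each probabilistic Hough segment onto the color image.
lines = detector.detect_lines(img_grey, img_orig)
for line in lines:
    # line[0] and line[1] are the two endpoints of the detected segment
    cv.cvLine(img_orig, line[0], line[1], cv.CV_RGB(255, 0, 0), 3, 8)
highgui.cvShowImage('lines', img_orig)
highgui.cvWaitKey(0)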
def findEdges(original, out, threshold1=100, threshold2=None):
    """Return a new edge detected image with a specified threshold"""
    warnings.warn("Use findBWEdges instead unless you really need colored edges.",
                  DeprecationWarning)

    # Define threshold2
    if threshold2 == None:
        threshold2 = threshold1 * 3

    # Create two pictures with only one channel: one for a b/w copy
    # and one for storing the edges found in the b/w picture
    gray = cv.cvCreateImage(cv.cvGetSize(original), 8, 1)
    edge = cv.cvCreateImage(cv.cvGetSize(original), 8, 1)

    # Create the b/w copy of the original
    cv.cvCvtColor(original, gray, cv.CV_BGR2GRAY)

    # Blur the b/w copy, but put the result into the edge pic
    cv.cvSmooth(gray, edge, cv.CV_BLUR, 3, 3, 0)

    # Negate the b/w copy of the original with the newly blurred
    # b/w copy. This will make edges stand out
    cv.cvNot(gray, edge)

    # Run an edge-finding algorithm called 'Canny'.
    # It will analyse the first argument and store the
    # resulting picture in the second argument
    cv.cvCanny(gray, edge, threshold1, threshold2)

    # We initialize our out-image to black
    cv.cvSetZero(out)

    # Finally, we use the found edges, which are b/w, as
    # a mask for copying the colored edges from the original
    # to the out-image
    cv.cvCopy(original, out, edge)
def on_trackbar(position):

    cv.cvSmooth(gray, edge, cv.CV_BLUR, 3, 3, 0)
    cv.cvNot(gray, edge)

    # run the edge detector on the grayscale image
    cv.cvCanny(gray, edge, position, position * 3, 3)

    # reset
    cv.cvSetZero(col_edge)

    # copy edge points
    cv.cvCopy(image, col_edge, edge)

    # show the image
    highgui.cvShowImage(win_name, col_edge)
def on_trackbar(position):
    # The next two calls probably have no effect on the final result
    cv.cvSmooth(gray, edge, cv.CV_BLUR, 3, 3, 0)  # smooth the image
    cv.cvNot(gray, edge)                          # bitwise inversion of the array elements

    # run the edge detector on the grayscale image
    cv.cvCanny(gray, edge, position, position * 3, 3)  # Canny edge detection

    # reset
    cv.cvSetZero(col_edge)  # zero out the array

    # copy edge points
    cv.cvCopy(image, col_edge, edge)  # the edge mask controls what gets copied

    # show the image
    highgui.cvShowImage(win_name, col_edge)
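A short sketch of how callbacks like the ones above are typically wired up with the old highgui trackbar API; the window name, trackbar name, and value range are illustrative, not taken from the original samples.

# Illustrative setup for the on_trackbar callbacks (assumed names/ranges).
win_name = 'Edge'
highgui.cvNamedWindow(win_name, highgui.CV_WINDOW_AUTOSIZE)
# highgui calls on_trackbar(position) every time the slider moves
highgui.cvCreateTrackbar('Threshold', win_name, 1, 100, on_trackbar)
# render once with the initial position, then wait for a key press
on_trackbar(1)
highgui.cvWaitKey(0)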
def findBWEdges(original, out, threshold1, threshold2):
    """Identical to findEdges except that this returns white edges
    on a black background. We really don't need colored edges any
    longer. This also makes it easy to do a manual merge of the edge
    and blur pictures."""
    if threshold2 == None:
        threshold2 = threshold1 * 3

    gray = cv.cvCreateImage(cv.cvGetSize(original), 8, 1)

    cv.cvCvtColor(original, gray, cv.CV_BGR2GRAY)
    cv.cvSmooth(gray, out, cv.CV_BLUR, 3, 3, 0)
    cv.cvNot(gray, out)
    cv.cvCanny(gray, out, threshold1, threshold2)

    return out
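A rough usage sketch for findBWEdges (the file name and thresholds are assumptions): the output image must be single-channel, and passing None for threshold2 lets the helper derive it as 3 * threshold1.

# Hypothetical usage of findBWEdges (input file and thresholds are assumed).
original = highgui.cvLoadImage('input.jpg')
edges = cv.cvCreateImage(cv.cvGetSize(original), 8, 1)  # single-channel output
findBWEdges(original, edges, 70, None)  # threshold2 defaults to 3 * threshold1
highgui.cvShowImage('bw edges', edges)
highgui.cvWaitKey(0)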
def detect_squares(self, img_grey, img_orig):
    """ Find squares within the video stream and draw them """
    cv.cvClearMemStorage(self.faces_storage)
    N = 11
    thresh = 5
    sz = cv.cvSize(img_grey.width & -2, img_grey.height & -2)
    timg = cv.cvCloneImage(img_orig)
    pyr = cv.cvCreateImage(cv.cvSize(sz.width / 2, sz.height / 2), 8, 3)

    # create empty sequence that will contain points -
    # 4 points per square (the square's vertices)
    squares = cv.cvCreateSeq(0, cv.sizeof_CvSeq, cv.sizeof_CvPoint,
                             self.squares_storage)
    squares = cv.CvSeq_CvPoint.cast(squares)

    # select the maximum ROI in the image
    # with the width and height divisible by 2
    subimage = cv.cvGetSubRect(timg, cv.cvRect(0, 0, sz.width, sz.height))

    cv.cvReleaseImage(timg)

    # down-scale and upscale the image to filter out the noise
    cv.cvPyrDown(subimage, pyr, 7)
    cv.cvPyrUp(pyr, subimage, 7)
    cv.cvReleaseImage(pyr)
    tgrey = cv.cvCreateImage(sz, 8, 1)

    # find squares in every color plane of the image
    for c in range(3):
        # extract the c-th color plane
        channels = [None, None, None]
        channels[c] = tgrey
        cv.cvSplit(subimage, channels[0], channels[1], channels[2], None)

        for l in range(N):
            # hack: use Canny instead of zero threshold level.
            # Canny helps to catch squares with gradient shading
            if l == 0:
                # apply Canny. Take the upper threshold from the slider
                # and set the lower to 0 (which forces edges merging)
                cv.cvCanny(tgrey, img_grey, 0, thresh, 5)
                # dilate canny output to remove potential
                # holes between edge segments
                cv.cvDilate(img_grey, img_grey, None, 1)
            else:
                # apply threshold if l != 0:
                #   tgrey(x,y) = grey(x,y) < (l+1)*255/N ? 255 : 0
                cv.cvThreshold(tgrey, img_grey, (l + 1) * 255 / N, 255,
                               cv.CV_THRESH_BINARY)

            # find contours and store them all as a list
            count, contours = cv.cvFindContours(img_grey,
                                                self.squares_storage,
                                                cv.sizeof_CvContour,
                                                cv.CV_RETR_LIST,
                                                cv.CV_CHAIN_APPROX_SIMPLE,
                                                cv.cvPoint(0, 0))

            if not contours:
                continue

            # test each contour
            for contour in contours.hrange():
                # approximate contour with accuracy proportional
                # to the contour perimeter
                result = cv.cvApproxPoly(contour, cv.sizeof_CvContour,
                                         self.squares_storage,
                                         cv.CV_POLY_APPROX_DP,
                                         cv.cvContourPerimeter(contours) * 0.02,
                                         0)
                # square contours should have 4 vertices after approximation,
                # a relatively large area (to filter out noisy contours),
                # and be convex.
                # Note: the absolute value of the area is used because the
                # area may be positive or negative, in accordance with the
                # contour orientation
                if (result.total == 4 and
                        abs(cv.cvContourArea(result)) > 1000 and
                        cv.cvCheckContourConvexity(result)):
                    s = 0
                    for i in range(5):
                        # find minimum angle between joint
                        # edges (maximum of cosine)
                        if i >= 2:
                            t = abs(self.squares_angle(result[i],
                                                       result[i - 2],
                                                       result[i - 1]))
                            if s < t:
                                s = t
                    # if cosines of all angles are small
                    # (all angles are ~90 degrees) then write the quadrangle
                    # vertices to the resultant sequence
                    if s < 0.3:
                        for i in range(4):
                            squares.append(result[i])

    cv.cvReleaseImage(tgrey)
    return squares
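detect_squares relies on a squares_angle helper that is not shown in the snippet. A plausible implementation, modeled on OpenCV's classic squares sample rather than taken from the original code, returns the cosine of the angle between the two edges meeting at pt0:

# Possible implementation of the squares_angle helper used above
# (an assumption, not shown in the original snippet): cosine of the
# angle between vectors pt0->pt1 and pt0->pt2.
from math import sqrt

def squares_angle(self, pt1, pt2, pt0):
    dx1 = pt1.x - pt0.x
    dy1 = pt1.y - pt0.y
    dx2 = pt2.x - pt0.x
    dy2 = pt2.y - pt0.y
    # small epsilon avoids division by zero for degenerate contours
    return ((dx1 * dx2 + dy1 * dy2) /
            sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2) + 1e-10))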
highgui.cvMoveWindow('Image Display Window', 10, 10)

# load image
image = highgui.cvLoadImage(sys.argv[1])

# create image arrays
grayimage = cv.cvCreateImage(cv.cvGetSize(image), 8, 1)
cannyedges = cv.cvCreateImage(cv.cvGetSize(image), 8, 1)

# convert to grayscale
cv.cvCvtColor(image, grayimage, cv.CV_BGR2GRAY)

# Canny(image, edges, threshold1, threshold2, aperture_size=3)
# implements the Canny algorithm for edge detection.
cv.cvCanny(grayimage, cannyedges, 150, 450, 3)

# This is the line that throws the error
storage = cv.cvCreateMat(50, 1, cv.CV_32FC3)
cv.cvSetZero(storage)

# circles = cv.cvHoughCircles(grayimage, storage, cv.CV_HOUGH_GRADIENT, 2, grayimage.height/4, 150, 40, long(sys.argv[2]), long(sys.argv[3]))
# circles = cv.cvHoughCircles(grayimage, storage, cv.CV_HOUGH_GRADIENT, 1, grayimage.height, 200, 40, long(sys.argv[2]), long(sys.argv[3]))
circles = cv.cvHoughCircles(grayimage, storage, cv.CV_HOUGH_GRADIENT, 1,
                            grayimage.width, 150, 40,
                            long(sys.argv[2]), grayimage.width)

print storage
for i in storage:
    print i[0], i[1], i[2]
    center = cv.cvRound(i[0]), cv.cvRound(i[1])
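The snippet stops after computing each circle's center. A hedged sketch of how the loop might be finished, drawing each circle that cvHoughCircles wrote into `storage` onto the color image (this continuation is assumed, not part of the original post):

# Assumed continuation: draw each detected circle and display the result.
for i in storage:
    center = cv.cvRound(i[0]), cv.cvRound(i[1])
    radius = cv.cvRound(i[2])
    cv.cvCircle(image, center, radius, cv.CV_RGB(255, 0, 0), 2, cv.CV_AA, 0)

highgui.cvShowImage('Image Display Window', image)
highgui.cvWaitKey(0)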
def detect_squares(self, img):
    """ Find squares within the video stream and draw them """
    N = 11
    thresh = 5
    sz = cv.cvSize(img.width & -2, img.height & -2)
    timg = cv.cvCloneImage(img)
    gray = cv.cvCreateImage(sz, 8, 1)
    pyr = cv.cvCreateImage(cv.cvSize(sz.width / 2, sz.height / 2), 8, 3)

    # create empty sequence that will contain points -
    # 4 points per square (the square's vertices)
    squares = cv.cvCreateSeq(0, cv.sizeof_CvSeq, cv.sizeof_CvPoint, self.storage)
    squares = cv.CvSeq_CvPoint.cast(squares)

    # select the maximum ROI in the image
    # with the width and height divisible by 2
    subimage = cv.cvGetSubRect(timg, cv.cvRect(0, 0, sz.width, sz.height))

    # down-scale and upscale the image to filter out the noise
    cv.cvPyrDown(subimage, pyr, 7)
    cv.cvPyrUp(pyr, subimage, 7)
    tgray = cv.cvCreateImage(sz, 8, 1)

    # find squares in every color plane of the image
    for c in range(3):
        # extract the c-th color plane
        channels = [None, None, None]
        channels[c] = tgray
        cv.cvSplit(subimage, channels[0], channels[1], channels[2], None)

        for l in range(N):
            # hack: use Canny instead of zero threshold level.
            # Canny helps to catch squares with gradient shading
            if l == 0:
                # apply Canny. Take the upper threshold from the slider
                # and set the lower to 0 (which forces edges merging)
                cv.cvCanny(tgray, gray, 0, thresh, 5)
                # dilate canny output to remove potential
                # holes between edge segments
                cv.cvDilate(gray, gray, None, 1)
            else:
                # apply threshold if l != 0:
                #   tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                cv.cvThreshold(tgray, gray, (l + 1) * 255 / N, 255,
                               cv.CV_THRESH_BINARY)

            # find contours and store them all as a list
            count, contours = cv.cvFindContours(gray, self.storage,
                                                cv.sizeof_CvContour,
                                                cv.CV_RETR_LIST,
                                                cv.CV_CHAIN_APPROX_SIMPLE,
                                                cv.cvPoint(0, 0))

            if not contours:
                continue

            # test each contour
            for contour in contours.hrange():
                # approximate contour with accuracy proportional
                # to the contour perimeter
                result = cv.cvApproxPoly(
                    contour, cv.sizeof_CvContour, self.storage,
                    cv.CV_POLY_APPROX_DP,
                    cv.cvContourPerimeter(contours) * 0.02, 0)
                # square contours should have 4 vertices after approximation,
                # a relatively large area (to filter out noisy contours),
                # and be convex.
                # Note: the absolute value of the area is used because the
                # area may be positive or negative, in accordance with the
                # contour orientation
                if (result.total == 4 and
                        abs(cv.cvContourArea(result)) > 1000 and
                        cv.cvCheckContourConvexity(result)):
                    s = 0
                    for i in range(5):
                        # find minimum angle between joint
                        # edges (maximum of cosine)
                        if i >= 2:
                            t = abs(self.squares_angle(result[i],
                                                       result[i - 2],
                                                       result[i - 1]))
                            if s < t:
                                s = t
                    # if cosines of all angles are small
                    # (all angles are ~90 degrees) then write the quadrangle
                    # vertices to the resultant sequence
                    if s < 0.3:
                        for i in range(4):
                            squares.append(result[i])

    i = 0
    while i < squares.total:
        pt = []
        # read 4 vertices
        pt.append(squares[i])
        pt.append(squares[i + 1])
        pt.append(squares[i + 2])
        pt.append(squares[i + 3])

        # draw the square as a closed polyline
        cv.cvPolyLine(img, [pt], 1, cv.CV_RGB(0, 255, 0), 3, cv.CV_AA, 0)
        i += 4

    return img
def on_trackbar1(position):
    global pos1
    global pos2
    global pos3
    global pos4
    global pos5
    global pos6
    global pos7
    global img
    global gray
    global edges
    print
    print position, pos2, pos3, pos4, pos5, pos6, pos7

    temp = cv.cvCloneImage(img)
    gray = cv.cvCreateImage(cv.cvGetSize(temp), 8, 1)
    edges = cv.cvCreateImage(cv.cvGetSize(temp), 8, 1)
    dst = cv.cvCreateImage(cv.cvSize(256, 256), 8, 3)

    src = cv.cvCloneImage(img)
    src2 = cv.cvCreateImage(cv.cvGetSize(src), 8, 3)

    cv.cvCvtColor(img, gray, cv.CV_BGR2GRAY)

    cv.cvCanny(gray, edges, position, pos2, 3)
    cv.cvSmooth(edges, edges, cv.CV_GAUSSIAN, 9, 9)

    storage = cv.cvCreateMat(50, 1, cv.CV_32FC3)
    cv.cvSetZero(storage)

    try:
        circles = cv.cvHoughCircles(gray, storage, cv.CV_HOUGH_GRADIENT,
                                    1, float(pos3), float(pos2),
                                    float(pos4), long(pos5), long(pos6))
        # print storage
        for i in storage:
            print "Center: ", i[0], i[1], " Radius: ", i[2]
            center = cv.cvRound(i[0]), cv.cvRound(i[1])
            radius = cv.cvRound(i[2])
            cv.cvCircle(temp, center, radius, cv.CV_RGB(255, 0, 0), 1, cv.CV_AA, 0)
            cv.cvCircle(edges, center, radius, cv.CV_RGB(255, 255, 255), 1, cv.CV_AA, 0)

            if radius > 200:
                print "Circle found over 200 Radius"
                center_crop_topleft = ((center[0] - (radius - pos7)),
                                       (center[1] - (radius - pos7)))
                center_crop_bottomright = ((center[0] + (radius - pos7)),
                                           (center[1] + (radius - pos7)))
                print "crop top left:     ", center_crop_topleft
                print "crop bottom right: ", center_crop_bottomright

                center_crop = cv.cvGetSubRect(
                    src,
                    (center_crop_topleft[0], center_crop_topleft[1],
                     (center_crop_bottomright[0] - center_crop_topleft[0]),
                     (center_crop_bottomright[1] - center_crop_topleft[1])))
                # center_crop = cv.cvGetSubRect(src, (50, 50, radius/2, radius/2))
                cvShowImage("center_crop", center_crop)
                print "center_crop created"

                # mark the found circle's center with a blue point and a blue
                # circle of (radius - pos7)
                cv.cvCircle(temp, center, 2, cv.CV_RGB(0, 0, 255), 3, cv.CV_AA, 0)
                cv.cvCircle(temp, center, (radius - pos7), cv.CV_RGB(0, 0, 255), 3, cv.CV_AA, 0)

                # cvLogPolar(src, dst, center, 48, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS)

                # this will draw a smaller circle outlining the center circle
                # pos7 = int(pos7 / 2.5)
                # cv.cvCircle(dst, (img_size.width - pos7, 0), 2, cv.CV_RGB(0, 0, 255), 3, cv.CV_AA, 0)
                # cv.cvLine(dst, (img_size.width - pos7 - 1, 0), (img_size.width - pos7 - 1, img_size.height), cv.CV_RGB(0, 0, 255), 1, 8, 0)
                # cvShowImage("log-polar", dst)
                # print radius, (radius - pos7)

                # cropped = cv.cvCreateImage((pos7, img_size.height), 8, 3)
                # cropped2 = cv.cvCreateImage((pos7, img_size.height), 8, 3)
                # coin_edge_img = cv.cvGetSubRect(dst, (img_size.width - pos7, 0, pos7, img_size.height))

                # to create the center cropped part of coin
                # img_size = cvGetSize(scr)

                # cvCopy(coin_edge_img, cropped)
                # cvSaveImage("temp.png", cropped)
                # im = Image.open("temp.png").rotate(90)
                # print "pil image size = ", im.size[0], im.size[1]
                # im = im.resize((im.size[0] * 2, im.size[1] * 2))
                # print "pil image size = ", im.size
                # im.show()
                # im.save("temp2.png")

                cropped2 = highgui.cvLoadImage("temp2.png")
                # cvShowImage("cropped", cropped2)

    except:
        print "Exception:", sys.exc_info()[0]
        print position, pos2, pos3, pos4, pos5, pos6, pos7
        pass

    highgui.cvShowImage("edges", edges)
    # cvShowImage("log-polar", dst)
    cvShowImage(wname, temp)
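The callback above depends on several globals (img, wname, pos2..pos7) that are set up elsewhere. A sketch of that setup with assumed window names, trackbar ranges, and initial pos values (none of these specifics come from the original code):

# Illustrative setup for on_trackbar1; names, ranges, and initial
# values are assumptions, not taken from the original source.
wname = 'circles'
img = highgui.cvLoadImage(sys.argv[1])
pos1, pos2, pos3, pos4, pos5, pos6, pos7 = 100, 200, 100, 40, 10, 500, 10

highgui.cvNamedWindow(wname, highgui.CV_WINDOW_AUTOSIZE)
highgui.cvNamedWindow('edges', highgui.CV_WINDOW_AUTOSIZE)
# one trackbar drives the Canny low threshold; the remaining parameters
# are read from the pos* globals inside the callback
highgui.cvCreateTrackbar('canny low', wname, pos1, 1000, on_trackbar1)
on_trackbar1(pos1)
highgui.cvWaitKey(0)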