def run(self):
    read_color_data()
    self.cam = WebCam(info=self.cam_info)
    if self.debug:
        self.debug_thread.start()
    try:
        while self.pipe.recv() == True:
            im = self.cam.get_image()
            small_im = cv.CreateImage((im.width / 2, im.height / 2), cv.IPL_DEPTH_8U, 3)
            cv.PyrDown(im, small_im)
            smaller_im = cv.CreateImage((im.width / 4, im.height / 4), cv.IPL_DEPTH_8U, 3)
            cv.PyrDown(small_im, smaller_im)
            smallerer_im = cv.CreateImage((im.width / 8, im.height / 8), cv.IPL_DEPTH_8U, 3)
            cv.PyrDown(smaller_im, smallerer_im)
            colors = convert_to_colors(smallerer_im)
            closest_ball = self.find_closest_ball(colors)
            self.pipe.send({"closest_ball": closest_ball})
            self.colors = colors
            # time.sleep(0.01)
    except KeyboardInterrupt:
        pass
    finally:
        if self.debug:
            self.debug_thread.stop()
        self.cam.stop()
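The run() loop above chains three cv.PyrDown calls to build progressively smaller copies of each frame (1/2, 1/4, 1/8 scale). As a point of comparison, here is a minimal standalone sketch of the same cascade using the modern cv2 API; the input file name is a placeholder, not part of the original code.

import cv2

# Hypothetical input file; replace with a real frame from your camera.
frame = cv2.imread("frame.png")

# Each cv2.pyrDown call blurs with a 5x5 Gaussian and halves both dimensions,
# so three calls give 1/2, 1/4 and 1/8 scale copies, as in run() above.
half = cv2.pyrDown(frame)
quarter = cv2.pyrDown(half)
eighth = cv2.pyrDown(quarter)

print("shapes: %s %s %s %s" % (frame.shape, half.shape, quarter.shape, eighth.shape))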
def to_scale(self, img):
    '''
    Makes the pyramid images smaller when needed, using cv.PyrDown.
    @param img: image to resize
    '''
    for i in range(1, self.max_scale + 1):
        cv.PyrDown(self.imgs[i - 1], self.imgs[i])
def processFrames(self):
    self.vidcap = cv2.VideoCapture(self.path)
    count = 0
    success, image = self.vidcap.read()
    print success
    self.createWindows()
    while True:
        success, image = self.vidcap.read()
        if not success:
            return
        spare = cv.fromarray(image)
        size = (spare.width / 2, spare.height / 2)
        cv.Smooth(spare, spare, cv.CV_GAUSSIAN, BLUR_SIZE, BLUR_SIZE)
        out = cv.CreateImage(size, 8, 3)
        cv.PyrDown(spare, out)
        yuv = cv.CreateImage(size, 8, 3)
        gray = cv.CreateImage(size, 8, 1)
        canny = cv.CreateImage(size, 8, 1)
        sobel = cv.CreateImage(size, 8, 1)
        harris = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        cv.CvtColor(out, yuv, cv.CV_BGR2YCrCb)
        cv.Split(yuv, gray, None, None, None)
        cv.Canny(gray, canny, 50, 200, 3)
        cv.CornerHarris(gray, harris, 3)
        cv.Sobel(gray, sobel, 1, 0, 3)
        cv.ConvertScale(canny, canny, -1, 255)
        cv.ConvertScale(sobel, sobel, -1, 255)
        for y in range(0, out.height):
            for x in range(0, out.width):
                harr = cv.Get2D(sobel, y, x)
                if harr[0] < 10e-06:
                    cv.Circle(out, (x, y), 2, cv.RGB(155, 0, 25))
        # cv2.imwrite("frame%d.jpg" % count, np.asarray(canny[:, :]))
        cv.ShowImage('canny', canny)
        # cv.ShowImage('harris', harris)
        cv.ShowImage('sobel', sobel)
        cv.ShowImage('output', out)
        if cv2.waitKey(1) == 27:
            break
        count += 1
    return
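processFrames() converts each cv2 frame back to the old cv interface just to call cv.Smooth and cv.PyrDown. Below is a trimmed, hedged sketch of the same per-frame pipeline kept entirely in cv2 (it omits the Harris/per-pixel drawing steps, uses BGR2GRAY instead of the Y channel of YCrCb, and assumes a BLUR_SIZE value, since the original constant is defined elsewhere).

import cv2

BLUR_SIZE = 5  # assumed; the original constant is defined elsewhere

def process_frames_cv2(path):
    cap = cv2.VideoCapture(path)
    while True:
        success, image = cap.read()
        if not success:
            return
        # Gaussian blur followed by pyrDown halves the frame, as above.
        blurred = cv2.GaussianBlur(image, (BLUR_SIZE, BLUR_SIZE), 0)
        out = cv2.pyrDown(blurred)
        gray = cv2.cvtColor(out, cv2.COLOR_BGR2GRAY)
        canny = cv2.Canny(gray, 50, 200, apertureSize=3)
        sobel = cv2.Sobel(gray, cv2.CV_8U, 1, 0, ksize=3)
        cv2.imshow('canny', 255 - canny)   # inverted, like cv.ConvertScale(-1, 255)
        cv2.imshow('sobel', 255 - sobel)
        cv2.imshow('output', out)
        if cv2.waitKey(1) == 27:
            break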
def doPyrDown(image, filter_type=cv.CV_GAUSSIAN_5x5):
    # assert that the width and height of the input image are even
    assert (image.width % 2 == 0 and image.height % 2 == 0)
    # create a new image, half the size of the input, to hold the output
    out = cv.CreateImage((image.width / 2, image.height / 2),
                         image.depth, image.nChannels)
    # downsample the image
    cv.PyrDown(image, out)
    return out
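A minimal driver for the helper above, assuming OpenCV 2.4-era cv2.cv bindings and a hypothetical input file whose width and height are both even (so the assert passes):

import cv2.cv as cv  # old-style bindings, as used throughout these snippets

img = cv.LoadImage("input.png")   # hypothetical file name
small = doPyrDown(img)

cv.ShowImage("original", img)
cv.ShowImage("half size", small)
cv.WaitKey(0)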
def scale(im, scale=2, imfilter=cv.CV_GAUSSIAN_5x5, out=None):
    assert im.width % scale == 0 and im.height % scale == 0
    if not out:
        out = cv.CreateImage((im.width / scale, im.height / scale),
                             im.depth, im.channels)
    cv.PyrDown(im, out)
    return out
def doPyrDown(inImg):
    """
    Returns an image that has been subjected to Gaussian downsampling
    via PyrDown. The returned image is half the size of the original.
    """
    (width, height) = cv.GetSize(inImg)
    outSize = (width / 2, height / 2)
    outImg = cv.CreateImage(outSize, 8, 1)
    cv.PyrDown(inImg, outImg, cv.CV_GAUSSIAN_5x5)
    return outImg
def find_squares4(color_img):
    """
    Finds multiple squares in an image.

    Steps:
    - Use Canny edge detection to highlight contours, and dilation to
      connect the edge segments.
    - Threshold the result to binary edge tokens.
    - Use cv.FindContours: returns a cv.CvSequence of cv.CvContours.
    - Filter each candidate: use ApproxPoly, keep only contours with
      4 vertices, enough area, and ~90 degree angles.

    Returns all square contours in one flat list of arrays,
    4 (x, y) points each.
    """
    # select even sizes only
    width, height = (color_img.width & -2, color_img.height & -2)
    timg = cv.CloneImage(color_img)  # make a copy of input image
    gray = cv.CreateImage((width, height), 8, 1)

    # select the maximum ROI in the image
    cv.SetImageROI(timg, (0, 0, width, height))

    # down-scale and upscale the image to filter out the noise
    pyr = cv.CreateImage((width / 2, height / 2), 8, 3)
    cv.PyrDown(timg, pyr, 7)
    cv.PyrUp(pyr, timg, 7)

    tgray = cv.CreateImage((width, height), 8, 1)
    squares = []

    # Find squares in every color plane of the image.
    # Two methods, we use both:
    # 1. Canny to catch squares with gradient shading. Use the upper
    #    threshold from the slider, set the lower to 0 (which forces
    #    edge merging). Then dilate the Canny output to remove potential
    #    holes between edge segments.
    # 2. Binary thresholding at multiple levels.
    N = 11
    for c in [0, 1, 2]:
        # extract the c-th color plane
        cv.SetImageCOI(timg, c + 1)
        cv.Copy(timg, tgray, None)
        cv.Canny(tgray, gray, 0, 50, 5)
        cv.Dilate(gray, gray)
        squares = squares + find_squares_from_binary(gray)

        # Look for more squares at several threshold levels
        for l in range(1, N):
            cv.Threshold(tgray, gray, (l + 1) * 255 / N, 255,
                         cv.CV_THRESH_BINARY)
            squares = squares + find_squares_from_binary(gray)

    return squares
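A hedged usage sketch for find_squares4(): it depends on find_squares_from_binary(), a helper from the same project that is not shown here, so this only illustrates how the function is driven. The file name is a placeholder.

import cv2.cv as cv

# find_squares_from_binary() must be defined/imported from the original
# project before this will run.
img = cv.LoadImage("scene.png")   # hypothetical file name
squares = find_squares4(img)
print("found %d square candidates" % len(squares))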
if k == 27:
    break

fname = sys.argv[1]
original = cv.LoadImage(fname)
img = cv.CreateImage(cv.GetSize(original), cv.IPL_DEPTH_8U, 1)
cv.CvtColor(original, img, cv.CV_BGR2GRAY)
cv.AdaptiveThreshold(img, img, 255.0, cv.CV_THRESH_BINARY,
                     cv.CV_ADAPTIVE_THRESH_MEAN_C, 9)

# down-scale and upscale the image to filter out the noise
pyr = cv.CreateImage((img.width / 2, img.height / 2), cv.IPL_DEPTH_8U, 1)
cv.PyrDown(img, pyr, 7)
cv.PyrUp(pyr, img, 7)

cv.Smooth(img, img, cv.CV_MEDIAN, 1, 5)
# cv.Dilate(img, img, None, 1)
# cv.Erode(img, img, None, 1)
cv.AdaptiveThreshold(img, img, 255.0, cv.CV_THRESH_BINARY,
                     cv.CV_ADAPTIVE_THRESH_MEAN_C, 9)

showme = cv.CloneImage(img)
size = cv.GetSize(img)
cv.ShowImage("Show Me", img)
cv.SaveImage("imgfix.jpg", img)
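The PyrDown/PyrUp pair in the script above is used purely as a cheap denoising step, and the same trick recurs in the surrounding snippets. A minimal cv2 sketch of that idea, assuming a grayscale input file (hypothetical name) whose dimensions are even so the shapes round-trip exactly:

import cv2

img = cv2.imread("imgfix_input.png", cv2.IMREAD_GRAYSCALE)  # hypothetical file

# Down- then up-sampling suppresses fine-grained noise, mirroring the
# cv.PyrDown/cv.PyrUp pair above.
denoised = cv2.pyrUp(cv2.pyrDown(img))

# Followed by an adaptive threshold, roughly as in the script above.
binary = cv2.adaptiveThreshold(denoised, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                               cv2.THRESH_BINARY, 9, 5)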
def find_object(img, colour):
    '''
    Finds the objects in an image with the given colour.

    Arguments:
    img -- the image to be processed
    colour -- the colour to look for (red, blue or yellow)

    Returns:
    Point representing the object's centre of mass
    '''
    # Convert to hsv
    size = cv.GetSize(img)
    tempImage = cv.CreateImage((size[0] / 2, size[1] / 2), 8, 3)

    # Reduce noise by down- and up-scaling the input image
    cv.PyrDown(img, tempImage, 7)
    cv.PyrUp(tempImage, img, 7)

    hsv = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
    cv.CvtColor(img, hsv, cv.CV_BGR2HSV)

    # Convert to a binary image based on colour
    mask = cv.CreateMat(size[1], size[0], cv.CV_8UC1)
    maskSize = cv.GetSize(mask)
    if (colour == "RED"):
        redLower = cv.Scalar(mods[0] * 256, mods[1] * 256, mods[2] * 256)
        redUpper = cv.Scalar(mods[3] * 256, mods[4] * 256, mods[5] * 256)
        cv.InRangeS(hsv, redLower, redUpper, mask)
        cv.ShowImage("Red:", mask)
    elif (colour == "BLUE"):
        blueLower = cv.Scalar(mods[6] * 256, mods[7] * 256, mods[8] * 256)
        blueUpper = cv.Scalar(mods[9] * 256, mods[10] * 256, mods[11] * 256)
        cv.InRangeS(hsv, blueLower, blueUpper, mask)
        cv.ShowImage("Blue:", mask)
    elif (colour == "YELLOW"):
        yellowLower = cv.Scalar(mods[12] * 256, mods[13] * 256, mods[14] * 256)
        yellowUpper = cv.Scalar(mods[15] * 256, mods[16] * 256, mods[17] * 256)
        cv.InRangeS(hsv, yellowLower, yellowUpper, mask)
        cv.ShowImage("Yellow:", mask)
    elif (colour == "YWHITE"):
        blackLower = cv.Scalar(mods[18] * 256, mods[19] * 256, mods[20] * 256)
        blackUpper = cv.Scalar(mods[21] * 256, mods[22] * 256, mods[23] * 256)
        cv.InRangeS(hsv, blackLower, blackUpper, mask)
        cv.ShowImage("YellowWhite:", mask)
    elif (colour == "BWHITE"):
        blackLower = cv.Scalar(mods[18] * 256, mods[19] * 256, mods[20] * 256)
        blackUpper = cv.Scalar(mods[21] * 256, mods[22] * 256, mods[23] * 256)
        cv.InRangeS(hsv, blackLower, blackUpper, mask)
        cv.ShowImage("BlueWhite:", mask)

    # Count white pixels to make sure the program doesn't crash
    # if it finds nothing
    if (cv.CountNonZero(mask) < 3):
        return ((0, 0), 0)

    # Clean up the image to reduce any more noise in the binary image
    cv.Smooth(mask, mask, cv.CV_GAUSSIAN, 9, 9, 0, 0)
    convKernel = cv.CreateStructuringElementEx(9, 9, 0, 0, cv.CV_SHAPE_RECT)
    cv.Erode(mask, mask, convKernel, 1)
    cv.Dilate(mask, mask, convKernel, 1)

    moments = cv.Moments(mask, 1)
    M00 = cv.GetSpatialMoment(moments, 0, 0)
    M10 = cv.GetSpatialMoment(moments, 1, 0)
    M01 = cv.GetSpatialMoment(moments, 0, 1)
    if M00 == 0:
        M00 = 0.01
    center_of_mass = (round(M10 / M00), round(M01 / M00))

    if (colour == "BLUE" or colour == "YELLOW"):
        return (center_of_mass, find_orientation(mask, center_of_mass))
    else:
        return (center_of_mass, 0)
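A hedged usage sketch for find_object(): the function reads the global calibration list mods and calls find_orientation(), both of which come from elsewhere in the original project, so this only shows how the call is made. The frame name is a placeholder.

import cv2.cv as cv

# 'mods' (the 24 calibration fractions) and find_orientation() must be
# set up from the original project before this will run.
frame = cv.LoadImage("pitch.png")   # hypothetical frame name
centre, orientation = find_object(frame, "RED")
print("red object centre: %s, orientation: %s" % (centre, orientation))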
def findSquares4(img, storage):
    N = 11
    sz = (img.width & -2, img.height & -2)
    timg = cv.CloneImage(img)  # make a copy of input image
    gray = cv.CreateImage(sz, 8, 1)
    pyr = cv.CreateImage((sz[0] / 2, sz[1] / 2), 8, 3)

    # create empty sequence that will contain points -
    # 4 points per square (the square's vertices)
    squares = cv.CreateSeq(0, sizeof_CvSeq, sizeof_CvPoint, storage)
    squares = CvSeq_CvPoint.cast(squares)

    # select the maximum ROI in the image
    # with the width and height divisible by 2
    subimage = cv.GetSubRect(timg, (0, 0, sz[0], sz[1]))

    # down-scale and upscale the image to filter out the noise
    cv.PyrDown(subimage, pyr, 7)
    cv.PyrUp(pyr, subimage, 7)
    tgray = cv.CreateImage(sz, 8, 1)

    # find squares in every color plane of the image
    for c in range(3):
        # extract the c-th color plane
        channels = [None, None, None]
        channels[c] = tgray
        cv.Split(subimage, channels[0], channels[1], channels[2], None)
        for l in range(N):
            # hack: use Canny instead of zero threshold level.
            # Canny helps to catch squares with gradient shading
            if (l == 0):
                # apply Canny. Take the upper threshold from the slider
                # and set the lower to 0 (which forces edge merging)
                cv.Canny(tgray, gray, 0, thresh, 5)
                # dilate the Canny output to remove potential
                # holes between edge segments
                cv.Dilate(gray, gray, None, 1)
            else:
                # apply threshold if l != 0:
                # tgray(x, y) = gray(x, y) < (l+1)*255/N ? 255 : 0
                cv.Threshold(tgray, gray, (l + 1) * 255 / N, 255,
                             cv.CV_THRESH_BINARY)

            # find contours and store them all as a list
            count, contours = cv.FindContours(gray, storage, sizeof_CvContour,
                                              cv.CV_RETR_LIST,
                                              cv.CV_CHAIN_APPROX_SIMPLE,
                                              (0, 0))
            if not contours:
                continue

            # test each contour
            for contour in contours.hrange():
                # approximate contour with accuracy proportional
                # to the contour perimeter
                result = cv.ApproxPoly(contour, sizeof_CvContour, storage,
                                       cv.CV_POLY_APPROX_DP,
                                       cv.ContourPerimeter(contours) * 0.02, 0)
                # square contours should have 4 vertices after approximation,
                # a relatively large area (to filter out noisy contours)
                # and be convex.
                # Note: the absolute value of the area is used because the
                # area may be positive or negative, in accordance with the
                # contour orientation
                if (result.total == 4 and
                        abs(cv.ContourArea(result)) > 1000 and
                        cv.CheckContourConvexity(result)):
                    s = 0
                    for i in range(5):
                        # find the minimum angle between joint
                        # edges (maximum of cosine)
                        if (i >= 2):
                            t = abs(angle(result[i], result[i - 2],
                                          result[i - 1]))
                            if s < t:
                                s = t
                    # if the cosines of all angles are small
                    # (all angles are ~90 degrees) then write the quadrangle
                    # vertices to the resultant sequence
                    if (s < 0.3):
                        for i in range(4):
                            squares.append(result[i])
    return squares