def onIdle(self, event):
    """Grab and display a frame from the camera (internal use).

    Bound to wx idle events; calls event.RequestMore() at the end so the
    preview keeps refreshing continuously.

    Raises CameraError when the capture handle is missing or a frame
    query fails.
    """
    if self.cap == None:  # self.cap should be a cvCameraCapture instance.
        # No capture handle: drop the stale handle, try to reopen the
        # camera, show the error bitmap, and bail out with CameraError.
        # (Original note: unbind the idle instance, change to click.)
        highgui.cvReleaseCapture(self.cap)  # release the old instance and
        self.cap = highgui.cvCreateCameraCapture(self.camera)  # try new one.
        self.displayError(self.errorBitmap, (128, 128))
        raise CameraError('Unable to open camera, retrying....')
    # NOTE(review): original layout was lost; token order puts this call
    # after the raise — placed here so it runs on the success path. Confirm.
    event.Skip()
    try:
        img = highgui.cvQueryFrame(self.cap)
    except cv2.error as e:
        raise CameraError('Error when querying for frame: {0}'.format(e))
    self._error = 0  # worked successfully
    img = opencv.cvGetMat(img)
    # Capture delivers BGR; the display path wants RGB (in-place swap).
    cv.cvCvtColor(img, img, cv.CV_BGR2RGB)
    if conf.as_bool(conf.config['webcam']['cropBars']):
        # Draw cropping region
        cv.cvRectangle(img, (80, -1), (560, 480), (205.0, 0.0, 0.0, 0.0), 2)
    self.displayImage(img)
    event.RequestMore()
def clear(self):
    """Repaint the display buffer: white background, plus an optional
    polar range grid and an optional centre marker.
    """
    # Blank the whole buffer with a filled white rectangle.
    cv.cvRectangle(self.buffer, cv.cvPoint(0, 0),
                   cv.cvPoint(self.buffer.width, self.buffer.height),
                   cv.cvScalar(255, 255, 255), cv.CV_FILLED)
    if self.draw_grid:
        line_color = 230  # light grey
        lc = cv.cvScalar(line_color, line_color, line_color)
        # Concentric range rings centred at bottom-centre of the buffer:
        # one ring at each half metre (i - .5) and each full metre.
        for i in xrange(1, as_int(self.meters_disp) + 3):
            cv.cvCircle(self.buffer, cv.cvPoint(self.w / 2, self.h),
                        as_int(self.pixels_per_meter * (i - .5)),
                        lc, 1, cv.CV_AA)
            cv.cvCircle(self.buffer, cv.cvPoint(self.w / 2, self.h),
                        as_int(self.pixels_per_meter * i),
                        lc, 1, cv.CV_AA)
        # Radial spokes every 30 degrees, out past the outermost ring.
        for i in xrange(360 / 30):
            x = (self.w / 2) + math.cos(math.radians(i * 30)) * self.pixels_per_meter * (self.meters_disp + 2)
            y = self.h + math.sin(math.radians(i * 30)) * self.pixels_per_meter * (self.meters_disp + 2)
            cv.cvLine(self.buffer, cv.cvPoint(self.w / 2, self.h),
                      cv.cvPoint(as_int(x), as_int(y)), lc, 1, cv.CV_AA)
    if self.draw_center:
        # Small filled marker dot at the grid origin (scalar (0,0,200)).
        cv.cvCircle(self.buffer, cv.cvPoint(self.w / 2, self.h), 3,
                    cv.cvScalar(0, 0, 200), cv.CV_FILLED, cv.CV_AA)
def detect(image, cascade_file='haarcascade_data/haarcascade_frontalface_alt.xml'):
    """Find faces in *image* with a Haar cascade, outline each one in
    green on *image*, and return their positions.

    Returns a list of dicts with keys 'x', 'y', 'width' and 'height',
    one per detection (empty when nothing is found).
    """
    # The cascade wants a contrast-equalized, single-channel image.
    gray = cv.cvCreateImage(cv.cvGetSize(image), 8, 1)
    cv.cvCvtColor(image, gray, cv.CV_BGR2GRAY)
    mem = cv.cvCreateMemStorage(0)
    cv.cvClearMemStorage(mem)
    cv.cvEqualizeHist(gray, gray)
    classifier = cv.cvLoadHaarClassifierCascade(cascade_file, cv.cvSize(1, 1))
    hits = cv.cvHaarDetectObjects(gray, classifier, mem, 1.2, 2,
                                  cv.CV_HAAR_DO_CANNY_PRUNING,
                                  cv.cvSize(50, 50))
    found = []
    if hits:
        for hit in hits:
            found.append({'x': hit.x, 'y': hit.y,
                          'width': hit.width, 'height': hit.height,})
            # Mark the detection on the original (colour) image.
            cv.cvRectangle(image,
                           cv.cvPoint(int(hit.x), int(hit.y)),
                           cv.cvPoint(int(hit.x + hit.width),
                                      int(hit.y + hit.height)),
                           cv.CV_RGB(0, 255, 0), 3, 8, 0)
    return found
def draw_bounding_boxes(cascade_list, img, r, g, b, width):
    """Outline every detection rectangle from *cascade_list* on *img*
    in the RGB colour (r, g, b) with the given line *width*.

    Does nothing when *cascade_list* is empty or None.
    """
    if not cascade_list:
        return
    colour = opencv.CV_RGB(r, g, b)
    for box in cascade_list:
        top_left = opencv.cvPoint(int(box.x), int(box.y))
        bottom_right = opencv.cvPoint(int(box.x + box.width),
                                      int(box.y + box.height))
        opencv.cvRectangle(img, top_left, bottom_right, colour, width)
def illuminate_faces(image):
    """Threshold *image*, detect faces in it, and outline each face in
    red on the thresholded copy.

    Returns the thresholded image with the red rectangles drawn on it.
    Prints the (scaled) location of every detection as a side effect.
    """
    changed_image = threshold_image(image)
    faces = face_detector.detectObject(image)
    scale = face_detector.image_scale  # hoisted: used for every coordinate
    for face in faces:
        # Fix: message previously misspelled "Object" as "Oject".
        print( "Object found at (x,y) = (%i,%i)" % (face.x*scale, face.y*scale) )
        pt1 = cvPoint( int(face.x*scale), int(face.y*scale) )
        pt2 = cvPoint( int((face.x*scale + face.width*scale)),
                       int((face.y*scale + face.height*scale)) )
        cvRectangle( changed_image, pt1, pt2, CV_RGB(255,0,0), 3, 8, 0 )
    return changed_image
def show_rectangles(self, rectangles): """ Show the rectangles added. Arguments: - self: The main object pointer. """ #debug.debug("Camera", "Showing existing rectangles -> %d" % len(rectangles)) for rect in rectangles: cv.cvRectangle( self.__image, cv.cvPoint(rect.x, rect.y), cv.cvPoint(rect.size[0], rect.size[1]), cv.CV_RGB(255,0,0), 3, 8, 0 )
def drawBox(x):
    """Template function for VideoCapturePlayer: takes a CvMat, draws a
    fixed 50x50 rectangle on it, and returns the same CvMat."""
    corner_a = cv.CvPoint()
    corner_b = cv.CvPoint()
    corner_a.x = corner_a.y = 200
    corner_b.x = corner_b.y = 250
    cv.cvRectangle(x, corner_a, corner_b, cv.CV_RGB(30, 0, 200))
    return x
def drawBox(x):
    """This is a template for a function that can be fed into
    VideoCapturePlayer.  It must take a CvMat and return a CvMat.
    It draws a fixed rectangle on the frame."""
    start, end = cv.CvPoint(), cv.CvPoint()
    start.x, start.y = 200, 200
    end.x, end.y = 250, 250
    cv.cvRectangle(x, start, end, cv.CV_RGB(30, 0, 200))
    return x
def showRegionsForResultId(resultId, color):
    """Display the painting for *resultId* with each of its regions'
    bounding boxes outlined in *color*."""
    painting = getPaintingInResultId(resultId)
    title = Result.select(Result.q.id==resultId)[0].painting.title
    image = painting.getImage()
    for region in getRegionsForResultId(resultId):
        box = region.getBoundingBox()
        corner_a = cv.cvPoint(box.x, box.y)
        corner_b = cv.cvPoint(box.x + box.width, box.y + box.height)
        cv.cvRectangle(image, corner_a, corner_b, color, 2)
    g.showImage(image, title)
def savePictureForResultId(resultId, color=None):
    """Save the painting image for *resultId* as a PNG with each region's
    bounding box outlined.

    color: optional box colour; defaults to red. (Fix: `color` was
    previously an undefined name, raising NameError on the first region.)
    """
    painting = getPaintingInResultId(resultId)
    image = painting.getImage()
    regions = getRegionsForResultId(resultId)
    name = str(painting.id) + str(painting.title) + ".png"
    if color is None:
        color = cv.CV_RGB(255, 0, 0)
    for region in regions:
        rect = region.getBoundingBox()
        p1 = cv.cvPoint(rect.x, rect.y)
        p2 = cv.cvPoint(rect.x + rect.width, rect.y + rect.height)
        cv.cvRectangle(image, p1, p2, color, 2)
    # Fix: save the annotated image; cvSaveImage was previously passed the
    # `painting` object itself, not an image.
    cv.highgui.cvSaveImage(name, image)
def illuminate_faces(image):
    """Threshold *image*, detect faces in it, and outline each face in
    red on the thresholded copy.

    Returns the thresholded image with the rectangles drawn on it, and
    prints each detection's scaled location as a side effect.
    """
    changed_image = threshold_image(image)
    faces = face_detector.detectObject(image)
    scale = face_detector.image_scale  # hoisted: used for every coordinate
    for face in faces:
        # Fix: message previously misspelled "Object" as "Oject".
        print("Object found at (x,y) = (%i,%i)" %
              (face.x * scale, face.y * scale))
        pt1 = cvPoint(int(face.x * scale), int(face.y * scale))
        pt2 = cvPoint(
            int((face.x * scale + face.width * scale)),
            int((face.y * scale + face.height * scale)))
        cvRectangle(changed_image, pt1, pt2, CV_RGB(255, 0, 0), 3, 8, 0)
    return changed_image
def draw_gui(image):
    """Overlay the four control zones on *image*: red rectangles mark
    the reverse areas, green rectangles the forward areas."""
    zones = (
        (box_backwards_left, cv.CV_RGB(255, 0, 0)),   # reverse, left
        (box_backwards_right, cv.CV_RGB(255, 0, 0)),  # reverse, right
        (box_forward_left, cv.CV_RGB(0, 255, 0)),     # forward, left
        (box_forward_right, cv.CV_RGB(0, 255, 0)),    # forward, right
    )
    for zone, colour in zones:
        cv.cvRectangle(image, zone[0], zone[1], colour, 3, 8, 0)
def draw_gui(image):
    """Draw the steering control areas on *image*: reverse zones in red,
    forward zones in green."""
    red = cv.CV_RGB(255, 0, 0)
    green = cv.CV_RGB(0, 255, 0)
    # Reverse areas
    for area in (box_backwards_left, box_backwards_right):
        cv.cvRectangle(image, area[0], area[1], red, 3, 8, 0)
    # Forward areas
    for area in (box_forward_left, box_forward_right):
        cv.cvRectangle(image, area[0], area[1], green, 3, 8, 0)
def drawBoundingBoxes(out, component_dictionary, thickness=1, color=None, factor=1):
    """Given a dictionary of components, draw its bounding box on the outimage.

    Each dictionary value is a (color, component) pair.  If no color is
    supplied, each box is drawn in the same color as its blob; otherwise
    *color* is used for every box.  Points are run through
    transformer.translatePoint with *factor* before drawing.
    """
    # Remember the caller's choice once; `color is None` means per-blob.
    override_color = color
    # Fix: iterate values directly (was three dict lookups per entry) and
    # use an identity test for None (was `== None`).
    for blob_color, component in component_dictionary.values():
        rect = component.rect
        draw_color = blob_color if override_color is None else override_color
        p1 = cv.cvPoint(rect.x, rect.y)
        p2 = cv.cvPoint(rect.x + rect.width, rect.y + rect.height)
        p1 = transformer.translatePoint(p1, factor)
        p2 = transformer.translatePoint(p2, factor)
        cv.cvRectangle(out, p1, p2, draw_color, thickness)
def detect_face(self, img):
    """ Detect faces within an image, then draw around them.

    The default parameters (scale_factor=1.1, min_neighbors=3, flags=0) are
    tuned for accurate yet slow object detection. For a faster operation on
    real video images the settings are: scale_factor=1.2, min_neighbors=2,
    flags=CV_HAAR_DO_CANNY_PRUNING, min_size=<minimum possible face size

    Returns *img* with a red rectangle drawn around each detection.
    """
    min_size = cv.cvSize(20,20)
    image_scale = 1.3  # detection runs on an image downscaled by this factor
    haar_scale = 1.2
    min_neighbors = 2
    haar_flags = 0
    # Grayscale copy of the frame, plus a downscaled version of it: the
    # cascade works on single-channel input and is faster on a small image.
    gray = cv.cvCreateImage(cv.cvSize(img.width,img.height), 8, 1)
    small_img = cv.cvCreateImage(cv.cvSize(cv.cvRound(img.width/image_scale),
                                           cv.cvRound(img.height/image_scale)), 8, 1)
    cv.cvCvtColor(img, gray, cv.CV_BGR2GRAY)
    cv.cvResize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.cvEqualizeHist(small_img, small_img)  # boost contrast before detection
    cv.cvClearMemStorage(self.storage)
    if(self.cascade):
        t = cv.cvGetTickCount();  # time the detection (currently unreported)
        faces = cv.cvHaarDetectObjects(small_img, self.cascade, self.storage,
                                       haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.cvGetTickCount() - t
        #print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.));
        if faces:
            for r in faces:
                # Scale detections back up to full-image coordinates.
                pt1 = cv.cvPoint(int(r.x*image_scale), int(r.y*image_scale))
                pt2 = cv.cvPoint(int((r.x+r.width)*image_scale), int((r.y+r.height)*image_scale))
                cv.cvRectangle(img, pt1, pt2, cv.CV_RGB(255,0,0), 3, 8, 0)
    return img
def detect_face(self, img):
    """ Detect faces within an image, then draw around them.

    The default parameters (scale_factor=1.1, min_neighbors=3, flags=0) are
    tuned for accurate yet slow object detection. For a faster operation on
    real video images the settings are: scale_factor=1.2, min_neighbors=2,
    flags=CV_HAAR_DO_CANNY_PRUNING, min_size=<minimum possible face size

    Returns *img* with a red rectangle drawn around each detection.
    """
    min_size = cv.cvSize(20, 20)
    image_scale = 1.3  # detection runs on an image downscaled by this factor
    haar_scale = 1.2
    min_neighbors = 2
    haar_flags = 0
    # Grayscale copy of the frame, plus a downscaled version of it: the
    # cascade works on single-channel input and is faster on a small image.
    gray = cv.cvCreateImage(cv.cvSize(img.width, img.height), 8, 1)
    small_img = cv.cvCreateImage(
        cv.cvSize(cv.cvRound(img.width / image_scale),
                  cv.cvRound(img.height / image_scale)), 8, 1)
    cv.cvCvtColor(img, gray, cv.CV_BGR2GRAY)
    cv.cvResize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.cvEqualizeHist(small_img, small_img)  # boost contrast before detection
    cv.cvClearMemStorage(self.storage)
    if (self.cascade):
        t = cv.cvGetTickCount()  # time the detection (currently unreported)
        faces = cv.cvHaarDetectObjects(small_img, self.cascade, self.storage,
                                       haar_scale, min_neighbors, haar_flags,
                                       min_size)
        t = cv.cvGetTickCount() - t
        #print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.));
        if faces:
            for r in faces:
                # Scale detections back up to full-image coordinates.
                pt1 = cv.cvPoint(int(r.x * image_scale),
                                 int(r.y * image_scale))
                pt2 = cv.cvPoint(int((r.x + r.width) * image_scale),
                                 int((r.y + r.height) * image_scale))
                cv.cvRectangle(img, pt1, pt2, cv.CV_RGB(255, 0, 0), 3, 8, 0)
    return img
def onIdle(self, event):
    """Grab and display a frame from the camera (internal use).

    Bound to wx idle events; calls event.RequestMore() at the end so the
    preview keeps refreshing continuously.

    Raises CameraError when the capture handle is missing or a frame
    query fails.
    """
    if self.cap == None:  # self.cap should be a cvCameraCapture instance.
        # No capture handle: drop the stale handle, try to reopen the
        # camera, show the error bitmap, and bail out with CameraError.
        # (Original note: unbind the idle instance, change to click.)
        highgui.cvReleaseCapture(self.cap)  # release the old instance and
        self.cap = highgui.cvCreateCameraCapture(self.camera)  # try new one.
        self.displayError(self.errorBitmap, (128, 128))
        raise CameraError('Unable to open camera, retrying....')
    # NOTE(review): original layout was lost; token order puts this call
    # after the raise — placed here so it runs on the success path. Confirm.
    event.Skip()
    try:
        img = highgui.cvQueryFrame(self.cap)
    except cv2.error as e:
        raise CameraError('Error when querying for frame: {0}'.format(e))
    self._error = 0  # worked successfully
    img = opencv.cvGetMat(img)
    # Capture delivers BGR; the display path wants RGB (in-place swap).
    cv.cvCvtColor(img, img, cv.CV_BGR2RGB)
    if conf.as_bool(conf.config['webcam']['cropBars']):
        # Draw cropping region
        cv.cvRectangle(img, (80, -1), (560, 480), (205.0, 0.0, 0.0, 0.0), 2)
    self.displayImage(img)
    event.RequestMore()
def clear(self):
    """Repaint the display buffer: white background, plus an optional
    polar range grid and an optional centre marker.
    """
    # Blank the whole buffer with a filled white rectangle.
    cv.cvRectangle(self.buffer, cv.cvPoint(0, 0),
                   cv.cvPoint(self.buffer.width, self.buffer.height),
                   cv.cvScalar(255, 255, 255), cv.CV_FILLED)
    if self.draw_grid:
        line_color = 230  # light grey
        lc = cv.cvScalar(line_color, line_color, line_color)
        # Concentric range rings centred at bottom-centre of the buffer:
        # one ring at each half metre (i - .5) and each full metre.
        for i in xrange(1, as_int(self.meters_disp) + 3):
            cv.cvCircle(
                self.buffer, cv.cvPoint(self.w / 2, self.h),
                as_int(self.pixels_per_meter * (i - .5)),
                lc, 1, cv.CV_AA)
            cv.cvCircle(
                self.buffer, cv.cvPoint(self.w / 2, self.h),
                as_int(self.pixels_per_meter * i),
                lc, 1, cv.CV_AA)
        # Radial spokes every 30 degrees, out past the outermost ring.
        for i in xrange(360 / 30):
            x = (self.w / 2) + math.cos(math.radians(
                i * 30)) * self.pixels_per_meter * (self.meters_disp + 2)
            y = self.h + math.sin(math.radians(
                i * 30)) * self.pixels_per_meter * (self.meters_disp + 2)
            cv.cvLine(self.buffer, cv.cvPoint(self.w / 2, self.h),
                      cv.cvPoint(as_int(x), as_int(y)), lc, 1, cv.CV_AA)
    if self.draw_center:
        # Small filled marker dot at the grid origin (scalar (0,0,200)).
        cv.cvCircle(self.buffer, cv.cvPoint(self.w / 2, self.h), 3,
                    cv.cvScalar(0, 0, 200), cv.CV_FILLED, cv.CV_AA)
cv.cvLine (image, pt1, pt2, random_color (random), random.randrange (0, 10), line_type, 0) highgui.cvShowImage (window_name, image) highgui.cvWaitKey (delay) # draw some rectangles for i in range (number): pt1 = cv.cvPoint (random.randrange (-width, 2 * width), random.randrange (-height, 2 * height)) pt2 = cv.cvPoint (random.randrange (-width, 2 * width), random.randrange (-height, 2 * height)) cv.cvRectangle (image, pt1, pt2, random_color (random), random.randrange (-1, 9), line_type, 0) highgui.cvShowImage (window_name, image) highgui.cvWaitKey (delay) # draw some ellipes for i in range (number): pt1 = cv.cvPoint (random.randrange (-width, 2 * width), random.randrange (-height, 2 * height)) sz = cv.cvSize (random.randrange (0, 200), random.randrange (0, 200)) angle = random.randrange (0, 1000) * 0.180 cv.cvEllipse (image, pt1, sz, angle, angle - 100, angle + 200, random_color (random), random.randrange (-1, 9),
def timerEvent(self, ev):
    """Qt timer callback: grab a camera frame, run the face/circle/
    square/line detectors on it, draw the results, and blit the frame
    into this widget."""
    # Fetch a frame from the video camera
    frame = highgui.cvQueryFrame(self.cap)
    img_orig = cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                                cv.IPL_DEPTH_8U, frame.nChannels)
    # Normalise orientation: copy directly when the origin is top-left,
    # otherwise flip vertically.
    if (frame.origin == cv.IPL_ORIGIN_TL):
        cv.cvCopy(frame, img_orig)
    else:
        cv.cvFlip(frame, img_orig, 0)
    # Create a grey frame to clarify data
    img_grey = cv.cvCreateImage(cv.cvSize(img_orig.width, img_orig.height), 8, 1)
    cv.cvCvtColor(img_orig, img_grey, cv.CV_BGR2GRAY)
    # Detect objects within the frame; each detector gets fresh storage.
    self.faces_storage = cv.cvCreateMemStorage(0)
    faces = self.detect_faces(img_grey)
    self.circles_storage = cv.cvCreateMemStorage(0)
    circles = self.detect_circles(img_grey)
    self.squares_storage = cv.cvCreateMemStorage(0)
    squares = self.detect_squares(img_grey, img_orig)
    self.lines_storage = cv.cvCreateMemStorage(0)
    lines = self.detect_lines(img_grey, img_orig)
    # Draw faces (red boxes)
    if faces:
        for face in faces:
            pt1, pt2 = self.face_points(face)
            cv.cvRectangle(img_orig, pt1, pt2, cv.CV_RGB(255, 0, 0), 3, 8, 0)
    # Draw lines (yellow)
    if lines:
        for line in lines:
            cv.cvLine(img_orig, line[0], line[1], cv.CV_RGB(255, 255, 0), 3, 8)
    # Draw circles (blue)
    if circles:
        for circle in circles:
            cv.cvCircle(
                img_orig,
                cv.cvPoint(cv.cvRound(circle[0]), cv.cvRound(circle[1])),
                cv.cvRound(circle[2]), cv.CV_RGB(0, 0, 255), 3, 8, 0)
    # Draw squares (green closed polylines); `squares` is a flat sequence
    # of points, four vertices per square.
    if squares:
        i = 0
        while i < squares.total:
            pt = []
            # read 4 vertices
            pt.append(squares[i])
            pt.append(squares[i + 1])
            pt.append(squares[i + 2])
            pt.append(squares[i + 3])
            ## draw the square as a closed polyline
            cv.cvPolyLine(img_orig, [pt], 1, cv.CV_RGB(0, 255, 0), 3,
                          cv.CV_AA, 0)
            i += 4
    # Resize the image to display properly within the window
    # CV_INTER_NN - nearest-neigbor interpolation,
    # CV_INTER_LINEAR - bilinear interpolation (used by default)
    # CV_INTER_AREA - resampling using pixel area relation. (preferred for image decimation)
    # CV_INTER_CUBIC - bicubic interpolation.
    img_display = cv.cvCreateImage(cv.cvSize(self.width(), self.height()), 8, 3)
    cv.cvResize(img_orig, img_display, cv.CV_INTER_NN)
    # IplImage -> PIL -> in-memory PNG -> QImage, then blit to the widget.
    img_pil = adaptors.Ipl2PIL(img_display)
    s = StringIO()
    img_pil.save(s, "PNG")
    s.seek(0)
    q_img = QImage()
    q_img.loadFromData(s.read())
    bitBlt(self, 0, 0, q_img)
# compute the width for each bin do display bin_w = histimg.width / hdims for i in range(hdims): # for all the bins # get the value, and scale to the size of the hist image val = cv.cvRound( cv.cvGetReal1D(hist.bins, i) * histimg.height / 255) # compute the color color = hsv2rgb(i * 180. / hdims) # draw the rectangle in the wanted color cv.cvRectangle(histimg, cv.cvPoint(i * bin_w, histimg.height), cv.cvPoint((i + 1) * bin_w, histimg.height - val), color, -1, 8, 0) # Make the sweet negative selection box if mouse_select_object and mouse_selection.width > 0 and mouse_selection.height > 0: a = cv.cvGetSubRect(frame, mouse_selection) cv.cvXorS(a, cv.cvScalarAll(255), a) # Take the negative of the image.. del a # Carry out the histogram tracking... if track_object != 0: cv.cvInRangeS(hsv, cv.cvScalar(0, smin, min(vmin, vmax), 0), cv.cvScalar(180, 256, max(vmin, vmax), 0), mask) cv.cvSplit(hsv, hue, None, None, None) if track_object < 0:
def timerEvent(self, ev):
    """Qt timer callback: grab a camera frame, run the face/circle/
    square/line detectors on it, draw the results, and blit the frame
    into this widget."""
    # Fetch a frame from the video camera
    frame = highgui.cvQueryFrame(self.cap)
    img_orig = cv.cvCreateImage(cv.cvSize(frame.width,frame.height),cv.IPL_DEPTH_8U, frame.nChannels)
    # Normalise orientation: copy directly when the origin is top-left,
    # otherwise flip vertically.
    if (frame.origin == cv.IPL_ORIGIN_TL):
        cv.cvCopy(frame, img_orig)
    else:
        cv.cvFlip(frame, img_orig, 0)
    # Create a grey frame to clarify data
    img_grey = cv.cvCreateImage(cv.cvSize(img_orig.width,img_orig.height), 8, 1)
    cv.cvCvtColor(img_orig, img_grey, cv.CV_BGR2GRAY)
    # Detect objects within the frame; each detector gets fresh storage.
    self.faces_storage = cv.cvCreateMemStorage(0)
    faces = self.detect_faces(img_grey)
    self.circles_storage = cv.cvCreateMemStorage(0)
    circles = self.detect_circles(img_grey)
    self.squares_storage = cv.cvCreateMemStorage(0)
    squares = self.detect_squares(img_grey, img_orig)
    self.lines_storage = cv.cvCreateMemStorage(0)
    lines = self.detect_lines(img_grey, img_orig)
    # Draw faces (red boxes)
    if faces:
        for face in faces:
            pt1, pt2 = self.face_points(face)
            cv.cvRectangle(img_orig, pt1, pt2, cv.CV_RGB(255,0,0), 3, 8, 0)
    # Draw lines (yellow)
    if lines:
        for line in lines:
            cv.cvLine(img_orig, line[0], line[1], cv.CV_RGB(255,255,0), 3, 8)
    # Draw circles (blue)
    if circles:
        for circle in circles:
            cv.cvCircle(img_orig, cv.cvPoint(cv.cvRound(circle[0]),cv.cvRound(circle[1])),cv.cvRound(circle[2]),cv.CV_RGB(0,0,255),3,8,0)
    # Draw squares (green closed polylines); `squares` is a flat sequence
    # of points, four vertices per square.
    if squares:
        i = 0
        while i<squares.total:
            pt = []
            # read 4 vertices
            pt.append(squares[i])
            pt.append(squares[i+1])
            pt.append(squares[i+2])
            pt.append(squares[i+3])
            ## draw the square as a closed polyline
            cv.cvPolyLine(img_orig, [pt], 1, cv.CV_RGB(0,255,0), 3, cv.CV_AA, 0)
            i += 4
    # Resize the image to display properly within the window
    # CV_INTER_NN - nearest-neigbor interpolation,
    # CV_INTER_LINEAR - bilinear interpolation (used by default)
    # CV_INTER_AREA - resampling using pixel area relation. (preferred for image decimation)
    # CV_INTER_CUBIC - bicubic interpolation.
    img_display = cv.cvCreateImage(cv.cvSize(self.width(),self.height()), 8, 3)
    cv.cvResize(img_orig, img_display, cv.CV_INTER_NN)
    # IplImage -> PIL -> in-memory PNG -> QImage, then blit to the widget.
    img_pil = adaptors.Ipl2PIL(img_display)
    s = StringIO()
    img_pil.save(s, "PNG")
    s.seek(0)
    q_img = QImage()
    q_img.loadFromData(s.read())
    bitBlt(self, 0, 0, q_img)
def draw_bounding_boxes(cascade_list, img, r, g, b, width):
    """Draw an (r, g, b) rectangle of the given line *width* around each
    detected region in *cascade_list*; no-op for an empty/None list."""
    if cascade_list:
        for detection in cascade_list:
            x0, y0 = int(detection.x), int(detection.y)
            x1 = int(detection.x + detection.width)
            y1 = int(detection.y + detection.height)
            opencv.cvRectangle(img,
                               opencv.cvPoint(x0, y0),
                               opencv.cvPoint(x1, y1),
                               opencv.CV_RGB(r, g, b), width)
def depthmatch(x,y,leftimage,rightimage,roi=20,buf=10,debug=False): __doc__ = """depthmatch function x,y : (int) pixel position of target in left image leftimage, rightimage : (IplImage) stereo images roi: (int) region of interest around x,y to use in matching buf: (int) buffer outside of a straight horizontal search for a match """ info = cv.cvGetSize(leftimage) width = info.width height = info.height (y1,x1,y2,x2) = (y-roi,x-roi,y+roi,x+roi) #template = cv.cvCreateImage((roi*2,roi*2), 8, 3) if y1<0: y1 = 0 if x1<0: x1 = 0 if y2>height: y2 = height if x2>width: x2 = width #cv.cvSetZero(template) # copy subregion roi x roi template_rect = cv.cvRect(x1,y1,(x2-x1),(y2-y1)) template = cv.cvGetSubRect(leftimage, template_rect) (y3,x3,y4,x4) = (y-roi-buf,x-roi-buf,y+roi+buf,width) # +/- 20 pixels in vertical direction, -20 to the right edge if x3<0: x3 = 0 if y3<0: y3 = 0 if x4>=width: x4 = width-1 if y4>height: y4 = height #cv.cvSetImageROI(rightimage, (y3,x3,y4,x4)) rightsub_rect = cv.cvRect(x3,y3,(x4-x3),(y4-y3)) rightsub = cv.cvGetSubRect(rightimage, rightsub_rect) # result matrix should be (W - w + 1) x (H - h + 1) where WxH are template dimensions, wxh are rightsub dimensions W = x4-x3 H = y4-y3 w = x2-x1 h = y2-y1 resy = (y4-y3)-(y2-y1)+1 resx = (x4-x3)-(x2-x1)+1 resultmat = cv.cvCreateImage((resx, resy), 32, 1) cv.cvZero(resultmat) # match template image in a subportion of rightimage cv.cvMatchTemplate(rightsub, template, resultmat, cv.CV_TM_SQDIFF) min_val, max_val, min_point, max_point = cv.cvMinMaxLoc(resultmat) cv.cvNormalize(resultmat, resultmat, 1, 0, cv.CV_MINMAX) depth = stereo.depth(x, x3+min_point.x, max_pixels=width/2) if debug: print "Input image: %ix%i, target: (%i,%i)" % (width,height,x,y) print "Template box: (%i,%i) to (%i,%i)" % (x1, y1, x2, y2) print "Search area: (%i,%i) to (%i,%i)" % (x3, y3, x4, y4) print "%ix%i, %ix%i" % (W,H,w,h) print "Result matrix %ix%i" % (resx, resy) print "stereo.depth(%i,%i,max_pixels=%i)" % (x, 
min_point.x+x3,width/2) if depth[0]: print "Depth: ", depth[0], "(cm)" #cv.cvRectangle(rightimage, cv.cvPoint(x1,y1), cv.cvPoint(x2,y2), (255,0,0)) cv.cvRectangle(rightimage, cv.cvPoint(min_point.x+x3,min_point.y+y3), cv.cvPoint(min_point.x+x3+roi*2,min_point.y+y3+roi*2), (0,255,0)) cv.cvRectangle(rightimage, cv.cvPoint(x3,y3), cv.cvPoint(x4,y4), (0,0,255)) cv.cvRectangle(leftimage, cv.cvPoint(x1,y1), cv.cvPoint(x2,y2), (255,0,0)) #cv.cvRectangle(leftimage, cv.cvPoint(min_point.x+x3,min_point.y+y3), cv.cvPoint(min_point.x+x3+roi*2,min_point.y+y3+roi*2), (0,255,0)) cv.cvRectangle(leftimage, cv.cvPoint(x3,y3), cv.cvPoint(x4,y4), (0,0,255)) if depth[0]: cv.cvPutText(leftimage, "%5f(cm)" % depth[0], (x1,y1), font, (255,255,255)) highgui.cvShowImage("depthmatch - template", template) highgui.cvShowImage("depthmatch - match", resultmat) highgui.cvShowImage("depthmatch - right", rightimage) highgui.cvShowImage("depthmatch - left", leftimage)
# compute the width for each bin do display bin_w = histimg.width / hdims for i in range (hdims): # for all the bins # get the value, and scale to the size of the hist image val = cv.cvRound (cv.cvGetReal1D (hist.bins, i) * histimg.height / 255) # compute the color color = hsv2rgb (i * 180. / hdims) # draw the rectangle in the wanted color cv.cvRectangle (histimg, cv.cvPoint (i * bin_w, histimg.height), cv.cvPoint ((i + 1) * bin_w, histimg.height - val), color, -1, 8, 0) # we can now display the images highgui.cvShowImage ('Camera', frame) highgui.cvShowImage ('Histogram', histimg) # handle events k = highgui.cvWaitKey (10) if k == '\x1b': # user has press the ESC key, so exit break
# Don't normalize, use total mask pixels to calculate relative importance # cv.cvNormalizeHist(h_hue, 180) # cv.cvNormalizeHist(h_sat, 255) # cv.cvNormalizeHist(h_val, 255) # minv,maxv,minp,maxp = cv.cvMinMaxLoc(img_h) # print minv,maxv cv.cvZero(hist_hue_img) # hue_min,hue_max,min_loc,max_loc = cv.cvGetMinMaxHistValue(h_hue) for h in xrange(h_bins): hue = cv.cvGetReal1D(h_hue.bins, h) color = hsv2rgb(h * h_limit / h_bins) cv.cvRectangle( hist_hue_img, (h * scalewidth, 0), ((h + 1) * scalewidth, (hue / sample_pixels) * scaleheight), color, cv.CV_FILLED, ) cv.cvLine( hist_hue_img, (0, scaleheight * hue_cutoff / sample_pixels), (h_bins * scalewidth, scaleheight * hue_cutoff / sample_pixels), (255, 0, 0), 1, ) highgui.cvShowImage("Histogram - Hue", hist_hue_img) cv.cvZero(hist_val_img) # val_min,val_max,min_loc,max_loc = cv.cvGetMinMaxHistValue(h_val) for v in xrange(v_bins):