def _in_region(center, front_box, back_box):
    # True when `center` lies strictly inside the rectangle spanned by
    # front_box[0] (top-left corner) and back_box[1] (bottom-right corner).
    return (front_box[0].x < center.x < back_box[1].x and
            front_box[0].y < center.y < back_box[1].y)


def detectObject(image):
    """Detect hands in `image` with the Haar cascade and drive the motors.

    Mirrors the frame in place, runs the cascade from the global `haar_file`,
    marks each detection with a black dot, and calls `set_speed` for any
    detection whose centre falls inside the left or right control zone
    (defined by the global box_* corner pairs).
    """
    grayscale = cv.cvCreateImage(size, 8, 1)
    # Mirror in place so on-screen movement matches the user's movement.
    cv.cvFlip(image, None, 1)
    cv.cvCvtColor(image, grayscale, cv.CV_BGR2GRAY)
    storage = cv.cvCreateMemStorage(0)
    cv.cvClearMemStorage(storage)
    # Equalize to reduce lighting sensitivity before detection.
    cv.cvEqualizeHist(grayscale, grayscale)
    # NOTE(review): the cascade is reloaded on every frame; consider caching
    # it at module level if this is called per-frame.
    cascade = cv.cvLoadHaarClassifierCascade(haar_file, cv.cvSize(1, 1))
    objects = cv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2,
                                     cv.CV_HAAR_DO_CANNY_PRUNING,
                                     cv.cvSize(100, 100))
    # Draw dots where hands are
    if objects:
        for obj in objects:
            center = cv.cvPoint(int(obj.x + obj.width / 2),
                                int(obj.y + obj.height / 2))
            cv.cvCircle(image, center, 10, cv.CV_RGB(0, 0, 0), 5, 8, 0)
            # Left side check
            if _in_region(center, box_forward_left, box_backwards_left):
                set_speed('left', center)
            # Right side check
            if _in_region(center, box_forward_right, box_backwards_right):
                set_speed('right', center)
def detectObject(image):
    """Find hands in the mirrored frame and react to the control zones.

    Each Haar detection gets a black dot at its centre; if the centre sits
    inside the left or right zone (global box_* corner pairs), `set_speed`
    is invoked for that side.
    """
    # Build a mirrored, histogram-equalized grayscale view of the frame.
    grayscale = cv.cvCreateImage(size, 8, 1)
    cv.cvFlip(image, None, 1)
    cv.cvCvtColor(image, grayscale, cv.CV_BGR2GRAY)
    storage = cv.cvCreateMemStorage(0)
    cv.cvClearMemStorage(storage)
    cv.cvEqualizeHist(grayscale, grayscale)

    cascade = cv.cvLoadHaarClassifierCascade(haar_file, cv.cvSize(1, 1))
    detections = cv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2,
                                        cv.CV_HAAR_DO_CANNY_PRUNING,
                                        cv.cvSize(100, 100))
    if not detections:
        return

    for rect in detections:
        center = cv.cvPoint(int(rect.x + rect.width / 2),
                            int(rect.y + rect.height / 2))
        # Mark the detection with a black dot.
        cv.cvCircle(image, center, 10, cv.CV_RGB(0, 0, 0), 5, 8, 0)
        # Left-hand control zone.
        if (box_forward_left[0].x < center.x < box_backwards_left[1].x
                and box_forward_left[0].y < center.y < box_backwards_left[1].y):
            set_speed('left', center)
        # Right-hand control zone.
        if (box_forward_right[0].x < center.x < box_backwards_right[1].x
                and box_forward_right[0].y < center.y < box_backwards_right[1].y):
            set_speed('right', center)
def flip(self, flip):
    """ Flips the image in place.

    Arguments:
    - self: The main object pointer.
    - flip: Dictionary with keys "hor" and "ver" with values True/False.

    Returns the (possibly flipped) image.
    """
    # BUG FIX: the original condition `"hor" or "both" in flip` parses as
    # `("hor") or ("both" in flip)`; the non-empty string "hor" is always
    # truthy, so BOTH flips ran unconditionally regardless of `flip`.
    # Per the docstring, consult the dict's boolean values instead
    # (a "both" key, if present, triggers both flips).
    if flip.get("hor") or flip.get("both"):
        cv.cvFlip(self.__image, self.__image, 1)
    if flip.get("ver") or flip.get("both"):
        cv.cvFlip(self.__image, self.__image, 0)
    return self.__image
def timerEvent(self, ev):
    """Grab a camera frame, run square detection on it, and blit the result
    onto this widget as a PNG-decoded QImage."""
    # Fetch a frame from the video camera
    frame = highgui.cvQueryFrame(self.cap)
    img_orig = cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                                cv.IPL_DEPTH_8U, frame.nChannels)
    if frame.origin == cv.IPL_ORIGIN_TL:
        cv.cvCopy(frame, img_orig)
    else:
        # Bottom-left origin: flip vertically so the copy is upright.
        cv.cvFlip(frame, img_orig, 0)
    # BUG FIX: the original passed the undefined local name `frame_copy`
    # (a NameError at runtime); the normalized frame copy is `img_orig`.
    #img = self.detect_face(img_orig)
    img = self.detect_squares(img_orig)
    # Round-trip through PIL/PNG to hand the pixels to Qt.
    img_pil = adaptors.Ipl2PIL(img)
    s = StringIO()
    img_pil.save(s, "PNG")
    s.seek(0)
    q_img = QImage()
    q_img.loadFromData(s.read())
    bitBlt(self, 0, 0, q_img)
def timerEvent(self, ev):
    """Capture one camera frame, detect squares, and paint it on the widget."""
    # Fetch a frame from the video camera
    frame = highgui.cvQueryFrame(self.cap)
    img_orig = cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                                cv.IPL_DEPTH_8U, frame.nChannels)
    if frame.origin == cv.IPL_ORIGIN_TL:
        cv.cvCopy(frame, img_orig)
    else:
        # Flip vertically to normalize a bottom-left-origin frame.
        cv.cvFlip(frame, img_orig, 0)
    # BUG FIX: `frame_copy` was never assigned in this function, so the
    # original raised NameError here; `img_orig` holds the normalized copy.
    #img = self.detect_face(img_orig)
    img = self.detect_squares(img_orig)
    # Convert IplImage -> PIL -> PNG bytes -> QImage for the blit.
    img_pil = adaptors.Ipl2PIL(img)
    s = StringIO()
    img_pil.save(s, "PNG")
    s.seek(0)
    q_img = QImage()
    q_img.loadFromData(s.read())
    bitBlt(self, 0, 0, q_img)
# Scratch buffer holding the HSV conversion of each captured frame.
hsv = cv.cvCreateImage(frame_size, 8, 3)
# Hue histogram used for back-projection.
hist = cv.cvCreateHist([hdims], cv.CV_HIST_ARRAY, hranges, 1)

while True:
    # Grab the next frame; a None frame means the stream has ended.
    frame = highgui.cvQueryFrame(capture)
    if frame is None:
        break
    # Mirror the captured image in place.
    cv.cvFlip(frame, None, 1)
    # Convert to HSV, then flag the pixels inside the wanted range.
    cv.cvCvtColor(frame, hsv, cv.CV_BGR2HSV)
    cv.cvInRangeS(hsv, hsv_min, hsv_max, mask)
    # Pull the hue plane out of the HSV image.
    cv.cvSplit(hsv, hue, None, None, None)
    # Restrict the hue/mask arrays to the selected rectangle of interest.
    hue_roi = cv.cvGetSubRect(hue, selection)
    mask_roi = cv.cvGetSubRect(mask, selection)
    # it's time to compute the histogram
my_grayscale = cv.cvCreateImage(frame_size, 8, 1) mask = cv.cvCreateImage(frame_size, 8, 1) cv.cvSet(mask, 1) blob_overlay = False while True: # 1. capture the current image frame = highgui.cvQueryFrame(capture) if frame is None: # no image captured... end the processing break # mirror the captured image cv.cvFlip(frame, None, 1) cv.cvCvtColor(frame, my_grayscale, cv.CV_RGB2GRAY) cv.cvThreshold(my_grayscale, my_grayscale, 128, 255, cv.CV_THRESH_BINARY) if not blob_overlay: # Convert black-and-white version back into three-color representation cv.cvCvtColor(my_grayscale, frame, cv.CV_GRAY2RGB) myblobs = CBlobResult(my_grayscale, mask, 100, True) myblobs.filter_blobs(10, 10000) blob_count = myblobs.GetNumBlobs() for i in range(blob_count):
def timerEvent(self, ev):
    """Capture a frame, run the face/circle/square/line detectors, draw
    every detection on the frame, and blit it scaled to the widget."""
    # Fetch a frame from the video camera
    frame = highgui.cvQueryFrame(self.cap)
    img_orig = cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                                cv.IPL_DEPTH_8U, frame.nChannels)
    # Normalize the frame to a top-left origin.
    if frame.origin == cv.IPL_ORIGIN_TL:
        cv.cvCopy(frame, img_orig)
    else:
        cv.cvFlip(frame, img_orig, 0)

    # Single grayscale copy shared by all detectors.
    img_grey = cv.cvCreateImage(cv.cvSize(img_orig.width, img_orig.height), 8, 1)
    cv.cvCvtColor(img_orig, img_grey, cv.CV_BGR2GRAY)

    # Run each detector with its own fresh scratch storage.
    self.faces_storage = cv.cvCreateMemStorage(0)
    faces = self.detect_faces(img_grey)
    self.circles_storage = cv.cvCreateMemStorage(0)
    circles = self.detect_circles(img_grey)
    self.squares_storage = cv.cvCreateMemStorage(0)
    squares = self.detect_squares(img_grey, img_orig)
    self.lines_storage = cv.cvCreateMemStorage(0)
    lines = self.detect_lines(img_grey, img_orig)

    # Faces: red rectangles.
    if faces:
        for face in faces:
            pt1, pt2 = self.face_points(face)
            cv.cvRectangle(img_orig, pt1, pt2, cv.CV_RGB(255, 0, 0), 3, 8, 0)
    # Lines: yellow segments.
    if lines:
        for line in lines:
            cv.cvLine(img_orig, line[0], line[1], cv.CV_RGB(255, 255, 0), 3, 8)
    # Circles: blue outlines from (x, y, radius) triples.
    if circles:
        for circle in circles:
            cv.cvCircle(img_orig,
                        cv.cvPoint(cv.cvRound(circle[0]), cv.cvRound(circle[1])),
                        cv.cvRound(circle[2]), cv.CV_RGB(0, 0, 255), 3, 8, 0)
    # Squares arrive as a flat sequence of vertices, four per square.
    if squares:
        for base in range(0, squares.total, 4):
            pt = [squares[base + offset] for offset in range(4)]
            # draw the square as a closed polyline
            cv.cvPolyLine(img_orig, [pt], 1, cv.CV_RGB(0, 255, 0), 3,
                          cv.CV_AA, 0)

    # Scale to the widget; nearest-neighbour keeps the per-frame cost low.
    # (CV_INTER_LINEAR/AREA/CUBIC are the higher-quality alternatives.)
    img_display = cv.cvCreateImage(cv.cvSize(self.width(), self.height()), 8, 3)
    cv.cvResize(img_orig, img_display, cv.CV_INTER_NN)
    img_pil = adaptors.Ipl2PIL(img_display)
    buf = StringIO()
    img_pil.save(buf, "PNG")
    buf.seek(0)
    q_img = QImage()
    q_img.loadFromData(buf.read())
    bitBlt(self, 0, 0, q_img)
def read(self):
    """Read a frame from the wrapped input, flipping it when enabled.

    The flip direction comes from ``self.flip_mode``; cvFlip with a None
    destination operates in place. When the filter is disabled the frame
    passes through untouched.
    """
    frame = self.input.read()
    if self.enabled:
        cv.cvFlip(frame, None, self.flip_mode)
    return frame
def timerEvent(self, ev):
    """Fetch a camera frame, overlay all detected shapes, and display it."""
    # Fetch a frame from the video camera
    frame = highgui.cvQueryFrame(self.cap)
    source = cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                              cv.IPL_DEPTH_8U, frame.nChannels)
    if frame.origin == cv.IPL_ORIGIN_TL:
        cv.cvCopy(frame, source)
    else:
        # Flip vertically so the working copy has a top-left origin.
        cv.cvFlip(frame, source, 0)

    # Grayscale version consumed by every detector below.
    grey = cv.cvCreateImage(cv.cvSize(source.width, source.height), 8, 1)
    cv.cvCvtColor(source, grey, cv.CV_BGR2GRAY)

    # Detect objects within the frame, giving each pass fresh storage.
    self.faces_storage = cv.cvCreateMemStorage(0)
    faces = self.detect_faces(grey)
    self.circles_storage = cv.cvCreateMemStorage(0)
    circles = self.detect_circles(grey)
    self.squares_storage = cv.cvCreateMemStorage(0)
    squares = self.detect_squares(grey, source)
    self.lines_storage = cv.cvCreateMemStorage(0)
    lines = self.detect_lines(grey, source)

    # Draw faces (red boxes).
    if faces:
        for face in faces:
            corner_a, corner_b = self.face_points(face)
            cv.cvRectangle(source, corner_a, corner_b,
                           cv.CV_RGB(255, 0, 0), 3, 8, 0)
    # Draw lines (yellow).
    if lines:
        for seg in lines:
            cv.cvLine(source, seg[0], seg[1], cv.CV_RGB(255, 255, 0), 3, 8)
    # Draw circles (blue), rounding the float (x, y, r) values.
    if circles:
        for circ in circles:
            cv.cvCircle(source,
                        cv.cvPoint(cv.cvRound(circ[0]), cv.cvRound(circ[1])),
                        cv.cvRound(circ[2]), cv.CV_RGB(0, 0, 255), 3, 8, 0)
    # Draw squares (green closed polylines); vertices come four at a time.
    if squares:
        i = 0
        while i < squares.total:
            pt = [squares[j] for j in range(i, i + 4)]
            cv.cvPolyLine(source, [pt], 1, cv.CV_RGB(0, 255, 0), 3,
                          cv.CV_AA, 0)
            i += 4

    # Resize to fit the widget using nearest-neighbour interpolation
    # (cheapest of NN / LINEAR / AREA / CUBIC).
    scaled = cv.cvCreateImage(cv.cvSize(self.width(), self.height()), 8, 3)
    cv.cvResize(source, scaled, cv.CV_INTER_NN)
    # PIL/PNG round-trip hands the pixels to Qt for the final blit.
    pil_image = adaptors.Ipl2PIL(scaled)
    stream = StringIO()
    pil_image.save(stream, "PNG")
    stream.seek(0)
    q_img = QImage()
    q_img.loadFromData(stream.read())
    bitBlt(self, 0, 0, q_img)
if frame is None: # no image captured... end the processing break if image is None: # create the images we need image = cv.cvCreateImage (cv.cvGetSize (frame), 8, 3) image.origin = frame.origin hsv = cv.cvCreateImage( cv.cvGetSize(frame), 8, 3 ) hue = cv.cvCreateImage( cv.cvGetSize(frame), 8, 1 ) mask = cv.cvCreateImage( cv.cvGetSize(frame), 8, 1 ) backproject = cv.cvCreateImage( cv.cvGetSize(frame), 8, 1 ) hist = cv.cvCreateHist( [hdims], cv.CV_HIST_ARRAY, hranges, 1 ) # flip the image cv.cvFlip (frame, image, 1) cv.cvCvtColor( image, hsv, cv.CV_BGR2HSV) cv.cvLine(image, cv.cvPoint(0, image.height/2), cv.cvPoint(image.width, image.height/2), cv.CV_RGB(0,255,0), 2, 8, 0 ) cv.cvLine(image, cv.cvPoint(image.width/2, 0), cv.cvPoint(image.width/2, image.height), cv.CV_RGB(0,255,0), 2, 8, 0 ) if track_object: _vmin = vmin _vmax = vmax cv.cvInRangeS( hsv, cv.cvScalar( 0, smin,min(_vmin,_vmax),0),
highgui.cvNamedWindow(win_name, highgui.CV_WINDOW_AUTOSIZE)
# create the trackbar
highgui.cvCreateTrackbar(trackbar_name, win_name, 1, 100, on_trackbar)
# Render once so the window is not blank before the first trackbar event.
on_trackbar(0)

frame_copy = None
while True:
    image = highgui.cvQueryFrame(capture)
    if not image:
        break
    # Allocate the frame buffer once, matching the capture format.
    if not frame_copy:
        frame_copy = cv.cvCreateImage(cv.cvSize(image.width, image.height),
                                      cv.IPL_DEPTH_8U, image.nChannels)
    # Normalize to a top-left origin before processing.
    if image.origin == cv.IPL_ORIGIN_TL:
        cv.cvCopy(image, frame_copy)
    else:
        cv.cvFlip(image, frame_copy, 0)

    gray = cv.cvCreateImage(cv.cvSize(image.width, image.height), 8, 1)
    edge = cv.cvCreateImage(cv.cvSize(image.width, image.height), 8, 1)
    cv.cvCvtColor(image, gray, cv.CV_BGR2GRAY)

    highgui.cvShowImage(win_name, col_edge)
    # NOTE(review): this second flip of the same frame looks stray —
    # confirm it is intentional before removing.
    cv.cvFlip(image, frame_copy, 0)
    if highgui.cvWaitKey(10) >= 0:
        break
if frame is None: # no image captured... end the processing break if image is None: # create the images we need image = cv.cvCreateImage(cv.cvGetSize(frame), 8, 3) image.origin = frame.origin hsv = cv.cvCreateImage(cv.cvGetSize(frame), 8, 3) hue = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1) mask = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1) backproject = cv.cvCreateImage(cv.cvGetSize(frame), 8, 1) hist = cv.cvCreateHist([hdims], cv.CV_HIST_ARRAY, hranges, 1) # flip the image cv.cvFlip(frame, image, 1) cv.cvCvtColor(image, hsv, cv.CV_BGR2HSV) cv.cvLine(image, cv.cvPoint(0, image.height / 2), cv.cvPoint(image.width, image.height / 2), cv.CV_RGB(0, 255, 0), 2, 8, 0) cv.cvLine(image, cv.cvPoint(image.width / 2, 0), cv.cvPoint(image.width / 2, image.height), cv.CV_RGB(0, 255, 0), 2, 8, 0) if track_object: _vmin = vmin _vmax = vmax