def run(self):
    """Main tracking loop: grab frames, run CAMShift against the hue
    histogram, and stream the tracked position to a local socket.

    Loops until ``self.quit`` is set.  The histogram is (re)computed while
    the user drags a selection rectangle (see recompute_histogram).
    """
    hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
    HOST, PORT = 'localhost', 5000
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((HOST, PORT))
    try:
        while not self.quit:
            frame = cv.QueryFrame(self.capture)
            track_box = None
            self.update_hue(frame)
            # Compute back projection of the current hue histogram
            backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.CalcArrBackProject([self.hue], backproject, hist)
            if self.track_window and is_rect_nonzero(self.track_window):
                camshift = cv.CamShift(backproject, self.track_window, STOP_CRITERIA)
                (iters, (area, value, rect), track_box) = camshift
                self.track_window = rect
            if self.drag_start and is_rect_nonzero(self.selection):
                # User is dragging a selection: highlight it and rebuild
                # the histogram from the selected region.
                self.draw_mouse_drag_area(frame)
                self.recompute_histogram(hist)
            elif self.track_window and is_rect_nonzero(self.track_window):
                cv.EllipseBox(frame, track_box, cv.CV_RGB(0, 255, 255), 3, cv.CV_AA, 0)
            if track_box:
                self.update_message(track_box)
                # FIX: sendall() guarantees the whole message is written;
                # plain send() may transmit only part of the buffer.
                sock.sendall(json.dumps(self.message) + "\n")
                self.draw_target(frame, track_box)
            self.update_windows(frame, backproject, hist)
            self.handle_keyboard_input()
            track_box = None
    finally:
        # FIX: the socket was previously leaked when the loop exited.
        sock.close()
def run(self):
    """CAMShift demo loop: track the mouse-selected region and display
    either the annotated camera frame or its back projection.

    Keys: ESC quits, 'b' toggles back-projection view.
    """
    hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
    backproject_mode = False
    while True:
        frame = cv.QueryFrame(self.capture)
        # Convert to HSV and keep only the hue channel
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
        # FIX: removed leftover debug statement `print(self.hue)` that
        # spammed stdout on every frame.
        cv.Split(hsv, self.hue, None, None, None)
        # Compute back projection
        backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)
        # Run the cam-shift
        cv.CalcArrBackProject([self.hue], backproject, hist)
        if self.track_window and is_rect_nonzero(self.track_window):
            crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
            (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit)
            self.track_window = rect
        # If mouse is pressed, highlight the current selected rectangle
        # and recompute the histogram
        if self.drag_start and is_rect_nonzero(self.selection):
            sub = cv.GetSubRect(frame, self.selection)
            save = cv.CloneMat(sub)
            cv.ConvertScale(frame, frame, 0.5)  # darken everything outside the selection
            cv.Copy(save, sub)
            x, y, w, h = self.selection
            cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))
            sel = cv.GetSubRect(self.hue, self.selection)
            cv.CalcArrHist([sel], hist, 0)
            (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
            if max_val != 0:
                cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
        elif self.track_window and is_rect_nonzero(self.track_window):
            cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3, cv.CV_AA, 0)
        if not backproject_mode:
            #frame=cv.Flip(frame)
            cv.ShowImage("CamShiftDemo", frame)
        else:
            cv.ShowImage("CamShiftDemo", backproject)
        cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))
        c = cv.WaitKey(7)
        if c == 27:  # ESC
            break
        elif c == ord("b"):
            backproject_mode = not backproject_mode
def process_rgb(self, dev, data, timestamp):
    # Per-frame RGB callback: hue back-projection + CAMShift tracking,
    # optional debug rendering, then keyboard handling.
    # `dev` and `timestamp` come from the capture callback and are unused here.
    #global keep_running
    # get an opencv version of video_cv data
    frame = frame_convert.video_cv(data)
    frame_size = cv.GetSize(frame)
    # Convert to HSV and keep the hue
    hsv = cv.CreateImage(frame_size, 8, 3)
    cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
    self.hue = cv.CreateImage(frame_size, 8, 1)
    # split the image into different hues
    cv.Split(hsv, self.hue, None, None, None)
    # Compute back projection
    # Run the cam-shift
    backproject = cv.CreateImage(frame_size, 8, 1)
    cv.CalcArrBackProject([self.hue], backproject, self.hist)
    # if we have a tracking window... shift it
    # Track_window => (rectangle of approx hue)
    if self.track_window and is_rect_nonzero(self.track_window):
        # set criteria for backproject iter
        # compute back projections - shifting rectangle in
        # appropriate direction
        crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
        (iters, (area, value, rect), self.track_box) = cv.CamShift(backproject, self.track_window, crit)
        # set track_window to the newly selected rectangle
        self.track_window = rect
    # if a section is being selected - set the histogram
    if self.debug:
        sel = self.dbg_rgb.check_for_selection(self.track_window, self.track_box)
        # sets the histogram if there is a selection
        if sel:
            self.set_hist(frame, sel)
        self.dbg_rgb.update(frame)
        #if self.track_window:
        #    self.dbg_rgb.add_box(self.track_box)
        self.dbg_rgb.render()
    # Bail out if ESC is pushed
    # NOTE(review): cv.WaitKey returns -1 when no key is pressed, so
    # `char` becomes chr(255) on idle frames and is still passed to the
    # classifier — confirm respond_to_key tolerates that.
    key = cv.WaitKey(3)
    char = chr(key & 255)
    # k is for KILL
    if char == 'k':
        self.keep_running = False
    else:
        self.curr_classifier().respond_to_key(char)
def do_camshift(self, cv_image):
    """Run one CAMShift iteration on ``cv_image``.

    Tracks ``self.track_window`` against ``self.hist``, publishes the
    resulting region of interest on ``self.ROI``, and returns either the
    annotated image or its back projection depending on
    ``self.backproject_mode``.
    """
    # FIX: the block "comments" in this method were bare triple-quoted
    # string literals, i.e. executable no-op expression statements; they
    # are now real comments (behavior unchanged).
    # Get the image size
    image_size = cv.GetSize(cv_image)
    image_width = image_size[0]
    image_height = image_size[1]
    # Convert to HSV and keep the hue
    hsv = cv.CreateImage(image_size, 8, 3)
    cv.CvtColor(cv_image, hsv, cv.CV_BGR2HSV)
    self.hue = cv.CreateImage(image_size, 8, 1)
    cv.Split(hsv, self.hue, None, None, None)
    # Compute back projection
    backproject = cv.CreateImage(image_size, 8, 1)
    # Run the cam-shift algorithm
    cv.CalcArrBackProject([self.hue], backproject, self.hist)
    if self.track_window and is_rect_nonzero(self.track_window):
        crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
        (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit)
        self.track_window = rect
    # If mouse is pressed, highlight the current selected rectangle
    # and recompute the histogram
    if self.drag_start and is_rect_nonzero(self.selection):
        sub = cv.GetSubRect(cv_image, self.selection)
        save = cv.CloneMat(sub)
        cv.ConvertScale(cv_image, cv_image, 0.5)  # darken outside the selection
        cv.Copy(save, sub)
        x, y, w, h = self.selection
        cv.Rectangle(cv_image, (x, y), (x + w, y + h), (255, 255, 255))
        sel = cv.GetSubRect(self.hue, self.selection)
        cv.CalcArrHist([sel], self.hist, 0)
        (_, max_val, _, _) = cv.GetMinMaxHistValue(self.hist)
        if max_val != 0:
            cv.ConvertScale(self.hist.bins, self.hist.bins, 255. / max_val)
    elif self.track_window and is_rect_nonzero(self.track_window):
        cv.EllipseBox(cv_image, track_box, cv.CV_RGB(255, 0, 0), 3, cv.CV_AA, 0)
        # Publish the tracked region, clamped to the image bounds.
        # (Kept inside this branch so `track_box` is always bound here.)
        roi = RegionOfInterest()
        roi.x_offset = int(min(image_width, max(0, track_box[0][0] - track_box[1][0] / 2)))
        roi.y_offset = int(min(image_height, max(0, track_box[0][1] - track_box[1][1] / 2)))
        roi.width = int(track_box[1][0])
        roi.height = int(track_box[1][1])
        self.ROI.publish(roi)
    cv.ShowImage("Histogram", self.hue_histogram_as_image(self.hist))
    if not self.backproject_mode:
        return cv_image
    else:
        return backproject
def setFrameIdx(self, frameIdx): self.frameIdx = frameIdx # Display the frame image = self.inputSequence.cameraImages[frameIdx] imageWidth = image.shape[1] imageHeight = image.shape[0] imageStep = imageWidth * 3 self.cameraImagePixBuf = gtk.gdk.pixbuf_new_from_data( image.tostring(), gtk.gdk.COLORSPACE_RGB, False, 8, imageWidth, imageHeight, imageStep) # Track gripper imageRGB = cv.CreateImageHeader((imageWidth, imageHeight), cv.IPL_DEPTH_8U, 3) cv.SetData(imageRGB, image.data, imageStep) imageRGB = cv.CloneImage(imageRGB) r_plane = cv.CreateMat(imageRGB.height, imageRGB.width, cv.CV_8UC1) g_plane = cv.CreateMat(imageRGB.height, imageRGB.width, cv.CV_8UC1) b_plane = cv.CreateMat(imageRGB.height, imageRGB.width, cv.CV_8UC1) cv.Split(imageRGB, r_plane, g_plane, b_plane, None) planes = [r_plane, g_plane, b_plane] backproject = cv.CreateImage(cv.GetSize(imageRGB), 8, 1) # Run the cam-shift cv.CalcArrBackProject(planes, backproject, self.gripperHistogram) #cv.Threshold( backproject, backproject, 1, 255, cv.CV_THRESH_BINARY ) cv.CvtColor(backproject, imageRGB, cv.CV_GRAY2RGB) #self.cameraImagePixBuf = gtk.gdk.pixbuf_new_from_data( #imageRGB.tostring(), #gtk.gdk.COLORSPACE_RGB, #False, #8, #imageRGB.width, #imageRGB.height, #imageRGB.width*3 ) # Resize the drawing area if necessary if self.dwgCameraImage.get_size_request() != (imageWidth, imageHeight): self.dwgCameraImage.set_size_request(imageWidth, imageHeight) self.dwgCameraImage.queue_draw()
def cameraImageCallback( self, rosImage ):
    """Handle an incoming ROS image: compute and draw optical flow, dump
    the frame to disk, then (currently unreachable, see NOTE below) track
    the gripper with CAMShift and update the GTK drawing area.

    Only "rgb8"/"bgr8" encodings are handled; others are logged as errors.
    """
    if rosImage.encoding == "rgb8" or rosImage.encoding == "bgr8":
        # Create an OpenCV image to process the data
        curImage = cv.CreateImageHeader( ( rosImage.width, rosImage.height ), cv.IPL_DEPTH_8U, 3 )
        cv.SetData( curImage, rosImage.data, rosImage.step )
        curImageGray = cv.CreateImage( ( rosImage.width, rosImage.height ), cv.IPL_DEPTH_8U, 1 )
        if rosImage.encoding == "bgr8":
            cv.CvtColor( curImage, curImageGray, cv.CV_BGR2GRAY )
        else:
            cv.CvtColor( curImage, curImageGray, cv.CV_RGB2GRAY )
        # Look for optical flow between this image and the last one
        self.opticalFlowX, self.opticalFlowY = self.opticalFlowFilter.calcOpticalFlow( curImageGray )
        # Draw the optical flow if it's available
        # FIX: use `is not None` — `!= None` is fragile on array-like objects.
        if self.opticalFlowX is not None and self.opticalFlowY is not None:
            lineColor = cv.CV_RGB( 0, 255, 0 )
            blockCentreY = self.OPTICAL_FLOW_BLOCK_HEIGHT / 2
            for y in range( self.opticalFlowX.shape[ 0 ] ):
                blockCentreX = self.OPTICAL_FLOW_BLOCK_WIDTH / 2
                for x in range( self.opticalFlowX.shape[ 1 ] ):
                    # Draw each flow vector from its block centre
                    endX = blockCentreX + cv.Get2D( self.opticalFlowX, y, x )[ 0 ]
                    endY = blockCentreY + cv.Get2D( self.opticalFlowY, y, x )[ 0 ]
                    cv.Line( curImage, ( int( blockCentreX ), int( blockCentreY ) ),
                             ( int( endX ), int( endY ) ), lineColor )
                    blockCentreX += self.OPTICAL_FLOW_BLOCK_WIDTH
                blockCentreY += self.OPTICAL_FLOW_BLOCK_HEIGHT
        # Save the image
        imageBGR = cv.CreateImage( ( rosImage.width, rosImage.height ), cv.IPL_DEPTH_8U, 3 )
        cv.CvtColor( curImage, imageBGR, cv.CV_RGB2BGR )
        cv.SaveImage( "/home/abroun/VideoTemp/frame{0:05}.png".format( self.imageIdx ), imageBGR )
        self.imageIdx += 1
        self.lastImage = curImage
        # NOTE(review): this unconditional return makes everything below
        # (gripper CAMShift tracking and the pixbuf display) unreachable —
        # it looks like a temporary video-dump mode left enabled; confirm
        # before removing either the return or the dead code.
        return

        # Use CAMShift to track the gripper
        gripperProbabilityImage = None
        if self.gripperHistogram is not None and self.gripperTrackWindow is not None:
            imageRGB = cv.CloneImage( curImage )
            r_plane = cv.CreateMat( imageRGB.height, imageRGB.width, cv.CV_8UC1 )
            g_plane = cv.CreateMat( imageRGB.height, imageRGB.width, cv.CV_8UC1 )
            b_plane = cv.CreateMat( imageRGB.height, imageRGB.width, cv.CV_8UC1 )
            cv.Split( imageRGB, r_plane, g_plane, b_plane, None )
            planes = [ r_plane, g_plane, b_plane ]
            backproject = cv.CreateImage(cv.GetSize(imageRGB), 8, 1)
            # Run the cam-shift
            # FIX: `except e:` raised NameError at exception time; catch
            # Exception and bind the instance properly.
            try:
                cv.CalcArrBackProject( planes, backproject, self.gripperHistogram )
            except Exception as e:
                print( "Got %s" % e )
                print( "self.gripperHistogram = %s" % self.gripperHistogram )
            if self.gripperTrackWindow[ 2 ] > 0 and self.gripperTrackWindow[ 3 ] > 0:
                crit = ( cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
                (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.gripperTrackWindow, crit)
                self.gripperTrackWindow = rect
                #print self.gripperTrackWindow
            if self.checkShowGripperProbability.get_active():
                #cv.Threshold( backproject, backproject, 1, 255, cv.CV_THRESH_BINARY )
                cv.Threshold( backproject, backproject, 128, 255, cv.CV_THRESH_TOZERO )
                cv.CvtColor( backproject, imageRGB, cv.CV_GRAY2RGB )
                gripperProbabilityImage = imageRGB
        # Display the image
        if gripperProbabilityImage is not None:
            self.cameraImagePixBuf = gtk.gdk.pixbuf_new_from_data(
                gripperProbabilityImage.tostring(),
                gtk.gdk.COLORSPACE_RGB, False, 8,
                gripperProbabilityImage.width, gripperProbabilityImage.height,
                gripperProbabilityImage.width*3 )
        else:
            self.cameraImagePixBuf = gtk.gdk.pixbuf_new_from_data(
                rosImage.data, gtk.gdk.COLORSPACE_RGB, False, 8,
                rosImage.width, rosImage.height, rosImage.step )
        # Resize the drawing area if necessary
        if self.dwgCameraImage.get_size_request() != ( rosImage.width, rosImage.height ):
            self.dwgCameraImage.set_size_request( rosImage.width, rosImage.height )
        self.dwgCameraImage.queue_draw()
        self.lastImage = curImage
        # Check to see if we did everything fast enough
        #if self.dataBuffersSetup:
            #testSampleIdx = self.curSampleIdx
            #if self.opticalFlowSampleIdx == None:
                #self.opticalFlowSampleIdx = testSampleIdx
            #else:
                ## We expect the sample idx to have advanced by 1 since we were last here
                #sampleIdxDiff = testSampleIdx - self.opticalFlowSampleIdx
                #if sampleIdxDiff > 1:
                    #print "Missed {0} samples".format( sampleIdxDiff - 1 )
                #elif sampleIdxDiff < 1:
                    #print "Not sampling fast enough"
                #self.opticalFlowSampleIdx = testSampleIdx
    else:
        rospy.logerr( "Unhandled image encoding - " + rosImage.encoding )
def OnIdle( self, ):
    """Request refresh of the context whenever idle.
    track, get position, update camera, then redraw"""
    # 180-bin hue histogram driving the CAMShift back projection
    hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0,180)], 1 )
    backproject_mode = False
    while True:
        frame = cv.QueryFrame(self.capture)
        # Convert to HSV and keep the hue
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.Split(hsv, self.hue, None, None, None)
        # Compute back projection
        backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)
        # Run the cam-shift
        cv.CalcArrBackProject( [self.hue], backproject, hist )
        if self.track_window and is_rect_nonzero(self.track_window):
            crit = ( cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
            (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit)
            self.track_window = rect
        # If mouse is pressed, highlight the current selected rectangle
        # and recompute the histogram
        if self.drag_start and is_rect_nonzero(self.selection):
            sub = cv.GetSubRect(frame, self.selection)
            save = cv.CloneMat(sub)
            # Darken the whole frame, then restore the selection so it stands out
            cv.ConvertScale(frame, frame, 0.5)
            cv.Copy(save, sub)
            x,y,w,h = self.selection
            cv.Rectangle(frame, (x,y), (x+w,y+h), (0,0,255))
            sel = cv.GetSubRect(self.hue, self.selection )
            cv.CalcArrHist( [sel], hist, 0)
            (_, max_val, _, _) = cv.GetMinMaxHistValue( hist)
            if max_val != 0:
                # Rescale bins so the histogram peaks at 255
                cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
        elif self.track_window and is_rect_nonzero(self.track_window):
            cv.EllipseBox(frame, track_box, cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0 )
            # find centroid coordinate (x,y) and area (z)
            # Published via module-level globals for other code to read
            selection_centroid = track_box[0]
            global xposition
            xposition = selection_centroid[0]
            global yposition
            yposition = selection_centroid[1]
            width_height = track_box[1]
            # writes output of coordinates to seed file if needed
            # with open('seed.txt', 'a') as f:
            #     value = (xposition, yposition)
            #     s = str(value) + '\n'
            #     f.write(s)
            # # f.write('end_of_session')
            # f.close()
            # print outs
            print "x: " + str(xposition)
            print "y: " + str(yposition)
            selection_area = width_height[0]*width_height[1]
            # print "The width is: " + str(width_height[0]) + " The height is: " + str(width_height[1])
            # print "centroid is: " + str(selection_centroid)
            # return "centroid is: " + str(selection_centroid)
            print "area: " + str(selection_area)
            # return "area is: " + str(selection_area)
        if not backproject_mode:
            cv.ShowImage( "CamShiftDemo", frame )
        else:
            cv.ShowImage( "CamShiftDemo", backproject)
        cv.ShowImage( "Histogram", self.hue_histogram_as_image(hist))
        c = cv.WaitKey(10)
        if c == 27:  # escape key
            break
        elif c == ord("b"):  # show backproject mode with "b" key
            backproject_mode = not backproject_mode
    self.triggerRedraw(1)
    return 1
def compute_camshift_centroids(img, selection, hist):
    # Run one CAMShift pass on `img` starting from `selection`, using the
    # hue histogram `hist`.  Returns ((x, y) centroid, (x, y, w, h) rect),
    # or (None, None) when the tracked rect collapses to zero size.
    # capture = cv.CaptureFromCAM(0)
    # frame = cv.QueryFrame(capture)
    # isolate_hue
    # Convert to HSV and keep the hue
    hsv = cv.CreateImage(cv.GetSize(img), 8, 3)
    cv.CvtColor(img, hsv, cv.CV_BGR2HSV)
    hue = cv.CreateImage(cv.GetSize(img), 8, 1)
    cv.Split(hsv, hue, None, None, None)
    # end isolate_hue
    # Compute back projection
    backproject = cv.CreateImage(cv.GetSize(img), 8, 1)
    # end compute back projection
    # highlight the current selected rectangle and recompute the histogram
    # w = xmax - xmin
    # h = ymax - ymax
    # xmin,ymin,w,h = selection
    #cv.Rectangle(img, (selection[0],selection[1]), (selection[2], selection[3]), (0,0,255))
    # end highlight
    # Run the camshift
    cv.CalcArrBackProject([hue], backproject, hist)
    # end run camshift
    # draw ellipse
    # NOTE(review): the quadruple-quote below opens a throwaway string
    # literal (a no-op expression) used to disable the histogram-recompute
    # branch; kept as-is, but a real comment block would be safer.
    """"
    print "selection is" + str(selection)
    if selection: #and is_rect_nonzero(selection):
        # (iteration criteria, max, min)
        crit = ( cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
        (iters, (area, value, rect), track_box) = cv.CamShift(backproject, selection, crit)
        selection = rect
        # treats selection as independent array
        sel = cv.GetSubRect(hue, selection)
        # calculate histogram for array
        cv.CalcArrHist( [sel], hist, 0)
        (_, max_val, _, _) = cv.GetMinMaxHistValue( hist)
        if max_val != 0:
            #cv.ConvertScale(src, dst, scale=1.0, shift=0.0)
            cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
    elif selection:
    """
    crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
    (iters, (area, value, rect), track_box) = cv.CamShift(backproject, selection, crit)
    #and is_rect_nonzero(selection):
    #cv.EllipseBox(img, track_box, cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0 )
    # end draw ellipse
    # find centroid coordinate (x,y) and area (z)
    selection_centroid = track_box[0]
    xposition = selection_centroid[0]
    yposition = selection_centroid[1]
    width_height = track_box[1]
    #print selection
    # Convert the centroid + size back to a top-left (x, y, w, h) rect
    rect = (int(xposition - width_height[0] / float(2)),
            int(yposition - width_height[1] / float(2)),
            int(width_height[0]), int(width_height[1]))
    #print rect
    # selection_area = width_height[0]*width_height[1]
    face_centroid_camshift = (xposition, yposition)
    if not is_rect_nonzero(rect):
        return None, None
    return face_centroid_camshift, rect
def backproject(self):
    """Back-project the feature histogram onto the full frame.

    Writes the result into ``self.bp`` and returns it.
    """
    channels = [self.hue, self.sat]
    cv.CalcArrBackProject(channels, self.bp, self.feature_hist)
    return self.bp
def camshift_track(roi_selection, img):
    """Track the region given by ``roi_selection`` in ``img`` with CAMShift.

    roi_selection: pair of corner points ((xmin, ymin), (xmax, ymax)).
    img: BGR image (OpenCV IplImage).
    Returns the (x, y) centroid of the tracked region.

    Internal steps:
    setup_camshift:
        - is_rect_nonzero(r)
        - hue_histogram_as_image(hist)
        - making_selection(roi_selection)
        - create_hist()
        - placeholder()
    compute_camshift_centroid:
        - isolate_hue
        - compute_back_projection
        - recompute_histogram(roi_selection)
        - run_camshift
        - draw ellipse
        - find_centroid(track_box)
        - show_hist
    """
    # setup_camshift helpers
    def is_rect_nonzero(r):
        (_, _, w, h) = r
        return (w > 0) and (h > 0)

    def hue_histogram_as_image(hist):
        """ Returns representation of a hue histogram """
        histimg_hsv = cv.CreateImage((320, 200), 8, 3)
        mybins = cv.CloneMatND(hist.bins)
        cv.Log(mybins, mybins)
        (_, hi, _, _) = cv.MinMaxLoc(mybins)
        cv.ConvertScale(mybins, mybins, 255. / hi)
        w, h = cv.GetSize(histimg_hsv)
        hdims = cv.GetDims(mybins)[0]
        for x in range(w):
            xh = (180 * x) / (w - 1)  # hue sweeps from 0-180 across the image
            val = int(mybins[int(hdims * x / w)] * h / 255)
            cv.Rectangle(histimg_hsv, (x, 0), (x, h - val), (xh, 255, 64), -1)
            cv.Rectangle(histimg_hsv, (x, h - val), (x, h), (xh, 255, 255), -1)
        histimg = cv.CreateImage((320, 200), 8, 3)
        cv.CvtColor(histimg_hsv, histimg, cv.CV_HSV2BGR)
        return histimg

    # making_selection: convert the two corner points into (x, y, w, h)
    point1 = roi_selection[0]
    point2 = roi_selection[1]
    xmin = point1[0]
    ymin = point1[1]
    xmax = point2[0]
    ymax = point2[1]
    widthx = xmax - xmin
    heighty = ymax - ymin
    selection = (xmin, ymin, widthx, heighty)
    # end of making_selection

    # create_hist: 180-bin hue histogram
    hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
    # placeholder
    backproject_mode = False

    while True:
        # FIX: the loop previously referenced an undefined name `frame`
        # (a leftover from the webcam-capture version, see the commented
        # lines below), raising NameError; operate on the `img` argument.
        # capture = cv.CaptureFromCAM(0)
        # frame = cv.QueryFrame(capture)
        frame = img

        # isolate_hue: convert to HSV and keep the hue channel
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.Split(hsv, hue, None, None, None)
        # end isolate_hue

        # Compute back projection
        backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)
        # end compute back projection

        # highlight the current selected rectangle and recompute the histogram
        # (FIX: removed dead `w = xmax - xmin` / `h = ymax - ymax` pair —
        # the typo'd values were immediately overwritten by the unpack below)
        xmin, ymin, w, h = selection
        cv.Rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 0, 255))
        # end highlight

        # Run the camshift
        cv.CalcArrBackProject([hue], backproject, hist)
        # end run camshift

        # draw ellipse
        print("selection is" + str(selection))
        if selection and is_rect_nonzero(selection):
            crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
            (iters, (area, value, rect), track_box) = cv.CamShift(backproject, selection, crit)
            selection = rect
            sel = cv.GetSubRect(hue, selection)
            cv.CalcArrHist([sel], hist, 0)
            (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
            if max_val != 0:
                cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
        elif selection and is_rect_nonzero(selection):
            # NOTE(review): unreachable — identical condition to the branch
            # above; `track_box` is also unbound if the first branch never ran.
            cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3, cv.CV_AA, 0)
        # end draw ellipse

        # find centroid coordinate (x,y) and area (z)
        selection_centroid = track_box[0]
        xposition = selection_centroid[0]
        yposition = selection_centroid[1]
        width_height = track_box[1]
        # selection_area = width_height[0]*width_height[1]
        face_centroid_camshift = (xposition, yposition)
        # end find centroid

        # show hist
        if backproject_mode:
            # cv.ShowImage( "CamShiftDemo", backproject)
            # FIX: this is a plain function, not a method — call the local
            # helper instead of the undefined `self`.
            cv.ShowImage("Histogram", hue_histogram_as_image(hist))
        # end show hist

        print("I made it this far")
        return face_centroid_camshift
def pipeline(self):
    """Process one frame: grab, split into HSV planes, detect the face,
    back-project foreground/background skin histograms, threshold and
    morph the result, and draw detected limb contours.  Shows (and
    optionally records) the combined debug view.
    """
    presentation = []
    self.orig = self.source.grab_frame()
    cv.Resize(self.orig, self.small)
    cv.CvtColor(self.small, self.hsv, cv.CV_BGR2HSV)
    cv.Split(self.hsv, self.hue, self.sat, self.bw, None)
    cv.Copy(self.small, self.visualize)
    presentation.append((self.visualize, 'input'))
    face = self.find_face(self.small)
    if face:
        sub_face = self.face_region(face, FACE_BORDER)
        self.update_histogram(sub_face)
        self.draw_face(self.visualize, face, sub_face)
        # Background sample: hue/sat planes with the face area blanked out
        hue_bg = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        sat_bg = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        (x, y, w, h) = face
        cv.Copy(self.hue, hue_bg)
        cv.Copy(self.sat, sat_bg)
        cv.Rectangle(hue_bg, (x, y), (x + w, y + h), 0, cv.CV_FILLED)
        cv.Rectangle(sat_bg, (x, y), (x + w, y + h), 0, cv.CV_FILLED)
        # NOTE(review): the background histogram is accumulated from the
        # full hue/sat planes, not the blanked hue_bg/sat_bg built above —
        # confirm whether [hue_bg, sat_bg] was intended here.
        cv.CalcArrHist([self.hue, self.sat], self.bg_hist, 1)
        cv.NormalizeHist(self.bg_hist, 255)
        bp_bg = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        cv.CalcArrBackProject([self.hue, self.sat], bp_bg, self.bg_hist)
        self.normalize(bp_bg)
        presentation.append((bp_bg, 'background bp'))
        bp = self.backproject()
        presentation.append((bp, 'forground bp'))
        self.normalize(bp)
        # Keep pixels where the foreground response beats the background
        compare = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        compare_th = cv.CreateImage(self.smallsize, cv.IPL_DEPTH_8U, 1)
        cv.Cmp(bp, bp_bg, compare, cv.CV_CMP_GT)
        #cv.AddS(bp_bg, 1, bp_bg)
        #cv.Div(bp, bp_bg, compare)
        self.normalize(compare)
        cv.Threshold(compare, compare_th, self.threshold_value, 255, cv.CV_THRESH_BINARY)
        presentation.append((compare_th, 'compare'))
        th = self.threshold(bp)
        presentation.append((th, 'normal th'))
        morphed = self.morphology(th)
        # make dark copy of original, then paint the skin mask back in
        cv.Copy(self.small, self.result)
        cv.ConvertScale(self.result, self.result, 0.2)
        cv.Copy(self.small, self.result, morphed)
        contours = self.find_contours(morphed)
        self.draw_contours(self.result, contours)
        limbs = self.find_limbs(contours)
        limbs = self.sort_limbs(limbs)
        self.draw_limbs(self.result, limbs)
        presentation.append((self.result, 'result'))
        self.make_sound(limbs)
    # combine and show the results
    combined = self.combine_images(presentation)
    if face:
        sub_face = self.face_region(face, FACE_BORDER)
        self.draw_face(self.result, face, sub_face)
    cv.ShowImage('Skin Detection', combined)
    if STORE:
        # FIX: was `cv.WriteFrame(self.writer, self.combined)` —
        # `self.combined` is never assigned anywhere in this method
        # (AttributeError when STORE is on); write the local frame.
        cv.WriteFrame(self.writer, combined)
# Fragment of a larger routine: builds a hue/sat histogram from the face
# rectangle, back-projects it over the whole frame, then thresholds and
# morphologically closes the resulting probability map.
# (frameH/frameS/faceSubRect/hist/frameBP/... are defined upstream.)
cv.SetImageROI(frameH, faceSubRect)
cv.SetImageROI(frameS, faceSubRect)
# Histogram of hue/sat restricted to the face ROI
cv.CalcArrHist([frameH, frameS], hist, 0)
# turn this on or off:
#cv.NormalizeHist(hist, 1)
cv.NormalizeHist(hist, 5000)
cv.ResetImageROI(frameH)
cv.ResetImageROI(frameS)
#convert histogram to image
histImg = cv.GetMat(hist.bins, True)
#make backprojection
cv.CalcArrBackProject([frameH, frameS], frameBP, hist)
# norm_type 32 — presumably cv.CV_MINMAX (scale to 0..255); confirm
cv.Normalize(frameBP, frameBP, 0, 255, 32)
cv.Smooth( frameBP, frameBlur, param1=31);
cv.Threshold(frameBlur, frameTh, 30, 255, cv.CV_THRESH_BINARY);
cv.Threshold(frameBP, frameThNoBlur, 30, 255, cv.CV_THRESH_BINARY);
cv.AdaptiveThreshold(frameBlur, frameAdapTh, 255, blockSize=101, adaptive_method=cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C)
# do morhphological close operation
dia=15
# NOTE: integer division intended here (Python 2) — anchor at kernel centre
center=(dia/2)+1
element = cv.CreateStructuringElementEx(dia, dia, center, center, cv.CV_SHAPE_ELLIPSE)
cv.MorphologyEx(frameTh, frameClosed, temp, element, cv.CV_MOP_CLOSE)
#cv.Dilate(frameTh, frameClosed, element, 1)