def detectLeftEye(self, originalImage, cascade2, pt1, centerX, centerY):
    leftEyeArea = cv.GetSubRect(originalImage,
                                (pt1[0], pt1[1], centerX - pt1[0], centerY - pt1[1]))
    leftEye = cv.HaarDetectObjects(leftEyeArea, cascade2, cv.CreateMemStorage(0),
                                   haar_scale, min_neighbors, haar_flags, min_size)

    # in case of multiple detections, keep the maximum box
    minArea = 0
    pt3 = (0, 0)
    pt4 = (0, 0)
    if leftEye:
        for ((x, y, w, h), n) in leftEye:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each detection and convert it to two CvPoints
            if (w * h > minArea):
                minArea = w * h
                pt3 = (x, y)
                pt4 = (x + w, y + h)
    if (minArea > 0):
        cv.Rectangle(originalImage,
                     (pt1[0] + pt3[0], pt1[1] + pt3[1]),
                     (pt1[0] + pt4[0], pt1[1] + pt4[1]),
                     cv.RGB(255, 255, 0))
        pointX = pt1[0] + pt3[0] + 10
        pointY = pt1[1] + pt3[1] + 10
        distanceX = pt4[0] - pt3[0] - 10
        distanceY = pt4[1] - pt3[1] - 10
        eyePart = cv.GetSubRect(originalImage, (pointX, pointY, distanceX, distanceY))
def __init__(self, internalResolutionX, internalResolutionY, numCameras, configHolder):
    self._internalResolutionX = internalResolutionX
    self._internalResolutionY = internalResolutionY
    self._videoDir = configHolder.getVideoDir()
    self._selectedCameraId = 0
    self._currentNumCameras = numCameras

    self._miniSizeX = self._internalResolutionX / 5
    self._miniSizeY = self._internalResolutionY / 5
    self._numMiniRows = int(self._internalResolutionY / self._miniSizeY)
    self._numMiniColumns = 1 + int(numCameras / self._numMiniRows)
    self._maxImages = self._numMiniColumns * self._numMiniRows
    self._miniAreaWidth = self._numMiniColumns * self._miniSizeX
    self._bigAreaWidth = self._internalResolutionX - self._miniAreaWidth
    self._bigAreaHeight = int((float(self._bigAreaWidth) / self._internalResolutionX)
                              * self._internalResolutionY)
    self._bigAreaTop = int((self._internalResolutionY - self._bigAreaHeight) / 2)

    self._mixMat = createMat(self._internalResolutionX, self._internalResolutionY)
    self._convertedMat = createMat(self._internalResolutionX, self._internalResolutionY)
    cv.SetZero(self._mixMat)
    self._bigRegion = cv.GetSubRect(self._mixMat,
                                    (0, self._bigAreaTop, self._bigAreaWidth, self._bigAreaHeight))

    self._smallImageAreaList = []
    self._cameraBaseFileNameList = []
    for i in range(self._maxImages):
        columnId = int(i / self._numMiniRows)
        xpos = self._bigAreaWidth + (columnId * self._miniSizeX)
        ypos = int(i % self._numMiniRows) * self._miniSizeY
        smallRegion = cv.GetSubRect(self._mixMat, (xpos, ypos, self._miniSizeX, self._miniSizeY))
        self._smallImageAreaList.append(smallRegion)
        self._cameraBaseFileNameList.append("cam" + str(i) + "_")
    self._debugCounter = 0
def run(self):
    hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
    backproject_mode = True
    while True:
        frame = cv.QueryFrame(self.capture)

        # Convert to HSV and keep the hue
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.Split(hsv, self.hue, None, None, None)

        # Compute back projection
        backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.CalcArrBackProject([self.hue], backproject, hist)

        # Run the cam-shift (if a window is set and != 0)
        if self.track_window and is_rect_nonzero(self.track_window):
            crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
            (iters, (area, value, rect), track_box) = cv.CamShift(
                backproject, self.track_window, crit)  # Call the camshift !!
            self.track_window = rect  # Put the current rectangle as the tracked area

        # If mouse is pressed, highlight the current selected rectangle
        # and recompute histogram
        if self.drag_start and is_rect_nonzero(self.selection):
            sub = cv.GetSubRect(frame, self.selection)  # Get specified area

            # Make the effect of background shadow when selecting a window
            save = cv.CloneMat(sub)
            cv.ConvertScale(frame, frame, 0.5)
            cv.Copy(save, sub)

            # Draw temporary rectangle
            x, y, w, h = self.selection
            cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))

            # Take the same area but in hue image to calculate histogram
            sel = cv.GetSubRect(self.hue, self.selection)
            cv.CalcArrHist([sel], hist, 0)

            # Used to rescale the histogram with the max value (to draw it later on)
            (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
            if max_val != 0:
                cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
        elif self.track_window and is_rect_nonzero(self.track_window):
            # If a window is set, draw an ellipse box
            cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3, cv.CV_AA, 0)

        cv.ShowImage("CamShiftDemo", frame)
        cv.ShowImage("Backprojection", backproject)
        cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))

        c = cv.WaitKey(7) % 0x100
        if c == 27:
            break
def update_mhi(img, dst, diff_threshold):
    global last
    global mhi
    global storage
    global mask
    global orient
    global segmask
    timestamp = time.clock() / CLOCKS_PER_SEC  # get current time in seconds
    size = cv.GetSize(img)  # get current frame size
    idx1 = last
    if not mhi or cv.GetSize(mhi) != size:
        for i in range(N):
            buf[i] = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
            cv.Zero(buf[i])
        mhi = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        cv.Zero(mhi)  # clear MHI at the beginning
        orient = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        segmask = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        mask = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)

    cv.CvtColor(img, buf[last], cv.CV_BGR2GRAY)  # convert frame to grayscale
    idx2 = (last + 1) % N  # index of (last - (N-1))th frame
    last = idx2
    silh = buf[idx2]
    cv.AbsDiff(buf[idx1], buf[idx2], silh)  # get difference between frames
    cv.Threshold(silh, silh, diff_threshold, 1, cv.CV_THRESH_BINARY)  # and threshold it
    cv.UpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION)  # update MHI
    cv.CvtScale(mhi, mask, 255. / MHI_DURATION,
                (MHI_DURATION - timestamp) * 255. / MHI_DURATION)
    cv.Zero(dst)
    cv.Merge(mask, None, None, None, dst)
    cv.CalcMotionGradient(mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3)
    if not storage:
        storage = cv.CreateMemStorage(0)
    seq = cv.SegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA)
    for (area, value, comp_rect) in seq:
        if comp_rect[2] + comp_rect[3] > 100:  # reject very small components
            color = cv.CV_RGB(255, 0, 0)
            silh_roi = cv.GetSubRect(silh, comp_rect)
            mhi_roi = cv.GetSubRect(mhi, comp_rect)
            orient_roi = cv.GetSubRect(orient, comp_rect)
            mask_roi = cv.GetSubRect(mask, comp_rect)
            angle = 360 - cv.CalcGlobalOrientation(orient_roi, mask_roi, mhi_roi,
                                                   timestamp, MHI_DURATION)
            # calculate number of points within silhouette ROI
            count = cv.Norm(silh_roi, None, cv.CV_L1, None)
            if count < (comp_rect[2] * comp_rect[3] * 0.05):
                continue
            magnitude = 30.
            center = ((comp_rect[0] + comp_rect[2] / 2), (comp_rect[1] + comp_rect[3] / 2))
            cv.Circle(dst, center, cv.Round(magnitude * 1.2), color, 3, cv.CV_AA, 0)
            cv.Line(dst, center,
                    (cv.Round(center[0] + magnitude * cos(angle * cv.CV_PI / 180)),
                     cv.Round(center[1] - magnitude * sin(angle * cv.CV_PI / 180))),
                    color, 3, cv.CV_AA, 0)
def run(self):
    hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
    backproject_mode = False
    while True:
        frame = self.capture  # cv.QueryFrame( self.capture )

        # Convert to HSV and keep the hue
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.Split(hsv, self.hue, None, None, None)

        # Compute back projection
        backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

        # Run the cam-shift
        cv.CalcArrBackProject([self.hue], backproject, hist)
        # if self.track_window and is_rect_nonzero(self.track_window):
        #     crit = ( cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
        #     (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit)
        #     self.track_window = rect

        # If mouse is pressed, highlight the current selected rectangle
        # and recompute the histogram
        if self.drag_start and is_rect_nonzero(self.selection):
            sub = cv.GetSubRect(frame, self.selection)
            save = cv.CloneMat(sub)
            #cv.ConvertScale(frame, frame, 0.5)
            cv.Copy(save, sub)
            x, y, w, h = self.selection
            cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))

            sel = cv.GetSubRect(self.hue, self.selection)
            cv.CalcArrHist([sel], hist, 0)
            (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
            if max_val != 0:
                cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
        # elif self.track_window and is_rect_nonzero(self.track_window):
        #     cv.EllipseBox( frame, track_box, cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0 )

        if not backproject_mode:
            cv.ShowImage("SelectROI", frame)
        else:
            cv.ShowImage("SelectROI", backproject)
        cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))

        c = cv.WaitKey(7) % 0x100
        if c == 27:
            f = open('newtree.yaml', "w")
            yaml.dump(self.selection, f)
            f.close()
            break
        elif c == ord("b"):
            backproject_mode = not backproject_mode
def detect_and_draw(self, imgmsg):
    if self.pause:
        return
    # frame = cv.QueryFrame( self.capture )
    frame = self.br.imgmsg_to_cv(imgmsg, "bgr8")

    # Convert to HSV and keep the hue
    hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
    self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
    cv.Split(hsv, self.hue, None, None, None)

    # Compute back projection
    backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

    # Run the cam-shift
    cv.CalcArrBackProject([self.hue], backproject, self.hist)
    if self.track_window and is_rect_nonzero(self.track_window):
        crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
        (iters, (area, value, rect), track_box) = cv.CamShift(
            backproject, self.track_window, crit)
        self.track_window = rect
        x, y, w, h = rect
        self.bbpub.publish(RegionOfInterest(x, y, w, h, False))
        proba_msg = self.br.cv_to_imgmsg(backproject)
        proba_msg.header = imgmsg.header
        self.bppub.publish(proba_msg)

    # If mouse is pressed, highlight the current selected rectangle
    # and recompute the histogram
    if self.drag_start and is_rect_nonzero(self.selection):
        sub = cv.GetSubRect(frame, self.selection)
        save = cv.CloneMat(sub)
        cv.ConvertScale(frame, frame, 0.5)
        cv.Copy(save, sub)
        x, y, w, h = self.selection
        cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))

        sel = cv.GetSubRect(self.hue, self.selection)
        cv.CalcArrHist([sel], self.hist, 0)
        (_, max_val, _, _) = cv.GetMinMaxHistValue(self.hist)
        if max_val != 0:
            cv.ConvertScale(self.hist.bins, self.hist.bins, 255. / max_val)
    elif self.track_window and is_rect_nonzero(self.track_window):
        cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3, cv.CV_AA, 0)

    self.frame = frame
    self.backproject = backproject
def scanner_procces(self, frame, set_zbar):
    # declare the global at the top so the module-level variable is updated
    global codigo
    codigo = 0
    set_width = 100.0 / 100
    set_height = 90.0 / 100
    coord_x = int(frame.width * (1 - set_width) / 2)
    coord_y = int(frame.height * (1 - set_height) / 2)
    width = int(frame.width * set_width)
    height = int(frame.height * set_height)

    get_sub = cv.GetSubRect(frame, (coord_x + 1, coord_y + 1, width - 1, height - 1))
    cv.Rectangle(frame, (coord_x, coord_y),
                 (coord_x + width, coord_y + height), (255, 0, 0))

    cm_im = cv.CreateImage((get_sub.width, get_sub.height), cv.IPL_DEPTH_8U, 1)
    cv.ConvertImage(get_sub, cm_im)
    image = zbar.Image(cm_im.width, cm_im.height, 'Y800', cm_im.tostring())

    set_zbar.scan(image)
    for symbol in image:
        print '\033[1;32mResult : %s symbol "%s" \033[1;m' % (symbol.type, symbol.data)
        codigo = symbol.data

    cv.ShowImage("webcame", frame)
    #cv.ShowImage("webcame2", get_sub)
    cv.WaitKey(500)
    return codigo
def eyeRemove(self, region):
    """ Crops an eye from the facePhoto and returns it as a separate photo

    This method takes in a region which is interpreted as a region
    representing an eye and crops the eye out. It then returns the
    cropped photo.

    Args:
        region region - a region representing the eye

    Return:
        cv2.cv.cvmat eyePhoto - a photo of just the eye
    """
    # really takes in four points per region
    crop = (region[0], region[1], region[2] - region[0], region[3] - region[1])
    if DEBUG:
        print "Region passed to eye remove: " + str(region)
        print "And here's crop: " + str(crop)
        print "Before crop we have type: " + str(type(self.facePhoto))
        print self.facePhoto
        cv.ShowImage("We're cropping", self.facePhoto)
        cv.WaitKey(0)
        cv.DestroyWindow("We're cropping")
    eye = cv.GetSubRect(self.facePhoto, crop)
    #eye = face.crop(region)
    if DEBUG:
        print "After crop we have type: " + str(type(eye))
        cv.ShowImage("Cropped", eye)
        cv.WaitKey(0)
        cv.DestroyWindow("Cropped")
    return eye
def resize_crop_img(img, x, y, w, h):
    dest_size = 128
    img_o = cv.GetSubRect(img, (x, y, w, h))
    img_r = cv.CreateImage((dest_size, dest_size), 8, 1)
    cv.Resize(img_o, img_r)
    return img_r
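# Illustrative usage sketch for resize_crop_img (not part of the original snippet).
# The file name and crop rectangle are hypothetical, and a grayscale input is assumed,
# since the destination created with cv.CreateImage(..., 8, 1) has a single channel.
if __name__ == "__main__":
    import cv2.cv as cv
    src = cv.LoadImage("face.jpg", cv.CV_LOAD_IMAGE_GRAYSCALE)  # hypothetical input file
    thumb = resize_crop_img(src, 40, 60, 200, 200)  # crop a 200x200 region, resize to 128x128
    cv.SaveImage("face_128.png", thumb)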
def detectRightEye(self, img, rightEyeArea, centerX, centerY, pt1, cascade2):
    rightEye = cv.HaarDetectObjects(rightEyeArea, cascade2, cv.CreateMemStorage(0),
                                    haar_scale, min_neighbors, haar_flags, min_size)

    # in case of multiple detections, keep the maximum box
    minArea = 0
    pt3 = (0, 0)
    pt4 = (0, 0)
    if rightEye:
        for ((x, y, w, h), n) in rightEye:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each detection and convert it to two CvPoints
            if (w * h > minArea):
                minArea = w * h
                pt3 = (x, y)
                pt4 = (x + w, y + h)
            # pt3 = (int(x * image_scale), int(y * image_scale))
            # pt4 = (int((x + w) * image_scale), int((y + h) * image_scale))
            # print "point 3 " + str(pt3)
            # print "point 4 " + str(pt4)
            #
            # cv.Rectangle(img, (centerX + pt3[0], pt1[1] + pt3[1]), (centerX + pt4[0], pt1[1] + pt4[1]), cv.RGB(0, 255, 255))
            # cv.Rectangle(img, pt3, pt4, cv.RGB(0, 0, 255))
    if (minArea > 0):
        cv.Rectangle(img, (centerX + pt3[0], pt1[1] + pt3[1]),
                     (centerX + pt4[0], pt1[1] + pt4[1]), cv.RGB(0, 255, 255))
        pointX = centerX + pt3[0] + 10
        pointY = pt1[1] + pt3[1] + 10
        distanceX = pt4[0] - pt3[0] - 10
        distanceY = pt4[1] - pt3[1] - 10
        eyePart = cv.GetSubRect(img, (pointX, pointY, distanceX, distanceY))
class FaceDetect:
    def __init__(self, DEBUG=False):
        # Constructor for the video capture, 0 because there is only one webcam connected;
        # instead of 0, a video file could be passed as parameter
        self.vidCap = cv2.VideoCapture(0)
        #self.vidCap = cv2.VideoCapture(VIDEO_FILE)
        self.DEBUG = DEBUG
        print 'FaceDetect : DEBUG set to', self.DEBUG

    def getFrame(self):
        _, frame = self.vidCap.read()  # tuple contains a return value and the image
        return frame

    def detectFaces(self, img):
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # image to grayscale
        img = cv2.equalizeHist(img)  # image equalized
        # cascade classifier for object detection
        # HAAR_FRONTAL_FACE is the path to the file from which the classifier is loaded
        casClassif = cv2.CascadeClassifier(HAAR_FRONTAL_FACE)
        rectangles = casClassif.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4,
                                                 minSize=(30, 30),
                                                 flags=cv.CV_HAAR_SCALE_IMAGE)
        if len(rectangles) == 0:
            return
        # convert each (x, y, w, h) rectangle to (x1, y1, x2, y2) corner coordinates
        if len(rectangles) > 1:
            for i in range(0, len(rectangles)):
                re = rectangles[i]
                if self.DEBUG:
                    print re
                re[2:] += re[:2]
                rectangles[i] = re
        else:
            rectangles[:, 2:] += rectangles[:, :2]
        return rectangles

    def cropRectangle(self, (x1, y1), (x2, y2), original):
        width = x2 - x1
        height = y2 - y1
        size = (width, height)
        if self.DEBUG:
            print 'W:', width, 'H:', height
        cropped = cv.CreateImage((size), 8, 3)  # CreateImage( CvSize size, int depth, int channels )
        src_region = cv.GetSubRect(cv.fromarray(original),
                                   (x1, y1, width, height))  # GetSubRect( img, (pos_left, pos_top, width, height) )
        cv.Copy(src_region, cropped)
        cropped = np.asarray(cropped[:, :])
        return cropped
def test():
    #start = time.time()
    src1 = cv.LoadImage("all.png", 0)
    src2 = cv.LoadImage("dark3.png", 0)

    # crop area
    w = 100
    pt1 = (535, 60)
    pt2 = (pt1[0] + w, pt1[1] + w)
    print pt1
    print pt2

    # get img size and compare
    # convert cvMat to IplImage
    crop1 = src1[pt1[1]:pt2[1], pt1[0]:pt2[0]]
    crop2 = src2[pt1[1]:pt2[1], pt1[0]:pt2[0]]
    crop3 = cv.GetImage(cv.GetSubRect(src1, (10, 10, 100, 100)))
    crop4 = cv.GetImage(cv.GetSubRect(src2, (10, 10, 100, 100)))

    # save image
    cv.SaveImage("c01.jpg", crop1)
    cv.SaveImage("c02.jpg", crop2)
    cv.SaveImage("c03.jpg", crop3)
    cv.SaveImage("c04.jpg", crop4)

    print type(src1)
    print type(crop3)
    #cv2.GetMat

    # compute
    #return 1
    hist1 = compute_histogram(crop3)
    hist2 = compute_histogram(crop4)

    # compare
    sc = cv.CompareHist(hist1, hist2, cv.CV_COMP_CHISQR)
    print sc
def pupilRemove(image, region):
    """ Crops the eye photo to show only the pupil and then returns it.

    Args:
        tuple region - the coordinates of the pupil circle in the form
            (centerX, centerY, radius)

    Return:
        photo - TODO: I'm not sure of the type
    """
    # Converting to (topLeftX, topLeftY, width, length)
    if region[0] - region[2] < 0:
        topLeftX = 0
    else:
        topLeftX = region[0] - region[2]
    if region[1] - region[2] < 0:
        topLeftY = 0
    else:
        topLeftY = region[1] - region[2]
    if region[2] < 0:
        width = 0
    else:
        width = region[2] + region[2]
    if region[2] < 0:
        length = 0
    else:
        length = region[2] + region[2]
    # keep crop as a list so individual entries can still be adjusted below
    crop = [topLeftX, topLeftY, width, length]
    if DEBUG:
        print "Region passed to pupil remove: " + str(region)
        print "And here's crop: " + str(crop)
        print "Before crop we have type: " + str(type(image))
        print image
        cv.ShowImage("We're cropping", image)
        cv.WaitKey(0)
        cv.DestroyWindow("We're cropping")
    if crop[0] < 0:
        crop[0] = 0
    if crop[1] < 0:
        crop[1] = 0
    if crop[2] < 0:
        crop[2] = abs(crop[2])
    else:
        pupil = cv.GetSubRect(image, tuple(crop))
        if DEBUG:
            print "After crop we have type: " + str(type(pupil))
            cv.ShowImage("Cropped", pupil)
            cv.WaitKey(0)
            cv.DestroyWindow("Cropped")
        return pupil
    return None
def DetectRedEyes(image, faceCascade, eyeCascade):
    min_size = (20, 20)
    image_scale = 2
    haar_scale = 1.2
    min_neighbors = 2
    haar_flags = 0

    # Allocate the temporary images
    gray = cv.CreateImage((image.width, image.height), 8, 1)
    smallImage = cv.CreateImage((cv.Round(image.width / image_scale),
                                 cv.Round(image.height / image_scale)), 8, 1)

    # Convert color input image to grayscale
    cv.CvtColor(image, gray, cv.CV_BGR2GRAY)

    # Scale input image for faster processing
    cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

    # Equalize the histogram
    cv.EqualizeHist(smallImage, smallImage)

    # Detect the faces
    faces = cv.HaarDetectObjects(smallImage, faceCascade, cv.CreateMemStorage(0),
                                 haar_scale, min_neighbors, haar_flags, min_size)

    # If faces are found
    if faces:
        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
            face_region = cv.GetSubRect(image, (x, int(y + (h / 4)), w, int(h / 2)))

            cv.SetImageROI(image, (pt1[0], pt1[1],
                                   pt2[0] - pt1[0], int((pt2[1] - pt1[1]) * 0.7)))
            eyes = cv.HaarDetectObjects(image, eyeCascade, cv.CreateMemStorage(0),
                                        haar_scale, min_neighbors, haar_flags, (15, 15))
            if eyes:
                # For each eye found
                for eye in eyes:
                    # Draw a rectangle around the eye
                    cv.Rectangle(image,
                                 (eye[0][0], eye[0][1]),
                                 (eye[0][0] + eye[0][2], eye[0][1] + eye[0][3]),
                                 cv.RGB(255, 0, 0), 1, 8, 0)
            cv.ResetImageROI(image)
    return image
def extractEyeBrows(originalImage, pt1, centerX, centerY, eyeBallParams):
    (eyeBallCenterX, eyeBallCenterY, eyeBallRadius) = eyeBallParams

    # find good features
    # eig_image = cv.CreateMat(gray_im.rows, gray_im.cols, cv.CV_32FC1)
    # temp_image = cv.CreateMat(gray_im.rows, gray_im.cols, cv.CV_32FC1)
    # for (x,y) in cv.GoodFeaturesToTrack(gray_im, eig_image, temp_image, 10, 0.04, 1.0, useHarris = True):
    #     print "good feature at", x,y
    #     cv.Rectangle(img, (int(x), int(y)),(int(x) + 20, int(y) + 20), cv.RGB(255, 255, 255))

    # find color of the skin
    # prepare histogram
    eyebrow_Area = cv.GetSubRect(
        originalImage,
        (int(pt1[0] * 1.1), int(pt1[1] * 1.2),
         centerX - pt1[0], int((centerY - pt1[1]) * 0.6)))
    eyebrow_Area2 = cv.CloneMat(eyebrow_Area)
    cv.Smooth(eyebrow_Area2, eyebrow_Area2, cv.CV_GAUSSIAN, 9, 1)

    hsv_image = cv.CreateMat(eyebrow_Area.height, eyebrow_Area.width, cv.CV_8UC3)
    imageArray = np.asarray(eyebrow_Area2, dtype=np.uint8)
    hsv_image = cv2.cvtColor(imageArray, cv2.COLOR_BGR2HSV)

    # histogram2 = hs_histogram(leftEyeArea)
    # print(histogram2)
    # imageArray2 = np.asarray(histogram2, dtype=np.uint8)
    # cv2.imshow("histo " , histogram2)
    #
    # dark = imageArray[...,2] < 32
    # set not frequent to dark
    # imageArray[dark] = 0
    # histogram = cv.CreateHist(2, cv.CV_HIST_ARRAY)

    histogram = cv2.calcHist([hsv_image], [0, 1], None, [180, 256], [0, 180, 0, 256])
    h1 = np.clip(histogram * 0.005 * hist_scale, 0, 1)
    vis = hsv_map * h1[:, :, np.newaxis] / 255.0
    #print type(vis)
    #cv2.imshow('hist', vis)

    #backproj = None
    #cv.CalcBackProject(hsv_image, backproj, histogram)
    ranges = [0, 180, 0, 256]
    backproj = cv2.calcBackProject([hsv_image], [0, 1], histogram, ranges, 10)
    cv2.imshow("back proj ", backproj)
def get_predator_distance(self, bb, depth):
    self.logger.debug("Bounding Box: " + str(bb))
    # clamp the bounding box to the sensor resolution
    if bb[0] < 0:
        bb[0] = 0
    if bb[2] >= self.res['width']:
        bb[2] = self.res['width'] - 1
    if bb[1] < 0:
        bb[1] = 0
    if bb[3] >= self.res['height']:
        bb[3] = self.res['height'] - 1
    dist_rect = cv.CreateImage((bb[2] - bb[0], bb[3] - bb[1]), cv.IPL_DEPTH_8U, 1)
    dist_rect = cv.GetSubRect(depth, (bb[0], bb[1], bb[2] - bb[0], bb[3] - bb[1]))
    return cv.Avg(dist_rect)[0]
def findImageEx(self, source, x, y, width, height):
    hdc = win32gui.GetWindowDC(self.hwnd)
    dc_obj = win32ui.CreateDCFromHandle(hdc)
    memorydc = dc_obj.CreateCompatibleDC()
    data_bitmap = win32ui.CreateBitmap()
    data_bitmap.CreateCompatibleBitmap(dc_obj, self.width, self.height)
    memorydc.SelectObject(data_bitmap)
    memorydc.BitBlt((0, 0), (self.width, self.height), dc_obj,
                    (self.dx, self.dy), win32con.SRCCOPY)

    bmpheader = struct.pack("LHHHH", struct.calcsize("LHHHH"),
                            self.width, self.height, 1, 24)
    c_bmpheader = ctypes.create_string_buffer(bmpheader)

    # padded_length = (string_length + 3) & -3 for 4-byte aligned.
    c_bits = ctypes.create_string_buffer(" " * (self.width * ((self.height * 3 + 3) & -3)))

    res = ctypes.windll.gdi32.GetDIBits(memorydc.GetSafeHdc(), data_bitmap.GetHandle(),
                                        0, self.height, c_bits, c_bmpheader,
                                        win32con.DIB_RGB_COLORS)

    win32gui.DeleteDC(hdc)
    win32gui.ReleaseDC(self.hwnd, hdc)
    memorydc.DeleteDC()
    win32gui.DeleteObject(data_bitmap.GetHandle())

    cv_im = cv.CreateImageHeader((self.width, self.height), cv.IPL_DEPTH_8U, 3)
    cv.SetData(cv_im, c_bits.raw)
    # flip around x-axis
    cv.Flip(cv_im, None, 0)

    im_region = cv.GetSubRect(cv_im, (x, y, width, height))
    #cv.SaveImage('aaak.bmp', im_region)

    template_source = cv.LoadImage(source)

    # From the manual of MatchTemplate
    result_width = im_region.width - template_source.width + 1
    result_height = im_region.height - template_source.height + 1
    result = cv.CreateImage((result_width, result_height), 32, 1)
    cv.MatchTemplate(im_region, template_source, result, cv2.TM_CCOEFF_NORMED)

    minVal, maxVal, minLoc, maxLoc = cv.MinMaxLoc(result)
    #print minVal, maxVal, minLoc, maxLoc
    minLoc2 = minLoc[0] + x, minLoc[1] + y
    maxLoc2 = maxLoc[0] + x, maxLoc[1] + y
    return minVal, maxVal, minLoc2, maxLoc2
def set_alpha(self, a):
    """
    Set the alpha value for the calibrated camera solution. The alpha
    value is a zoom, and ranges from 0 (zoomed in, all pixels in
    calibrated image are valid) to 1 (zoomed out, all pixels in
    original image are in calibrated image).
    """
    # NOTE: Prior to Electric, this code was broken such that we never actually saved the new
    # camera matrix. In effect, this enforced P = [K|0] for monocular cameras.
    # TODO: Verify that OpenCV #1199 gets applied (improved GetOptimalNewCameraMatrix)
    ncm = cv.GetSubRect(self.P, (0, 0, 3, 3))
    cv.GetOptimalNewCameraMatrix(self.intrinsics, self.distortion, self.size, a, ncm)
    cv.InitUndistortRectifyMap(self.intrinsics, self.distortion, self.R, ncm,
                               self.mapx, self.mapy)
def cvShiftDFT(src_arr, dst_arr):
    size = cv.GetSize(src_arr)
    dst_size = cv.GetSize(dst_arr)

    if dst_size != size:
        raise ValueError("cv.ShiftDFT: Source and Destination arrays must have equal sizes")

    if src_arr is dst_arr:
        tmp = cv.CreateMat(size[1] / 2, size[0] / 2, cv.GetElemType(src_arr))

    cx = size[0] / 2
    cy = size[1] / 2  # image center

    # source quadrants
    q1 = cv.GetSubRect(src_arr, (0, 0, cx, cy))
    q2 = cv.GetSubRect(src_arr, (cx, 0, cx, cy))
    q3 = cv.GetSubRect(src_arr, (cx, cy, cx, cy))
    q4 = cv.GetSubRect(src_arr, (0, cy, cx, cy))
    # destination quadrants, taken from dst_arr so the copies land in the output
    d1 = cv.GetSubRect(dst_arr, (0, 0, cx, cy))
    d2 = cv.GetSubRect(dst_arr, (cx, 0, cx, cy))
    d3 = cv.GetSubRect(dst_arr, (cx, cy, cx, cy))
    d4 = cv.GetSubRect(dst_arr, (0, cy, cx, cy))

    if src_arr is not dst_arr:
        if not cv.CV_ARE_TYPES_EQ(q1, d1):
            raise ValueError("cv.ShiftDFT: Source and Destination arrays must have the same format")
        cv.Copy(q3, d1)
        cv.Copy(q4, d2)
        cv.Copy(q1, d3)
        cv.Copy(q2, d4)
    else:
        # swap quadrants in place, using tmp as scratch space
        cv.Copy(q3, tmp)
        cv.Copy(q1, q3)
        cv.Copy(tmp, q1)
        cv.Copy(q4, tmp)
        cv.Copy(q2, q4)
        cv.Copy(tmp, q2)
def scanner_procces(frame, set_zbar):
    global x
    right = 'correct'
    fail = 'fail'
    set_width = 100.0 / 100.0
    set_height = 100.0 / 100.0
    global count
    coord_x = int(frame.width * (1 - set_width) / 2)
    coord_y = int(frame.height * (1 - set_height) / 2)
    width = int(frame.width * set_width)
    height = int(frame.height * set_height)

    get_sub = cv.GetSubRect(frame, (coord_x + 1, coord_y + 1, width - 1, height - 1))
    cv.Rectangle(frame, (coord_x, coord_y),
                 (coord_x + width, coord_y + height), (255, 0, 0))

    cm_im = cv.CreateImage((get_sub.width, get_sub.height), cv.IPL_DEPTH_8U, 1)
    cv.ConvertImage(get_sub, cm_im)
    image = zbar.Image(cm_im.width, cm_im.height, 'Y800', cm_im.tostring())

    set_zbar.scan(image)
    for symbol in image:
        print '\033[1;32mResult : %s symbol "%s" \033[1;m' % (symbol.type, symbol.data)
        data = symbol.data
        if x != data and len(data) > 0:
            x = data
            if x == "a_medicine":
                pwm.write(0.5)
                print x
            else:
                pwm.write(0.9)
                print x

    cv.ShowImage("webcame", frame)
    #cv.ShowImage("webcame2", get_sub)
    cv.WaitKey(10)
def __init__(self, ns_list):
    print "Creating aggregator for ", ns_list
    self.lock = threading.Lock()

    # image
    w = 640
    h = 480
    #self.image_out = cv.CreateMat(h, w, cv.CV_8UC3)
    self.image_out = numpy.zeros((h, w, 3), numpy.uint8)
    self.pub = rospy.Publisher('aggregated_image', Image)
    self.bridge = CvBridge()

    self.image_captured = get_image(["Successfully captured checkerboard"])
    self.image_optimized = get_image(["Successfully ran optimization"])
    self.image_failed = get_image(["Failed to run optimization"], False)

    # create render windows
    layouts = [(1, 1), (2, 2), (2, 2), (2, 2), (3, 3), (3, 3), (3, 3), (3, 3), (3, 3)]
    layout = layouts[len(ns_list) - 1]
    sub_w = w / layout[0]
    sub_h = h / layout[1]
    self.windows = []
    cvmat = cv.fromarray(self.image_out)
    for j in range(layout[1]):
        for i in range(layout[0]):
            self.windows.append(
                cv.GetSubRect(cvmat, (i * sub_w, j * sub_h, sub_w, sub_h)))

    # create renderers
    self.renderer_list = []
    for ns in ns_list:
        self.renderer_list.append(ImageRenderer(ns))

    # subscribers
    self.capture_time = rospy.Time(0)
    self.calibrate_time = rospy.Time(0)
    self.captured_sub = rospy.Subscriber('robot_measurement', RobotMeasurement, self.captured_cb)
    self.optimized_sub = rospy.Subscriber('camera_calibration', CameraCalibration, self.calibrated_cb)
def mouse_Callback(event, x, y, flags, im):
    global drawingBox, extension, crop, box, cropedFolder
    name = 'crop'
    if event == cv.CV_EVENT_MOUSEMOVE:
        if drawingBox:
            box[2] = x - box[0]
            box[3] = y - box[1]
    elif event == cv.CV_EVENT_LBUTTONDOWN:
        drawingBox = True
        [box[0], box[1], box[2], box[3]] = [x, y, 0, 0]
    elif event == cv.CV_EVENT_LBUTTONUP:
        drawingBox = False
        # normalize the box if it was dragged with negative width/height
        if box[2] < 0:
            box[0] += box[2]
            box[2] *= -1
        if box[3] < 0:
            box[1] += box[3]
            box[3] *= -1
        rect = (box[0], box[1], box[2], box[3])
        roi = cv.GetSubRect(im, rect)
        image = name + str(crop) + extension
        crop += 1
        cv.SaveImage(cropedFolder + '/' + image, roi)
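# Illustrative wiring for the mouse callback above (not part of the original snippet).
# The window name, input file, and output directory are hypothetical; the globals the
# callback relies on are initialized here. cv.SetMouseCallback passes the image as the
# user-data parameter, which matches the callback's final argument.
if __name__ == "__main__":
    import cv2.cv as cv
    drawingBox = False
    box = [0, 0, 0, 0]
    crop = 0
    extension = ".png"
    cropedFolder = "crops"  # hypothetical output directory (assumed to exist)
    im = cv.LoadImage("scene.png")  # hypothetical input image
    cv.NamedWindow("image")
    cv.SetMouseCallback("image", mouse_Callback, im)
    while True:
        cv.ShowImage("image", im)
        if cv.WaitKey(15) == 27:  # Esc quits
            break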
def crop_and_save(img, faces, square_size):
    face_counter = 0
    for ((x, y, w, h), n) in faces:
        # the input to cv.HaarDetectObjects was resized, so scale the
        # bounding box of each face and convert it to two CvPoints
        x_center = int((float(x * image_scale) + float((x + w) * image_scale)) / 2.0)
        y_center = int((float(y * image_scale) + float((y + h) * image_scale)) / 2.0)
        top_left = (int(x_center - square_size / 2), int(y_center - square_size / 2))
        bottom_right = (int(x_center + square_size / 2), int(y_center + square_size / 2))

        imgWidth, imgHeight = cv.GetSize(img)
        croppedX = max(0, top_left[0])
        croppedY = max(0, top_left[1])
        croppedW = min(imgWidth, bottom_right[0] - croppedX)
        croppedH = min(imgHeight, bottom_right[1] - croppedY)

        imgCropped = cv.CreateImage((croppedW, croppedH), img.depth, img.nChannels)
        srcRegion = cv.GetSubRect(img, (croppedX, croppedY, croppedW, croppedH))
        cv.Copy(srcRegion, imgCropped)
        cv.SaveImage("im_crop.jpg", imgCropped)
        face_counter += 1
def run(self):
    copy = cv.CloneImage(self.image)
    while True:
        if self.drag_start and is_rect_nonzero(self.selection):
            copy = cv.CloneImage(self.image)
            sub = cv.GetSubRect(copy, self.selection)  # Get specified area

            # Make the effect of background shadow when selecting a window
            save = cv.CloneMat(sub)
            cv.ConvertScale(copy, copy, 0.5)
            cv.Copy(save, sub)

            # Draw temporary rectangle
            x, y, w, h = self.selection
            cv.Rectangle(copy, (x, y), (x + w, y + h), (255, 255, 255))

        cv.ShowImage("Image", copy)
        c = cv.WaitKey(1)
        if c == 27 or c == 1048603 or c == 10:  # Break if user enters 'Esc'.
            break
def detect(image):
    image_size = cv.GetSize(image)

    # to grayscale
    grayscale = cv.CreateImage(image_size, 8, 1)
    cv.CvtColor(image, grayscale, cv.CV_RGB2GRAY)

    # equalize
    cv.EqualizeHist(grayscale, grayscale)

    # detections
    faces = cv.HaarDetectObjects(grayscale, cascade, cv.CreateMemStorage(), 1.2, 2,
                                 cv.CV_HAAR_DO_CANNY_PRUNING, (15, 15))
    if faces:
        print 'face detected!'
        for faceN, ((x, y, w, h), n) in enumerate(faces):
            y_offset = y - height_offset
            h_expanded = h + expansion
            h_expanded_offset = h_expanded + height_offset
            if y_offset + h_expanded_offset > FRAME_HEIGHT:
                h_expanded_offset = (y_offset + h_expanded_offset) - FRAME_HEIGHT
            if y_offset < 0:
                y_offset = 0

            windowName = "Face %d" % faceN
            sub = cv.GetSubRect(image, (x, y_offset, w, h_expanded_offset))
            grow = cv.CreateMat(h_expanded * 4, w * 4, cv.CV_8UC3)
            cv.Resize(sub, grow)
            print "Showing for ", windowName
            cv.ShowImage(windowName, grow)
            # cv.ShowImage("Face at (%d, %d)" % (x, y), grow)
            cv.Rectangle(image, (x, y - height_offset),
                         (x + w, y + h + expansion), cv.RGB(0, 255, 0), 3, 8, 0)
def detectNose(self, originalImage, cascade2, centerX, centerY):
    widthX = 150
    widthY = 150
    noseArea = cv.GetSubRect(originalImage,
                             (centerX - widthX / 2, centerY - widthY / 2, widthX, widthY))
    nose = cv.HaarDetectObjects(noseArea, cascade2, cv.CreateMemStorage(0),
                                haar_scale, min_neighbors, haar_flags, min_size)

    # in case of multiple detections, keep the maximum box
    minArea = 0
    pt3 = (0, 0)
    pt4 = (0, 0)
    if nose:
        for ((x, y, w, h), n) in nose:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each detection and convert it to two CvPoints
            if (w * h > minArea):
                minArea = w * h
                pt3 = (x, y)
                pt4 = (x + w, y + h)
    if (minArea > 0):
        cv.Rectangle(originalImage,
                     (centerX - widthX / 2 + pt3[0], centerY - widthY / 2 + pt3[1]),
                     (centerX - widthX / 2 + pt4[0], centerY - widthY / 2 + pt4[1]),
                     cv.RGB(255, 0, 255))
def DetectRedEyes(image, faceCascade, smileCascade, eyeCascade):
    min_size = (20, 20)
    image_scale = 2
    haar_scale = 1.1
    min_neighbors = 2
    haar_flags = 0

    # Allocate the temporary images
    gray = cv.CreateImage((image.width, image.height), 8, 1)
    smallImage = cv.CreateImage((cv.Round(image.width / image_scale),
                                 cv.Round(image.height / image_scale)), 8, 1)

    # Convert color input image to grayscale
    cv.CvtColor(image, gray, cv.CV_BGR2GRAY)

    # Scale input image for faster processing
    cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

    # Equalize the histogram
    cv.EqualizeHist(smallImage, smallImage)

    # Detect the faces
    faces = cv.HaarDetectObjects(smallImage, faceCascade, cv.CreateMemStorage(0),
                                 haar_scale, min_neighbors, haar_flags, min_size)
    global norm

    # If faces are found
    if faces:
        #print faces
        ratio = 1.
        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            #print "face"
            if h != 0:
                ratio = h / norm
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            # print pt1
            # print pt2
            #cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 1, 8, 0)
            #cv.PutText(image, "face"+str(h), pt1, font, cv.RGB(255, 0, 0))
            face_region = cv.GetSubRect(image, (x, int(y + (h / 4)), w, int(h / 2)))

            # split face
            #cv.Rectangle(image, (pt1[0],(pt1[1] + (abs(pt1[1]-pt2[1]) / 2 ))), pt2, cv.RGB(0,255,0), 1, 8, 0)
            #cv.PutText(image, "lower", (pt1[0],(pt1[1] + (abs(pt1[1]-pt2[1]) / 2 ))), font, cv.RGB(0, 255, 0))
            cv.SetImageROI(image,
                           (pt1[0],
                            (pt1[1] + int(abs(pt1[1] - pt2[1]) * 0.625)),
                            pt2[0] - pt1[0],
                            int((pt2[1] - (pt1[1] + int(abs(pt1[1] - pt2[1]) * 0.625))))))
            smiles = cv.HaarDetectObjects(image, smileCascade, cv.CreateMemStorage(0),
                                          1.1, 5, 0, (15, 15))
            if smiles:
                #print smiles
                for smile in smiles:
                    cv.Rectangle(image,
                                 (smile[0][0], smile[0][1]),
                                 (smile[0][0] + smile[0][2], smile[0][1] + smile[0][3]),
                                 cv.RGB(0, 0, 255), 1, 8, 0)
                    sizer = (smile[0][2] / ratio + smile[0][3] / ratio)  # +(smile[1]/ratio))
                    #sizer = math.trunc(sizer)
                    #cv.PutText(image, "smile", (smile[0][0],smile[0][1]), font, cv.RGB(0, 0, 255))
                    cv.PutText(image, str(math.trunc(sizer ** 2)),
                               (smile[0][0], smile[0][1] + smile[0][3] + 10),
                               font, cv.RGB(0, 0, 255))
                    #print ((abs(smile[0][1] - smile[0][2]) / abs(pt1[0] - pt2[0])) * 100)
                    global smileneighbour
                    smileneighbour = sizer ** 2 * 2
            cv.ResetImageROI(image)

            #############################################################################
            #############################################################################
            cv.SetImageROI(image, (pt1[0], pt1[1],
                                   int(pt2[0] - pt1[0]), int(pt2[1] - pt1[1])))
            eyes = cv.HaarDetectObjects(image, eyeCascade, cv.CreateMemStorage(0),
                                        haar_scale, 5, haar_flags, (15, 15))
            if eyes:
                # For each eye found
                iii = 0
                #print eyes
                for eye in eyes:
                    # Draw a rectangle around the eye
                    cv.Rectangle(image,
                                 (eye[0][0], eye[0][1]),
                                 (eye[0][0] + eye[0][2], eye[0][1] + eye[0][3]),
                                 cv.RGB(0, 0, 255), 1, 8, 0)
                    a = math.trunc(float(eye[1]) / ratio)
                    cv.PutText(image, str(a), (eye[0][0], eye[0][1] + eye[0][3]),
                               font, cv.RGB(0, 0, 255))
                    global eyetot
                    eyetot += float(eye[1] * eye[1]) / ratio
                    iii += 1
                    if iii == 2:
                        iii = 0
                        break
            cv.ResetImageROI(image)
    cv.ResetImageROI(image)
    return image
def match_template(self, cv_image):
    frame = np.array(cv_image, dtype=np.uint8)

    W, H = frame.shape[1], frame.shape[0]
    w, h = self.template.shape[1], self.template.shape[0]
    width = W - w + 1
    height = H - h + 1

    # Make sure that the template image is smaller than the source
    if W < w or H < h:
        rospy.loginfo("Template image must be smaller than video frame.")
        return False

    if frame.dtype != self.template.dtype:
        rospy.loginfo("Template and video frame must have same depth and number of channels.")
        return False

    # Create copies of the images to modify
    frame_copy = frame.copy()
    template_copy = self.template.copy()

    # Down pyramid the images
    for k in range(self.numDownPyrs):
        # Start with the source image
        W = (W + 1) / 2
        H = (H + 1) / 2
        frame_small = np.array([H, W], dtype=frame.dtype)
        frame_small = cv2.pyrDown(frame_copy)

        # frame_window = "PyrDown " + str(k)
        # cv.NamedWindow(frame_window, cv.CV_NORMAL)
        # cv.ShowImage(frame_window, cv.fromarray(frame_small))
        # cv.ResizeWindow(frame_window, 640, 480)

        # Prepare for next loop, if any
        frame_copy = frame_small.copy()

        # Next, do the target
        w = (w + 1) / 2
        h = (h + 1) / 2
        template_small = np.array([h, w], dtype=self.template.dtype)
        template_small = cv2.pyrDown(template_copy)

        # template_window = "Template PyrDown " + str(k)
        # cv.NamedWindow(template_window, cv.CV_NORMAL)
        # cv.ShowImage(template_window, cv.fromarray(template_small))
        # cv.ResizeWindow(template_window, 640, 480)

        # Prepare for next loop, if any
        template_copy = template_small.copy()

    # Perform the match on the shrunken images
    small_frame_width = frame_copy.shape[1]
    small_frame_height = frame_copy.shape[0]
    small_template_width = template_copy.shape[1]
    small_template_height = template_copy.shape[0]

    result_width = small_frame_width - small_template_width + 1
    result_height = small_frame_height - small_template_height + 1

    result_mat = cv.CreateMat(result_height, result_width, cv.CV_32FC1)
    result = np.array(result_mat, dtype=np.float32)

    cv2.matchTemplate(frame_copy, template_copy, cv.CV_TM_CCOEFF_NORMED, result)
    cv2.imshow("Result", result)

    return (0, 0, 100, 100)

    # # Find the best match location
    # (minValue, maxValue, minLoc, maxLoc) = cv2.minMaxLoc(result)
    #
    # # Transform point back to original image
    # target_location = Point()
    # target_location.x, target_location.y = maxLoc
    #
    # return (target_location.x, target_location.y, w, h)

    # Find the top match locations
    locations = self.MultipleMaxLoc(result, self.numMaxima)

    foundPointsList = list()
    confidencesList = list()

    W, H = frame.shape[1], frame.shape[0]
    w, h = self.template.shape[1], self.template.shape[0]

    # Search the large images at the returned locations
    for currMax in range(self.numMaxima):
        # Transform the point to its corresponding point in the larger image
        #locations[currMax].x *= int(pow(2.0, self.numDownPyrs))
        #locations[currMax].y *= int(pow(2.0, self.numDownPyrs))
        locations[currMax].x += w / 2
        locations[currMax].y += h / 2

        searchPoint = locations[currMax]
        print "Search Point", searchPoint

        # If we are searching for multiple targets and we have found a target or
        # multiple targets, we don't want to search in the same location(s) again
        # if self.findMultipleTargets and len(foundPointsList) != 0:
        #     thisTargetFound = False
        #     numPoints = len(foundPointsList)
        #
        #     for currPoint in range(numPoints):
        #         foundPoint = foundPointsList[currPoint]
        #         if (abs(searchPoint.x - foundPoint.x) <= self.searchExpansion * 2) and (abs(searchPoint.y - foundPoint.y) <= self.searchExpansion * 2):
        #             thisTargetFound = True
        #             break
        #
        #     # If the current target has been found, continue onto the next point
        #     if thisTargetFound:
        #         continue

        # Set the source image's ROI to slightly larger than the target image,
        # centred at the current point
        searchRoi = RegionOfInterest()
        searchRoi.x_offset = searchPoint.x - w / 2 - self.searchExpansion
        searchRoi.y_offset = searchPoint.y - h / 2 - self.searchExpansion
        searchRoi.width = w + self.searchExpansion * 2
        searchRoi.height = h + self.searchExpansion * 2

        #print (searchRoi.x_offset, searchRoi.y_offset, searchRoi.width, searchRoi.height)

        # Make sure ROI doesn't extend outside of image
        if searchRoi.x_offset < 0:
            searchRoi.x_offset = 0
        if searchRoi.y_offset < 0:
            searchRoi.y_offset = 0
        if (searchRoi.x_offset + searchRoi.width) > (W - 1):
            numPixelsOver = (searchRoi.x_offset + searchRoi.width) - (W - 1)
            print "NUM PIXELS OVER", numPixelsOver
            searchRoi.width -= numPixelsOver
        if (searchRoi.y_offset + searchRoi.height) > (H - 1):
            numPixelsOver = (searchRoi.y_offset + searchRoi.height) - (H - 1)
            searchRoi.height -= numPixelsOver

        mask = (searchRoi.x_offset, searchRoi.y_offset, searchRoi.width, searchRoi.height)
        frame_mat = cv.fromarray(frame)
        searchImage = cv.CreateMat(searchRoi.height, searchRoi.width, cv.CV_8UC3)
        searchImage = cv.GetSubRect(frame_mat, mask)
        searchArray = np.array(searchImage, dtype=np.uint8)

        # Perform the search on the large images
        result_width = searchRoi.width - w + 1
        result_height = searchRoi.height - h + 1
        result_mat = cv.CreateMat(result_height, result_width, cv.CV_32FC1)
        result = np.array(result_mat, dtype=np.float32)
        cv2.matchTemplate(searchArray, self.template, cv.CV_TM_CCOEFF_NORMED, result)

        # Find the best match location
        (minValue, maxValue, minLoc, maxLoc) = cv2.minMaxLoc(result)
        maxValue *= 100

        # Transform point back to original image
        target_location = Point()
        target_location.x, target_location.y = maxLoc
        target_location.x += searchRoi.x_offset - w / 2 + self.searchExpansion
        target_location.y += searchRoi.y_offset - h / 2 + self.searchExpansion

        if maxValue >= self.matchPercentage:
            # Add the point to the list
            foundPointsList.append(maxLoc)
            confidencesList.append(maxValue)

            # If we are only looking for a single target, we have found it, so we
            # can return
            if not self.findMultipleTargets:
                break

    if len(foundPointsList) == 0:
        rospy.loginfo("Target was not found to required confidence")

    return (target_location.x, target_location.y, w, h)
def store_proba(self, proba):
    # print "Got Image"
    if not self.info:
        return
    # print "Processing"
    self.timestamp = proba.header.stamp
    I = self.br.imgmsg_to_cv(proba, "8UC1")
    self.proba = cv.CloneMat(I)
    cv.Threshold(I, self.proba, 0xFE, 0xFE, cv.CV_THRESH_TRUNC)
    try:
        # (trans,rot) = self.listener.lookupTransform(proba.header.frame_id, '/world', proba.header.stamp)
        self.listener.waitForTransform(proba.header.frame_id, self.target_frame,
                                       proba.header.stamp, rospy.Duration(1.0))
        trans = numpy.mat(self.listener.asMatrix(self.target_frame, proba.header))
        # print "Transformation"
        # print trans
        dstdir = [trans * v for v in self.dirpts3d]
        # print "Destination dir"
        # print dstdir
        origin = trans * self.origin
        origin = origin / origin[3, 0]
        # origin = numpy.matrix([0.0, 0.0, origin[2,0] / origin[3,0], 1.0]).T
        # print "Origin"
        # print origin

        self.dstpts2d = cv.CreateMat(4, 2, cv.CV_32F)
        for i in range(4):
            self.dstpts2d[i, 0] = self.x_floor + (origin[0, 0] - dstdir[i][0, 0] * origin[2, 0] / dstdir[i][2, 0]) * self.floor_scale
            self.dstpts2d[i, 1] = self.y_floor - (origin[1, 0] - dstdir[i][1, 0] * origin[2, 0] / dstdir[i][2, 0]) * self.floor_scale
        # print numpy.asarray(self.dstpts2d)
        # print "Source points"
        # print numpy.asarray(self.srcpts2d)
        # print "Dest points"
        # print numpy.asarray(self.dstpts2d)

        self.H = cv.CreateMat(3, 3, cv.CV_32F)
        cv.FindHomography(self.srcpts2d, self.dstpts2d, self.H)
        # print "Homography"
        # print numpy.asarray(self.H)

        cv.WarpPerspective(cv.GetSubRect(self.proba,
                                         (0, self.horizon_offset, self.proba.width,
                                          self.proba.height - self.horizon_offset)),
                           self.floor_map, self.H,
                           flags=cv.CV_INTER_NN + cv.CV_WARP_FILL_OUTLIERS, fillval=0xFF)

        msg = self.br.cv_to_imgmsg(self.floor_map)
        msg.header.stamp = proba.header.stamp
        msg.header.frame_id = self.target_frame
        self.pub.publish(msg)
        # print "Publishing image"
    except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
        print "Exception while looking for transform"
        return
def DetectRedEyes(image, faceCascade, smileCascade):
    min_size = (20, 20)
    image_scale = 2
    haar_scale = 1.2
    min_neighbors = 2
    haar_flags = 0

    # Allocate the temporary images
    gray = cv.CreateImage((image.width, image.height), 8, 1)
    smallImage = cv.CreateImage((cv.Round(image.width / image_scale),
                                 cv.Round(image.height / image_scale)), 8, 1)

    # Convert color input image to grayscale
    cv.CvtColor(image, gray, cv.CV_BGR2GRAY)

    # Scale input image for faster processing
    cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

    # Equalize the histogram
    cv.EqualizeHist(smallImage, smallImage)

    # Detect the faces
    faces = cv.HaarDetectObjects(smallImage, faceCascade, cv.CreateMemStorage(0),
                                 haar_scale, min_neighbors, haar_flags, min_size)

    # If faces are found
    if faces:
        #print faces
        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            #print "face"
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            # print pt1
            # print pt2
            #cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 1, 8, 0)
            #cv.PutText(image, "face", pt1, font, cv.RGB(255, 0, 0))
            face_region = cv.GetSubRect(image, (x, int(y + (h / 4)), w, int(h / 2)))

            # split face
            #cv.Rectangle(image, (pt1[0],(pt1[1] + (abs(pt1[1]-pt2[1]) / 2 ))), pt2, cv.RGB(0,255,0), 1, 8, 0)
            #cv.PutText(image, "lower", (pt1[0],(pt1[1] + (abs(pt1[1]-pt2[1]) / 2 ))), font, cv.RGB(0, 255, 0))
            cv.SetImageROI(image,
                           (pt1[0],
                            (pt1[1] + (abs(pt1[1] - pt2[1]) / 2)),
                            pt2[0] - pt1[0],
                            int((pt2[1] - (pt1[1] + (abs(pt1[1] - pt2[1]) / 2))))))
            smiles = cv.HaarDetectObjects(image, smileCascade, cv.CreateMemStorage(0),
                                          1.1, 5, 0, (15, 15))
            if smiles:
                #print smiles
                for smile in smiles:
                    cv.Rectangle(image,
                                 (smile[0][0], smile[0][1]),
                                 (smile[0][0] + smile[0][2], smile[0][1] + smile[0][3]),
                                 cv.RGB(0, 0, 255), 1, 8, 0)
                    cv.PutText(image, "smile", (smile[0][0], smile[0][1]),
                               font, cv.RGB(0, 0, 255))
                    cv.PutText(image, str(smile[1]),
                               (smile[0][0], smile[0][1] + smile[0][3]),
                               font, cv.RGB(0, 0, 255))
                    #print ((abs(smile[0][1] - smile[0][2]) / abs(pt1[0] - pt2[0])) * 100)
                    global smileness
                    smileness = smile[1]
            cv.ResetImageROI(image)
            #if smile[1] > 90:
            #    mqttc.publish("smiles", "got smile", 1)
            #    time.sleep(5)

            #eyes = cv.HaarDetectObjects(image, eyeCascade,
            #                            cv.CreateMemStorage(0),
            #                            haar_scale, min_neighbors,
            #                            haar_flags, (15,15))
            #if eyes:
            #    # For each eye found
            #    #print eyes
            #    for eye in eyes:
            #        # Draw a rectangle around the eye
            #        cv.Rectangle(image,
            #                     (eye[0][0], eye[0][1]),
            #                     (eye[0][0] + eye[0][2], eye[0][1] + eye[0][3]),
            #                     cv.RGB(255, 0, 0), 1, 8, 0)
    cv.ResetImageROI(image)
    return image