def getIris(frame):
    """Isolate the iris disc (centred on the module-global `centroid`) in `frame`.

    Builds a smoothed Canny edge map, asks getCircles() for candidate iris
    circles and, for the first candidate, masks out everything outside the
    circle and returns a square crop around `centroid`.  Returns a plain
    clone of `frame` when no circle is found.

    Side effect: stores the detected circle radius in the global `radius`.
    NOTE(review): `centroid` is a module-level global set elsewhere — confirm
    it is updated before this function is called.

    Fix vs original: removed the dead locals `iris` (a list that was appended
    to but never read) and `storage` (a CreateMat that was never used).
    """
    copyImg = cv.CloneImage(frame)
    resImg = cv.CloneImage(frame)
    grayImg = cv.CreateImage(cv.GetSize(frame), 8, 1)
    mask = cv.CreateImage(cv.GetSize(frame), 8, 1)
    # Edge map, then Gaussian smoothing, feeds the circle detector.
    cv.CvtColor(frame, grayImg, cv.CV_BGR2GRAY)
    cv.Canny(grayImg, grayImg, 5, 70, 3)
    cv.Smooth(grayImg, grayImg, cv.CV_GAUSSIAN, 7, 7)
    circles = getCircles(grayImg)
    # Only the first detected circle is used: the loop returns on its
    # first iteration (same behavior as the original).
    for circle in circles:
        rad = int(circle[0][2])
        global radius
        radius = rad
        # Filled white disc at the iris position, inverted so the mask
        # selects everything *outside* the iris.
        cv.Circle(mask, centroid, rad, cv.CV_RGB(255, 255, 255), cv.CV_FILLED)
        cv.Not(mask, mask)
        # frame - copyImg is zero everywhere; applied under the inverted mask
        # this blanks the outside region while resImg keeps the iris pixels.
        cv.Sub(frame, copyImg, resImg, mask)
        x = int(centroid[0] - rad)
        y = int(centroid[1] - rad)
        w = int(rad * 2)
        h = w
        # Square crop of side 2*rad around the centroid.
        cv.SetImageROI(resImg, (x, y, w, h))
        cropImg = cv.CreateImage((w, h), 8, 3)
        cv.Copy(resImg, cropImg)
        cv.ResetImageROI(resImg)
        return cropImg
    return resImg
def sample_frame(self, frame):
    """Return the mean green-channel value over the tracked forehead region."""
    forehead = self.face_tracker.get_forehead()
    cv.SetImageROI(frame, forehead)
    green_mean = cv.Avg(frame)[1]  # cv.Avg gives a per-channel scalar; [1] is green (BGR)
    cv.ResetImageROI(frame)
    return green_mean
def get_card(color_capture, corners):
    """Perspective-warp the card quadrilateral `corners` to an upright card image.

    Args:
        color_capture: source image containing the card.
        corners: four (x, y) points, ordered top-left, top-right,
                 bottom-right, bottom-left, matching `target` below.

    Returns:
        The rectified card image (224x311 pixels; the original's target
        corners at x=223, y=310 imply that size — the old code's ROI of
        (0, 0, 223, 310) was off by one).

    Fix vs original: the old body called `cv2.createMat`,
    `cv2.GetPerspectiveTransform`, `cv2.WarpPerspective` and
    `cv2.SetImageROI` — none of which exist in the cv2 module (they are the
    removed legacy 1.x `cv` API), so it raised AttributeError.  Rewritten
    with the real cv2 equivalents.
    """
    import numpy as np  # local import: file-level import block not visible in this chunk
    target = [(0, 0), (223, 0), (223, 310), (0, 310)]
    mat = cv2.getPerspectiveTransform(
        np.asarray(corners, dtype=np.float32),
        np.asarray(target, dtype=np.float32))
    return cv2.warpPerspective(color_capture, mat, (224, 311))
def getAverage(self, distances, segRange):
    """Return the mean pixel value of `distances` over the rectangle `segRange`.

    Args:
        distances: legacy-cv image to sample.
        segRange: (x, y, width, height) ROI rectangle.

    Returns:
        float: mean of the first channel over the ROI.

    Fixes vs original: the ROI is now reset afterwards — the old code left
    `distances` permanently cropped for every later caller — and the unused
    segment-size computation (`pixelsPerSegment`) was removed.
    """
    cv.SetImageROI(distances, segRange)
    try:
        average = cv.Avg(distances)
    finally:
        # Always restore the full image, even if Avg raises.
        cv.ResetImageROI(distances)
    return average[0]
def detectFace(self, cam_img, faceCascade, eyeCascade, mouthCascade):
    """Grab one frame from the `cam_img` capture, detect faces (and eyes
    within each face), draw rectangles around them, and return the
    annotated legacy iplimage.

    Side effect: stores the first face's top-left corner in self.faceX /
    self.faceY (small-image coordinates, i.e. NOT scaled back up).

    NOTE(review): `mouthCascade` is accepted but never used in this code.
    """
    # cam_img should be cv2.cv.iplcam_img
    min_size = (20, 20)       # minimum face size for the detector
    image_scale = 2           # detection runs on a half-size image for speed
    haar_scale = 1.2
    min_neighbors = 2
    haar_flags = 0
    image_width = int(cam_img.get(cv.CV_CAP_PROP_FRAME_WIDTH))
    image_height = int(cam_img.get(cv.CV_CAP_PROP_FRAME_HEIGHT))

    # Allocate the temporary images
    gray = cv.CreateImage((image_width, image_height), 8, 1)  # tuple as the first arg
    smallImage = cv.CreateImage((cv.Round(image_width / image_scale),
                                 cv.Round(image_height / image_scale)), 8, 1)

    (ok, img) = cam_img.read()
    # print 'gray is of ',type(gray) >>> gray is of <type 'cv2.cv.iplimage'>
    # print type(smallImage)         >>> <type 'cv2.cv.iplimage'>
    # print type(image)              >>> <type 'cv2.VideoCapture'>
    # print type(img)                >>> <type 'numpy.ndarray'>

    # Convert the numpy.ndarray frame to a legacy iplimage so the old
    # cv.* drawing/detection functions below can operate on it.
    ipl_img = cv2.cv.CreateImageHeader((img.shape[1], img.shape[0]), cv.IPL_DEPTH_8U, 3)
    cv2.cv.SetData(ipl_img, img.tostring(), img.dtype.itemsize * 3 * img.shape[1])

    # Convert color input image to grayscale
    cv.CvtColor(ipl_img, gray, cv.CV_BGR2GRAY)
    # Scale input image for faster processing
    cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)
    # Equalize the histogram
    cv.EqualizeHist(smallImage, smallImage)

    # Detect the faces.  HaarDetectObjects returns a list of tuples
    # (rect, neighbors), where rect is a CvRect (x, y, width, height)
    # giving the object's extents and neighbors is a neighbour count.
    faces = cv.HaarDetectObjects(smallImage, faceCascade, cv.CreateMemStorage(0),
                                 haar_scale, min_neighbors, haar_flags, min_size)
    # If faces are found
    if faces:
        face = faces[0]
        # NOTE(review): these are small-image coordinates (not multiplied
        # by image_scale) — confirm callers expect that.
        self.faceX = face[0][0]
        self.faceY = face[0][1]
        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            cv.Rectangle(ipl_img, pt1, pt2, cv.RGB(0, 0, 255), 3, 8, 0)
            # face_region = cv.GetSubRect(ipl_img,(x,int(y + (h/4)),w,int(h/2)))
            # Restrict the eye search to the upper 70% of the face box.
            cv.SetImageROI(ipl_img, (pt1[0], pt1[1], pt2[0] - pt1[0],
                                     int((pt2[1] - pt1[1]) * 0.7)))
            eyes = cv.HaarDetectObjects(ipl_img, eyeCascade, cv.CreateMemStorage(0),
                                        haar_scale, min_neighbors, haar_flags, (15, 15))
            if eyes:
                # For each eye found
                for eye in eyes:
                    # Draw a rectangle around the eye (coordinates are
                    # relative to the ROI set just above).
                    cv.Rectangle(ipl_img,                      # image
                                 (eye[0][0],                   # vertex pt1
                                  eye[0][1]),
                                 (eye[0][0] + eye[0][2],       # vertex pt2 opposite to pt1
                                  eye[0][1] + eye[0][3]),
                                 cv.RGB(255, 0, 0), 1, 4, 0)   # color,thickness,lineType(8,4,cv.CV_AA),shift
            # Restore the full image before handling the next face.
            cv.ResetImageROI(ipl_img)
    return ipl_img
    "/usr/local/Cellar/opencv/2.4.11/share/OpenCV/haarcascades/haarcascade_eye.xml"
)
frameN = 0  # frame counter
while (1):
    ret, im = cap.read()  # read one frame per iteration
    if ret:  # process only when a frame was actually read
        frameN = frameN + 1
        # Halve the frame size (Python 2 integer division for the new dims).
        im = cv2.resize(im, (im.shape[1] / 2, im.shape[0] / 2))
        gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)  # convert to grayscale
        gray = cv2.equalizeHist(gray)  # histogram equalization
        # Face search (image, scale factor, min neighbours)
        face = cascade.detectMultiScale(gray, 1.1, 3)
        # NOTE(review): cv2 has no SetImageROI (that is the removed legacy
        # `cv` 1.x API), and `face` is a *list* of rects, not one rect —
        # this line will raise AttributeError.  The intended ROI is a numpy
        # slice of `gray` for one face rectangle; confirm and fix.
        face_ROI = cv2.SetImageROI(gray, face)  # set the face ROI
        eye = cascade_eye.detectMultiScale(face_ROI, 1.1, 3)  # eye search
        # Draw a rectangle around each detected face
        for (x, y, w, h) in face:
            cv2.rectangle(im, (x, y), (x + w, y + h), (0, 50, 255), 3)
            # Collect one data row: video id, frame number, face rect.
            out_list.append([videos] + [frameN] + [x] + [y] + [w] + [h])
        # Real-time display (slow):
        # cv2.imshow('Video Stream', gray)
        out.write(im)  # write the annotated frame to the output video
    else:
        break
# NOTE(review): this chunk depends on names defined outside the visible
# source (`frame`, `face_detector`, `tracker`, `video`) and the tracking
# loop at the bottom appears truncated (`timer` is computed but unused here).
win = dlib.image_window()
image = np.copy(frame)  # cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# dlib detector; the second argument (1) upsamples the image once.
detected_faces = face_detector(image, 1)
# print("Total number of faces detected: {}".format(len(detected_faces)))
for d in detected_faces:
    # Outline of the detected face box (skimage uses row/col order).
    rr, cc = polygon_perimeter([d.top(), d.top(), d.bottom(), d.bottom()],
                               [d.right(), d.left(), d.left(), d.right()])
    # Only draw when the perimeter fits inside the image bounds.
    if max(rr) < image.shape[0] and max(cc) < image.shape[1]:
        image[rr, cc] = (255, 0, 0)
    # cv2.imshow('Detected_frame', image)
    # Define an initial bounding box
    bbox = (d.left(), d.top(), d.right(), d.bottom())
    # Uncomment the line below to select a different bounding box
    # bbox = cv2.selectROI(image)
    # NOTE(review): cv2 has no SetImageROI (removed legacy `cv` API) — this
    # call will raise AttributeError; tracker.init below already receives
    # the bounding box, so this line looks redundant as well as broken.
    cv2.SetImageROI(frame, bbox)
    # frame = image
    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)
while True:
    # Read a new frame
    ok, frame = video.read()
    if not ok:
        break
    # Start timer
    timer = cv2.getTickCount()
    # Update tracker
    ok, bbox = tracker.update(frame)
# Detect faces in friend1.jpg and save each face region as faceN.jpg.
#
# Fix vs original: the old code did `import cv2 as cv` but then called the
# removed OpenCV 1.x API (cv.LoadImage, cv.CreateImage, cv.HaarDetectObjects,
# cv.SetImageROI, cv.SaveImage, ...), which raises AttributeError on any cv2
# build.  Rewritten with the equivalent modern cv2 calls, same parameters.
import cv2

img = cv2.imread("friend1.jpg")  # load the photo to scan for faces
# The detector works on a grayscale, histogram-equalized image
# (equalization reduces lighting variation and speeds up detection).
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(gray)

# Load the pre-trained frontal-face Haar cascade.
cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')

# Detect faces: scale step 1.2, at least 2 neighbours, minimum size 50x50 —
# the same parameters the original passed to HaarDetectObjects.
faces = cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=2,
                                 minSize=(50, 50))

# Save each detected face region (the original set an ROI and saved; with
# cv2 a numpy slice of the image is the equivalent crop).
for j, (x, y, w, h) in enumerate(faces, start=1):
    cv2.imwrite("face" + str(j) + ".jpg", img[y:y + h, x:x + w])