def DetectRedEyes(image, faceCascade, eyeCascade):
    min_size = (20, 20)
    image_scale = 2
    haar_scale = 1.2
    min_neighbors = 2
    haar_flags = 0

    # Allocate the temporary images
    gray = cv.CreateImage((image.width, image.height), 8, 1)
    smallImage = cv.CreateImage((cv.Round(image.width / image_scale),
                                 cv.Round(image.height / image_scale)), 8, 1)

    # Convert color input image to grayscale
    cv.CvtColor(image, gray, cv.CV_BGR2GRAY)

    # Scale input image for faster processing
    cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

    # Equalize the histogram
    cv.EqualizeHist(smallImage, smallImage)

    # Detect the faces
    faces = cv.HaarDetectObjects(smallImage, faceCascade, cv.CreateMemStorage(0),
                                 haar_scale, min_neighbors, haar_flags, min_size)

    # If faces are found
    if faces:
        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
            face_region = cv.GetSubRect(image, (x, int(y + (h / 4)), w, int(h / 2)))

            # Search for eyes only in the upper 70% of the face rectangle
            cv.SetImageROI(image, (pt1[0], pt1[1],
                                   pt2[0] - pt1[0], int((pt2[1] - pt1[1]) * 0.7)))
            eyes = cv.HaarDetectObjects(image, eyeCascade, cv.CreateMemStorage(0),
                                        haar_scale, min_neighbors, haar_flags, (15, 15))
            if eyes:
                # For each eye found
                for eye in eyes:
                    # Draw a rectangle around the eye
                    cv.Rectangle(image,
                                 (eye[0][0], eye[0][1]),
                                 (eye[0][0] + eye[0][2], eye[0][1] + eye[0][3]),
                                 cv.RGB(255, 0, 0), 1, 8, 0)
            cv.ResetImageROI(image)

    return image
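# Hedged usage sketch (not from the original source): one way DetectRedEyes() might
# be driven with the legacy cv API. The cascade file names and the input image path
# are assumptions, not taken from the snippet above.
import sys
import cv

if __name__ == "__main__":
    faceCascade = cv.Load("haarcascade_frontalface_default.xml")  # assumed cascade file
    eyeCascade = cv.Load("haarcascade_eye.xml")                   # assumed cascade file
    frame = cv.LoadImage(sys.argv[1])                             # hypothetical input image
    annotated = DetectRedEyes(frame, faceCascade, eyeCascade)
    cv.ShowImage("faces and eyes", annotated)
    cv.WaitKey(0)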
def ifFace(img, size):
    gray = cv.CreateImage(size, 8, 1)
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
    newMem = cv.CreateMemStorage(0)
    cv.EqualizeHist(gray, gray)
    face = cv.HaarDetectObjects(gray, c_f, newMem, 1.2, 3,
                                cv.CV_HAAR_DO_CANNY_PRUNING, (50, 50))
    mouth = cv.HaarDetectObjects(gray, c_m, newMem, 1.2, 2,
                                 cv.CV_HAAR_DO_CANNY_PRUNING, (0, 0))
    if face and mouth:
        print "face detected"  # original message: "有脸"
        cv.SaveImage("img/out.jpg", img)
        sys.exit(0)
def detect_and_draw(img, front_cascade, profile_cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    if front_cascade:
        # Test for frontal face
        faces = cv.HaarDetectObjects(small_img, front_cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        if faces:
            # we've detected a face
            return [faces, FRONTAL]

        # Test for profile face
        faces = cv.HaarDetectObjects(small_img, profile_cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        if faces:
            # we've detected a face
            return [faces, PROFILE]

    #t = cv.GetTickCount() - t
    #print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))
    #if faces:
        #for ((x, y, w, h), n) in faces:
            ## the input to cv.HaarDetectObjects was resized, so scale the
            ## bounding box of each face and convert it to two CvPoints
            #pt1 = (int(x * image_scale), int(y * image_scale))
            #pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            #imgWidth, imgHeight = cv.GetSize(img)
            #croppedX = max(0, x*image_scale-w*image_scale/2)
            #croppedY = max(0, y*image_scale-h*image_scale/2)
            #croppedW = min(imgWidth, (2*w)*image_scale)
            #croppedH = min(imgHeight, (2*h)*image_scale)
            #imgCropped = cv.CreateImage((croppedW, croppedH), img.depth, img.nChannels)
            #srcRegion = cv.GetSubRect(img, (croppedX, croppedY, croppedW, croppedH))
            #cv.Copy(srcRegion, imgCropped)
            #cv.ShowImage("cropped", imgCropped)
            #cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    return []
def ifFace(img, size):
    gray = cv.CreateImage(size, 8, 1)
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
    newMem1 = cv.CreateMemStorage(0)
    newMem2 = cv.CreateMemStorage(0)
    newMem3 = cv.CreateMemStorage(0)
    cv.EqualizeHist(gray, gray)
    face = cv.HaarDetectObjects(gray, c_f, newMem1, 1.2, 3,
                                cv.CV_HAAR_DO_CANNY_PRUNING, (50, 50))
    mouth = cv.HaarDetectObjects(gray, c_m, newMem2, 1.2, 2,
                                 cv.CV_HAAR_DO_CANNY_PRUNING, (10, 10))
    # Note: the "body" pass reuses the mouth cascade (c_m)
    body = cv.HaarDetectObjects(gray, c_m, newMem3, 1.2, 2,
                                cv.CV_HAAR_DO_CANNY_PRUNING, (100, 100))
    # Python precedence: this condition reads as (face and mouth) or body
    if face and mouth or body:
        cv.SaveImage("img/out.jpg", img)
        return 1
    else:
        return 0
def capture():
    """
    Using the intel training set to capture the face in the video.
    Most of them are frameworks in OpenCV.
    """
    j = 0
    g = os.walk("origin")
    for path, d, filelist in g:
        for filename in filelist:
            img = cv.LoadImage(os.path.join(path, filename))
            image_size = cv.GetSize(img)
            greyscale = cv.CreateImage(image_size, 8, 1)
            cv.CvtColor(img, greyscale, cv.CV_BGR2GRAY)
            storage = cv.CreateMemStorage(0)
            cv.EqualizeHist(greyscale, greyscale)
            cascade = cv.Load('haarcascade_frontalface_alt2.xml')
            faces = cv.HaarDetectObjects(greyscale, cascade, storage, 1.2, 2,
                                         cv.CV_HAAR_DO_CANNY_PRUNING, (50, 50))
            for (x, y, w, h), n in faces:
                j += 1
                cv.SetImageROI(img, (x, y, w, h))
                cv.SaveImage("captured/face" + str(j) + ".png", img)
def DetectFace(image, faceCascade, returnImage=False):
    # This function takes a grey scale cv image and finds
    # the patterns defined in the haarcascade function
    # modified from: http://www.lucaamore.com/?p=638

    # variables
    min_size = (30, 30)
    #image_scale = 2
    haar_scale = 1.1
    min_neighbors = 2
    haar_flags = 0

    # Equalize the histogram
    cv.EqualizeHist(image, image)

    # Detect the faces
    faces = cv.HaarDetectObjects(image, faceCascade, cv.CreateMemStorage(0),
                                 haar_scale, min_neighbors, haar_flags, min_size)

    # If faces are found
    if faces and returnImage:
        for ((x, y, w, h), n) in faces:
            # Convert bounding box to two CvPoints
            pt1 = (int(x), int(y))
            pt2 = (int(x + w), int(y + h))
            cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 5, 8, 0)

    if returnImage:
        return image
    else:
        return faces
def handel_camera_image(img, hc):
    # resize it to half size
    img2 = cv.CreateMat(cv.GetSize(img)[1] / 2, cv.GetSize(img)[0] / 2, cv.CV_8UC3)
    cv.Resize(img, img2)

    # convert to grayscale
    img_gray = cv.CreateImage(cv.GetSize(img2), 8, 1)
    cv.CvtColor(img2, img_gray, cv.CV_RGB2GRAY)

    # set the final image
    img_f = img_gray

    # detect faces from it
    objects = cv.HaarDetectObjects(img_f, hc, cv.CreateMemStorage())
    number_of_faces = len(objects)
    if number_of_faces != 1:
        if debug:
            print "Error! Number of detected faces: " + str(number_of_faces)
        return None
    else:
        for (x, y, w, h), n in objects:
            # annotate the image
            cv.Rectangle(img_f, (x, y), (x + w, y + h), 255)
            if debug:
                print "FACE -> h: " + str(h) + ", w: " + str(w) + \
                      ", r(w/h): " + str(float(w) / float(h))
            # resize to 64 x 64
            img_r = resize_crop_img(img_f, x, y, w, h)
            return (img_f, img_r)
def detect_and_draw(self, img, cascade, camera_position=0):
    min_size = (20, 20)
    image_scale = self.horizontalSlider_3.value()
    haar_scale = 1.2
    min_neighbors = 2
    haar_flags = 0

    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img_height = cv.Round(img.height / image_scale)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale), small_img_height), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                 haar_scale, min_neighbors, haar_flags, min_size)
    if faces:
        for ((x, y, w, h), n) in faces:
            if self.face_cert < n:
                x2, y2, w2, h2 = self.make_the_rectangle_bigger(
                    x, y, w, h, 1.22, small_img_height, image_scale)
                self.create_person_and_add_to_room(img, (x2, y2, w2, h2), camera_position)
                if self.mark_detected_objects[camera_position]:
                    pt2 = (int(x2 + w2), int(y2 + h2))
                    cv.Rectangle(img, (x2, y2), pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    if self.show_main_view[camera_position]:
        cv.ShowImage("result" + str(camera_position), img)
def pav_dejimas_ant_veido(self, frame):
    """Overlay an image on the detected face."""
    frame_cvmat = cv.fromarray(frame)  # create a frame in the CvMat format
    webcam_width = self.webcam.get(3)   # get the camera resolution
    webcam_height = self.webcam.get(4)

    # running the classifiers
    detectedFace = cv.HaarDetectObjects(frame_cvmat, haarFace, storage)

    # replace each detected face with the overlay image
    if detectedFace:
        for face in detectedFace:
            s_img = self.image
            s_img = cv2.resize(s_img, (face[0][2], face[0][3]))
            if face[0][2] > webcam_width - face[0][0] and \
               face[0][3] > webcam_height - face[0][1]:
                s_img = s_img[0:webcam_width - face[0][0],
                              0:webcam_height - face[0][1]]
                print s_img.shape[0], s_img.shape[1]
            x_offset = face[0][0]
            y_offset = face[0][1]
            # alpha-blend the overlay into the frame, channel by channel
            for c in range(0, 3):
                frame[y_offset:y_offset + s_img.shape[0],
                      x_offset:x_offset + s_img.shape[1], c] = \
                    s_img[:, :, c] * (s_img[:, :, 3] / 255.0) + \
                    frame[y_offset:y_offset + s_img.shape[0],
                          x_offset:x_offset + s_img.shape[1], c] * \
                    (1.0 - s_img[:, :, 3] / 255.0)
    return frame
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    if cascade:
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    cv.ShowImage("result", img)
def detectFace(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / imageScale),
                                cv.Round(img.height / imageScale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                 haarScale, minNeighbors, haarFlags, minSize)

    if faces:
        print "\tDetected ", len(faces), " object(s)"
        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, scale the
            # bounding box of each face and convert it to two CvPoints
            pt1 = (int(x * imageScale), int(y * imageScale))
            pt2 = (int((x + w) * imageScale), int((y + h) * imageScale))
            cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
        return img
    else:
        return False
def detect_and_draw(img, face_cascade):
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    image_scale = img.width / smallwidth
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)
    # gray = cv.CreateImage((img.width,img.height), 8, 1)
    image_scale = img.width / smallwidth
    # small_img = cv.CreateImage((cv.Round(img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    faces = cv.HaarDetectObjects(small_img, face_cascade, cv.CreateMemStorage(0),
                                 haar_scale, min_neighbors, haar_flags, min_size)

    if opencv_preview and faces:
        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
            if verbose:
                print "Face at: ", pt1[0], ",", pt2[0], "\t", pt1[1], ",", pt2[1]

    return True if faces else False
def detect_face(self, image):
    min_size = (20, 20)
    image_scale = 2
    haar_scale = 1.1
    min_neighbors = 2
    haar_flags = 0

    # Allocate the temporary images
    gray = cv.CreateImage((image.width, image.height), 8, 1)
    smallImage = cv.CreateImage((cv.Round(image.width / image_scale),
                                 cv.Round(image.height / image_scale)), 8, 1)

    # Convert color input image to grayscale
    cv.CvtColor(image, gray, cv.CV_BGR2GRAY)

    # Scale input image for faster processing
    cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

    # Equalize the histogram
    cv.EqualizeHist(smallImage, smallImage)

    # Detect the faces
    faces = cv.HaarDetectObjects(smallImage, self.cascade, cv.CreateMemStorage(0),
                                 haar_scale, min_neighbors, haar_flags, min_size)
    return faces
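# Hedged sketch (an assumption, not part of the original class): detect_face() above
# returns boxes in the downscaled image, so a hypothetical caller would multiply the
# coordinates by image_scale (2 here) to map them back onto the full-resolution frame.
def draw_detected_faces(self, image):
    image_scale = 2  # must match the value used inside detect_face()
    for ((x, y, w, h), n) in self.detect_face(image):
        pt1 = (int(x * image_scale), int(y * image_scale))
        pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
        cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 2, 8, 0)
    return image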
def findEyes(self):
    """
    Detects eyes in a photo and initializes relevant attributes

    Uses opencv library methods to detect a face and then detect the
    eyes in that face. If there are exactly two eye regions found it
    populates the region attributes. If not exactly two eye regions
    are found the method returns false.

    Args:
        None

    Return:
        bool - True if there were no issues. False for any error
    """
    #imcolor = cv.LoadImage(self.path)
    imcolor = self.facePhoto

    # Path setups
    cwd = os.path.dirname(os.path.abspath(sys.argv[0]))
    cwd += "/opencv/haarcascades/"
    frontalface = cwd + "haarcascade_frontalface_default.xml"
    eye = cwd + "haarcascade_eye.xml"

    faceCascade = cv.Load(frontalface)
    eyeCascade = cv.Load(eye)

    haarEyes = cv.Load(eye)
    storage = cv.CreateMemStorage()
    detectedEyes = cv.HaarDetectObjects(imcolor, haarEyes, storage)

    if DEBUG:
        print "detectedEyes = " + str(detectedEyes)

    if len(detectedEyes) == 2:
        if DEBUG:
            # Draw the rectangles
            cv.Rectangle(imcolor,
                         (detectedEyes[0][0][0], detectedEyes[0][0][1]),
                         (detectedEyes[0][0][0] + detectedEyes[0][0][2],
                          detectedEyes[0][0][1] + detectedEyes[0][0][3]),
                         cv.RGB(155, 155, 200), 2)
            cv.Rectangle(imcolor,
                         (detectedEyes[1][0][0], detectedEyes[1][0][1]),
                         (detectedEyes[1][0][0] + detectedEyes[1][0][2],
                          detectedEyes[1][0][1] + detectedEyes[1][0][3]),
                         cv.RGB(155, 155, 200), 2)
            cv.ShowImage("Face with eyes", imcolor)
            cv.WaitKey(0)
            cv.DestroyWindow("Face with eyes")

        left = (detectedEyes[0][0][0],
                detectedEyes[0][0][1],
                detectedEyes[0][0][0] + detectedEyes[0][0][2],
                detectedEyes[0][0][1] + detectedEyes[0][0][3])
        right = (detectedEyes[1][0][0],
                 detectedEyes[1][0][1],
                 detectedEyes[1][0][0] + detectedEyes[1][0][2],
                 detectedEyes[1][0][1] + detectedEyes[1][0][3])
        if DEBUG:
            print "left: " + str(left)
            print "right: " + str(right)
        self.setEyes(left, right)
        return True

    if DEBUG:
        print "Found more or less than 2 eyes, returning false"
    return False
def detectLeftEye(self, originalImage, cascade2, pt1, centerX, centerY):
    leftEyeArea = cv.GetSubRect(originalImage,
                                (pt1[0], pt1[1], centerX - pt1[0], centerY - pt1[1]))
    leftEye = cv.HaarDetectObjects(leftEyeArea, cascade2, cv.CreateMemStorage(0),
                                   haar_scale, min_neighbors, haar_flags, min_size)

    # in case of multiple detections, keep the largest box
    minArea = 0
    pt3 = (0, 0)
    pt4 = (0, 0)
    if leftEye:
        for ((x, y, w, h), n) in leftEye:
            if w * h > minArea:
                minArea = w * h
                pt3 = (x, y)
                pt4 = (x + w, y + h)

    if minArea > 0:
        # coordinates are relative to leftEyeArea, so shift them by pt1
        cv.Rectangle(originalImage,
                     (pt1[0] + pt3[0], pt1[1] + pt3[1]),
                     (pt1[0] + pt4[0], pt1[1] + pt4[1]),
                     cv.RGB(255, 255, 0))
        pointX = pt1[0] + pt3[0] + 10
        pointY = pt1[1] + pt3[1] + 10
        distanceX = pt4[0] - pt3[0] - 10
        distanceY = pt4[1] - pt3[1] - 10
        eyePart = cv.GetSubRect(originalImage, (pointX, pointY, distanceX, distanceY))
def detectRightEye(self, img, rightEyeArea, centerX, centerY, pt1, cascade2):
    rightEye = cv.HaarDetectObjects(rightEyeArea, cascade2, cv.CreateMemStorage(0),
                                    haar_scale, min_neighbors, haar_flags, min_size)

    # in case of multiple detections, keep the largest box
    minArea = 0
    pt3 = (0, 0)
    pt4 = (0, 0)
    if rightEye:
        for ((x, y, w, h), n) in rightEye:
            if w * h > minArea:
                minArea = w * h
                pt3 = (x, y)
                pt4 = (x + w, y + h)
                # pt3 = (int(x * image_scale), int(y * image_scale))
                # pt4 = (int((x + w) * image_scale), int((y + h) * image_scale))
                # print "point 3 " + str(pt3)
                # print "point 4 " + str(pt4)
                #
                # cv.Rectangle(img, (centerX + pt3[0], pt1[1] + pt3[1]), (centerX + pt4[0], pt1[1] + pt4[1]), cv.RGB(0, 255, 255))
                # cv.Rectangle(img, pt3, pt4, cv.RGB(0, 0, 255))

    if minArea > 0:
        # coordinates are relative to rightEyeArea, so shift them by (centerX, pt1[1])
        cv.Rectangle(img,
                     (centerX + pt3[0], pt1[1] + pt3[1]),
                     (centerX + pt4[0], pt1[1] + pt4[1]),
                     cv.RGB(0, 255, 255))
        pointX = centerX + pt3[0] + 10
        pointY = pt1[1] + pt3[1] + 10
        distanceX = pt4[0] - pt3[0] - 10
        distanceY = pt4[1] - pt3[1] - 10
        eyePart = cv.GetSubRect(img, (pointX, pointY, distanceX, distanceY))
def detect_face(img):
    haarFace = cv.Load('./haarcascade_frontalface_default.xml')
    #RGB_img = cv.fromarray(np.array(rgb[:,:,::-1]))
    allFaces = cv.HaarDetectObjects(cv.fromarray(img), haarFace, cv.CreateMemStorage(),
                                    scale_factor=1.1, min_neighbors=3, flags=0,
                                    min_size=(50, 50))

    # Get confidences
    if allFaces != []:
        count_no_face = 0
        #print(allFaces)
        face_confid = [c for ((x, y, w, h), c) in allFaces]
        max_ind = np.argmax(face_confid)
        FINAL_FACE = allFaces[max_ind]
        return FINAL_FACE[0]
    else:
        return []
def detect_face(RGB_img):
    # Image Properties
    #print('Image Size: ({},{})'.format(cv.GetCaptureProperty(CAM_CAPT, cv.CV_CAP_PROP_FRAME_HEIGHT), cv.GetCaptureProperty(CAM_CAPT, cv.CV_CAP_PROP_FRAME_WIDTH)))
    #print('FPS: {}'.format(cv.GetCaptureProperty(CAM_CAPT, cv.CV_CAP_PROP_FPS)))
    RGB_img_mat = cv.fromarray(RGB_img)
    haarFace = cv.Load('haarcascade_frontalface_default.xml')
    #RGB_img = cv.fromarray(np.array(rgb[:,:,::-1]))
    allFaces = cv.HaarDetectObjects(RGB_img_mat, haarFace, cv.CreateMemStorage(),
                                    scale_factor=1.1, min_neighbors=10, flags=0,
                                    min_size=(50, 50))

    # Get confidences
    if allFaces != []:
        #print(allFaces)
        face_confid = [c for ((x, y, w, h), c) in allFaces]
        area = [w * h for ((x, y, w, h), c) in allFaces]
        #max_ind = np.argmax(face_confid)
        max_ind = np.argmax(area)
        FINAL_FACE = allFaces[max_ind]
        x0 = FINAL_FACE[0][0]
        y0 = FINAL_FACE[0][1]
        w = FINAL_FACE[0][2]
        h = FINAL_FACE[0][3]

        # Show detected face
        print('Face Detected!!')
        #cv.Rectangle(RGB_img_mat, (x0, y0), (x0+w, y0+h), cv.RGB(0,0,255), 2)

        # Detect eyes only in given face region
        print('Face: ' + str(FINAL_FACE))
        cropped_img = RGB_img[y0:y0 + h, x0:x0 + w]
        #cv.Smooth(cropped_img, cropped_img, cv.CV_GAUSSIAN, 15, 15)
        #print(cv.GetSize(cropped_img))
        #cv.ShowImage('crop', cropped_img)
        #cv.SaveImage('IMAGE.png', cropped_img)
        #allEyes = detect_eyes(cropped_img)
        #print('Eyes: '+str(allEyes))
        #for eye in allEyes:
        #    eye = eye[0]
        #    eye = (eye[0]+x0, eye[1]+y0, eye[2], eye[3])
        #    cv.Rectangle(RGB_img, (eye[0], eye[1]), (eye[0]+eye[2], eye[1]+eye[3]), cv.RGB(255,0,0), 2)
        return np.asarray(cropped_img[:, :])
    else:
        print('No Face!!')
        return RGB_img
def detect_and_draw(img, cascade, jpg_cnt):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    if cascade:
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

            # every 50th frame, save the annotated image and upload it
            if jpg_cnt % 50 == 1:
                print('capture completed')
                cv.SaveImage('test_' + str(jpg_cnt) + '.jpg', img)
                print("aaa1")

                url = 'http://210.94.185.52:8080/upload.php'
                #files={ 'upfiles' : open('/home/lee/test_'+str(jpg_cnt)+'.jpg','rb')}
                files = {'upfiles': open('/home/lee/test_' + str(jpg_cnt) + '.jpg', 'rb')}
                print("aaa2")
                r = requests.post(url, files=files)
                print("aaa3")
                print(r.text)

                # take the first numeric token in the response as the score
                for i in r.text.split():
                    try:
                        op = float(i)
                        break
                    except:
                        continue
                print(op)

                # LED
                if op >= 0.9:
                    lock_on()
                else:
                    print('no')

    cv.ShowImage("result", img)
def HaarDetect(image, casc):
    features = []
    detected = cv.HaarDetectObjects(image, casc, storage, 1.2, 2,
                                    cv.CV_HAAR_DO_CANNY_PRUNING, (100, 100))
    if detected:
        for (x, y, w, h), n in detected:
            features.append((x, y, w, h))
    return features
def track(img, threshold=100):
    '''Accepts BGR image and optional object threshold between 0 and 255 (default = 100).
       Returns: (x,y) coordinates of centroid if found
                (-1,-1) if no centroid was found
                None if user hit ESC
    '''
    cascade = cv.Load("haarcascade_frontalface_default.xml")
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    center = (-1, -1)
    faces = []
    original_size_faces = []
    #import ipdb; ipdb.set_trace()

    if cascade:
        t = cv.GetTickCount()
        # HaarDetectObjects takes 0.02s
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                # cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                #cv.Rectangle(img, (x,y), (x+w,y+h), 255)

                # get the xy corner co-ords, calc the center location
                x1 = pt1[0]
                x2 = pt2[0]
                y1 = pt1[1]
                y2 = pt2[1]
                centerx = x1 + ((x2 - x1) / 2)
                centery = y1 + ((y2 - y1) / 2)
                center = (centerx, centery)

                scaled = ((x1, y1, x2 - x1, y2 - y1), n)
                original_size_faces.append(scaled)
                # print scaled

    # cv.NamedWindow(WINDOW_NAME, 1)
    # cv.ShowImage(WINDOW_NAME, img)
    # if cv.WaitKey(5) == 27:
    #     center = None

    return (center, original_size_faces)
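# Hedged usage sketch (assumed, not from the original source): feeding webcam frames
# to track(). It relies on the same module-level globals (image_scale, haar_scale,
# min_neighbors, haar_flags, min_size) that track() itself already expects.
def run_tracker(camera_index=0):
    capture = cv.CaptureFromCAM(camera_index)
    while True:
        frame = cv.QueryFrame(capture)
        if frame is None:
            break
        center, faces = track(frame)
        print "centroid:", center, "faces found:", len(faces)
        if cv.WaitKey(10) == 27:  # stop on ESC
            break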
def OnPaint(self, evt):
    if not self.timer.IsRunning():
        dc = wx.BufferedDC(wx.ClientDC(self), wx.NullBitmap, wx.BUFFER_VIRTUAL_AREA)
        dc.SetBackground(wx.Brush(wx.Colour(0, 0, 0)))
        return

    # Capture the frame
    frame = cv.QueryFrame(CAMERA)
    cv.CvtColor(frame, frame, cv.CV_BGR2RGB)
    Img = wx.EmptyImage(frame.width, frame.height)
    Img.SetData(frame.tostring())
    self.bmp = wx.BitmapFromImage(Img)
    width, height = frame.width, frame.height

    # Face detection
    min_size = (20, 20)
    image_scale = 2
    haar_scale = 1.2
    min_neighbors = 2
    haar_flags = 0
    gray = cv.CreateImage((frame.width, frame.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(frame.width / image_scale),
                                cv.Round(frame.height / image_scale)), 8, 1)
    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)
    listeVisages = cv.HaarDetectObjects(small_img, CASCADE, cv.CreateMemStorage(0),
                                        haar_scale, min_neighbors, haar_flags, min_size)

    # Display the image
    x, y = (0, 0)
    try:
        dc = wx.BufferedDC(wx.ClientDC(self), wx.NullBitmap, wx.BUFFER_VIRTUAL_AREA)
        try:
            dc.SetBackground(wx.Brush(wx.Colour(0, 0, 0)))
        except:
            pass
        dc.Clear()
        dc.DrawBitmap(self.bmp, x, y)

        # Draw a rectangle around each detected face
        if listeVisages:
            for ((x, y, w, h), n) in listeVisages:
                dc.SetBrush(wx.TRANSPARENT_BRUSH)
                dc.SetPen(wx.Pen(wx.Colour(255, 0, 0), 2))
                dc.DrawRectangle(x * image_scale, y * image_scale,
                                 w * image_scale, h * image_scale)
        self.listeVisages = listeVisages
        del dc
        del Img
    except TypeError:
        pass
    except wx.PyDeadObjectError:
        pass
def detect_and_draw(img, cascade, detected):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    image_scale = img.width / smallwidth
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    if cascade:
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        # t = cv.GetTickCount() - t
        # print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))
        if faces:
            if detected == 0:
                # os.system('festival --tts hi &')
                detected = 1
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                print "Face at: ", pt1[0], ",", pt2[0], "\t", pt1[1], ",", pt2[1]

                # find amount needed to pan/tilt
                span = (pt1[0] + pt2[0]) / 2
                stlt = (pt1[1] + pt2[1]) / 2
                mid = smallwidth / 2
                if span < mid:
                    print "left", mid - span
                else:
                    print "right", span - mid
                #os.system('echo "6="' + str(valTilt) + ' > /dev/pi-blaster')
                #os.system('echo "7="' + str(valPan) + ' > /dev/pi-blaster')
        else:
            if detected == 1:
                #print "Last seen at: ", pt1[0], ",", pt2[0], "\t", pt1[1], ",", pt2[1]
                #os.system('festival --tts bye &')
                status = "just disappeared"
                detected = 0

    cv.ShowImage("result", img)
    return detected
def detect_and_draw(img, cascade, c):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    face_flag = False
    if cascade:
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            face_flag = True
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))

                # once a face has been detected for enough frames
                if c > 4:
                    # save the full frame
                    global counter
                    counter = -1
                    d = datetime.today()
                    datestr = d.strftime('%Y-%m-%d_%H-%M-%S')
                    outputname = '/home/pi/fd/fd_' + datestr + '.jpg'
                    cv.SaveImage(outputname, img)
                    print 'Face Detect'

                    # reload and crop the face region
                    fimg = cv.LoadImage(outputname)
                    fimg_trim = fimg[pt1[1]:pt2[1], pt1[0]:pt2[0]]
                    outputname2 = '/home/pi/fd/face_' + datestr + '.jpg'
                    cv.SaveImage(outputname2, fimg_trim)
                    print 'Face Image Save'

                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    cv.ShowImage("result", img)
    return face_flag
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    if cascade:
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        #print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))
        if faces:
            count = 0
            stop = 1
            name = 1
            no = 1
            dict = {}
            for num in range(14):
                dict[name] = no
                name += 1
            print dict
            f = open('no.json', 'w')
            json.dump(dict, f)
            #for count in range(14):
                #time.sleep(stop)
                #count += 1
                #print(count)
                #time.sleep(stop)
            #cv.PutText(img, "SAMPLE_TEXT", (0, 50), cv.CV_FONT_HERSHEY_PLAIN, cv.RGB(255, 255, 255))
            #cv.PutText(img, "SAMPLE_TEXT", (0, 50), cv.CV_FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 2, cv.CV_AA)
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                #for count in range(14):
                count += 1
                print(count)
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                #count = count + 1
                #print(count)
                # cv.putText(img, "SAMPLE_TEXT", (0, 50), FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 2, cv.CV_AA)

    cv.ShowImage("result", img)
def findface(image):
    # Face detection: return the coordinates of each face found in the image
    grayscale = cv.CreateImage((image.width, image.height), 8, 1)
    cv.CvtColor(image, grayscale, cv.CV_BGR2GRAY)
    cascade = cv.Load(OPCV_PATH + "/data/haarcascades/haarcascade_frontalface_alt_tree.xml")
    rect = cv.HaarDetectObjects(grayscale, cascade, cv.CreateMemStorage(), 1.015, 2,
                                cv.CV_HAAR_DO_CANNY_PRUNING, (10, 10))
    result = []
    for r in rect:
        result.append([(r[0][0], r[0][1]),
                       (r[0][0] + r[0][2], r[0][1] + r[0][3])])
    return result
def detect(img, cascade):
    rects = cv.HaarDetectObjects(img, cascade, cv.CreateMemStorage(), 1.1, 2,
                                 cv.CV_HAAR_DO_CANNY_PRUNING, (255, 255))
    if len(rects) == 0:
        return []

    result = []
    for r in rects:
        result.append((r[0][0], r[0][1],
                       r[0][0] + r[0][2], r[0][1] + r[0][3]))

    # keep the results only if the first box's bottom-right corner lies past (300, 300)
    if result[0][2] > 300 and result[0][3] > 300:
        return result
    else:
        return []
def DetectRedEyes(image, faceCascade):
    min_size = (20, 20)
    image_scale = 2
    haar_scale = 1.1
    min_neighbors = 2
    haar_flags = 0

    # Allocate the temporary images
    gray = cv.CreateImage((image.width, image.height), 8, 1)
    smallImage = cv.CreateImage((cv.Round(image.width / image_scale),
                                 cv.Round(image.height / image_scale)), 8, 1)

    # Convert color input image to grayscale
    cv.CvtColor(image, gray, cv.CV_BGR2GRAY)

    # Scale input image for faster processing
    cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

    # Equalize the histogram
    cv.EqualizeHist(smallImage, smallImage)

    # Detect the faces
    faces = cv.HaarDetectObjects(smallImage, faceCascade, cv.CreateMemStorage(0),
                                 haar_scale, min_neighbors, haar_flags, min_size)

    # If faces are found
    if faces:
        #print faces
        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            #print "face"
            global line2
            line2 = n
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            # print pt1
            # print pt2
            cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 1, 8, 0)
            cv.PutText(image, "face" + str(h), pt1, font, cv.RGB(255, 0, 0))
            cv.PutText(image, "Come close.", (0, 20), font, cv.RGB(255, 0, 0))
            cv.PutText(image, "Ensure your forehead is well lit.", (0, 40), font,
                       cv.RGB(255, 0, 0))
            cv.PutText(image, "Hit escape when done.", (0, 60), font, cv.RGB(255, 0, 0))

    cv.ResetImageROI(image)
    return image
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CloneMat(img)  # cv.CreateImage((img.width,img.height))
    # (cv.Round(img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    if cascade:
        t = cv.GetTickCount()
        # Scan image and get an array of faces
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        #print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                print "X ", x

                # face sits toward the right of the frame: step the motor one way
                if int(x * image_scale) > (img.width * 0.45):
                    #print "X " , x
                    #print steppera.IsTurning()
                    if steppera.IsTurning() == False:
                        if stepperInUse[STEPPERA] == True:
                            sensor_value = "-4"
                            if isNumeric(sensor_value):
                                print "Moving to", sensor_value
                                steppera.changeSpeed(
                                    int(100 * sign(int(float(sensor_value)) - 0)),
                                    abs(int(float(sensor_value)) - 0))
                                while steppera.IsTurning() == True:
                                    cv.WaitKey(100)

                # face sits toward the left of the frame: step the motor the other way
                if int((x + w) * image_scale) < (img.width * 0.55):
                    #print "X " , x
                    #print steppera.IsTurning()
                    if steppera.IsTurning() == False:
                        if stepperInUse[STEPPERA] == True:
                            sensor_value = "4"
                            if isNumeric(sensor_value):
                                print "Moving to", sensor_value
                                steppera.changeSpeed(
                                    int(100 * sign(int(float(sensor_value)) - 0)),
                                    abs(int(float(sensor_value)) - 0))
                                while steppera.IsTurning() == True:
                                    cv.WaitKey(100)

    cv.ShowImage("result", img)
def detect1(image, haar_feat, rect=None):
    #t0=time.time()
    rects = list()
    img = cv.fromarray(image)
    # `rect` is forwarded as the min_size argument of HaarDetectObjects
    containers = cv.HaarDetectObjects(img, haar_feat, storage, 1.1, 2,
                                      cv.CV_HAAR_DO_CANNY_PRUNING, rect)
    if containers:
        for f in containers:
            rects.append((f[0][0], f[0][1],
                          f[0][0] + f[0][2], f[0][1] + f[0][3]))
        return rects
    else:
        return None
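# Hedged usage sketch (assumed): detect1() above expects a numpy image, a loaded Haar
# cascade and a module-level `storage`; the cascade path, the use of cv2 for I/O and
# the (30, 30) minimum size are all assumptions for illustration.
import cv2

storage = cv.CreateMemStorage(0)
haar_feat = cv.Load("haarcascade_frontalface_default.xml")  # assumed cascade file
frame = cv2.imread("input.jpg")                             # hypothetical input image
boxes = detect1(frame, haar_feat, rect=(30, 30))            # rect becomes min_size
if boxes:
    for (x1, y1, x2, y2) in boxes:
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
    cv2.imwrite("output.jpg", frame)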