import numpy as np
import cv2
from cv_bridge import CvBridge

import faceDetect as fd


def callback(image_msg):
    # Convert the incoming ROS Image message to an OpenCV BGR image.
    bridge = CvBridge()
    cv2_image = bridge.imgmsg_to_cv2(image_msg, "bgr8")
    cv2_image = np.array(cv2_image, dtype=np.uint8)

    # Run the cascade-based face detector on the frame.
    # classifier_xml_dir (path to the cascade XML) is defined elsewhere in the module.
    (bbox, detected_flag) = fd.faceDetect(cv2_image, classifier_xml_dir)

    # Show the frame, pacing the display at roughly 30 FPS.
    cv2.imshow('image_subscriber', cv2_image)
    cv2.waitKey(1000 // 30)

    # rospy.loginfo(str(detected_flag))
    print(detected_flag)
def execute_cb(self, goal):
    # Action server execute callback: keep grabbing camera frames until a
    # face is detected, then report the result.
    print('goal rx')
    self.feedback_.busy_code = 0
    face_found = 0
    while not face_found:
        self.action_server_.publish_feedback(self.feedback_)

        # Block until the next frame arrives on the camera topic.
        print('[FD] wait for image')
        image_msg = rospy.wait_for_message("/usb_cam/image_raw", Image)
        print('[FD] Got image')

        # Convert to grayscale and run the face detector.
        cv_image_gray = image_msg_to_grayscale(image_msg)
        (bbox, face_found) = faceDetect.faceDetect(cv_image_gray, classifier_xml_dir)
        print('[FD] result ', face_found)

    # Report success once a face has been found.
    self.result_.detected_gender = face_found
    self.action_server_.set_succeeded(self.result_)
import copy

import cv2

import edge
import faceDetect
import gorgeous
import picKit

img = cv2.imread('test.jpeg')

# Resize to a height of 500 pixels, preserving the aspect ratio.
height, width = img.shape[:2]
img = picKit.resize(img, 500, width * 500 // height)

# Detect faces and draw their bounding boxes on a copy of the image.
faces = faceDetect.faceDetect(img)
newimg = copy.copy(img)
for (x, y, w, h) in faces:
    newimg = cv2.rectangle(newimg, (x, y), (x + w, y + h), (255, 0, 0), 2)

    # Run Canny edge detection on the face region (with a 20-pixel margin)
    # and draw its contours. Note: the three-value return is the
    # OpenCV 3.x findContours signature.
    edgeImage = edge.canny(img[(y - 20):(y + h + 20), (x - 20):(x + w + 20)])
    image, contours, hierarchy = cv2.findContours(copy.copy(edgeImage),
                                                  cv2.RETR_TREE,
                                                  cv2.CHAIN_APPROX_SIMPLE)
    image = cv2.drawContours(edgeImage, contours, 1, (255, 255, 255), 3)
    cv2.imshow('edges', image)
    cv2.waitKey()

cv2.imshow('image', newimg)
cv2.waitKey()

# Apply the beautification filter to the detected faces.
img = gorgeous.gorgeous(img, faces, 8)
cv2.imshow('image2', img)
cv2.waitKey()
#!/usr/bin/env python
import numpy as np
import cv2

import faceDetect as fd

# Create the video capture object.
camera_id = 1  # 0 - default webcam, 1 - USB webcam
cap = cv2.VideoCapture(camera_id)
# cv2.namedWindow('image')
FPS = 30

while True:
    # Capture a frame.
    retvar, img = cap.read()
    print(img.shape)  # (480, 640, 3)
    print(type(img))

    # Compute gist
    # gistfeat = computeGist(img)
    # Classify the image from the above gist
    # sceneClass = classifyMulticlass(svmModel, gistfeat)

    # Run the face detector on the frame.
    bbox, flag = fd.faceDetect(img)
    # print(type(bbox))
    # print(bbox)
    print(flag)

    # Display the frame
    # if flag:
    #     cv2.rectangle(img, (bbox[0, 1], bbox[0, 1]), (bbox[0, 2], bbox[0, 3]), 1)
    # cv2.imshow('image', img)

    # Pace the loop at roughly FPS frames per second; press 'q' to quit.
    if cv2.waitKey(1000 // FPS) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()