frame_width = 1920
frame_height = 1080

# Create capture
cap = cv2.VideoCapture(4)

# Set camera properties
cap.set(cv2.CAP_PROP_FRAME_WIDTH, frame_width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_height)
cap.set(cv2.CAP_PROP_FPS, fps)

# Create Window for stream display
cv2.namedWindow('video_realtime', cv2.WINDOW_NORMAL)

# Initialize face recognizer
fr = FaceRecognition(True)

if not cap.isOpened():
    print("could not open webcam")
else:
    print("Warning: face representations are built fresh!")
    print("Training with images from /facedatabase/images/*")
    print("IDs and names are provided by the folder names")
    # Discard any previously learned model and rebuild it from the image database
    fr.svm = None
    fr.images = {}
    fr.people = {}
    fr.learnFromPics()
    while 1:
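        # --- Assumed continuation; the loop body is not part of this excerpt. ---
        # Presumably the loop feeds live frames to the freshly trained recognizer,
        # as in the webcam examples below. The return signature of processframe()
        # is assumed to match the later snippet.
        ret, frame = cap.read()
        if ret:
            identies, confidences, new_frame, cap_image = fr.processframe(frame)
            cv2.imshow('video_realtime', new_frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break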

# Blank frame to prime the output stream before the first processed image
init_frame = np.zeros((1080, 1920, 3), np.uint8)
#out.write(init_frame)
out_cap.write(init_frame)

FPS = 5
FPS_LAST_MAIN = 5
DISTANCE_TO_FACE = 42

#print("Calling subprocess to open gst_rtsp_server")
BASE_DIR = os.path.dirname(__file__) + '/'
#p = subprocess.Popen(['python', BASE_DIR + 'gst_rtsp_server.py'])
#pp = subprocess.Popen(['python', BASE_DIR + 'webstream.py'])

fr = FaceRecognition(False)

# Read BGR frames from the shared-memory socket via a GStreamer pipeline
cap = cv2.VideoCapture(
    "shmsrc socket-path=/tmp/camera_1m ! video/x-raw, format=BGR ,height=1920,width=1080,framerate=30/1 ! videoconvert ! video/x-raw, format=BGR ! appsink drop=true",
    cv2.CAP_GSTREAMER)
#cap = cv2.VideoCapture("shmsrc socket-path=/tmp/camera_image ! video/x-raw, format=BGR ,height=1920,width=1080,framerate=30/1 ! videoconvert ! video/x-raw, format=BGR ! appsink drop=true", cv2.CAP_GSTREAMER)

#so_controller = ImageSMController("test")
#so_controller.connectreceiver()


def to_node(type, message):
    # convert to json and print (node helper will read from stdout)
    try:
        print(json.dumps({type: message}))
    except Exception:
        pass
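# Example of how to_node() could be used to notify a Node helper that reads this
# script's stdout. The event name and payload below are hypothetical and not
# taken from the original code:
#
#   to_node("login", {"user": "Alice", "confidence": 0.87})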

frame_width = 1920
frame_height = 1080

# Create capture
cap = cv2.VideoCapture(4)

# Set camera properties
cap.set(cv2.CAP_PROP_FRAME_WIDTH, frame_width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_height)
cap.set(cv2.CAP_PROP_FPS, fps)

# Create Window for stream display
cv2.namedWindow('video_realtime', cv2.WINDOW_NORMAL)

# Initialize face recognizer
fr = FaceRecognition(False)

if not cap.isOpened():
    print("could not open webcam")
else:
    while 1:
        ret, frame = cap.read()
        if ret:
            rot_frame = cv2.flip(np.rot90(frame, 1), 1)
            identies, identies_bb, confidences, new_frame, cap_image = fr.processframe(rot_frame)
            cv2.imshow('video_realtime', new_frame)

frame_width = 1920
frame_height = 1080

# Create capture
cap = cv2.VideoCapture(3)

# Set camera properties
cap.set(cv2.CAP_PROP_FRAME_WIDTH, frame_width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_height)
cap.set(cv2.CAP_PROP_FPS, fps)

# Create Window for stream display
cv2.namedWindow('video_realtime', cv2.WINDOW_NORMAL)

# Initialize face recognizer
fr = FaceRecognition(True)

if not cap.isOpened():
    print("could not open webcam")
else:
    while 1:
        ret, frame = cap.read()
        if ret:
            identies, confidences, new_frame, cap_image = fr.processframe(frame)
            cv2.imshow('video_realtime', new_frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
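# Assumed cleanup once the preview loop exits (not shown in the original
# snippet): release the camera and close the display window.
cap.release()
cv2.destroyAllWindows()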

frame_width = 1920
frame_height = 1080

# Create capture
cap = cv2.VideoCapture(4)

# Set camera properties
cap.set(cv2.CAP_PROP_FRAME_WIDTH, frame_width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_height)
cap.set(cv2.CAP_PROP_FPS, fps)

# Create Window for stream display
cv2.namedWindow('video_realtime', cv2.WINDOW_NORMAL)

# Initialize face recognizer
fr = FaceRecognition(False)

if not cap.isOpened():
    print("could not open webcam")
else:
    # Put the recognizer into training mode for a single person
    fr.training = True
    fr.training_name = "Timm"
    fr.training_id = 13
    number_of_training_images = 60
    print("Starting Training for: " + fr.training_name + " with ID: " + str(fr.training_id))
    print("Taking " + str(number_of_training_images) + " pictures")
    while 1:
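        # --- Assumed continuation; the training loop body is not part of this
        # excerpt. --- A plausible body: grab frames and hand them to the
        # recognizer, which is expected to collect training samples while
        # fr.training is True.
        ret, frame = cap.read()
        if not ret:
            continue
        fr.processframe(frame)
        cv2.imshow('video_realtime', frame)
        number_of_training_images -= 1
        if number_of_training_images <= 0 or cv2.waitKey(1) & 0xFF == ord('q'):
            break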