def produce_data_samp(raw_input_queue, detection_queue, time_control_queue, filename, resolution):
    """Capture frames from a camera or video file and fan them out to queues.

    Opens the default camera when ``filename`` is the sentinel string
    "no_filename", otherwise opens the given video file.  Every captured
    frame is pushed to both ``raw_input_queue`` and ``detection_queue``,
    and its capture timestamp is pushed to ``time_control_queue`` so a
    downstream stage can measure end-to-end latency.  Runs until a frame
    read fails or the ESC key is pressed, then releases the device.

    Args:
        raw_input_queue: queue receiving every captured frame.
        detection_queue: queue feeding the face-detection stage.
        time_control_queue: queue of per-frame capture timestamps.
        filename: path to a video file, or "no_filename" for camera 0.
        resolution: (width, height) requested from the capture device.
    """
    if filename == "no_filename":
        cap = cv2.VideoCapture(0)
    else:
        cap = cv2.VideoCapture(filename)
    # Use the named property constants instead of the opaque ids 3 and 4.
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, resolution[0])
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, resolution[1])
    while True:
        process_start = time.time()
        # Read the frame
        valid, img = cap.read()
        if not valid:
            print("ERROR: Image Error on VideoCapture\n")
            log(time.asctime(), "produce_data_samp;", "No image input")
            break
        # Timestamp first, then fan the frame out to both consumers.
        time_control_queue.put(time.time())
        raw_input_queue.put(img)
        detection_queue.put(img)
        log(time.asctime(), "produce_data_samp;", "Raw image has been captured and stored")
        # Stop if escape key is pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
        process_duration = time.time() - process_start
        log_process_duration("produce_data_samp", process_duration)
    # Release the VideoCapture object
    cap.release()
def modify_alt(processed_image_pipe_out, faces_pipe_out, time_control_pipe_out):
    """Apply the selected face effect to incoming frames and display them.

    Receives a processed frame and its face rectangles from the pipes,
    draws either a green rectangle (mode < 0) or a blur (mode > 0) over
    each face, overlays a once-per-second FPS figure, and shows the
    result in the 'image_display' window.  SPACE toggles the effect
    mode, ESC closes the window and ends the loop; all three pipe ends
    are closed before returning.  Per-frame capture-to-display delay is
    read from ``time_control_pipe_out`` and logged.
    """
    # create window
    cv2.namedWindow('image_display', cv2.WINDOW_AUTOSIZE)
    window_start = time.time()
    mode = 1
    fps_window_seconds = 1
    frames_in_window = 0
    frame_number = 0
    fps_label = "FPS: "
    while True:
        process_start = time.time()
        image = processed_image_pipe_out.recv()
        faces = faces_pipe_out.recv()
        for (x, y, w, h) in faces:
            if mode >= 0:
                image = blur_fragment(image, x, y, w, h)
            else:
                cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        frames_in_window += 1
        elapsed = time.time() - window_start
        if elapsed > fps_window_seconds:
            # Recompute the label once per window, then restart the window.
            fps_label = "FPS: " + str(round(frames_in_window / (time.time() - window_start)))
            frames_in_window = 0
            window_start = time.time()
        cv2.putText(image, fps_label, (0, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        cv2.imshow('image_display', image)
        # Latency from the moment the producer captured this frame.
        time_since_captured = time_control_pipe_out.recv()
        difference = time.time() - time_since_captured
        frame_number += 1
        log_delay(frame_number, difference)
        log(time.asctime(), "modify_alt;", "Effect applied and image shown")
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            cv2.destroyAllWindows()
            break
        elif k == 32:
            # SPACE flips between blur and rectangle rendering.
            mode = -mode
        process_duration = time.time() - process_start
        log_process_duration("modify_alt", process_duration)
    time_control_pipe_out.close()
    processed_image_pipe_out.close()
    faces_pipe_out.close()
def detect_faces_std(in_queue, out_queue, faces_queue):
    """Detect faces on every frame taken from ``in_queue``.

    Each frame is converted to grayscale and run through the frontal-face
    Haar cascade; the detected rectangles go to ``faces_queue`` and the
    (unmodified) frame is forwarded to ``out_queue``.  Runs until ESC is
    pressed or the cascade fails to load.

    Args:
        in_queue: queue of incoming BGR frames.
        out_queue: queue receiving each frame after detection.
        faces_queue: queue receiving the per-frame face rectangles.
    """
    faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    # CascadeClassifier does not raise on a missing/corrupt file — it just
    # yields an empty classifier and detectMultiScale later raises inside
    # the worker loop.  Fail fast with a log entry instead.
    if faceCascade.empty():
        log(time.asctime(), "detect_faces_std;", "Failed to load Haar cascade file")
        return
    while True:
        process_start = time.time()
        image = in_queue.get()
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(gray, 1.1, 4)
        faces_queue.put(faces)
        out_queue.put(image)
        log(time.asctime(), "detect_faces_std;", "Face detection on frame completed")
        # Stop if escape key is pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
        process_duration = time.time() - process_start
        log_process_duration("detect_faces_std", process_duration)
def detect_faces_samp(queue, faces_queue, sampling_step):
    """Detect faces on every ``sampling_step``-th frame from ``queue``.

    Frames are consumed continuously, but the Haar-cascade detection is
    only executed when the rolling frame index is 0 (i.e. once per
    ``sampling_step`` frames); only those runs push rectangles to
    ``faces_queue``.  Runs until ESC is pressed.

    Args:
        queue: queue of incoming BGR frames.
        faces_queue: queue receiving rectangles from sampled detections.
        sampling_step: detect on one out of every this many frames.
    """
    faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    frame_idx = 0
    while True:
        process_start = time.time()
        image = queue.get()
        if frame_idx == 0:
            # Only the sampled frames pay the detection cost.
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            faces = faceCascade.detectMultiScale(gray, 1.1, 4)
            log(time.asctime(), "detect_faces_samp;", "Face detection on frame completed")
            faces_queue.put(faces)
        frame_idx = (frame_idx + 1) % sampling_step
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
        process_duration = time.time() - process_start
        log_process_duration("detect_faces_samp", process_duration)
def detect_faces_alt(raw_image_pipe_out, processed_image_pipe_in, faces_pipe_in):
    """Pipe-based variant: detect faces on each frame received over a pipe.

    Receives frames from ``raw_image_pipe_out``, runs the frontal-face
    Haar cascade on a grayscale copy, and sends the rectangles over
    ``faces_pipe_in`` followed by the frame itself over
    ``processed_image_pipe_in``.  Runs until ESC is pressed, then closes
    all three pipe ends.
    """
    cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    while True:
        process_start = time.time()
        frame = raw_image_pipe_out.recv()
        grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        detections = cascade.detectMultiScale(grayscale, 1.1, 4)
        # Rectangles first, then the frame — the consumer recv()s in
        # the opposite order on its side of each pipe.
        faces_pipe_in.send(detections)
        processed_image_pipe_in.send(frame)
        log(time.asctime(), "detect_faces_alt;", "Face detection on frame completed")
        key = cv2.waitKey(1) & 0xff
        if key == 27:
            break
        log_process_duration("detect_faces_alt", time.time() - process_start)
    raw_image_pipe_out.close()
    processed_image_pipe_in.close()
    faces_pipe_in.close()