def test(request):
    """Django view: persist an uploaded image, run face detection on it,
    annotate the detections, and render the result page.

    Expects a multipart POST with a file field named 'img'.
    """
    # Uploaded file object from the form field 'img'.
    img = request.FILES.get('img')
    # Original filename and its extension.
    img_name = img.name
    ext = os.path.splitext(img_name)[1]
    # Timestamp the stored name so repeated uploads never collide.
    now = time.time()
    img_name = f'imgs{now}{ext}'
    # Destination: <project>/imgs/input/<name>.
    BASE_DIR = Path(__file__).resolve().parent.parent
    img_path = os.path.join(BASE_DIR, "imgs", "input", img_name)
    # Stream the upload in chunks so a large file is never fully in memory.
    # FIX: open with 'wb' (truncate) — the original 'ab' would append to any
    # pre-existing file of the same name and corrupt the image.
    with open(img_path, 'wb') as fp:
        for chunk in img.chunks():
            fp.write(chunk)
    face_detector = FaceDetector()
    print(img_path)
    img = cv2.imread(img_path)
    # faced expects RGB input; OpenCV loads images as BGR.
    rgb_img = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2RGB)
    # predict() returns (x_center, y_center, width, height, prob) tuples.
    bboxes = face_detector.predict(rgb_img, 0.8)
    ann_img = annotate_image(img, bboxes)
    # TODO(review): hard-coded absolute output path — move into settings.
    cv2.imwrite('C:/Users/Administrator/Desktop/HelloWorld/static/test1.png', ann_img)
    return render(request, 'face.html')
def Image_Detection_Recognition(image_path, database, FRmodel):
    """Detect faces in the image at *image_path* and, if any are found, try
    to recognise the face against *database* using *FRmodel*.

    Shows the annotated image in an OpenCV window either way and blocks
    until a key is pressed.
    """
    img = cv2.imread(image_path)
    # NOTE(review): the PIL open/save round-trip rewrites the file in place —
    # presumably to normalise the format before Detection_Faces; confirm it
    # is actually required.
    image = Image.open(image_path)
    image.save(image_path)
    bboxes = Detection_Faces(image_path)
    font = cv2.FONT_HERSHEY_SIMPLEX
    if not bboxes:
        # No faces: these zeroed geometry values are placeholders and are
        # never used below; only the label is drawn.
        StartingPoint_x = 0
        StartingPoint_y = 0
        FinalPoint_x = 0
        FinalPoint_y = 0
        Color = 0
        thickness = 0
        Name_Placer = "No one..."
        ann_img = annotate_image(img, bboxes)
        cv2.putText(ann_img, Name_Placer, (470, 70), font, 1, (0, 0, 0), 2)
        cv2.imshow("test", ann_img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    else:
        ann_img = annotate_image(img, bboxes)
        # Convert the detector's box format into cv2.rectangle parameters.
        StartingPoint_x, StartingPoint_y, FinalPoint_x, FinalPoint_y, Color, thickness = FaceDetection_to_Cv2Rectangles(
            bboxes)
        # BUG(review): 'i' is not defined anywhere in this function — this
        # line raises NameError at runtime. It likely should receive a bbox
        # index (or the bboxes themselves); confirm Crop_Image_to_Face's
        # expected signature before fixing.
        Crop_Image_to_Face(image_path, i)
        min_dist, identity = who_is_it(image_path, database, FRmodel)
        if min_dist > 0.96:
            # Embedding distance above 0.96: treat the face as unknown.
            Name_Placer = "Not Known"
            cv2.putText(ann_img, Name_Placer, (470, 70), font, 1, (0, 0, 0), 2)
            cv2.imshow("test", ann_img)
            print("Not in the database.")
            print(min_dist)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
        else:
            print("it's " + str(identity) + ", the distance is " + str(min_dist))
            Name_Placer = identity
            cv2.putText(ann_img, Name_Placer, (470, 70), font, 1, (0, 0, 0), 2)
            cv2.imshow("test", ann_img)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
def facedect(self):
    """Run face detection over self.videoname frame-by-frame and write an
    annotated copy ('masked_<name>.avi') into self.outputpath, updating
    self.progressbar (0–100) as it goes.
    """
    face_detector = FaceDetector()
    self.progressbar.setValue(0)
    if self.videoname is not None:
        cap = cv2.VideoCapture(self.videoname)
        frame_width = int(cap.get(3))
        frame_height = int(cap.get(4))
        # Derive the output name from the input file's basename sans extension.
        filename = self.videoname.split('/')[-1].split('.')
        fourcc = cv2.VideoWriter_fourcc(*'DIVX')
        out = cv2.VideoWriter(
            '{0}/masked_{1}.avi'.format(self.outputpath, filename[0]),
            fourcc, 25.0, (frame_width, frame_height))
        # FIX: the original first read EVERY frame into a Python list before
        # processing, holding the whole video in memory. Stream instead and
        # take the total frame count from the container for progress math
        # (guard against 0 to avoid division by zero on broken metadata).
        num_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) or 1
        step = 1
        i = 0
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            # faced expects RGB; OpenCV decodes BGR.
            rgb_img = cv2.cvtColor(frame.copy(), cv2.COLOR_BGR2RGB)
            # predict() receives an RGB numpy image (HxWxC) and returns
            # (x_center, y_center, width, height, prob) tuples.
            bboxes = face_detector.predict(rgb_img, 0.8)
            # Annotate on the original BGR frame and append to the output.
            out.write(annotate_image(frame, bboxes))
            # Advance the progress bar roughly once per percent completed.
            if (i / num_frame * 100) - step > 0:
                step += 1
                print(step)
                self.progressbar.setValue(step)
            i += 1
        cap.release()
        out.release()
def process_video(vidfile):
    """Annotate every frame of *vidfile* with detected faces and write the
    result to 'output.mp4', mirroring the source's codec, fps and size.

    Uses the module-level `face_detector` instance.
    """
    reader = cv2.VideoCapture(vidfile)
    total = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
    # Copy the input stream's encoding parameters onto the output file.
    codec = int(reader.get(cv2.CAP_PROP_FOURCC))
    fps = int(reader.get(cv2.CAP_PROP_FPS))
    size = (int(reader.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    writer = cv2.VideoWriter('output.mp4', codec, fps, size)

    frame_num = 0
    while frame_num < total:
        ok, frame = reader.read()
        frame_num += 1
        if not ok:
            # Unreadable frame: skip it but still count it toward the total.
            continue
        # The detector wants RGB; OpenCV delivers BGR.
        rgb = cv2.cvtColor(frame.copy(), cv2.COLOR_BGR2RGB)
        # predict() returns (x_center, y_center, width, height, prob) tuples.
        boxes = face_detector.predict(rgb, 0.7)
        # Draw the detections on the original BGR frame.
        frame = annotate_image(frame, boxes)
        print("Writing frame {} / {}".format(frame_num, total))
        writer.write(frame)

    reader.release()
    writer.release()
    cv2.destroyAllWindows()
# NOTE(review): this is the INTERIOR of a webcam-capture loop whose header
# lies outside this chunk — 'frame', 'bboxes', 'max_face_size', 'max_x',
# 'max_y', 'map_width', 'map_height', 't1', 'fps' and the show_* / print_fps
# flags are all defined earlier; the bare 'break' belongs to that loop.
#
# Each iteration draws a top-down "map" of detected faces: horizontal
# position from the box's x, depth from apparent face size.
bg = np.zeros((map_height, map_width, 3))
for box in bboxes:
    # Use the face's area (sqrt of w*h) as a rough proxy for distance
    # from the camera: bigger face => closer.
    actual_face_size = math.sqrt(box[2] * box[3])
    # Closest faces should map to the lowest distance values.
    distance = max_face_size - actual_face_size
    x = box[0] / max_x  # scale to 0-1
    y = distance / max_y  # scale to 0-1
    # print(str(x) + ", " + str(y))
    # Plot a filled white dot, mirrored horizontally and flipped vertically
    # so the map matches the viewer's perspective.
    cv2.circle(bg, (int(map_width - (map_width * x)), int((map_height - map_height * y))), 8, (255, 255, 255), -1)
if show_map:
    cv2.imshow('map', bg)
if show_webcam:
    # Show the annotated camera frame, resized and normalised to 0-1 floats.
    ann_frame = annotate_image(frame, bboxes)
    frame = cv2.resize(ann_frame, (640, 480)) / 255.
    cv2.imshow('annotated_webcam', frame)
if print_fps:
    # Crude running average of the instantaneous frame rate.
    fps = (fps + (1. / (time.time() - t1))) / 2
    print("fps = %f"%(fps))
# 'q' quits the enclosing capture loop.
if cv2.waitKey(1) & 0xFF == ord('q'):
    break
video_capture.release()
cv2.destroyAllWindows()
from faced.utils import annotate_image
from time import process_time

# ------------------------------- For Image -------------------------------
# One-shot detection demo on a still image.
face_detector = FaceDetector()

img = cv2.imread("face_det.jpg")
# faced expects RGB input; OpenCV loads BGR.
rgb_img = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2RGB)

# Receives RGB numpy image (HxWxC) and
# returns (x_center, y_center, width, height, prob) tuples.
bboxes = face_detector.predict(rgb_img, 0.7)

# Use this utils function to annotate the image.
ann_img = annotate_image(img, bboxes)

# Save the annotated copy, then display it until a key is pressed.
cv2.imwrite('face_detd.jpg', ann_img)
cv2.imshow('Result', ann_img)
cv2.waitKey(0)
cv2.destroyAllWindows()

# ------------------------------- For Video -------------------------------
# NOTE(review): the video-processing section continues past the end of this
# chunk; only its setup is visible here.
video = 'Vid.mp4'
cap = cv2.VideoCapture(video)
face_detector = FaceDetector()
frame_width = int(cap.get(3))
from faced import FaceDetector
from faced.utils import annotate_image

# Live webcam face-detection demo.
face_detector = FaceDetector()

cap = cv.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if frame is None:
        print('Failed to capture camera')
        break
    # faced expects RGB input; the captured frame is BGR.
    rgb_img = cv.cvtColor(frame.copy(), cv.COLOR_BGR2RGB)
    # predict() returns (x_center, y_center, width, height, prob) tuples.
    bboxes = face_detector.predict(rgb_img)
    # FIX: annotate and display the original BGR frame — the previous code
    # annotated the RGB copy, so cv.imshow (which interprets arrays as BGR)
    # rendered the red and blue channels swapped.
    ann_img = annotate_image(frame, bboxes)
    cv.imshow('image', ann_img)
    # FIX: without a waitKey call in the loop the HighGUI window never
    # refreshes and the loop had no exit path; press 'q' to quit.
    if cv.waitKey(1) & 0xFF == ord('q'):
        break

# Release the camera and close the window on exit.
cap.release()
cv.destroyAllWindows()
import cv2
from faced import FaceDetector
from faced.utils import annotate_image

# Single-image face-detection demo: load a still image, detect, display.
face_detector = FaceDetector()

img = cv2.imread("frame.jpg")
# Convert from OpenCV's BGR load order to the RGB layout faced expects.
rgb_img = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2RGB)

# Each detection is an (x_center, y_center, width, height, prob) tuple.
bboxes = face_detector.predict(rgb_img)

# Overlay the detections on the original BGR image and show the window
# until any key is pressed.
ann_img = annotate_image(img, bboxes)
cv2.imshow('image', ann_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def on_created(self, event):
    """Watchdog handler: anonymize a freshly uploaded image.

    Detects faces in the new file, writes an annotated copy into the
    anonymous folder, deletes (or moves, when no face is found) the
    original, then triggers a Nextcloud rescan. Best-effort: any failure
    is logged and swallowed so one bad file cannot kill the watcher.
    """
    print(f'event type: {event.event_type} path : {event.src_path}')
    # The create event can fire while the upload is still in progress
    # (Nextcloud writes *.ocTransferId....part files); give it a moment.
    time.sleep(2)
    try:
        # e.g. /var/nextcloud_data/c4p/files/camera_footage/x.png.ocTransferId1983807786.part
        # todo : anonymizing more than 1 face
        # todo : put face over face
        sucessful_anonymization = False
        # Strip any partial-upload suffix after the real extension.
        filetype = find_filetype(event.src_path)
        print("filetype", filetype)
        path_to_file = event.src_path.split(filetype, 1)[0] + filetype
        print("path to file", path_to_file)
        camera_folder = get_camera_folder(path_to_file)
        print("camera_id", camera_folder)
        picture_id = get_picture_id(path_to_file, camera_folder, filetype)
        print("picture_id", picture_id)
        an_path = get_path_for_anonymous_pic(anonymous_folder, camera_folder, picture_id, filetype)
        print("path to anonymous file", an_path)

        face_detector = FaceDetector()
        print("reading image", path_to_file)
        img = cv2.imread(path_to_file)
        # faced expects RGB; OpenCV loads BGR.
        rgb_img = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2RGB)
        # Collapse the duplicated predict() calls: pass the module-level
        # threshold only when it is set.
        if thresh:
            bboxes = face_detector.predict(rgb_img, thresh)
        else:
            bboxes = face_detector.predict(rgb_img)

        if bboxes:  # idiomatic truthiness (was `not bboxes == []`)
            try:
                print("bboxes containing face", bboxes)
                print("creating anonymous picture")
                ann_img = annotate_image(img, bboxes)
                print("write anonymized version to anonymous folder")
                cv2.imwrite(an_path, ann_img)
                sucessful_anonymization = True
            except Exception as ex:
                print(ex)
                print("Anonymizing failed")
                print("writing anonymized version failed")
                sucessful_anonymization = False
            # Only delete the original once the anonymous copy exists.
            if sucessful_anonymization:
                if os.path.exists(path_to_file):
                    os.remove(path_to_file)
                else:
                    print("Tried deleting, but the file does not exist", path_to_file)
        else:
            # No faces found: the picture is already anonymous, just move it.
            print("no face found")
            if os.path.exists(path_to_file):
                os.rename(path_to_file, an_path)
        print("refreshing owncloud")
        # NOTE(review): shell=True on a concatenated path — acceptable only
        # because `cwd` is trusted; prefer subprocess.run([...], shell=False).
        subprocess.call(cwd + "/refresh_nextcloud.sh", shell=True)
    except Exception as e:
        # Top-level boundary of the handler: log and continue watching.
        print(e)
        print("Anonymizing failed")
import logging

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(asctime)s %(message)s')

# Local-testing configuration.
model_path = "../faced/models/"
face_detector = FaceDetector()
img_path = "faces.jpg"
thresh = 0.8

bgr_img = cv2.imread(img_path)
# The detector consumes RGB; OpenCV decodes to BGR.
rgb_img = cv2.cvtColor(bgr_img.copy(), cv2.COLOR_BGR2RGB)

# Time the detection pass. predict() receives an RGB numpy image (HxWxC)
# and returns (x_center, y_center, width, height, prob) tuples.
logger.info('starting face detection ...')
start_time = time()
face_detection_list = face_detector.predict(frame=rgb_img, thresh=thresh)
detected_faces = len(face_detection_list)
end_time = time()
duration = end_time - start_time
logger.info('face detection took %.2f seconds', duration)
logger.info('found boxes: %s', detected_faces)

increase_box_percentage = 0.5

# Log every detection tuple.
for detection in face_detection_list:
    logger.info(detection)

# Draw the boxes on the original BGR image and save the result.
ann_img = utils.annotate_image(bgr_img, face_detection_list)
cv2.imwrite("found_boxes.jpg", ann_img)