Code Example #1
def blur_face(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock
    # loop over the frames from the video stream
    while True:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 400 pixels
        frame = vs.read()
        frame = imutils.resize(frame, width=400)

        # grab the dimensions of the frame and then construct a blob
        # from it
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
                                     (104.0, 177.0, 123.0))

        # pass the blob through the network and obtain the face detections
        net.setInput(blob)
        detections = net.forward()

        # loop over the detections
        for i in range(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with
            # the detection
            confidence = detections[0, 0, i, 2]

            # filter out weak detections by ensuring the confidence is
            # greater than the minimum confidence
            if confidence > args["confidence"]:
                # compute the (x, y)-coordinates of the bounding box for
                # the object
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # extract the face ROI
                face = frame[startY:endY, startX:endX]

                # check to see if we are applying the "simple" face
                # blurring method
                if args["method"] == "simple":
                    face = anonymize_face_simple(face, factor=3.0)

                # otherwise, we must be applying the "pixelated" face
                # anonymization method
                else:
                    face = anonymize_face_pixelate(face, blocks=args["blocks"])

                # store the blurred face in the output image
                frame[startY:endY, startX:endX] = face

        # show the output frame
        # cv2.imshow("Frame", frame)
        # key = cv2.waitKey(1) & 0xFF
        with lock:
            outputFrame = frame.copy()
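Every snippet in this listing calls anonymize_face_simple and anonymize_face_pixelate without defining them. The sketch below is an assumption inferred only from how they are invoked here (a blur-strength factor and a blocks count); the actual implementations in each project may differ.

import cv2
import numpy as np

def anonymize_face_simple(image, factor=3.0):
    # derive a Gaussian kernel size from the face dimensions so that
    # larger faces receive proportionally stronger blurring
    (h, w) = image.shape[:2]
    kW = int(w / factor)
    kH = int(h / factor)
    # kernel dimensions must be odd (and at least 1) for cv2.GaussianBlur
    kW = kW - 1 if kW % 2 == 0 else kW
    kH = kH - 1 if kH % 2 == 0 else kH
    return cv2.GaussianBlur(image, (max(kW, 1), max(kH, 1)), 0)

def anonymize_face_pixelate(image, blocks=3):
    # divide the face into a blocks x blocks grid and flood each cell
    # with its mean color, producing the familiar pixelated censor effect
    (h, w) = image.shape[:2]
    xSteps = np.linspace(0, w, blocks + 1, dtype="int")
    ySteps = np.linspace(0, h, blocks + 1, dtype="int")
    for i in range(1, len(ySteps)):
        for j in range(1, len(xSteps)):
            (startX, startY) = (xSteps[j - 1], ySteps[i - 1])
            (endX, endY) = (xSteps[j], ySteps[i])
            roi = image[startY:endY, startX:endX]
            (B, G, R) = [int(x) for x in cv2.mean(roi)[:3]]
            cv2.rectangle(image, (startX, startY), (endX, endY),
                          (B, G, R), -1)
    return image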
Code Example #2
    def predict(self, payload):
        # decode the input image from the base64 payload, clone it, and grab
        # the image spatial dimensions
        img = imread(io.BytesIO(base64.b64decode(
            payload["base64"])))  # RGB numpy array of shape (height, width, 3)
        image = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        orig = image.copy()
        (h, w) = image.shape[:2]

        # construct a blob from the image
        blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300),
                                     (104.0, 177.0, 123.0))

        # pass the blob through the network and obtain the face detections
        print("[INFO] computing face detections...")
        self.net.setInput(blob)
        detections = self.net.forward()

        # loop over the detections
        for i in range(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with the
            # detection
            confidence = detections[0, 0, i, 2]

            # filter out weak detections by ensuring the confidence is greater
            # than the minimum confidence (`args` here is a module-level dict
            # not shown in this snippet)
            if confidence > args["confidence"]:
                # compute the (x, y)-coordinates of the bounding box for the
                # object
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # extract the face ROI
                face = image[startY:endY, startX:endX]

                # check to see if we are applying the "simple" face blurring
                # method
                if args["method"] == "simple":
                    face = anonymize_face_simple(face, factor=3.0)

                # otherwise, we must be applying the "pixelated" face
                # anonymization method
                else:
                    face = anonymize_face_pixelate(face, blocks=args["blocks"])

                # store the blurred face in the output image
                image[startY:endY, startX:endX] = face

        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        img = Image.fromarray(image)

        im_file = io.BytesIO()
        img.save(im_file, format="PNG")
        im_bytes = base64.b64encode(im_file.getvalue()).decode("utf-8")

        return im_bytes
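A hypothetical round trip through this predictor. The enclosing class and its constructor are not shown in the snippet, so the predictor instance and the file names here are assumptions.

# Hypothetical usage sketch: base64-encode an image, run predict(),
# and write the returned PNG back to disk.
with open("input.jpg", "rb") as f:
    payload = {"base64": base64.b64encode(f.read()).decode("utf-8")}

result_b64 = predictor.predict(payload)  # `predictor`: an instance of the class above

with open("anonymized.png", "wb") as f:
    f.write(base64.b64decode(result_b64))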
Code Example #3
File: blur_face.py  Project: tonhathuy/PTS-clone-API
def blur(img_name, load_folder, save_folder):
    load_path = os.path.join(load_folder, img_name)
    print(load_path)
    save_path = os.path.join(save_folder, img_name)

    print("[INFO] loading face detector model...")
    prototxtPath = "face_detector/deploy.prototxt"
    weightsPath = "face_detector/res10_300x300_ssd_iter_140000.caffemodel"
    net = cv2.dnn.readNet(prototxtPath, weightsPath)

    # load the input image from disk, clone it, and grab the image spatial
    # dimensions
    image = cv2.imread(load_path)
    orig = image.copy()
    (h, w) = image.shape[:2]

    # construct a blob from the image
    blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0))

    # pass the blob through the network and obtain the face detections
    print("[INFO] computing face detections...")
    net.setInput(blob)
    detections = net.forward()

    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with the
        # detection
        confidence = detections[0, 0, i, 2]

        # filter out weak detections by ensuring the confidence is greater
        # than the minimum confidence
        if confidence > 0.5:
            # compute the (x, y)-coordinates of the bounding box for the
            # object
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")

            # extract the face ROI
            face = image[startY:endY, startX:endX]

            face = anonymize_face_pixelate(face, blocks=10)

            # store the blurred face in the output image
            image[startY:endY, startX:endX] = face

    # save the anonymized image to disk and return its path
    cv2.imwrite(save_path, image)
    return save_path
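One caveat shared by all of these snippets: the SSD detector can return box coordinates slightly outside the frame, which yields an empty face ROI and makes the anonymize helpers fail. A defensive variant (a hypothetical guard, not present in any of the originals) clamps the box before slicing:

# Hypothetical guard, inserted right after box.astype("int"): clamp the
# detection box to the image bounds and skip degenerate ROIs.
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w, endX), min(h, endY))
if endX <= startX or endY <= startY:
    continue  # empty ROI; nothing to anonymize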
Code Example #4
File: app.py  Project: rajeevnag/video_editor
def getFirstWidthHeight(cap, net):
    ret, frame = cap.read()
    if not ret:
        # end of stream: return placeholder width/height/image and status code 1
        return -1, -1, -1, 1
    image = frame
    (h, w) = image.shape[:2]

    # construct a blob from the image
    blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0))

    # pass the blob through the network and obtain the face detections
    net.setInput(blob)
    detections = net.forward()

    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with the
        # detection
        confidence = detections[0, 0, i, 2]

        # filter out weak detections by ensuring the confidence is greater
        # than the minimum confidence
        if confidence > 0.5:
            # compute the (x, y)-coordinates of the bounding box for the
            # object
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")

            # extract the face ROI
            face = image[startY:endY, startX:endX]

            face = anonymize_face_pixelate(face, blocks=20)

            # store the blurred face in the output image
            image[startY:endY, startX:endX] = face

    # display the original image and the output image with the blurred
    # face(s) side by side
    # output = np.hstack([orig, image])
    # cv2.imshow("Output", output)
    # cv2.imshow("Output",image)
    # cv2.waitKey(0)
    return w, h, image, 0
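A plausible driver for getFirstWidthHeight: the returned width and height size a cv2.VideoWriter, and the status code signals end-of-stream. The file names, FPS, and codec below are assumptions, and the project's per-frame routine for subsequent frames is not shown in the snippet.

# Hypothetical driver: use the first frame to size the writer.
cap = cv2.VideoCapture("input.mp4")
net = cv2.dnn.readNet("face_detector/deploy.prototxt",
                      "face_detector/res10_300x300_ssd_iter_140000.caffemodel")

w, h, frame, code = getFirstWidthHeight(cap, net)
if code == 0:
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    writer = cv2.VideoWriter("output.mp4", fourcc, 30.0, (w, h))
    writer.write(frame)
    # ... process the remaining frames the same way, then:
    writer.release()
cap.release()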
Code Example #5
File: main.py  Project: pribaqnet/PixelFaceBot
def censura(imgpath):

	# VARIABLES
	face = "face_detector" # path to the face detector model directory
	method = "pixelated" # censoring type: "simple" or "pixelated"
	blocks = 20 # number of blocks in pixelated mode
	preconfidence = 0.5 # minimum detection confidence (between 0 and 1)

	# FACE DETECTION SCRIPT
	prototxtPath = os.path.sep.join([face, "deploy.prototxt"])
	weightsPath = os.path.sep.join([face, "res10_300x300_ssd_iter_140000.caffemodel"])
	net = cv2.dnn.readNet(prototxtPath, weightsPath)
	image = cv2.imread(imgpath)
	orig = image.copy()
	(h, w) = image.shape[:2]
	blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0))
	net.setInput(blob)
	detections = net.forward()

	for i in range(0, detections.shape[2]):
		confidence = detections[0, 0, i, 2]
		if confidence > preconfidence:
			box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
			(startX, startY, endX, endY) = box.astype("int")

			# EXTRACT THE FACE (ROI)
			face = image[startY:endY, startX:endX]

			# APPLY THE FACE CENSORING
			if method == "simple":
				face = anonymize_face_simple(face, factor=3.0)
			else:
				face = anonymize_face_pixelate(face,
					blocks=blocks)

			# PASTE THE CENSORED FACE BACK INTO THE IMAGE
			image[startY:endY, startX:endX] = face

	# SAVE (overwrites the input image in place)
	cv2.imwrite(imgpath, image)

	# LOG
	print("New censored image!")
Code Example #6
		# extract the confidence (i.e., probability) associated with
		# the detection
		confidence = detections[0, 0, i, 2]

		# filter out weak detections by ensuring the confidence is
		# greater than the minimum confidence
		if confidence > args["confidence"]:
			# compute the (x, y)-coordinates of the bounding box for
			# the object
			box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
			(startX, startY, endX, endY) = box.astype("int")

			# extract the face ROI
			face = frame[startY:endY, startX:endX]

			# check to see if we are applying the "simple" face
			# blurring method
			if args["method"] == "simple":
				face = anonymize_face_simple(face, factor=3.0)

			# otherwise, we must be applying the "pixelated" face
			# anonymization method
			else:
				face = anonymize_face_pixelate(face,
					blocks=args["blocks"])

			# store the blurred face in the output image
			frame[startY:endY, startX:endX] = face

	cv2.imshow("Frame", frame)
	key = cv2.waitKey(1) & 0xFF

	if key == ord("q"):
		break

cv2.destroyAllWindows()
vs.stop()
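This fragment begins inside the per-detection loop; the frame-grabbing loop around it is not shown. Below is a minimal sketch of the context it assumes, modeled on Code Example #1. The argparse keys ("confidence", "method", "blocks"), the camera source, and the model paths are assumptions.

# Minimal sketch of the assumed surrounding loop for Code Example #6.
import time
import cv2
import imutils
import numpy as np
from imutils.video import VideoStream

net = cv2.dnn.readNet("face_detector/deploy.prototxt",
                      "face_detector/res10_300x300_ssd_iter_140000.caffemodel")
vs = VideoStream(src=0).start()
time.sleep(2.0)  # let the camera sensor warm up

while True:
	# grab a frame, build a blob, and run the face detector
	frame = vs.read()
	frame = imutils.resize(frame, width=400)
	(h, w) = frame.shape[:2]
	blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
		(104.0, 177.0, 123.0))
	net.setInput(blob)
	detections = net.forward()

	for i in range(0, detections.shape[2]):
		...  # per-detection body shown in the fragment above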