Example #1
def process_video():
    """Process video and handle user commands."""
    # Feed from computer camera with threading
    cap = video.VideoStream(src=0, resolution=(200, 150)).start()

    # Define the codec and create the VideoWriter (size must match the frames)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # FourCC must be the lower-case 'mp4v'
    out = cv2.VideoWriter('output.mp4', fourcc, 20.0, (200, 150))
    font = cv2.FONT_HERSHEY_SIMPLEX  # assumed; `font` is not defined in this snippet
    while True:
        # Read image, anonymize, and wait for user quit.
        image = cap.read()
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        nb_rects = anonymizer(image, gray)
        image = cv2.convertScaleAbs(image, alpha=1.0, beta=0)
        # NB: the frame is only 200x150, so the text origin must lie inside it
        if nb_rects > 3:
            image = cv2.putText(image, 'GOOD', (10, 140), font, 1,
                                (255, 255, 255), 2, cv2.LINE_AA)
        else:
            image = cv2.putText(image, 'BAD', (10, 140), font, 1, (0, 0, 255),
                                2, cv2.LINE_AA)
        out.write(image)
        cv2.imshow("Realtime Anon", image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    out.release()
    cv2.destroyAllWindows()
    cap.stop()
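
The `anonymizer` helper used above is not part of this snippet. A minimal sketch of what it might look like, assuming OpenCV's bundled Haar cascade for face detection (everything below is an assumption, not the original helper):

import cv2

# hypothetical anonymizer: blur every face the Haar cascade finds in `gray`
# and report how many rectangles were processed
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

def anonymizer(image, gray):
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    for (x, y, w, h) in faces:
        roi = image[y:y + h, x:x + w]
        # a heavy Gaussian blur makes the face region unrecognizable in place
        image[y:y + h, x:x + w] = cv2.GaussianBlur(roi, (51, 51), 30)
    return len(faces)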
Example #2
def facial_landmarks():
    """Detect facial landmarks on the webcam feed and display them."""
    # Feed from computer camera with threading
    cap = video.VideoStream(src=0).start()

    while True:
        # Grab an image from the webcam
        image = cap.read()

        # Convert the image to grayscale
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        if face_detection_method == 'hog':
            hog_detector(image, gray)
        else:
            dl_detector(image, gray)

        # show the output frame
        # adding brightness and contrast -> α·p(i,j) + β, where p(i,j) is the pixel value at each point
        image = cv2.convertScaleAbs(image, alpha=1.0, beta=0)
        cv2.imshow("Facial Landmarks", image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cv2.destroyAllWindows()
    cap.stop()
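
`hog_detector` and `dl_detector` are defined elsewhere in the source. A sketch of the HOG branch, assuming dlib's frontal face detector and a pre-trained 68-point landmark predictor (the model file name and the helper itself are assumptions):

import cv2
import dlib

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

def hog_detector(image, gray):
    # the second argument (0) means no upsampling before detection
    for rect in detector(gray, 0):
        shape = predictor(gray, rect)
        # draw one dot per landmark point
        for i in range(shape.num_parts):
            p = shape.part(i)
            cv2.circle(image, (p.x, p.y), 1, (0, 255, 0), -1)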
Example #3
def stream(camera_url):
    print('[INFO] starting video stream...')

    if camera_url is None:
        print('[ERROR] no camera URL was provided')
        return None

    vs = video.VideoStream(camera_url).start()
    vd = stream_processing(vs)
    return vd
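
The snippet does not show how `stream` is called; presumably something along these lines:

# hypothetical usage; the RTSP URL is a placeholder
processed = stream('rtsp://192.168.1.10:554/live')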
Example #4
def face_detection_realtime():
    """Detect faces and stream annotated JPEG frames to the client."""
    # Feed from computer camera with threading
    # total = video.count_frames(video_path)
    cap = video.VideoStream(0).start()
    # cap = cv2.VideoCapture(video_path)

    count = 0
    while True:
        try:
            count += 1
            print(f"{count}")
            # Grab an image frame from the webcam
            # _, img = cap.read()
            img = cap.read()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            # https://docs.opencv.org/trunk/d6/d0f/group__dnn.html#ga29f34df9376379a603acd8df581ac8d7
            inputBlob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 1,
                                              (300, 300), (104, 177, 123))
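            # (104, 177, 123) is presumably the per-channel BGR mean that the
            # pretrained SSD face detector expects to be subtracted from the blob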

            detector.setInput(inputBlob)
            detections = detector.forward()
            img, age, gender = find_faces(img, detections, gray)

            # return the stream to the client, one JPEG-encoded frame at a time
            imgencode = cv2.imencode('.jpg', img)[1]
            stringData = imgencode.tobytes()  # tostring() is deprecated
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + stringData + b'\r\n')
            yield age
            yield gender
            # cv2.imshow('img', img)
            # if cv2.waitKey(1) & 0xFF == ord("q"):
            #     break
        except Exception:
            # skip frames that fail to read or decode; a bare `except: pass`
            # would also swallow KeyboardInterrupt and hide real bugs
            continue

    # only reached if the loop is ever broken out of
    cap.stop()
    cv2.destroyAllWindows()
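
The generator above emits multipart JPEG frames, which is the shape a Flask MJPEG route expects. A sketch of how it is typically wired up (the route name and `app` setup are assumptions):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    # multipart/x-mixed-replace lets the browser replace each frame in place
    return Response(face_detection_realtime(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')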
Example #5
def face_detection_realtime():
    """Detect faces with the DNN detector and display the result."""

    # Feed from computer camera with threading
    cap = video.VideoStream(src=0).start()

    while True:

        # Grab an image frame from the webcam
        img = cap.read()

        # https://docs.opencv.org/trunk/d6/d0f/group__dnn.html#ga29f34df9376379a603acd8df581ac8d7
        inputBlob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 1,
                                          (300, 300), (104, 177, 123))

        detector.setInput(inputBlob)
        detections = detector.forward()
        find_faces(img, detections)
        # a window is needed for waitKey() to catch the key press; the window
        # name is an assumption, since the snippet itself had no imshow() call
        cv2.imshow("Face Detection", img)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    cv2.destroyAllWindows()
    cap.stop()
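
`find_faces` is also external to the snippet. Assuming `detections` is the 4-D array returned by OpenCV's SSD face detector, the two-argument variant used here might reduce to the following (a hedged sketch; `conf_threshold` is invented for illustration):

import cv2
import numpy as np

def find_faces(img, detections, conf_threshold=0.5):
    # draw a rectangle for every detection scored above the threshold
    h, w = img.shape[:2]
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence < conf_threshold:
            continue
        # detection coordinates are normalized; scale back to pixel space
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (x1, y1, x2, y2) = box.astype(int)
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)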
Example #6
def process_video():
    """Process video and handle user commands."""
    # Feed from computer camera with threading
    cap = video.VideoStream(src=0, resolution=(200, 150)).start()
    font = cv2.FONT_HERSHEY_SIMPLEX  # assumed; `font` is not defined in this snippet

    while True:
        # Read image, anonymize, and wait for user quit.
        image = cap.read()
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        nb_rects = anonymizer(image, gray)
        image = cv2.convertScaleAbs(image, alpha=1.0, beta=0)
        # NB: the frame is only 200x150, so the text origin must lie inside it
        if nb_rects > 5:
            image = cv2.putText(image, 'GOOD', (10, 140), font, 1,
                                (255, 255, 255), 2, cv2.LINE_AA)
        else:
            image = cv2.putText(image, 'BAD', (10, 140), font, 1, (0, 0, 255),
                                2, cv2.LINE_AA)
        cv2.imshow("Realtime Anon", image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cv2.destroyAllWindows()
    cap.stop()
Example #7
    def run(self):
        print("started facial recognition")
        vs = video.VideoStream(src=0).start()
        match_found = False
        while not match_found and self.heartbeat():
            frame = vs.read()

            # raspberry pi camera sensor is flipped 180
            frame = cv2.rotate(frame, cv2.ROTATE_180)

            # convert from bgr to rgb then shrink to speed up detection
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            rgb = imutils.resize(rgb, width=self.RESIZE)

            # get bounding box coordinates for each face in image
            boxes = face_recognition.face_locations(
                rgb, model=self.DETECTION_METHOD)
            # compute facial embeddings for each face
            encodings = face_recognition.face_encodings(rgb, boxes)

            # look for user in faces found in video frame
            for encoding in encodings:
                # find how many user face encodings match with the captured face
                matches = face_recognition.compare_faces(
                    self.user_encodings, encoding)
                perc = matches.count(True) / len(matches)
                print("{:.0f}% match for user found".format(perc * 100))
                # accept when at least 25% of the user encodings match
                if perc >= 0.25:
                    # match found
                    print("sending unlock request")
                    match_found = True
                    break
        if match_found:
            # execute callback
            self.callback()
        self.killed()
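
`self.user_encodings` is populated elsewhere; it would typically be built once from reference photos of the user, along these lines (file names are placeholders):

import face_recognition

# hypothetical enrollment step: one 128-d encoding per reference photo
user_encodings = []
for path in ['user_1.jpg', 'user_2.jpg']:
    image = face_recognition.load_image_file(path)
    encodings = face_recognition.face_encodings(image)
    if encodings:
        user_encodings.append(encodings[0])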
Example #8
# initialize the output frame and a lock used to ensure thread-safe
# exchanges of the output frames (useful when multiple browsers/tabs
# are viewing the stream)
output_frame = None
lock = threading.Lock()

# Get the relative path to this file (we will use it later)
FILE_PATH = os.path.dirname(os.path.realpath(__file__))

# initialize the facial recognizer
f_r = facial_recognition.FacialRecognizer()

# * ---------- Create App --------- *
app = Flask(__name__)
CORS(app, support_credentials=True)

# Initialize the video stream
video_stream = video.VideoStream(src=0).start()
time.sleep(2.0)
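
# `output_frame` and `lock` are usually consumed by a generator like the one
# below; a sketch of the common pattern, assuming frames are published into
# `output_frame` by a worker thread:
def generate():
    global output_frame, lock
    while True:
        with lock:
            if output_frame is None:
                continue
            flag, encoded = cv2.imencode('.jpg', output_frame)
            if not flag:
                continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' +
               bytearray(encoded) + b'\r\n')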

# # * ---------- DATABASE CONFIG --------- *
# DATABASE_USER = os.environ['DATABASE_USER']
# DATABASE_PASSWORD = os.environ['DATABASE_PASSWORD']
# DATABASE_HOST = os.environ['DATABASE_HOST']
# DATABASE_PORT = os.environ['DATABASE_PORT']
# DATABASE_NAME = os.environ['DATABASE_NAME']


def DATABASE_CONNECTION():
    try:
        return psycopg2.connect(
            user="******",
            password="",