Example #1
def video_stream():
    global video_camera
    global global_frame

    if video_camera is None:
        video_camera = VideoCamera()

    while True:
        frame = video_camera.get_frame()

        if frame is not None:
            global_frame = frame
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
        else:
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + global_frame + b'\r\n\r\n')
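The generator above only produces the multipart body; a hedged sketch of the Flask route that would typically consume it (the /video_viewer route name and the app object are assumptions, not part of the original example):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_viewer')
def video_viewer():
    # The boundary in the mimetype must match the b'--frame' marker
    # emitted by video_stream() above.
    return Response(video_stream(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')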
Example #2
def camera_handler(camera: VideoCamera):
    while True:
        frame, face = camera.get_frame()
        # frame, faces = camera.get_frames()
        # if hasattr(face, 'shape'):
        # if hasattr(faces, 'append'):
        #     [Detected_face.append(face) for face in faces]
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
Example #3
def video_feed():
    return Response(gen(VideoCamera(source=-1)),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
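Every example here relies on a VideoCamera wrapper whose get_frame() returns JPEG-encoded bytes; a minimal OpenCV-based sketch of such a class, matching the source= keyword used above (the internals are an assumption, not the original implementation):

import cv2

class VideoCamera:
    def __init__(self, source=0):
        # cv2.VideoCapture accepts a device index (0, 1, -1) or a file path/URL
        self.video = cv2.VideoCapture(source)

    def __del__(self):
        self.video.release()

    def get_frame(self):
        success, image = self.video.read()
        if not success:
            return None
        # Encode the raw BGR frame as JPEG bytes for the multipart stream
        ret, jpeg = cv2.imencode('.jpg', image)
        return jpeg.tobytes()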
Example #4
                (startX, startY) = (max(0, startX), max(0, startY))
                (endX, endY) = (min(w - 1, endX), min(h - 1, endY))
                # Preprocessing the live feed frame
                face = img[startY:endY, startX:endX]
                face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
                face = cv2.resize(face, (224, 224))
                face = img_to_array(face)
                face = preprocess_input(face)
                face = np.expand_dims(face, axis=0)
                # Predicting the frame from the loaded model
                (mask, withoutMask) = model.predict(face)[0]

                label = "Mask" if mask > withoutMask else "No Mask"
                color = (0, 255, 0) if label == "Mask" else (0, 0, 255)

                label = "{}: {:.2f}%".format(label,
                                             max(mask, withoutMask) * 100)

                cv2.putText(img, label, (startX, startY - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
                cv2.rectangle(img, (startX, startY), (endX, endY), color, 10)
        cv2.imshow("Frame", img)
        interrupt = cv2.waitKey(10)
        if interrupt & 0xFF == 27:
            break


# Calling the above function
gen(VideoCamera())
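The fragment above starts after a face detector has already produced (startX, startY, endX, endY); a hedged sketch of how such boxes are commonly obtained with OpenCV's DNN face detector (the Caffe model files and the 0.5 confidence threshold are assumptions):

import cv2
import numpy as np

net = cv2.dnn.readNetFromCaffe('deploy.prototxt',
                               'res10_300x300_ssd_iter_140000.caffemodel')

(h, w) = img.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 1.0,
                             (300, 300), (104.0, 177.0, 123.0))
net.setInput(blob)
detections = net.forward()

for i in range(detections.shape[2]):
    confidence = detections[0, 0, i, 2]
    if confidence > 0.5:
        # Scale the normalized box back to the original frame size
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")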
Example #5
import time
import threading

from flask import Flask

from Camera import VideoCamera
from logFiles import Log
from MessageFormat import Format
from Email import Emails
'''
Acknowledgements:
-   Hacker Shack
-   PyImageSearch blog posts
-   StackOverflow community 
'''
record = None
DateNTime = time.asctime(time.localtime(
    time.time()))  # Time stamp on the image
logAction = Log(DateNTime)  # logs: errors or successes
em = Emails(Format.message)  # getting the image captured
piVCam = VideoCamera(DateNTime, record)
previousTime = 0  # temporary storage of the time
app = Flask(__name__)  # creation of Webserver
Threading = False


def get_PersonInFrame():
    # Stop sending emails about the detected person after 9 minutes
    global timeCaptured
    timeCaptured = time.time()
    threadLock = threading.Lock()
    print("Person in frame" + str(record))
    while True:
        while record is not True:
            try:
                frame, found_person = piVCam.get_PersonInFrame()
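The fragment cuts off inside the try block; a hedged sketch of the throttling that the comment and timeCaptured suggest, where repeat alerts about the same person are suppressed for 9 minutes (the em.send() call and the except handling are hypothetical, not the project's actual API):

            try:
                frame, found_person = piVCam.get_PersonInFrame()
                # Send at most one alert every 9 minutes (540 seconds)
                if found_person and (time.time() - timeCaptured) > 9 * 60:
                    em.send(frame)             # hypothetical Emails method
                    timeCaptured = time.time()
            except Exception as exc:
                print(exc)                     # the real code presumably logs via logAction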
Example #6
def color_detect():
    return Response(detect(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
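The detect() generator referenced here is not shown; a hedged sketch of what a color-detection generator might look like (the HSV range, the imdecode round-trip, and the overall logic are assumptions):

import cv2
import numpy as np

def detect(camera):
    # Hedged sketch: highlight one HSV color range (blue-ish here) in every frame
    lower = np.array([100, 150, 50])
    upper = np.array([140, 255, 255])
    while True:
        jpeg_bytes = camera.get_frame()
        if jpeg_bytes is None:
            continue
        frame = cv2.imdecode(np.frombuffer(jpeg_bytes, np.uint8), cv2.IMREAD_COLOR)
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lower, upper)
        result = cv2.bitwise_and(frame, frame, mask=mask)
        ret, out = cv2.imencode('.jpg', result)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + out.tobytes() + b'\r\n\r\n')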
Example #7
def video_streamer():
    return Response(
        camera_handler(VideoCamera(r'F:\FSR\FERNLV\AFW1.mp44')),
        # return Response(camera_handler(VideoCamera(1)),
        mimetype='multipart/x-mixed-replace; boundary=frame')
Example #8
from flask import Flask, render_template, Response
from Camera import VideoCamera
'''
 Having a problem with the webcam: it does not retrieve any frames even though the webcam is active. Two days spent on it and no clue.
  Better not to continue with this approach. Start with Qt and try displaying the detected face instead.
'''

app = Flask(__name__)

Detected_face = []
VID = VideoCamera(1)


def camera_handler(camera: VideoCamera):
    while True:
        frame, face = camera.get_frame()
        # frame, faces = camera.get_frames()
        # if hasattr(face, 'shape'):
        # if hasattr(faces, 'append'):
        #     [Detected_face.append(face) for face in faces]
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')


def display_face():
    while True:
        for image in Detected_face:
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + image + b'\r\n\r\n')
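The app object is created above but no routes are shown; a hedged sketch of how camera_handler() and display_face() would typically be wired to Flask routes (route paths and template name are assumptions):

@app.route('/')
def index():
    return render_template('index.html')   # template name is an assumption

@app.route('/video_feed')
def video_feed():
    # Stream the live camera frames from camera_handler()
    return Response(camera_handler(VID),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

@app.route('/faces')
def faces():
    # Stream the cropped faces accumulated in Detected_face
    return Response(display_face(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == '__main__':
    app.run(host='0.0.0.0', debug=False)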