Example #1
import argparse
import time

import cv2
from imutils.video import FPS, VideoStream

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p",
                "--picamera",
                type=int,
                default=1,
                help="whether or not the Raspberry Pi camera should be used")
args = vars(ap.parse_args())

# initialize the video stream and allow the camera sensor to warm up
vs = VideoStream(usePiCamera=args["picamera"] > 0,
                 resolution=(320, 240),
                 framerate=60).start()
time.sleep(2.0)
fps = FPS().start()

# loop over the frames from the video stream
while True:
    # grab the frame from the threaded video stream
    frame = vs.read()

    # No need to resize if the stream is already the correct size...
    #frame = imutils.resize(frame, width=320)

    # Rotate (we could do that during read to improve performance)
    # frame = imutils.rotate(frame, 180)
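
The loop above is cut off before the frame is ever displayed; a minimal sketch of how such a loop is typically finished, reusing the same vs and fps objects, looks like this:

    # show the frame, watch for a quit key, and update the FPS counter
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break
    fps.update()

# stop the timer, report throughput, and release resources
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
cv2.destroyAllWindows()
vs.stop()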
Example #2
def main(prototxt, model, min_confidence=0.5):
    # load our serialized model from disk
    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(prototxt, model)

    # initialize the video stream, allow the camera sensor to warm up,
    # and initialize the FPS counter
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    time.sleep(2.0)
    fps = FPS().start()

    # NN: open a csv file to write data; 'a' to append and not overwrite
    path_to_data = "../data/output"
    results = DataHandler(measure="persons", path=path_to_data, method='csv')
    results.makefile()

    # loop over the frames from the video stream
    while True:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 400 pixels
        frame = vs.read()
        frame = imutils.resize(frame, width=400)

        # grab the frame dimensions and convert it to a blob
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843,
                                     (300, 300), 127.5)

        # pass the blob through the network and obtain the detections and
        # predictions
        net.setInput(blob)
        detections = net.forward()

        # loop over the detections
        for i in np.arange(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with
            # the prediction
            confidence = detections[0, 0, i, 2]

            # filter out weak detections by ensuring the `confidence` is
            # greater than the minimum confidence
            if confidence > min_confidence:
                # extract the index of the class label from the
                # `detections`, then compute the (x, y)-coordinates of
                # the bounding box for the object
                idx = int(detections[0, 0, i, 1])

                # if the predicted class label is in the set of classes
                # we want to ignore then skip the detection
                if CLASSES[idx] in IGNORE:
                    continue

                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # draw the prediction on the frame
                label = CLASSES[idx]
                prediction = "{}: {:.2f}%".format(label, confidence * 100)
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                              COLORS[idx], 2)
                y = startY - 15 if startY - 15 > 15 else startY + 15
                cv2.putText(frame, prediction, (startX, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

                # NN: write to output file
                now = int(time.time())
                data = "{},{},{},{:.2f},{},{},{},{}".format(
                    now, label, i, confidence, startX, startY, endX, endY)
                results.write(data)

        # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # update the FPS counter
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # NN: close the outfile
    results.close()

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
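
DataHandler is not part of imutils and its implementation is not shown; a minimal CSV-backed sketch that is consistent with how it is called above (the makefile, write and close methods are inferred from those calls, not taken from the original code) could look like this:

import os

class DataHandler:
    # minimal sketch of the assumed helper, not the original implementation
    def __init__(self, measure, path, method='csv'):
        self.filename = os.path.join(path, "{}.{}".format(measure, method))
        self.handle = None

    def makefile(self):
        # open in append mode so existing measurements are not overwritten
        self.handle = open(self.filename, 'a')

    def write(self, data):
        self.handle.write(data + "\n")

    def close(self):
        self.handle.close()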
Example #3
# MobileNet SSD Classes
CLASSES = [
    "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
    "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike",
    "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"
]

print("load the model")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

# dict.get(key, default) returns the default if the specified key doesn't exist,
# which means this evaluates to False when no "input" argument was supplied
if not args.get("input", False):
    print("starting video stream")
    vs = VideoStream(src=0).start()
    # webcam warmup time
    time.sleep(2.0)
else:
    print("opening the specified video file")
    vs = cv2.VideoCapture(args["input"])

writer = None
W = None
H = None

cent_tracker = CentroidTracker(max_frames_to_disappear=40, max_distance=50)
trackers = []
trackable_objs_dict = {}

total_frames = 0
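
writer, W and H start out as None and are normally filled in lazily once frames begin arriving; a sketch of that initialization at the top of the frame loop (args["output"] is an assumed optional argument that is not shown in the snippet) is:

while True:
    # grab the next frame (cv2.VideoCapture.read() returns a (grabbed, frame) tuple)
    frame = vs.read()
    frame = frame[1] if args.get("input", False) else frame

    # record the frame dimensions the first time around
    if W is None or H is None:
        (H, W) = frame.shape[:2]

    # lazily create the video writer if an output path was requested
    if writer is None and args.get("output", False):
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter(args["output"], fourcc, 30, (W, H), True)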
Example #4
    return img_gamma_corrected


# initialize the facial recognition libraries
print("[INFO] cargando librería de detección facial...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

# grab the landmark indexes corresponding to the eyes and the mouth
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
(mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]

# start the video capture
print("[INFO] inciando video...")
vs = VideoStream(0).start()
time.sleep(1.0)

# process the frames received from the camera
while True:
    frame = vs.read()
    frame = imutils.resize(frame, width=450)
    frame = gammaCorrection(frame)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # detect the faces in the image
    rects = detector(gray, 0)

    # process each detected face, locating the facial landmarks
    # and extracting the eye and mouth regions
    for rect in rects:
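
Only the final return statement of gammaCorrection survives at the top of this example; a typical LUT-based implementation consistent with that return value, with the frame passed in explicitly and the gamma value chosen arbitrarily, is:

import cv2
import numpy as np

def gammaCorrection(img, gamma=1.5):
    # build a lookup table mapping pixel values [0, 255] to their
    # gamma-corrected values, then apply it to the whole image at once
    inv_gamma = 1.0 / gamma
    table = np.array([((i / 255.0) ** inv_gamma) * 255
                      for i in range(256)]).astype("uint8")
    img_gamma_corrected = cv2.LUT(img, table)
    return img_gamma_corrected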
Example #5
def startup_event():
    global vs
    # initialize the video stream and allow the camera sensor to warm up
    print("[INFO] camera sensor warming up...")
    vs = VideoStream(usePiCamera=usePiCamera, resolution=(960, 720)).start()
    time.sleep(2.0)
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64, help="max buffer size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the "green"
# ball in the HSV color space, then initialize the
# list of tracked points
greenLower = (37, 88, 69)
greenUpper = (158, 215, 255)
pts = deque(maxlen=args["buffer"])

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    vs = VideoStream(src=0, usePiCamera=True, framerate=30).start()

# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])

GPIO.setmode(GPIO.BCM)
TRIG = 23
ECHO = 24
GPIO.setup(TRIG, GPIO.OUT)
GPIO.setup(ECHO, GPIO.IN)

# allow the camera or video file to warm up
time.sleep(2.0)

# keep looping
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model',
        help=
        'Path of the detection model, it must be a SSD model with postprocessing operator.',
        required=True)
    parser.add_argument('--label', help='Path of the labels file.')
    parser.add_argument(
        '--keep_aspect_ratio',
        dest='keep_aspect_ratio',
        action='store_true',
        help=
        ('keep the image aspect ratio when down-sampling the image by adding '
         'black pixel padding (zeros) on bottom or right. '
         'By default the image is resized and reshaped without cropping. This '
         'option should be the same as what is applied on input images during '
         'model training. Otherwise the accuracy may be affected and the '
         'bounding box of detection result may be stretched.'))
    parser.set_defaults(keep_aspect_ratio=False)
    args = parser.parse_args()

    EYE_ASPECT_RATIO_THRESHOLD = 0.3
    #Minimum consecutive frames for which eye ratio is below threshold for alarm to be triggered
    EYE_ASPECT_RATIO_CONSEC_FRAMES = 50

    # Counts the number of consecutive frames below the threshold value
    COUNTER = 0

    # Initialize engine.
    print("[INFO] loading Coral model...")
    engine = DetectionEngine(args.model)
    labels = dataset_utils.read_label_file(args.label) if args.label else None

    # initialize the video stream and allow the camera sensor to warmup
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    #vs = VideoStream(usePiCamera=True).start()
    time.sleep(2.0)

    print("[INFO] loading shape predictor dlib model...")
    ### Inserting code for detecting face landmarks here
    predict = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    print("[INFO] loaded shape predictor dlib model...")

    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # loop over the frames from the video stream
    while True:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 500 pixels
        frame = vs.read()
        frame = imutils.resize(frame, width=500)
        orig = frame.copy()

        # prepare the frame for object detection by converting (1) it
        # from BGR to RGB channel ordering and then (2) from a NumPy
        # array to PIL image format
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        frame = Image.fromarray(frame)

        # make predictions on the input frame
        start = time.time()
        print("Face detection started at: ", start)
        results = engine.DetectWithImage(frame,
                                         threshold=0.3,
                                         keep_aspect_ratio=True,
                                         relative_coord=False)
        #print(results)
        end = time.time()
        print("Face detection ended at: ", end)

        # loop over the results
        for r in results:
            # extract the bounding box and predicted class label
            box = r.bounding_box.flatten().astype("int")

            (startX, startY, endX, endY) = box

            # draw the bounding box on the image
            cv2.rectangle(orig, (startX, startY), (endX, endY), (0, 255, 0), 2)
            y = startY - 15 if startY - 15 > 15 else startY + 15
            text = "{:.2f}%".format(r.score * 100)
            cv2.putText(orig, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (0, 255, 0), 2)

            left = box[0]
            top = box[1]
            right = box[2]
            bottom = box[3]

            box1 = dlib.rectangle(int(left), int(top), int(right), int(bottom))

            shape = predict(orig, box1)
            shape = face_utils.shape_to_np(shape)

            #Get array of coordinates of leftEye and rightEye
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]

            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)
            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0

            frame = np.array(frame)

            # Use the convex hull to remove contour discrepancies and draw the eye shape around the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            #Detect if eye aspect ratio is less than threshold
            if (ear < EYE_ASPECT_RATIO_THRESHOLD):
                COUNTER += 1
                #If no. of frames is greater than threshold frames,
                if COUNTER >= EYE_ASPECT_RATIO_CONSEC_FRAMES:
                    cv2.putText(frame, "You are Drowsy!", (150, 200),
                                cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
            else:
                COUNTER = 0

        # show the output frame and wait for a key press
        frame = np.array(frame)
        cv2.imshow('Frame', frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
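
eye_aspect_ratio is called above but never defined in the snippet; the usual implementation of the eye aspect ratio, assuming scipy is available, is:

from scipy.spatial import distance as dist

def eye_aspect_ratio(eye):
    # vertical distances between the two pairs of vertical eye landmarks
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # horizontal distance between the eye corner landmarks
    C = dist.euclidean(eye[0], eye[3])
    # the ratio collapses toward zero as the eye closes
    return (A + B) / (2.0 * C)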
Example #8
def predict():
    shape_predictor = 'shape_predictor_68_face_landmarks.dat'
    cascade = 'haarcascade_frontalface_default.xml'
    # construct the argument parser and parse the arguments
    # ap = argparse.ArgumentParser()
    # ap.add_argument("-n", "--name", required=True,
    # 	help="path to output directory")
    # args = vars(ap.parse_args())

    # load OpenCV's Haar cascade for face detection from disk
    detector = cv2.CascadeClassifier(cascade)
    # for detecting facial features
    detector2 = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(shape_predictor)

    print("Starting video stream...")
    vs = VideoStream(src=0).start()

    time.sleep(2.0)
    total = 0
    time_to_recognize = 0

    # coordinates of eyes and mouth
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    (mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]

    directory = args["name"]
    parent_dir = "./dataset/"

    # Path
    path = os.path.join(parent_dir, directory)

    # create a directory for this person (ignore the error if it already exists)
    try:
        os.mkdir(path)
    except FileExistsError:
        pass

    # loop over the frames from the video stream
    while True:

        time_to_recognize += 1

        frame = vs.read()
        orig = frame.copy()
        frame = imutils.resize(frame, width=2000)

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = detector2(gray, 0)

        for rect in rects:
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]

            mouth = shape[mStart:mEnd]

            mouthHull = cv2.convexHull(mouth)
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            # draw
            cv2.drawContours(frame, [mouthHull], -1, (211, 211, 211), 1)
            cv2.drawContours(frame, [leftEyeHull], -1, (211, 211, 211), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (211, 211, 211), 1)

        cv2.putText(frame, "Move your head slowly", (30, 60),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.4, (0, 0, 255), 2)

        # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # To collect images
        if time_to_recognize > 50:
            break

        elif time_to_recognize <= 50:
            p = os.path.sep.join([path, "{}.png".format(str(total).zfill(5))])
            cv2.imwrite(p, orig)
            total += 1

    # do a bit of cleanup
    print("{} face images stored".format(total))

    cv2.destroyAllWindows()
    vs.stop()
    "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
    "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike",
    "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"
]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe("MobileNetSSD_deploy.prototxt.txt",
                               "MobileNetSSD_deploy.caffemodel")

# initialize the video stream, allow the camera sensor to warm up,
# and initialize the FPS counter
print("[INFO] starting video stream...")
#vs = VideoStream(src=0).start()
vs = VideoStream(
    "udp://@0.0.0.0:11111?overrun_nonfatal=1&fifo_size=50000000").start()
time.sleep(2.0)
fps = FPS().start()
isExecuted = False
isDone = False

# loop over the frames from the video stream
while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=400)

    # grab the frame dimensions and convert it to a blob
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843,
                                 (300, 300), 127.5)

def rechCodeBarre():
	# construct the argument parser and parse the arguments
	ap = argparse.ArgumentParser()
	ap.add_argument("-o", "--output", type=str, default="barcodes.csv",
		help="path to output CSV file containing barcodes")
	args = vars(ap.parse_args())

	# initialize the video stream and allow the camera sensor to warm up
	print("[INFO] starting video stream...")
	# vs = VideoStream(src=0).start()
	vs = VideoStream(usePiCamera=True).start()
	time.sleep(2.0)
	 
	# open the output CSV file for writing and initialize the set of
	# barcodes found thus far
	csv = open(args["output"], "w")
	found = set()

	# loop over the frames from the video stream
	a = True
	while a:
		# grab the frame from the threaded video stream and resize it to
		# have a maximum width of 400 pixels
		frame = vs.read()
		frame = imutils.resize(frame, width=400)
	 
		# find the barcodes in the frame and decode each of the barcodes
		barcodes = pyzbar.decode(frame)
		
		# loop over the detected barcodes
		for barcode in barcodes:
			# extract the bounding box location of the barcode and draw
			# the bounding box surrounding the barcode on the image
			(x, y, w, h) = barcode.rect
			cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
	 
			# the barcode data is a bytes object so if we want to draw it
			# on our output image we need to convert it to a string first
			barcodeData = barcode.data.decode("utf-8")
			barcodeType = barcode.type
	 
			# draw the barcode data and barcode type on the image
			text = "{} ({})".format(barcodeData, barcodeType)
			cv2.putText(frame, text, (x, y - 10),
				cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
	 
			# if the barcode text is currently not in our CSV file, write
			# the timestamp + barcode to disk and update the set
			if barcodeData not in found:
				csv.write("{},{}".format(datetime.datetime.now(),
					barcodeData))
				csv.flush()
				found.add(barcodeData)

		# show the output frame
		# cv2.imshow("Barcode Scanner", frame)
		# key = cv2.waitKey(1) & 0xFF
	 
		# if the `q` key was pressed, break from the loop
		# if key == ord("q"):
		#
		a = False

	# close the output CSV file and do a bit of cleanup
	print("[INFO] cleaning up...")
	csv.close()
	cv2.destroyAllWindows()
	vs.stop()
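
The same pyzbar calls also work on a single still image rather than a video stream; a small standalone sketch (the image filename here is hypothetical):

import cv2
from pyzbar import pyzbar

image = cv2.imread("barcode_example.png")
for barcode in pyzbar.decode(image):
    # barcode data arrives as bytes, so decode it before printing
    print(barcode.type, barcode.data.decode("utf-8"))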
Example #11
def add_user(usuario, depto, mail, debt):
    # Open the user data file.
    wb = openpyxl.load_workbook("data/info.xlsx")
    ws = wb.active
    encontrado = False
    act_data = True
    capturar = True
    actualizar = False

    # Walk through the file row by row.
    for i in range(1, ws.max_row):
        usr_cell = ws.cell(row=i, column=1).value
        dep_cell = ws.cell(row=i, column=2).value
        mail_cell = ws.cell(row=i, column=3).value
        debt_cell = ws.cell(row=i, column=4).value

        # In case the user being registered is already in the database.
        if (usr_cell == usuario and dep_cell == depto):
            encontrado = True
            if verbose: print("USUARIO YA SE ENCUENTRA REGISTRADO")
            actualizar_datos = messagebox.askyesno(
                'Usuario ya registrado',
                'Usuario ya registrado\n ¿Desea actualizar las deudas y correos del usuario?'
            )
            actualizar = True if actualizar_datos else False
            actualizar_foto = messagebox.askyesno(
                'Usuario ya registrado',
                'Usuario ya registrado\n ¿Desea actualizar la imagen del usuario?'
            )
            capturar = True if actualizar_foto else False

    # Capture the user's image.
    if capturar:
        messagebox.showwarning(
            'Captura fotográfica',
            'Presionar Espacio para capturar, "ESC" para salir.')
        video = True
        #vs = VideoStream(src=0).start()
        # Start the video feed.
        vs = VideoStream(0).start()
        cv2.startWindowThread()
        cv2.namedWindow("REGISTRAR USUARIO")

        # Pause so the image sensor can start up correctly.
        time.sleep(0.5)

        while video:
            # Flag indicating whether a face has been detected.
            cara = False
            frame = vs.read()
            # Flip the image horizontally (mirror).
            frame = cv2.flip(frame, 1)
            picture = frame.copy()
            # Scale the image down to 1/4 of the original size
            small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

            rgb_small_frame = small_frame[:, :, ::-1]
            face_locations = face_recognition.face_locations(rgb_small_frame)

            for (top, right, bottom, left) in face_locations:
                top *= 4
                right *= 4
                bottom *= 4
                left *= 4
                picture = picture[top:bottom, left:right]

                # Ratio used to offset the text boxes relative to the box enclosing the face.
                proporcion = 8
                borde = proporcion
                escala = proporcion / 10
                salto = proporcion * 5
                ancho = int((right - left) / proporcion)
                pos = bottom + 20
                blanco = (255, 255, 255)
                verde = (0, 255, 200)
                azul = (0, 100, 255)
                rojo = (255, 0, 100)
                aceptado = azul
                denegado = rojo

                # Draw the bounding box.
                cv2.rectangle(frame, (left, top), (right, bottom),
                              (250, 100, 0), 2)

                # Draw the label with the name.
                with suppress(Exception):
                    ps.putBText(frame,
                                str(usuario),
                                text_offset_x=left + ancho,
                                text_offset_y=pos,
                                vspace=borde,
                                hspace=borde,
                                font_scale=escala,
                                background_RGB=aceptado,
                                text_RGB=blanco)

                # Draw the label with the apartment (depto).
                with suppress(Exception):
                    ps.putBText(frame,
                                "Depto. " + str(depto),
                                text_offset_x=left + ancho,
                                text_offset_y=pos + 50,
                                vspace=borde,
                                hspace=borde,
                                font_scale=escala,
                                background_RGB=aceptado,
                                text_RGB=blanco)

                if len(face_locations) == 1:
                    cara = True

            frame_show = frame.copy()
            # Text for exiting the program.
            ps.putBText(frame_show,
                        '"ESC" para salir',
                        text_offset_x=50,
                        text_offset_y=frame_show.shape[0] - 50,
                        vspace=10,
                        hspace=10,
                        font_scale=1.0,
                        background_RGB=(228, 225, 222),
                        text_RGB=(1, 1, 1))

            # Text for capturing the image.
            ps.putBText(frame_show,
                        '"Espacio" para capturar',
                        text_offset_x=frame_show.shape[1] - 450,
                        text_offset_y=frame_show.shape[0] - 50,
                        vspace=10,
                        hspace=10,
                        font_scale=1.0,
                        background_RGB=(228, 225, 222),
                        text_RGB=(1, 1, 1))
            frame_show = cv2.resize(frame_show, (0, 0), fx=ventana, fy=ventana)

            cv2.imshow('REGISTRAR USUARIO', frame_show)
            #cv2.startWindowThread()

            key = cv2.waitKey(1) & 0xFF

            # Presionar "c" para capturar imagen
            #if key == ord('c') and cara == True:
            if (key == ord(' ') or key == ord('c')) and cara == True:
                cv2.imwrite('data/dataset/' + usuario + "-" + depto + '.jpg',
                            picture)
                if verbose: print("IMAGEN CAPTURADA")
                cv2.destroyAllWindows()
                #VideoStream(0).stop()
                vs.stop()
                video = False
                #messagebox.showwarning('Captura fotográfica','Imagen capturada')
                break

            # Presionar "q" para salir
            #if key == ord("q")
            if key == 27:
                if verbose: print("ENROLAMIENTO CANCELADO")
                cv2.destroyAllWindows()
                #VideoStream(0).stop()
                vs.stop()
                video = False
                act_data = False
                print('\007')
                messagebox.showwarning('Captura fotográfica',
                                       'Enrolamiento Cancelado')
                break

    if ((not encontrado and act_data) or actualizar):
        if actualizar:
            for i in range(1, ws.max_row):
                usr_cell = ws.cell(row=i, column=1).value
                dep_cell = ws.cell(row=i, column=2).value
                if (usr_cell == usuario and dep_cell == depto):
                    encontrado = True
                    ws.delete_rows(i, 1)
                    wb.save('data/info.xlsx')
        ws.insert_rows(2)
        ws.cell(row=2, column=1).value = usuario
        ws.cell(row=2, column=2).value = depto
        now = time.time()
        tiempo = time.localtime(now)
        time_log = time.strftime("%Y/%m/%d, %H:%M:%S", tiempo)
        ws.cell(row=2, column=5).value = time_log
        try:
            ws.cell(row=2, column=3).value = int(mail)
            ws.cell(row=2, column=4).value = int(debt)
        except:
            ws.cell(row=2, column=3).value = 0
            ws.cell(row=2, column=4).value = 0
        wb.save('data/info.xlsx')
        messagebox.showwarning('Agregar usuario', 'Usuario Agregado')
        if verbose: print("USUARIO AGREGADO A LA BASE DE DATOS")
        if auto_train: train()
Example #12
			ptsB = np.float32([kpsB[i] for (i, _) in matches])

			# compute the homography between the two sets of points
			(H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
				reprojThresh)

			# return the matches along with the homography matrix
			# and status of each matched point
			return (matches, H, status)

		# otherwise, no homography could be computed
		return None

# initialize the video streams and allow them to warmup
print("[INFO] starting cameras...")
leftStream = VideoStream(0).start()
rightStream = VideoStream(1).start()
time.sleep(2.0)

# initialize the image stitcher, motion detector, and total
# number of frames read
stitcher = Stitcher()
total = 0

# loop over frames from the video streams
while True:
	# grab the frames from their respective video streams
	left = leftStream.read()
	right = rightStream.read()

	# resize the frames
    def detect(self):

        # construct the argument parser and parse the arguments
        ap = argparse.ArgumentParser()
        ap.add_argument("-d", "--delay", type=float,
                        help="Amount of time in seconds to dzzelay the program before starting.")
        ap.add_argument("-m", "--min-area", type=int, default=500,
                        help="Minimum area in pixels difference to be considered actual motion.")
        ap.add_argument("-t", "--thresh", default=25, type=int,
                        help="Level of threshold intensity.")
        ap.add_argument("-v", "--video-path",
                        help="Path to video file. If not provided the default video recording device on your system will be used.")
        args = vars(ap.parse_args())

        print("Program starting.\n")

        if args.get("video_path", None) is not None:
            try:
                print("Attempting to access the video at path: {}".format(args["video_path"]))
                video_stream = cv2.VideoCapture(args["video_path"])
                print("Successfully accessed video.")
            except:
                print("Could not access the specified video. Please make sure you are "
                      + "providing an absolute path to file.")
        else:
            try:
                print("Attempting to access the default video recording device.")
                video_stream = VideoStream(src=0).start()
                time.sleep(2.0)
                print("Successfully connected to the default recording device.")
            except:
                print("Could not access the default recording device. Please make sure "
                      + "you have a device capable of recording video configured on your system.")
        print()

        if args.get("delay", None) is not None:
            print("Starting delay of: {} seconds".format(args["delay"]))
            time.sleep(args["delay"])
            print("Delay complete.")

        # Init variable to hold first frame of video. This will be used as a reference.
        # The motion detection algorithm utilizes the background of the initial frame
        # to compare all consecutive frames to in order to detect motion
        initial_frame = None

        print("Starting motion detection")
        print("Enter 'q' at any time to terminate")

        first_frame = 0

        # set up the text-to-speech engine for the voice messages

        engine = pyttsx3.init('sapi5')
        voices = engine.getProperty("voices")
        print(voices)
        engine.setProperty('voice', voices[1].id)

        # helper that speaks a voice message
        def speak(audio):
            engine.say(audio)
            engine.runAndWait()
            time.sleep(3)

        # define the video format in which the recording is saved
        filename = 'video.avi'
        frames_per_seconds = 20.0
        my_res = '740'

        # helper to set the capture resolution
        def change_res(cap, width, height):
            cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

        # standard video resolutions
        STD_DIMENSIONS = {
            "480p": (640, 480),
            "720p": (1280, 720),
            "1080p": (1920, 1080),
            "4k": (3840, 2160)
        }

        # pick the requested resolution (falling back to 480p) and apply it to the capture
        def get_dim(cap, res='1080p'):
            width, height = STD_DIMENSIONS['480p']
            if res in STD_DIMENSIONS:
                width, height = STD_DIMENSIONS[res]
            change_res(cap, width, height)
            return width, height

        # map output file extensions to FourCC video types
        VIDEO_TYPE = {
            'avi': cv2.VideoWriter_fourcc(*'XVID'),
            # 'mp4': cv2.VideoWriter_fourcc(*'H264'),
            'mp4': cv2.VideoWriter_fourcc(*'XVID'),
        }

        # pick the FourCC type based on the output filename
        def get_video_type(filename):
            filename, ext = os.path.splitext(filename)
            if ext in VIDEO_TYPE:
                return VIDEO_TYPE[ext]
            return VIDEO_TYPE['avi']

        # access the camera for input
        cap = cv2.VideoCapture(0)
        dims = get_dim(cap, res=my_res)
        out = cv2.VideoWriter(filename, get_video_type(filename), 20, get_dim(cap, my_res))
        dims = get_dim(cap, res=my_res)

        # background subtraction for motion detection on the frames
        fgbg = cv2.createBackgroundSubtractorMOG2(300, 400, True)

        # initialize the counters
        frameCount = 0
        currentframe = 0
        count = 0

        while True:

            # Set initial status to vacant.
            status = 'Area vacant.'

            # Grab current frame
            frame = video_stream.read()
            frame = frame if args.get("video_path", None) is None else frame[1]

            # If the frame is None we have reached the end of the video
            if frame is None:
                break

            # Preprocess the frame:
            # resize to a width of 600px, which improves speed without sacrificing accuracy
            frame = imutils.resize(frame, width=600)
            # Convert to grayscale as the background subtraction algorithm utilizes
            # black & white pixel data
            grayscale_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Apply a Gaussian blur to smooth the image data and reduce misleading
            # noise from scratches, sensor grain, etc.
            blurred_frame = cv2.GaussianBlur(grayscale_frame, (21, 21), 0)

            if initial_frame is None:
                initial_frame = grayscale_frame
                continue

            # Calculate the absolute difference between the current frame and the comparison

            # print(frame)

            def motion():
                grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                # cv2.imshow('Original', frame)
                # cv2.imshow('frame', frame)
                count = 0

                # capture 6 snapshots if a moving object appears in the surveillance feed

                while (count < 6):
                    cv2.imwrite("frame" + str(count) + ".jpg", grey)
                    count = count + 1
                    time.sleep(1)

            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # check whether the current frame contains a face
            face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt2.xml')
            faces = face_cascade.detectMultiScale(gray, scaleFactor=2.0, minNeighbors=5)
            for (x, y, w, h) in faces:
                # print(x, y, w, h)
                rol_gray = gray[y:y + h, x:x + w]
                rol_color = frame[y:y + h, x:x + w]
                img_item = "my-image.png"
                cv2.imwrite(img_item, rol_gray)
                cv2.imwrite(img_item, rol_color)

            # Check whether a current frame actually exists

            frameCount += 1
            # Resize the frame
            resizedFrame = cv2.resize(frame, (0, 0), fx=1, fy=1)

            # Get the foreground mask
            fgmask = fgbg.apply(resizedFrame)

            # Count all the non zero pixels within the mask
            count = np.count_nonzero(fgmask)

            # print('Frame: %d, Pixel Count: %d' % (frameCount, count))

            # Determine how many changed pixels should be considered "movement"
            # if (frameCount > 1 and count > 5000):

            # annotate the frame depending on whether an object is moving

            if (frameCount > 1 and count > 1000):

                myframe("YES")



                # text in the frame for the detecting the object is moving or note

                # speak('object is moving')

                # motion()

                print('object are moving')

                # if object has largaer then the threshold value then show object is moving in the grame
                cv2.putText(resizedFrame, 'object is moving', (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2,
                            cv2.LINE_AA)

                # load the dataset for face detection on the surveillance camera

                # faces = face_cascade.detectMultiScale(resizedFrame, scaleFactor=2.0, minNeighbors=5)
                face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_default.xml')
                # face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_upperbody.xml')
                eye_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_eye_tree_eyeglasses.xml')

                # convert the frame to grayscale and set a flag for whether a face is present

                gray = cv2.cvtColor(resizedFrame, cv2.COLOR_BGR2GRAY)
                flage = 0

                # set the scale factor and number of neighbors for the face detector

                faces = face_cascade.detectMultiScale(
                    resizedFrame,
                    scaleFactor=1.3,
                    minNeighbors=7,
                    # Min size for valid detection, changes according to video size or body size in the video.
                    flags=cv2.CASCADE_SCALE_IMAGE

                )

                for (x, y, w, h) in faces:

                    img = cv2.rectangle(resizedFrame, (x, y), (x + w, y + h), (255, 0, 0), 2)

                    roi_gray = resizedFrame[y:y + h, x:x + w]
                    roi_color = img[y:y + h, x:x + w]

                    eyes = eye_cascade.detectMultiScale(
                        roi_gray,
                        scaleFactor=1.3,
                        minNeighbors=7,
                        # Min size for valid detection, changes according to video size or body size in the video.
                        flags=cv2.CASCADE_SCALE_IMAGE
                    )

                    for (ex, ey, ew, eh) in eyes:
                        cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
                        eye_roi_gray = roi_color[ey:ey + eh, ex:ex + ew]
                        eye_roi_color = img[ey:ey + eh, ex:ex + ew]

                        # detect whether a nose is present
                        nose_cascade = cv2.CascadeClassifier('cascades/data/nose.xml')
                        nose = nose_cascade.detectMultiScale(
                            eye_roi_color,
                            scaleFactor=1.3,
                            minNeighbors=7,
                            # Min size for valid detection, changes according to video size or body size in the video.
                            flags=cv2.CASCADE_SCALE_IMAGE
                        )

                        for (nx, ny, nw, nh) in nose:
                            cv2.rectangle(eye_roi_color, (nx, ny), (nx + nw, ny + nh), (0, 255, 0), 2)
                            nose_roi_gray = eye_roi_color[ey:ey + eh, ex:ex + ew]
                            nose_roi_color = img[ey:ey + eh, ex:ex + ew]

                    if 1 > x:
                        print('no face')
                        facecover("HAS NO FACE")

                    else:
                        flage = 1
                        print('has face')
                        facecover("HAS FACE")

                if flage == 0:

                    print('has no face')

                    # if there is no face, fall back to the body/helmet/mask cascades

                    # detect the upper body of any person visible to the surveillance camera;
                    # this is the input for the further processing below
                    haar_upper_body_cascade = cv2.CascadeClassifier("cascades/data/haarcascade_upperbody.xml")

                    # Uncomment this for real-time webcam detection
                    # If you have more than one webcam & your 1st/original webcam is occupied,
                    # you may increase the parameter to 1 or respectively to detect with other webcams, depending on which one you wanna use.

                    upper_body = haar_upper_body_cascade.detectMultiScale(
                        resizedFrame,
                        scaleFactor=1.1,
                        minNeighbors=5,
                        minSize=(50, 50),
                        # Min size for valid detection, changes according to video size or body size in the video.
                        flags=cv2.CASCADE_SCALE_IMAGE
                    )

                    # Draw a rectangle around the upper bodies
                    for (x, y, w, h) in upper_body:
                        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)
                        # creates green color rectangle with a thickness size of 1
                        cv2.putText(frame, "Upper Body Detected", (x + 5, y + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                    (0, 255, 0),
                                    2)
                        # creates green color text with text size of 0.5 & thickness size of 2

                    # with no face found, load the helmet cascade

                    helmet_cascede = cv2.CascadeClassifier('cc/cascade.xml')
                    helmet = helmet_cascede.detectMultiScale(
                        resizedFrame,
                        scaleFactor=12,
                        minNeighbors=50,
                        # Min size for valid detection, changes according to video size or body size in the video.
                        flags=cv2.CASCADE_SCALE_IMAGE

                    )

                    # detect whether a helmet is being worn

                    for (xh, yh, wh, hh) in helmet:
                        img = cv2.rectangle(resizedFrame, (xh, yh), (xh + wh, yh + hh), (255, 0, 0), 2)

                        roi_gray = resizedFrame[yh:yh + hh, xh:xh + wh]
                        roi_color = img[yh:yh + hh, xh:xh + wh]
                        #speak('sir  please remove your helmet')

                    # detect whether a mask is being worn
                    # load the cascade used for mask detection

                    mask_cascade = cv2.CascadeClassifier('cascades/data/mask.xml')
                    # face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_upperbody.xml')

                    # cascade for the user's eyes
                    eye_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_eye_tree_eyeglasses.xml')

                    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                    mask = mask_cascade.detectMultiScale(
                        resizedFrame,
                        scaleFactor=2.0,
                        minNeighbors=7,
                        # Min size for valid detection, changes according to video size or body size in the video.
                        flags=cv2.CASCADE_SCALE_IMAGE
                    )

                    for (x, y, w, h) in mask:

                        img = cv2.rectangle(resizedFrame, (x, y), (x + w, y + h), (0, 0, 255), 2)

                        roi_gray = resizedFrame[y:y + h, x:x + w]
                        roi_color = img[y:y + h, x:x + w]

                        # detect the eyes on the face if the person is wearing a mask

                        eyes = eye_cascade.detectMultiScale(
                            roi_gray,
                            scaleFactor=7,
                            minNeighbors=20,
                            # Min size for valid detection, changes according to video size or body size in the video.
                            flags=cv2.CASCADE_SCALE_IMAGE
                        )

                        for (ex, ey, ew, eh) in eyes:
                            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
                            eye_roi_gray = roi_color[ey:ey + eh, ex:ex + ew]
                            eye_roi_color = img[ey:ey + eh, ex:ex + ew]

                # check whether any faces were found;
                # if a face is present, capture it

                for (x, y, w, h) in faces:
                    print(x, y, w, h)
                    rol_gray = gray[y:y + h, x:x + w]
                    rol_color = frame[y:y + h, x:x + w]
                    img_item = "image1.png"

                    # save the face image to local storage

                    cv2.imwrite(img_item, rol_gray)

                    cv2.imwrite(img_item, rol_color)




            # show the frames on the system display

            cv2.imshow('Frame', resizedFrame)
            cv2.imshow('gray', fgmask)

            # save the video captured by the camera

            out.write(frame)

            # terminate the loop if the user or admin presses 'q'

            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                break

        if args.get("video_path", None) is not None:
            video_stream.release()
        else:
            video_stream.stop()

        # release the remaining resources
        cap.release()
        out.release()
        cv2.destroyAllWindows()
        print("Program terminating")
Example #14
detector.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)

# load our serialized face embedding model from disk and set the
# preferable target to MYRIAD
print("[INFO] loading face recognizer...")
embedder = cv2.dnn.readNetFromTorch(args["embedding_model"])
embedder.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)

# load the actual face recognition model along with the label encoder
recognizer = pickle.loads(open(args["recognizer"], "rb").read())
le = pickle.loads(open(args["le"], "rb").read())

# initialize the video stream, then allow the camera sensor to warm up
print("[INFO] starting video stream...")
#vs = VideoStream(src=0).start()
vs = VideoStream(usePiCamera=False).start()
time.sleep(2.0)

# start the FPS throughput estimator
fps = FPS().start()

# loop over frames from the video file stream
while True:
	# grab the frame from the threaded video stream
	frame = vs.read()
	if frame is None:
		print("noneframe")
		continue

	# resize the frame to have a width of 600 pixels (while
	# maintaining the aspect ratio), and then grab the image
import cv2
import numpy as np
from imutils.video import VideoStream, FPS
import time

from communication.control import Control
import algo

control = Control()
vs = VideoStream(usePiCamera=True,
                 resolution=(640, 480),
                 framerate=32,
                 rotation=180)
fps = FPS()


def init():
    vs.start()
    time.sleep(2.0)
    fps.start()
    try:
        loop()
    except:
        print('error')
        control.stop()


def loop():
    while True:
        frame = vs.read()
        canny = algo.canny(frame)
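
algo is a project-local module that is not shown here; a plausible sketch of its canny helper, assuming standard OpenCV edge detection with grayscale and blur preprocessing (the thresholds are arbitrary), would be:

import cv2

def canny(frame, low=50, high=150):
    # grayscale + blur before Canny keeps the edge map from being dominated by noise
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    return cv2.Canny(blurred, low, high)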
Example #16
from imutils.video import VideoStream
import argparse
import imagezmq
import socket
import time

parser = argparse.ArgumentParser()
parser.add_argument('-s',
                    '--server-ip',
                    required=True,
                    help='IP address of server to which client will connect')
parser.add_argument('-p',
                    '--pi-camera',
                    type=bool,
                    default=True,
                    help='Toggle use of Raspberry Pi camera module')
args = vars(parser.parse_args())

sender = imagezmq.ImageSender(connect_to=f'tcp://{args["server_ip"]}:5555')

host_name = socket.gethostname()
if args['pi_camera']:
    vs = VideoStream(usePiCamera=True).start()
else:
    vs = VideoStream(src=0).start()
time.sleep(2.0)

while True:
    frame = vs.read()
    sender.send_image(host_name, frame)
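
The sender above needs a matching hub on the server side; a minimal receiver sketch using imagezmq.ImageHub, listening on port 5555 as the sender expects, is:

import cv2
import imagezmq

image_hub = imagezmq.ImageHub()  # binds to tcp://*:5555 by default
while True:
    # receive a (sender name, frame) pair; the REQ/REP pattern requires a reply
    host_name, frame = image_hub.recv_image()
    cv2.imshow(host_name, frame)
    image_hub.send_reply(b'OK')
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break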
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--path',
                        help='Path of the video you want to test on.',
                        default=0)
    args = parser.parse_args()

    MINSIZE = 20
    THRESHOLD = [0.6, 0.7, 0.7]
    FACTOR = 0.709
    IMAGE_SIZE = 182
    INPUT_IMAGE_SIZE = 160
    CLASSIFIER_PATH = 'Models/facemodel.pkl'
    VIDEO_PATH = args.path
    FACENET_MODEL_PATH = 'Models/20180402-114759.pb'

    # Load The Custom Classifier
    with open(CLASSIFIER_PATH, 'rb') as file:
        model, class_names = pickle.load(file)
    print("Custom Classifier, Successfully loaded")

    with tf.Graph().as_default():

        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))

        with sess.as_default():

            # Load the model
            print('Loading feature extraction model')
            facenet.load_model(FACENET_MODEL_PATH)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]

            pnet, rnet, onet = align.detect_face.create_mtcnn(
                sess, "src/align")

            people_detected = set()
            person_detected = collections.Counter()

            cap = VideoStream(src=0).start()

            while (True):
                frame = cap.read()
                frame = imutils.resize(frame, width=600)
                frame = cv2.flip(frame, 1)

                bounding_boxes, _ = align.detect_face.detect_face(
                    frame, MINSIZE, pnet, rnet, onet, THRESHOLD, FACTOR)

                faces_found = bounding_boxes.shape[0]
                try:
                    # if faces_found > 1:
                    #     cv2.putText(frame, "Only one face", (0, 100), cv2.FONT_HERSHEY_COMPLEX_SMALL,
                    #                 1, (255, 255, 255), thickness=1, lineType=2)
                    if faces_found > 0:
                        det = bounding_boxes[:, 0:4]
                        bb = np.zeros((faces_found, 4), dtype=np.int32)
                        for i in range(faces_found):
                            bb[i][0] = det[i][0]
                            bb[i][1] = det[i][1]
                            bb[i][2] = det[i][2]
                            bb[i][3] = det[i][3]

                            print(bb[i][3] - bb[i][1])
                            print(frame.shape[0])
                            print((bb[i][3] - bb[i][1]) / frame.shape[0])
                            if (bb[i][3] - bb[i][1]) / frame.shape[0] > 0.25:
                                cropped = frame[bb[i][1]:bb[i][3],
                                                bb[i][0]:bb[i][2], :]
                                scaled = cv2.resize(
                                    cropped,
                                    (INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE),
                                    interpolation=cv2.INTER_CUBIC)
                                scaled = facenet.prewhiten(scaled)
                                scaled_reshape = scaled.reshape(
                                    -1, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE, 3)
                                feed_dict = {
                                    images_placeholder: scaled_reshape,
                                    phase_train_placeholder: False
                                }
                                emb_array = sess.run(embeddings,
                                                     feed_dict=feed_dict)

                                predictions = model.predict_proba(emb_array)
                                best_class_indices = np.argmax(predictions,
                                                               axis=1)
                                best_class_probabilities = predictions[
                                    np.arange(len(best_class_indices)),
                                    best_class_indices]
                                best_name = class_names[best_class_indices[0]]
                                print("Name: {}, Probability: {}".format(
                                    best_name, best_class_probabilities))

                                if best_class_probabilities > 0.8:
                                    cv2.rectangle(frame, (bb[i][0], bb[i][1]),
                                                  (bb[i][2], bb[i][3]),
                                                  (0, 255, 0), 2)
                                    text_x = bb[i][0]
                                    text_y = bb[i][3] + 20

                                    name = class_names[best_class_indices[0]]
                                    cv2.putText(frame,
                                                name, (text_x, text_y),
                                                cv2.FONT_HERSHEY_COMPLEX_SMALL,
                                                1, (255, 255, 255),
                                                thickness=1,
                                                lineType=2)
                                    cv2.putText(
                                        frame,
                                        str(
                                            round(best_class_probabilities[0],
                                                  3)), (text_x, text_y + 17),
                                        cv2.FONT_HERSHEY_COMPLEX_SMALL,
                                        1, (255, 255, 255),
                                        thickness=1,
                                        lineType=2)
                                    person_detected[best_name] += 1
                                else:
                                    name = "Unknown"

                                cv2.rectangle(frame, (bb[i][0], bb[i][1]),
                                              (bb[i][2], bb[i][3]),
                                              (0, 255, 0), 2)
                                cv2.putText(frame, best_name,
                                            (bb[i][0] + 10, bb[i][3] + 30), 0,
                                            0.5, (0, 0, 255))

                except:
                    pass

                cv2.imshow('Face Recognition', frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            cap.release()
            cv2.destroyAllWindows()
Example #18
from imutils.video import VideoStream
import numpy as np
import cv2
import time
import pygame
if not simulation_mode:
    from sense_hat import SenseHat
else:
    from sense_emu import SenseHat

sense = SenseHat()
sense.clear()

vs = VideoStream(usePiCamera=not simulation_mode).start()
time.sleep(1.0)

cv2.namedWindow("ROV", cv2.WINDOW_NORMAL)
cv2.resizeWindow('ROV', 600, 600)
pygame.init()

auto = False
use_keyboard = True
use_controller = False

telemetry_dict = {'light': 'OFF'}

if auto:
    yaw_abs = sense.get_accelerometer()['yaw']
    press_abs = sense.get_pressure()
# initialize the list of class labels MobileNet SSD was trained to
# detect, then generate a set of bounding box colors for each class
CLASSES = [
    "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
    "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike",
    "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"
]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

# initialize the video stream, allow the camera sensor to warm up,
# and initialize the FPS counter
print("[INFO] starting video stream...")
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
#vs = VideoStream(src=0).start()
time.sleep(2.0)
fps = FPS().start()

# loop over the frames from the video stream
while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=400)

    # grab the frame dimensions and convert it to a blob
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843,
                                 (300, 300), 127.5)
Example #20
def mainc():

    scale_percent = 20  # percentage of original size
    width = 0
    height = 0

    labelsPath = "Model/coco.names"  #path for model
    LABELS = open(labelsPath).read().strip().split("\n")

    np.random.seed(42)
    COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")

    weightsPath = "Model/yolov3.weights"  #path for yolov3 weights
    configPath = "Model/yolov3.cfg"  #path for yolov3 configuration file

    net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)

    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("Could not open webcam")
        exit()
    else:  #get dimension info
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        dim = (width, height)
        print('Original Dimensions : ', dim)
        width = int(width * scale_percent / 100)
        height = int(height * scale_percent / 100)
        dim = (width, height)
        print('Resized Dimensions : ', dim)
        # the capture was only needed to query the frame dimensions
        cap.release()

    def detect_and_predict_mask(frame, faceNet, maskNet):
        # grab the dimensions of the frame and then construct a blob from it
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
                                     (104.0, 177.0, 123.0))
        # pass the blob through the network and obtain the face detections
        faceNet.setInput(blob)
        detections = faceNet.forward()
        # initialize our list of faces, their corresponding locations,
        # and the list of predictions from our face mask network
        faces = []
        locs = []
        preds = []

        # loop over the detections
        for i in range(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with
            # the detection
            confidence = detections[0, 0, i, 2]
            # filter out weak detections by ensuring the confidence is
            # greater than the minimum confidence
            if confidence > 0.5:
                # compute the (x, y)-coordinates of the bounding box for
                # the object
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")
                # ensure the bounding boxes fall within the dimensions of
                # the frame
                (startX, startY) = (max(0, startX), max(0, startY))
                (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

                # extract the face ROI, convert it from BGR to RGB channel
                # ordering, resize it to 224x224, and preprocess it
                face = frame[startY:endY, startX:endX]
                face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
                face = cv2.resize(face, (224, 224))
                face = img_to_array(face)
                face = preprocess_input(face)
                # add the face and bounding boxes to their respective
                # lists
                faces.append(face)
                locs.append((startX, startY, endX, endY))

        # only make predictions if at least one face was detected
        if len(faces) > 0:
            # for faster inference we'll make batch predictions on *all*
            # faces at the same time rather than one-by-one predictions
            # in the above `for` loop
            faces = np.array(faces, dtype="float32")
            preds = maskNet.predict(faces, batch_size=32)
        # return a 2-tuple of the face locations and their corresponding
        # predictions
        return (locs, preds)

    base_dir = os.getcwd()
    base_dir = base_dir.replace('\\', '/')

    print(base_dir)
    dataset_path = base_dir + '/dataset'
    accuracy_plot_dir = base_dir + '/Model'
    model_store_dir = base_dir + '/Model/mask_detector.model'
    example = base_dir + '/Image/1.jpg'

    confidence = 0.4

    face_detector_caffe = base_dir + '/Face Detector/res10_300x300_ssd_iter_140000.caffemodel'

    # load our serialized face detector model from disk
    print("[INFO] loading face detector model...")
    prototxtPath = base_dir + '/Face Detector/deploy.prototxt'
    weightsPath = face_detector_caffe
    faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)
    # load the face mask detector model from disk
    print("[INFO] loading face mask detector model...")
    maskNet = load_model(model_store_dir)
    # initialize the video stream and allow the camera sensor to warm up
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    #time.sleep(2.0)

    # loop over the frames from the video stream
    # (note: `iter` is never incremented below, so the detection/NMS block
    # guarded by `iter % 3 == 0` actually runs on every frame)
    iter = 0
    while True:

        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 1200 pixels
        frame = vs.read()
        frame = imutils.resize(frame, width=1200)

        resized = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)

        (H, W) = frame.shape[:2]
        ln = net.getLayerNames()
        # grab the YOLO output layer names; flattening keeps this working on
        # newer OpenCV versions where getUnconnectedOutLayers() returns a
        # 1-D array instead of an Nx1 array
        ln = [ln[i - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]
        blob = cv2.dnn.blobFromImage(frame,
                                     1 / 255.0, (224, 224),
                                     swapRB=True,
                                     crop=False)
        net.setInput(blob)
        start = time.time()
        layerOutputs = net.forward(ln)
        end = time.time()
        # print("Frame Prediction Time : {:.6f} seconds".format(end - start))
        boxes = []
        confidences = []
        classIDs = []

        for output in layerOutputs:
            for detection in output:
                scores = detection[5:]
                classID = np.argmax(scores)
                confidence = scores[classID]
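                # keep only reasonably confident detections of class 0,
                # which is "person" in the COCO label list loaded above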
                if confidence > 0.1 and classID == 0:
                    box = detection[0:4] * np.array([W, H, W, H])
                    (centerX, centerY, width, height) = box.astype("int")
                    x = int(centerX - (width / 2))
                    y = int(centerY - (height / 2))
                    boxes.append([x, y, int(width), int(height)])
                    confidences.append(float(confidence))
                    classIDs.append(classID)

        if iter % 3 == 0:

            idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)
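            # NMSBoxes keeps detections scoring above 0.5 and suppresses
            # overlapping boxes with IoU greater than 0.3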
            ind = []
            for i in range(0, len(classIDs)):
                if (classIDs[i] == 0):
                    ind.append(i)
            a = []
            b = []

            if len(idxs) > 0:
                for i in idxs.flatten():
                    (x, y) = (boxes[i][0], boxes[i][1])
                    (w, h) = (boxes[i][2], boxes[i][3])
                    a.append(x)
                    b.append(y)

            distance = []
            nsd = []
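            # compare every pair of detected people; if the pixel distance
            # between their box origins is below the threshold, record both
            # indices as social-distancing violators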
            for i in range(0, len(a) - 1):
                for k in range(1, len(a)):
                    if (k == i):
                        break
                    else:
                        x_dist = (a[k] - a[i])
                        y_dist = (b[k] - b[i])
                        d = math.sqrt(x_dist * x_dist + y_dist * y_dist)
                        distance.append(d)
                        if (d <= 6912):
                            nsd.append(i)
                            nsd.append(k)
                        nsd = list(dict.fromkeys(nsd))
                    # print(nsd)

            color = (0, 0, 255)
            for i in nsd:
                (x, y) = (boxes[i][0], boxes[i][1])
                (w, h) = (boxes[i][2], boxes[i][3])
                cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                text = "Alert"
                cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, color, 2)
            color = (0, 255, 0)
            if len(idxs) > 0:
                for i in idxs.flatten():
                    if (i in nsd):
                        break
                    else:
                        (x, y) = (boxes[i][0], boxes[i][1])
                        (w, h) = (boxes[i][2], boxes[i][3])
                        cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                        text = 'OK'
                        cv2.putText(frame, text, (x, y - 5),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

        text = "Social Distancing Violators: {}".format(len(nsd))
        cv2.putText(frame, text, (660, frame.shape[0] - 45),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 4)

        cv2.putText(frame, "Covid Guard: Team TrojanWave", (140, 45),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        cv2.rectangle(frame, (20, 60), (1170, 100), (170, 170, 170), 2)
        cv2.putText(frame, "COLOR CODE: RISK ANALYSIS", (30, 85),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 1)
        cv2.putText(frame, "--- GREEN : SAFE", (500, 85),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
        cv2.putText(frame, "--- RED: UNSAFE", (1000, 85),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)

        tot_str = "TOTAL: " + str(len(idxs))
        high_str = "HIGH RISK: " + str(len(nsd))
        low_str = "LOW RISK: " + str(0)
        safe_str = "SAFE: " + str(len(idxs) - len(nsd))

        sub_img = frame[H - 270:H, 0:240]
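        # blend a darkened rectangle over this lower-left region so the
        # statistics overlay drawn below stays readable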
        black_rect = np.ones(sub_img.shape, dtype=np.uint8) * 0

        res = cv2.addWeighted(sub_img, 0.8, black_rect, 0.2, 1.0)

        frame[H - 270:H, 0:240] = res

        cv2.putText(frame, tot_str, (10, H - 235), cv2.FONT_HERSHEY_SIMPLEX,
                    0.7, (255, 255, 255), 2)
        cv2.putText(frame, safe_str, (10, H - 200), cv2.FONT_HERSHEY_SIMPLEX,
                    0.7, (0, 255, 0), 2)
        cv2.putText(frame, low_str, (10, H - 165), cv2.FONT_HERSHEY_SIMPLEX,
                    0.7, (0, 120, 255), 2)
        cv2.putText(frame, high_str, (10, H - 130), cv2.FONT_HERSHEY_SIMPLEX,
                    0.7, (0, 0, 150), 2)

        #cv2.imshow("Social Distancing Detector", frame)

        cv2.rectangle(frame, (10, H - 100), (600, H - 10), (170, 170, 170), 2)
        cv2.putText(frame, "COLOR CODE: MASK DETECTION", (40, H - 40),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 0), 2)
        cv2.putText(frame, "--- RED : NO MASK", (420, H - 70),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
        cv2.putText(frame, "--- GREEN : MASK", (420, H - 35),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

        # cv2.putText(frame, "--    GREEN: SAFE", (565, 150),
        # 			cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

        # detect faces in the frame and determine if they are wearing a
        # face mask or not
        (locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)

        # loop over the detected face locations and their corresponding
        # locations
        for (box, pred) in zip(locs, preds):
            # unpack the bounding box and predictions
            (startX, startY, endX, endY) = box
            (mask, withoutMask) = pred
            # determine the class label and color we'll use to draw
            # the bounding box and text
            label = "Mask" if mask > withoutMask else "No Mask"
            color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
            # include the probability in the label
            label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
            # display the label and bounding box rectangle on the output
            # frame
            cv2.putText(frame, label, (startX, startY - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
            cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)

        # show the output frame
        cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
        cv2.setWindowProperty('frame', cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
        cv2.imshow('frame', frame)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break from the loop

        if key == ord("q"):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
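# NOTE: the following fragment assumes earlier (not shown) definitions such as
# `import RPi.GPIO as rp`, `import cv2 as cv`, `from time import sleep`,
# `from imutils.video import VideoStream`, `import imutils`, the servo GPIO
# pin numbers (servo1, servo2) and the frameSize tuple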
dt1 = 4
dt2 = 6
#################################################################################################################################
rp.setmode(rp.BCM)
rp.setwarnings(False)
rp.setup(servo1, rp.OUT)
rp.setup(servo2, rp.OUT)
pwm1 = rp.PWM(servo1, 50)
pwm2 = rp.PWM(servo2, 50)
pwm1.start(2)
pwm2.start(6)
#################################################################################################################################
cv.namedWindow('frame', cv.WINDOW_FREERATIO)
usingPiCamera = True
vs = VideoStream(src=0,
                 usePiCamera=usingPiCamera,
                 resolution=(240, 240),
                 framerate=60).start()
#wait for the camera to load
sleep(0.2)
################################################################################################################################
while True:
    #read the next frame from the stream
    frame = vs.read()
    if not usingPiCamera:
        frame = imutils.resize(frame, width=frameSize[0])

#trackbar for servo noise
#kti = cv.getTrackbarPos('ktime','window')
#ksa = cv.getTrackbarPos('ksaad','window')

#center of frames
from imutils.video import VideoStream
from imutils import face_utils
import argparse
import imutils
import dlib
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-s",
                "--shapePredictorPath",
                required=True,
                help="Path to the shape predictor path of the dlib library")
args = vars(ap.parse_args())
print("[INFO] Camera starting up")
vs = VideoStream(-1).start()
print("[INFO] Face detector and Landmarks Detector loading up")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shapePredictorPath"])

while True:
    frame = vs.read()
    frame = imutils.resize(frame, width=400)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 1)
    for rect in rects:
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        for (x, y) in shape:
            cv2.circle(frame, (x, y), 1, (0, 0, 255), -1)
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

cv2.destroyAllWindows()
vs.stop()
Example #23
0
def vid_stream():
    # start video
    print("starting video...")
    stream = VideoStream(src=0).start()
    # give time to grab a frame
    time.sleep(2.0)
    fps = FPS().start()
    while (True):
        # max width of 600 pixel for video in
        f = stream.read()
        f = imutils.resize(f, width=args["width"])

        (h, w) = f.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(f, (300, 300)), 0.007843,
                                     (300, 300), 127.5)

        # pass blob to nn
        net.setInput(blob)
        detections = net.forward()

        # loop over the detections
        for i in np.arange(0, detections.shape[2]):
            # extract the confidence (probability) of nn:s prediction
            confidence = detections[0, 0, i, 2]

            # filter out weak detections (unlikely)
            if confidence > args["confidence"]:
                # extract the index of the class label
                # (x, y)-coordinates of the bounding box for the object
                idx = int(detections[0, 0, i, 1])
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # draw_prediction(confidence, endX, endY, f, idx, startX, startY)

                # how wide/tall is the classified object (kept inside the
                # confidence check so the box coordinates are defined)
                obj_width = endX - startX
                obj_height = endY - startY

                # if the classified object is interesting
                # if idx == 2 or idx == 6 or idx == 7 or idx == 14 or idx==15:
                if idx in IDX_CONFIG:
                    # draw the prediction on the stream's current frame
                    draw_prediction(confidence, endX, endY, f, idx, startX,
                                    startY)

                    if obj_width >= 1 / 4 * w:
                        activate_diodes(startX, endX, w, idx)
                        draw_warning(f)

        # show the output frame
        if args["debug"]:
            cv2.imshow("Output Frame", f)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break

        # update the FPS counter
        fps.update()

    # stop the FPS counter and do a bit of cleanup
    fps.stop()
    # print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    cv2.destroyAllWindows()
    stream.stop()
Example #24
0
def registerBlFreq(newAvg):
    BlinkThresh = newAvg
    BlinkFrames = 3

    count = 0

    total = 0

    detector = dl.get_frontal_face_detector()
    predict = dl.shape_predictor("shape_predictor_68_face_landmarks.dat")

    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    vs = VideoStream(src=0).start()
    fileStream = False
    time.sleep(1.0)

    a = time.time() + 10

    while time.time() < a:
        if fileStream and not vs.more():
            break

        frame = vs.read()
        frame = imutils.resize(frame, width=500)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        pixels = detector(gray, 0)

        for pixel in pixels:
            shape = predict(gray, pixel)
            shape = face_utils.shape_to_np(shape)

            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]

            leftEAR = EAR(leftEye)
            rightEAR = EAR(rightEye)

            ear = (leftEAR + rightEAR) / 2.0

            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)

            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            if ear < BlinkThresh:
                count += 1
                # print(count)

            else:
                if count >= BlinkFrames:
                    total += 1

                count = 0

            # draw the counters before the frame is shown below
            cv2.putText(frame, "Blinks: {}".format(total), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        if key == ord("q"):
            break

    cv2.destroyAllWindows()
    vs.stop()
    return total
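
# The snippets in this collection call an EAR() helper that is not shown. A
# minimal sketch of the usual eye-aspect-ratio computation (assuming the
# six-point dlib eye landmarks in their standard order) might look like this:
from scipy.spatial import distance as dist

def EAR(eye):
    # vertical distances between the two pairs of inner eye landmarks
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # horizontal distance between the eye corners
    C = dist.euclidean(eye[0], eye[3])
    # the ratio drops sharply when the eye closes
    return (A + B) / (2.0 * C)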
Example #25
0
def main():
    args = vars(ap.parse_args())
    EYE_AR_THRESH = args['threshold']
    EYE_AR_CONSEC_FRAMES = args['frames']
    # initialize the frame counters and the total number of blinks
    COUNTER = 0
    TOTAL = 0
    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args["shape_predictor"])

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")
    print("[INFO] print q to quit...")
    if args['video'] == "camera":
        vs = VideoStream(src=0).start()
        fileStream = False
    else:
        vs = FileVideoStream(args["video"]).start()
        fileStream = True

    time.sleep(1.0)
    away_cnt = 0
    not_blinking = 0
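    # away_cnt counts frames where no face is detected; not_blinking counts
    # frames since the last registered blink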
    # loop over frames from the video stream
    while True:
        # if this is a file video stream, then we need to check if
        # there any more frames left in the buffer to process
        if fileStream and not vs.more():
            break
        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale
        # channels)
        frame = vs.read()
        frame = imutils.resize(frame, width=950)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # detect faces in the grayscale frame
        rects = detector(gray, 0)
        if len(rects) > 0:
            away_cnt = 0  # reset the away counter once a face is visible again
            # loop over the face detections
            for rect in rects:
                # determine the facial landmarks for the face region, then
                # convert the facial landmark (x, y)-coordinates to a NumPy
                # array
                shape = predictor(gray, rect)
                shape = face_utils.shape_to_np(shape)

                # extract the left and right eye coordinates, then use the
                # coordinates to compute the eye aspect ratio for both eyes
                leftEye = shape[lStart:lEnd]
                rightEye = shape[rStart:rEnd]
                print(leftEye)
                print(rightEye)
                not_blinking += 1
                print("Not Blinking" + str(not_blinking))
                leftEAR = eye_aspect_ratio(leftEye)
                rightEAR = eye_aspect_ratio(rightEye)

                # average the eye aspect ratio together for both eyes
                ear = (leftEAR + rightEAR) / 2.0

                # compute the convex hull for the left and right eye, then
                # visualize each of the eyes
                leftEyeHull = cv2.convexHull(leftEye)
                rightEyeHull = cv2.convexHull(rightEye)
                cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
                cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
                if not_blinking >= 60:
                    print("Please blink your eyes.")
                # check to see if the eye aspect ratio is below the blink
                # threshold, and if so, increment the blink frame counter
                if ear < EYE_AR_THRESH:
                    COUNTER += 1

                # otherwise, the eye aspect ratio is not below the blink
                # threshold
                else:
                    # if the eyes were closed for a sufficient number of
                    # frames, then increment the total number of blinks
                    if COUNTER >= EYE_AR_CONSEC_FRAMES:
                        TOTAL += 1
                        not_blinking = 0

                    # reset the eye frame counter
                    COUNTER = 0

                # draw the total number of blinks on the frame along with
                # the computed eye aspect ratio for the frame
                cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

            # show the frame
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break
        else:
            print("Unable to detect face")
            away_cnt = away_cnt + 1
            print(away_cnt)
            if away_cnt > 4000:
                print("Sleep Mode")
                os.system(
                    "osascript -e 'tell application \"Finder\" to sleep'")

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
Example #26
0
def Drowsy_detection(ear, freq):
    alarm_on = False
    BlinkThresh = ear
    BlinkFrames = 3
    FreqThresh = freq

    MouthThresh = 0.60
    yawnStatus = False
    yawns = 0

    SleepThresh = 0.25
    SleepFrames = 18

    COUNTER = 0
    TOTAL = 0

    detector = dl.get_frontal_face_detector()
    predict = dl.shape_predictor("shape_predictor_68_face_landmarks.dat")

    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    (mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]

    vs = VideoStream(src=0).start()

    # fl=0
    time.sleep(1.0)
    flag = 0
    t1 = time.time()
    while True:
        frame = vs.read()
        frame = imutils.resize(frame, width=500)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        pixels = detector(gray, 0)

        prev_yawn_status = yawnStatus

        for pixel in pixels:

            shape = predict(gray, pixel)
            shape = face_utils.shape_to_np(shape)
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            mouth = shape[mStart:mEnd]

            leftEAR = EAR(leftEye)
            rightEAR = EAR(rightEye)
            mouEAR = MAR(mouth)

            ear = (leftEAR + rightEAR) / 2.0

            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            mouthHull = cv2.convexHull(mouth)

            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [mouthHull], -1, (0, 255, 0), 1)

            if ear < BlinkThresh:
                COUNTER += 1
                flag += 1
                #print(flag)
                if flag >= SleepFrames:
                    if not alarm_on:
                        alarm_on = True

                    thread = sound_alarm()
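                    # sound_alarm() is assumed to return a Thread-like object;
                    # note that join() below blocks frame processing until the
                    # alarm finishes playing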
                    thread.start()
                    thread.join()
                    # fl=1
                    cv2.putText(frame, "***** ALERT! *****", (10, 250),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                    cv2.putText(
                        frame,
                        " You are incapable of driving! Please get some rest!",
                        (10, 300), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 0, 255),
                        2)

            else:
                flag = 0
                alarm_on = False
                if COUNTER >= BlinkFrames:
                    TOTAL += 1
                COUNTER = 0
                baseURL = 'https://api.thingspeak.com/update?api_key=UP2MYEJSHUGSTV7D&field1='
                c = 1
                """f = urllib.request.urlopen(baseURL + str(c))
                f.read()
                f.close()"""

            if mouEAR > MouthThresh:
                cv2.putText(frame, "Yawning ", (10, 70),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                yawnStatus = True
                output_text = "Yawn Count: " + str(yawns)
                cv2.putText(frame, output_text, (10, 100),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
            else:
                yawnStatus = False

            if prev_yawn_status and not yawnStatus:
                yawns += 1
                output_text = "Yawn Count: " + str(yawns)
                cv2.putText(frame, output_text, (10, 100),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)

            cv2.putText(frame, "MAR: {:.2f}".format(mouEAR), (300, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.imshow("Frame", frame)

            cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            """t2=time.time()
            if t2-t1>=5:
                a = time.time()+4
                while time.time()<=a:
                    cv2.putText(frame, "Checking measured driver data:", (10,10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                    if yawns>0:
                        cv2.putText(frame, "You yawned {:.2f} time(s). You might be sleepy".format(yawns), (30,20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                        yawns=0
                    if TOTAL>1.2*FreqThresh*2:
                        cv2.putText(frame, "You seem to be mildly tired. Beware!", (30,30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                    elif TOTAL>1.5*FreqThresh*2:
                        cv2.putText(frame, "You seem to be extremely tired. Please take some rest!", (30,30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                
                    cv2.imshow("Frame", frame)  

                TOTAL=0
                t1=t2
                t2=time.time()
                """
        cv2.imshow("Frame", frame)
        """if fl==1:
            baseURL='https://api.thingspeak.com/update?api_key=UP2MYEJSHUGSTV7D&field1='
            c=0
            f = urllib.request.urlopen(baseURL + str(c))
            f.read()
            f.close()
            fl=0"""
        key = cv2.waitKey(1) & 0xFF

        if key == ord("q"):
            break

    cv2.destroyAllWindows()
    vs.stop()
Example #27
0
def camera_thread():
    global key, ra, dec, killFlag, error_in_deg_h, error_in_deg_v
    global gray
    global mouseX, mouseY
    global update_tracker

    dx = dy = 25

    # mouseX, mouseY = -1, -1
    # cap = cv2.VideoCapture(0)
    #
    # ret, frame = cap.read()
    # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # cv2.namedWindow('frame')
    # cv2.imshow('frame', gray)
    # cv2.setMouseCallback('frame', draw_circle)
    # while (True):
    #     # Capture frame-by-frame
    #     ret, frame = cap.read()
    #
    #     # Our operations on the frame come here
    #     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #     height, width = gray.shape
    #
    #     pix_to_deg_v = height / fov_v
    #     pix_to_deg_h = width / fov_h
    #
    #     # Display the resulting frame
    #     cv2.line(gray, (width / 4, height / 4 - 10), (width / 4, height / 4 + 10), (0, 255, 0), 3)
    #     cv2.line(gray, (width / 4 - 10, height / 4), (width / 4 + 10, height / 4), (0, 255, 0), 3)
    #
    #     cv2.line(gray, (3 * width / 4, 3 * height / 4 - 10), (3 * width / 4, 3 * height / 4 + 10), (0, 255, 0), 3)
    #     cv2.line(gray, (3 * width / 4 - 10, 3 * height / 4), (3 * width / 4 + 10, 3 * height / 4), (0, 255, 0), 3)
    #
    #     cv2.line(gray, (width / 4, 3 * height / 4 - 10), (width / 4, 3 * height / 4 + 10), (0, 255, 0), 3)
    #     cv2.line(gray, (width / 4 - 10, 3 * height / 4), (width / 4 + 10, 3 * height / 4), (0, 255, 0), 3)
    #
    #     cv2.line(gray, (3 * width / 4, height / 4 - 10), (3 * width / 4, height / 4 + 10), (0, 255, 0), 3)
    #     cv2.line(gray, (3 * width / 4 - 10, height / 4), (3 * width / 4 + 10, height / 4), (0, 255, 0), 3)
    #
    #     if mouseX > -1 and mouseY > -1:
    #         cv2.circle(gray, (mouseX, mouseY), 10, (0, 0, 0), thickness=3, lineType=8, shift=0)
    #
    #     cv2.circle(gray, (width / 2, height / 2), 10, (22, 222, 22), thickness=3, lineType=8, shift=0)
    #
    #     error_x = width / 2 - mouseX
    #     error_y = height / 2 - mouseY
    #
    #     error_in_deg_v = error_y / pix_to_deg_v
    #     error_in_deg_h = error_x / pix_to_deg_h
    #
    #     print (error_in_deg_h, error_in_deg_v)
    #     cv2.imshow('frame', gray)
    #
    #     # print(cv2.waitKey(1))
    #
    #     temp = 0
    #     lock.acquire()
    #     try:
    #         temp = ra
    #     finally:
    #         lock.release()
    #
    #     key = cv2.waitKey(1)
    #     if key & 0xFF == ord('q'):
    #         print("breaking")
    #         break
    #     if key & 0xFF == ord('w'):
    #         temp = temp + 5
    #         print("ra(temp): {}".format(temp))
    #     if key & 0xFF == ord('s'):
    #         temp = temp - 5
    #         print("ra(temp): {}".format(temp))
    #
    #     lock.acquire()
    #     try:
    #         ra = temp
    #     finally:
    #         lock.release()
    #
    # # When everything done, release the capture
    # cap.release()
    # cv2.destroyAllWindows()
    # otherwise, for OpenCV 3.3 OR NEWER, we need to explicitly call the
    # appropriate object tracker constructor below

    # initialize a dictionary that maps strings to their corresponding
    # OpenCV object tracker implementations
    OPENCV_OBJECT_TRACKERS = {
        # "csrt": cv2.TrackerCSRT_create,
        "kcf": cv2.TrackerKCF_create,
        "boosting": cv2.TrackerBoosting_create,
        "mil": cv2.TrackerMIL_create,
        "tld": cv2.TrackerTLD_create,
        "medianflow": cv2.TrackerMedianFlow_create,
        # "mosse": cv2.TrackerMOSSE_create
    }
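    # note: in OpenCV 4.5+ several of these constructors (Boosting, TLD,
    # MedianFlow, MOSSE) are only available under cv2.legacy, e.g.
    # cv2.legacy.TrackerMedianFlow_create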

    # grab the appropriate object tracker using our dictionary of
    # OpenCV object tracker objects
    # tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
    tracker = OPENCV_OBJECT_TRACKERS['medianflow']()

    # initialize the bounding box coordinates of the object we are going
    # to track
    initBB = None

    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    time.sleep(1.0)

    # initialize the FPS throughput estimator
    fps = None

    # loop over frames from the video stream
    while True:
        # grab the current frame, then handle if we are using a
        # VideoStream or VideoCapture object
        frame = vs.read()

        # check to see if we have reached the end of the stream
        if frame is None:
            break

        # Our operations on the frame come here
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        height, width = gray.shape

        # resize the frame (so we can process it faster) and grab the
        # frame dimensions
        # frame = imutils.resize(frame, width=500)
        (H, W) = frame.shape[:2]

        # check to see if we are currently tracking an object
        if initBB is not None:
            # grab the new bounding box coordinates of the object
            (success, box) = tracker.update(frame)

            # check to see if the tracking was a success
            if success:
                (x, y, w, h) = [int(v) for v in box]
                cv2.rectangle(frame, (x, y), (x + dx, y + dy),
                              (0, 255, 0), 2)
                error_x = width / 2 - x
                error_y = height / 2 - y

                error_in_deg_v = error_y / pix_to_deg_v
                error_in_deg_h = error_x / pix_to_deg_h

                # print (error_in_deg_h, error_in_deg_v)

            # update the FPS counter
            fps.update()
            fps.stop()

            # initialize the set of information we'll be displaying on
            # the frame
            info = [
                ("Tracker", 'medianflow'),
                ("Success", "Yes" if success else "No"),
                ("FPS", "{:.2f}".format(fps.fps())),
            ]

            # loop over the info tuples and draw them on our frame
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        # check to see if we are currently tracking an object
        pix_to_deg_v = height / fov_v
        pix_to_deg_h = width / fov_h

        # Display the resulting frame
        # use integer division so the coordinates are valid pixel positions
        cv2.line(gray, (width // 4, height // 4 - 10), (width // 4, height // 4 + 10), (0, 255, 0), 3)
        cv2.line(gray, (width // 4 - 10, height // 4), (width // 4 + 10, height // 4), (0, 255, 0), 3)

        cv2.line(gray, (3 * width // 4, 3 * height // 4 - 10), (3 * width // 4, 3 * height // 4 + 10), (0, 255, 0), 3)
        cv2.line(gray, (3 * width // 4 - 10, 3 * height // 4), (3 * width // 4 + 10, 3 * height // 4), (0, 255, 0), 3)

        cv2.line(gray, (width // 4, 3 * height // 4 - 10), (width // 4, 3 * height // 4 + 10), (0, 255, 0), 3)
        cv2.line(gray, (width // 4 - 10, 3 * height // 4), (width // 4 + 10, 3 * height // 4), (0, 255, 0), 3)

        cv2.line(gray, (3 * width // 4, height // 4 - 10), (3 * width // 4, height // 4 + 10), (0, 255, 0), 3)
        cv2.line(gray, (3 * width // 4 - 10, height // 4), (3 * width // 4 + 10, height // 4), (0, 255, 0), 3)

        if update_tracker and mouseX > -1 and mouseY > -1:
            update_tracker = False
            #frame = vs.read()
            cv2.circle(frame, (mouseX, mouseY), 10, (0, 0, 0), thickness=3, lineType=8, shift=0)
            cv2.rectangle(frame, (mouseX - dx, mouseY - dy), (mouseX + dx, mouseY + dy), (0, 0, 255), 2)
            # tracker.init() expects (x, y, w, h), not two corner points
            initBB = (mouseX - dx, mouseY - dy, 2 * dx, 2 * dy)
            # print (initBB)
            tracker = OPENCV_OBJECT_TRACKERS['medianflow']()
            tracker.init(frame, initBB)
            fps = FPS().start()

        cv2.circle(frame, (width // 2, height // 2), 10, (22, 222, 22), thickness=3, lineType=8, shift=0)

        # error_x = width / 2 - mouseX
        # error_y = height / 2 - mouseY

        # error_in_deg_v = error_y / pix_to_deg_v
        # error_in_deg_h = error_x / pix_to_deg_h

        # print (error_in_deg_h, error_in_deg_v)
        # show the output frame
        cv2.imshow("Frame", frame)
        cv2.setMouseCallback("Frame", draw_circle)
        key = cv2.waitKey(1) & 0xFF

        # if the 's' key is selected, we are going to "select" a bounding
        # box to track
        if key == ord("s"):
            # select the bounding box of the object we want to track (make
            # sure you press ENTER or SPACE after selecting the ROI)
            initBB = cv2.selectROI("Frame", frame, fromCenter=False,
                                   showCrosshair=True)
            # print (initBB)

            # start OpenCV object tracker using the supplied bounding box
            # coordinates, then start the FPS throughput estimator as well
            tracker.init(frame, initBB)
            fps = FPS().start()
            # if the `q` key was pressed, break from the loop

        elif key == ord("q"):
            break

    # if we are using a webcam, release the pointer
    vs.stop()

    # # otherwise, release the file pointer
    # else:
    #     vs.release()

    # close all windows
    cv2.destroyAllWindows()
    killFlag = True
Example #28
0
def registerEAR():
    BlinkThresh = 0.23
    BlinkFrames = 3
    avgEAR = 0
    counter = 0
    count = 0
    TOTAL = 0
    detector = dl.get_frontal_face_detector()
    predict = dl.shape_predictor("shape_predictor_68_face_landmarks.dat")

    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    vs = VideoStream(src=0).start()

    fileStream = False
    time.sleep(1.0)

    a = time.time() + 5

    while time.time() < a:
        counter += 1

        frame = vs.read()
        frame = imutils.resize(frame, width=500)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        pixels = detector(gray, 0)

        for pixel in pixels:
            shape = predict(gray, pixel)
            shape = face_utils.shape_to_np(shape)

            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]

            leftEAR = EAR(leftEye)
            rightEAR = EAR(rightEye)

            ear = (leftEAR + rightEAR) / 2

            avgEAR += ear

            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)

            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
            cv2.imshow("Frame", frame)

            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break

    cv2.destroyAllWindows()
    vs.stop()
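    # return a slightly reduced fraction of the measured open-eye EAR so it
    # can be used as a blink threshold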
    return 0.85 * (avgEAR / counter)
COUNTER = 0
ALARM_ON = False

# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
# use a raw string so the backslashes in the Windows path are not treated as escape sequences
predictor = dlib.shape_predictor(r"D:\cv\drowsiness-detection\shape_predictor_68_face_landmarks.dat")
# grab the indexes of the facial landmarks for the left and
# right eye, respectively
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

# start the video stream thread
print("[INFO] starting video stream thread...")
vs = VideoStream(src=args["webcam"]).start()
time.sleep(1.0)

# loop over frames from the video stream
while True:
	# grab the frame from the threaded video file stream, resize
	# it, and convert it to grayscale
	# channels)
	frame = vs.read()
	frame = imutils.resize(frame, width=450)
	gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

	# detect faces in the grayscale frame
	rects = detector(gray, 0)

	# loop over the face detections
Example #30
0
from imutils.video import VideoStream
from flask import Flask, render_template
import threading
import imutils
import time
import cv2

# initialize the output frame and a lock used to ensure thread-safe
# exchanges of the output frame (useful when multiple browsers/tabs
# are viewing the stream)
outputFrame = None
lock = threading.Lock()

# initialize a flask object
app = Flask(__name__)

# initialize the video stream and allow the camera sensor to
# warmup
vs = VideoStream(usePiCamera=1).start()
#vs = VideoStream(src=0).start()
time.sleep(2.0)

@app.route("/")
def index():
	# return the rendered template
	return render_template("index.html")

def detect_motion(frameCount):
	# grab global references to the video stream, output frame, and
	# lock variables
	global vs, outputFrame, lock

	# loop over frames from the video stream
	while True: