Example #1
def cam_off_loop(screen, disply_obj, FPS=0):
    log.info("cam_off_loop start")
    img_path = "1.jpg"

    if not os.path.isfile(img_path):
        log.error("[ERROR] image does not exist {}".format(img_path))
        # print("[ERROR] image does not exist {}".format(img_path))
        return

    img = cv2.imread(img_path, 1)
    size = (define.HORIZ_PIXELS_SMALL, define.VERT_LINES_SMALL)
    resize_frame = cv2.resize(img, size)
    frame = cv2.cvtColor(resize_frame, cv2.COLOR_BGR2RGB)

    image_title = display_gui.Menu.Text(text=TASK_TITLE,
                                        font=display_gui.Font.Medium)

    display.display_render(screen, frame, disply_obj, TASK_INFO)

    image_title.Render(to=screen, pos=TASK_TITLE_POS)
Example #2
def motion_detection_pygm(screen, disply_obj, fbs):
    """ """
    log.info("motion_detection_pygm starts... ")

    image_title = display_gui.Menu.Text(text=TASK_TITLE,
                                        font=display_gui.Font.Medium)

    cap = VideoStream(src=CAM_NUM).start()
    time.sleep(2.0)

    # background subtractor: models the scene background and flags moving pixels
    fgbg = cv2.createBackgroundSubtractorMOG2()
    while True:

        # read the next frame from the threaded video stream
        frame = cap.read()

        if frame is None:
            log.error("No frame available !!")
            # print("ERROR: No frame available !!")
            break

        # resize frame for required size
        resize_frame = cv2.resize(frame, define.VID_FRAME_SIZE)

        # OpenCV images are BGR; convert to RGB for display
        frame = cv2.cvtColor(resize_frame, cv2.COLOR_BGR2RGB)  # for display

        # color has no bearing on the motion detection algorithm
        gray = cv2.cvtColor(resize_frame, cv2.COLOR_BGR2GRAY)

        # smooth the image to remove noise (otherwise it could throw the algorithm off)
        # by averaging pixel intensities across a 21x21 region
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        # apply background subtraction
        fgmask = fgbg.apply(gray)
        # [-2:] copes with both the 3-value (OpenCV 3.x) and 2-value (4.x) return
        contours, hierarchy = cv2.findContours(fgmask.copy(),
                                               cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)[-2:]

        # looping for contours
        for c in contours:
            if cv2.contourArea(c) < MIN_AREA:
                continue

            M = cv2.moments(c)
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            # get bounding box from contour
            (x, y, w, h) = cv2.boundingRect(c)

            # draw bounding box
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.circle(frame, (cX, cY), 7, (0, 0, 255), -1)  # mark the centroid

        # select which frame to display: 0 = foreground mask, otherwise the annotated frame
        if globals.VID_FRAME_INDEX == 0:
            frame = fgmask

        # Display the frame
        display.display_render(screen, frame, disply_obj, TASK_INFO)
        image_title.Render(to=screen, pos=TASK_TITLE_POS)

        # if TASK_INDEX is no longer 2, another button has been pressed
        if globals.TASK_INDEX != 2:
            log.info("TASK_INDEX is not 2 but {}".format(globals.TASK_INDEX))
            break

        if not globals.CAM_START or globals.EXIT:
            # print(f"face_recog globals.CAM_START {globals.CAM_START}")
            break

        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    cap.stop()
    cv2.destroyAllWindows()
    log.info("closing motion detection")
Example #3
def object_recog_pygm(screen, disply_obj):
    """ """
    log.info("object_recog_pygm start... ")

    # network definition (prototxt) and pre-trained weights for the Caffe model
    prototxt_file = "MobileNetSSD_deploy.prototxt.txt"
    caffe_model = "MobileNetSSD_deploy.caffemodel"

    prototxt_file_path = file_path_check(prototxt_file)

    caffe_model_path = file_path_check(caffe_model)

    image_title = display_gui.Menu.Text(text=TASK_TITLE,
                                        font=display_gui.Font.Medium)

    # initialize the list of class labels MobileNet SSD was trained to
    # detect, then generate a set of bounding box colors for each class
    CLASSES = [
        "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
        "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
        "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
        "tvmonitor"
    ]

    COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

    # load our serialized model from disk
    net = cv2.dnn.readNetFromCaffe(prototxt_file_path, caffe_model_path)

    # initialize the video stream
    vid = Vision()

    # loop over the frames from the video stream
    while vid.is_camera_connected():

        ret, frame = vid.get_video()

        frame = vid.resize_frame(frame)

        # grab the frame dimensions and convert it to a blob
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843,
                                     (300, 300), 127.5)

        # pass the blob through the network and obtain the detections and
        # predictions
        net.setInput(blob)
        detections = net.forward()

        # loop over the detections
        for i in np.arange(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with
            # the prediction
            confidence = detections[0, 0, i, 2]

            # filter out weak detections by ensuring the `confidence` is
            # greater than the minimum confidence
            if confidence > 0.2:
                # extract the index of the class label from the
                # `detections`, then compute the (x, y)-coordinates of
                # the bounding box for the object
                idx = int(detections[0, 0, i, 1])
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # draw the prediction on the frame
                label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                              COLORS[idx], 2)
                y = startY - 15 if startY - 15 > 15 else startY + 15
                cv2.putText(frame, label, (startX, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

        # indexes 0 and 1 keep the raw BGR frame; otherwise convert to RGB for display
        if globals.VID_FRAME_INDEX not in (0, 1):
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # for display
            # TASK_INFO = "Colored Frame  " + TASK_INFO
        # Display the frame
        display.display_render(screen, frame, disply_obj, TASK_INFO)

        image_title.Render(to=screen, pos=TASK_TITLE_POS)

        # if TASK_INDEX is no longer 3, another button has been pressed
        if globals.TASK_INDEX != 3:
            log.info("TASK_INDEX is not 3 but {}".format(globals.TASK_INDEX))
            break

        if not globals.CAM_START or globals.EXIT:
            # print(f"face_recog globals.CAM_START {globals.CAM_START}")
            break

        # show the output frame
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    vid.video_cleanUp()
    log.info("object_recog_pygm closing ")
def face_recog_pygm(screen, disply_obj, fbs):
    """
    Face Recognition pygame function read info from haarcascade_frontalface_defualt.xml, trainner.yml
    (for predicting trained faces), labels.pickle (to get label of faces ) and predict name of the face.

    """

    log.info("face_recog_pygm start")
    # print("[INFO] face_recog_pygm start")

    # create the cascade classifier object
    face_cascade_name = "haarcascade_frontalface_default.xml"
    face_cascade_path = file_path_check(face_cascade_name)
    face_cascade = cv2.CascadeClassifier(face_cascade_path)
    # recognizer = cv2.face.createLBPHFaceRecognizer() # for opencv 2.4
    recognizer = cv2.face.LBPHFaceRecognizer_create()

    # load the trained recognizer model
    recognizer_file = "trainner.yml"
    recognizer_path = file_path_check(recognizer_file)
    # recognizer.load(recognizer_path) # for opencv 2.4
    recognizer.read(recognizer_path)

    # read the face labels from the labels.pickle file
    labels = {"person_name": 1}
    labels_file = "labels.pickle"
    labels_path = file_path_check(labels_file)
    try:
        with open(labels_path, 'rb') as f:
            og_labels = pickle.load(f)
            labels = {v: k for k, v in og_labels.items()}
    except Exception as error:
        log.error(error)
        raise

    image_title = display_gui.Menu.Text(text=TASK_TITLE,
                                        font=display_gui.Font.Medium)

    vid = Vision()

    front = cv2.FONT_HERSHEY_SIMPLEX

    color = (255, 0, 0)
    # thickness of the drawn text
    stroke = 2

    log.info("frame reading starts ")

    while vid.is_camera_connected():

        ret, frame = vid.get_video()

        # resize frame for required size
        resize_frame = vid.resize_frame(frame)

        # OpenCV images are BGR; convert to RGB for display
        frame = cv2.cvtColor(resize_frame, cv2.COLOR_BGR2RGB)  # for display

        # convert the image to grayscale for processing
        gray = cv2.cvtColor(resize_frame, cv2.COLOR_BGR2GRAY)  # for processing

        # detect objects of different sizes in the input image;
        # the detected objects are returned as a list of rectangles
        faces = face_cascade.detectMultiScale(gray,
                                              scaleFactor=1.3,
                                              minNeighbors=5)

        for (x, y, w, h) in faces:
            # draw a rectangle around the detected face
            frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0),
                                  2)  # RGB
            roi_gray = gray[y:y + h, x:x + w]
            # roi_color = frame[y:y+h, x:x+w]

            id_, confidence = recognizer.predict(roi_gray)
            if confidence >= 20:
                name = labels[id_]
                cv2.putText(frame, name[::-1], (x, y), front, 1.0, color,
                            stroke, cv2.LINE_AA)

        # select which frame to display
        if globals.VID_FRAME_INDEX == 0:
            frame = resize_frame
        elif globals.VID_FRAME_INDEX == 1:
            frame = gray
        # otherwise keep the annotated RGB frame

        # Display the frame
        display.display_render(screen, frame, disply_obj, TASK_INFO)

        image_title.Render(to=screen, pos=TASK_TITLE_POS)

        # if TASK_INDEX is no longer 1, another button has been pressed
        if globals.TASK_INDEX != 1:
            log.info("TASK_INDEX is not 1 but {}".format(globals.TASK_INDEX))
            break

        if not globals.CAM_START or globals.EXIT:
            # print(f"face_recog globals.CAM_START {globals.CAM_START}")
            break

        # framerate control
        if cv2.waitKey(fbs) & 0xff == ord('q'):
            break

    log.info("Face Recognition closing ")
    vid.video_cleanUp()
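
Side note: face_recog_pygm only consumes trainner.yml and labels.pickle; the examples above do not show how they were produced. As a hedged sketch, an offline training step with the LBPH recognizer could look like this, assuming a hypothetical faces/<person_name>/*.jpg folder layout (not part of the original code).

import os
import pickle

import cv2
import numpy as np

face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
recognizer = cv2.face.LBPHFaceRecognizer_create()

samples, ids, label_ids = [], [], {}

# assumed layout: faces/<person_name>/<image>.jpg
for person in sorted(os.listdir("faces")):
    label_ids.setdefault(person, len(label_ids))
    person_dir = os.path.join("faces", person)
    for name in os.listdir(person_dir):
        img = cv2.imread(os.path.join(person_dir, name), cv2.IMREAD_GRAYSCALE)
        if img is None:
            continue
        for (x, y, w, h) in face_cascade.detectMultiScale(img, scaleFactor=1.3,
                                                          minNeighbors=5):
            samples.append(img[y:y + h, x:x + w])
            ids.append(label_ids[person])

# train the LBPH model and persist it together with the name -> id mapping
recognizer.train(samples, np.array(ids))
recognizer.write("trainner.yml")  # recognizer.save() on older OpenCV builds
with open("labels.pickle", "wb") as f:
    pickle.dump(label_ids, f)  # face_recog_pygm inverts this to id -> name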