Example no. 1
    def start(self):
        self.location = self.CCTVOption.set_location
        if self.location is None:
            QMessageBox.about(self, "Info", "Please enter a location.")
            return self.exec_setting()

        try:
            self.setCamera = cv2.VideoCapture(0)
            ret, self.img_o = self.setCamera.read()
            if not ret:
                raise RuntimeError("camera read failed")
            # VideoCapture frames are BGR, so convert with COLOR_BGR2GRAY
            self.img_o = cv2.cvtColor(self.img_o, cv2.COLOR_BGR2GRAY)
        except (RuntimeError, cv2.error):
            QMessageBox.about(self, "Info", "Please check the camera.")
            return

        self.main_record = True
        self.event_record = False
        self.event_switch = False
        self.event_status = False

        self.timer = QTimer()
        self.timer.timeout.connect(self.nextframe)
        self.timer.start(int(1000 / self.fps))  # QTimer.start() takes milliseconds as an int

        port = 3030
        require_login = False
        self.streamer = Streamer(port, require_login)
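The QTimer above is connected to a self.nextframe slot that the snippet does not show. Below is a minimal, assumed sketch of such a slot, feeding one frame per tick to the streamer; everything beyond the names already visible in the snippet is hypothetical.

    def nextframe(self):
        # Hypothetical slot (not from the source): grab a frame on each
        # timer tick and hand it to the flask_opencv_streamer instance.
        ret, frame = self.setCamera.read()
        if not ret:
            return
        self.streamer.update_frame(frame)
        if not self.streamer.is_streaming:
            self.streamer.start_streaming()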
Example no. 2
def main() -> None:
    """
    The main launcher logic, using a CUDA streamer to stream from an IP camera.

    Arguments:
        None
    Returns:
        None
    """

    cuda_streamer = CUDAStreamer(
        "rtsp://*****:*****@10.199.51.222/axis-media/media.amp?streamprofile=H264-OpenCV-Optimized",
        1920, 1080)
    streamer = Streamer(3030, False, (1920, 1080), frame_rate=30)

    while True:
        frame = cuda_streamer.get_image()
        streamer.update_frame(frame)
        if not streamer.is_streaming:
            streamer.start_streaming()
Example no. 3

from flask_opencv_streamer.streamer import Streamer
from cv2 import VideoCapture, waitKey

port = 5000
require_login = False
streamer = Streamer(port, require_login)

cap = VideoCapture(0)

print('Reading the camera and hosting the Flask stream on port:', port)

while True:
    success, frame = cap.read()
    if not success:
        continue
    streamer.update_frame(frame)
    if not streamer.is_streaming:
        streamer.start_streaming()
    
    waitKey(66)
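One caveat on the loop above: cv2.waitKey(66) throttles it to roughly 15 fps, but waitKey only processes events reliably when a HighGUI window exists. In a headless script like this one, time.sleep is the safer throttle. A minimal variant under that assumption:

from time import sleep

while True:
    success, frame = cap.read()
    if not success:
        continue
    streamer.update_frame(frame)
    if not streamer.is_streaming:
        streamer.start_streaming()
    sleep(1 / 15)  # ~15 fps, matching the 66 ms delay above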
Example no. 4
def vision():
    src = int(args['source']) if args['source'].isdigit() else args['source']
    flip = args['flip']
    rotate = args['rotate']
    view = args['view']
    debug = args['debug']
    threshold = args['threshold'] if args['threshold'] < 50 else 0
    angle_threshold = args['athreshold'] if 0 < args['athreshold'] < 30 else 0
    filter_threshold = (args['fthreshold']
                        if 0 < args['fthreshold'] <= 0.8 else 0.5)
    net_table = args['networktables']
    is_pi = args['pi']
    crash = args['crash']
    window_moved = False
    sequence = False
    frame = []

    def capture_frame(frame, table, value):
        if value:
            cv2.imwrite('/tmp/gv_frame.jpg', frame)
            table.putBoolean('capture_frame', False)

    cap = cv2.VideoCapture(src)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, data['image-width'])
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, data['image-height'])
    port1 = 3030
    streamer = Streamer(port1, False)

    if net_table:
        nt.NetworkTables.initialize(server=data['server-ip'])
        camera_table = nt.NetworkTables.getTable("CameraPublisher")
        sub_table = camera_table.getSubTable("Camera")
        sub_table.getEntry("streams").setStringArray(
            ["mjpg:http://10.18.16.16:3030/video_feed"])
        table = nt.NetworkTables.getTable('SmartDashboard')
        if table:
            print('table OK')
        table.putNumber('center_x', -1)
        table.putNumber('center_y', -1)
        table.putNumber('contours', -1)
        table.putNumber('targets', -1)
        table.putNumber('width', data['image-width'])
        table.putNumber('height', data['image-height'])
        table.putBoolean('capture_frame', False)
        table.addEntryListener(
            lambda table, key, value, isNew: capture_frame(frame, table, value),
            key='capture_frame')
        # values = {'vision_active': False}
        # table.addEntryListener(value_changed, key='vision_active')

    if debug:
        print(
            '----------------------------------------------------------------')
        print('Current Source: {}'.format(src))
        print('View Flag: {}'.format(view))
        print('Debug Flag: {}'.format(debug))
        print('Threshold Value: {}'.format(threshold))
        print('Angle Threshold Value: {}'.format(angle_threshold))
        print('Network Tables Flag: {}'.format(net_table))
        print(
            '----------------------------------------------------------------\n'
        )

    v_focal_length = data['camera_matrix'][1][1]
    h_focal_length = data['camera_matrix'][0][0]
    lower_color = np.array([
        data['lower-color-list'][0] - threshold, data['lower-color-list'][1],
        data['lower-color-list'][2]
    ])  # HSV to test: 0, 220, 25
    upper_color = np.array([
        data['upper-color-list'][0] + threshold, data['upper-color-list'][1],
        data['upper-color-list'][2]
    ])
    center_coords = (int(data['image-width'] / 2),
                     int(data['image-height'] / 2))
    screen_c_x = data['image-width'] / 2 - 0.5
    screen_c_y = data['image-height'] / 2 - 0.5

    first_read = True
    rectangle_list = []
    sorted_contours = []
    average_coord_list = []
    append = average_coord_list.append

    while True:
        fps = FPS().start()
        if crash:
            raise Exception('Get bamboozled...')
        start_time = time.time()
        biggest_contour_area = -1
        best_center_average_coords = (-1, -1)
        index = -1
        pitch = -999
        yaw = -999

        if view:
            if not first_read:

                key = cv2.waitKey(30) & 0xFF
                if key == ord('q'):
                    break
                if sequence and key != ord(' '):
                    continue

        first_read = False

        _, frame = cap.read()

        if frame is None:
            continue

        if flip:
            frame = cv2.flip(frame, -1)
        if rotate:
            frame = imutils.rotate_bound(frame, 90)
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lower_color, upper_color)
        # find contours from mask
        all_contours, _ = cv2.findContours(mask, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
        # remove super small or super big contours that exist due to light noise/objects
        filtered_contours = [
            c for c in all_contours if 50 < cv2.contourArea(c) < 15000
        ]

        filtered_contours_area = [
            cv2.contourArea(c) for c in all_contours if 50 < cv2.contourArea(c)
        ]

        # find the contour with the biggest area so we can further remove contours created from light noise
        if len(all_contours) > 0:
            biggest_contour_area = max(
                [cv2.contourArea(c) for c in all_contours])
        # create a contour list that removes contours smaller than the biggest * some constant

        filtered_contours = [
            c for c in filtered_contours
            if cv2.contourArea(c) > filter_threshold * biggest_contour_area
        ]

        # sort contours by left to right, top to bottom
        if len(filtered_contours) > 1:
            bounding_boxes = [cv2.boundingRect(c) for c in filtered_contours]
            sorted_contours, _ = zip(
                *sorted(zip(filtered_contours, bounding_boxes),
                        key=lambda b: b[1][0],
                        reverse=False))
            sorted_contours = list(sorted_contours)

        if len(sorted_contours) > 1:
            # gets ((cx, cy), (width, height), angle of rot) for each contour
            rectangle_list = [cv2.minAreaRect(c) for c in sorted_contours]
            for pos, rect in enumerate(rectangle_list):
                if biggest_contour_area < 10000:
                    if -78 - angle_threshold < rect[
                            2] < -74 + angle_threshold and pos != len(
                                rectangle_list) - 1:

                        if view:
                            color = (0, 255, 255)
                            box = np.int0(cv2.boxPoints(rect))
                            cv2.drawContours(frame, [box], 0, color, 2)
                        # only add rect if the second rect is the correct angle
                        if -16 - angle_threshold < rectangle_list[
                                pos + 1][2] < -12 + angle_threshold:
                            if view:
                                color = (0, 0, 255)
                                rect2 = rectangle_list[pos + 1]
                                box2 = np.int0(cv2.boxPoints(rect2))
                                cv2.drawContours(frame, [box2], 0, color, 2)
                            cx = int(
                                (rect[0][0] + rectangle_list[pos + 1][0][0]) /
                                2)
                            cy = int(
                                (rect[0][1] + rectangle_list[pos + 1][0][1]) /
                                2)
                            append((cx, cy))
                else:
                    if pos != len(rectangle_list) - 1:
                        if view:
                            color = (0, 255, 255)
                            box = np.int0(cv2.boxPoints(rect))
                            cv2.drawContours(frame, [box], 0, color, 2)
                            rect2 = rectangle_list[pos + 1]
                            box2 = np.int0(cv2.boxPoints(rect2))
                            color = (255, 255, 0)
                            cv2.drawContours(frame, [box2], 0, color, 2)
                        cx = int(
                            (rect[0][0] + rectangle_list[pos + 1][0][0]) / 2)
                        cy = int(
                            (rect[0][1] + rectangle_list[pos + 1][0][1]) / 2)
                        append((cx, cy))

        if len(average_coord_list) == 1:
            index = 0
            best_center_average_coords = average_coord_list[index]
            yaw = math.degrees(
                math.atan((best_center_average_coords[0] - screen_c_x) /
                          h_focal_length))
            pitch = math.degrees(
                math.atan((best_center_average_coords[1] - screen_c_y) /
                          v_focal_length))
            if view:
                cv2.line(frame, best_center_average_coords, center_coords,
                         (0, 255, 0), 2)
                cv2.line(frame, best_center_average_coords,
                         best_center_average_coords, (255, 0, 0), 5)

        elif len(average_coord_list) > 1:
            # finds c_x that is closest to the center of the center
            best_center_average_x = min(
                average_coord_list,
                key=lambda xy: abs(xy[0] - data['image-width'] / 2))[0]
            index = [coord[0] for coord in average_coord_list
                     ].index(best_center_average_x)
            best_center_average_y = average_coord_list[index][1]
            best_center_average_coords = (best_center_average_x,
                                          best_center_average_y)
            yaw = math.degrees(
                math.atan((best_center_average_coords[0] - screen_c_x) /
                          h_focal_length))
            pitch = math.degrees(
                math.atan((best_center_average_coords[1] - screen_c_y) /
                          v_focal_length))
            if view:
                cv2.line(frame, best_center_average_coords, center_coords,
                         (0, 255, 0), 2)
                for coord in average_coord_list:
                    cv2.line(frame, coord, coord, (255, 0, 0), 5)
        # cv2.resize takes (width, height); stream a 320x240 preview
        new_w = 320
        new_h = 240
        resize = cv2.resize(frame, (new_w, new_h))
        streamer.update_frame(resize)

        if not streamer.is_streaming:
            streamer.start_streaming()
        # if view:
        #     cv2.imshow('Mask', mask)
        #     cv2.imshow('Contour Window', frame)
        #     if not window_moved:
        #         cv2.moveWindow('Mask', 300, 250)
        #         cv2.moveWindow('Contour Window', 1100, 250)
        #         window_moved = True

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        end_time = time.time()

        fps.update()
        fps.stop()
        curr_fps = fps.fps()
        if debug:
            sys.stdout.write("""
=========================================================
Filtered Contour Area: {}
Sorted Contour Area: {}
Biggest Contour Area: {}
Rectangle List: {}
Contours: {}
Targets: {}
Avg_center_list: {}
Best Center Coords: {}
Index: {}
Pitch: {}
Yaw: {}
FPS: {}
Execute time: {}\r""".format(
                filtered_contours_area,
                [cv2.contourArea(contour)
                 for contour in sorted_contours], biggest_contour_area,
                len(rectangle_list), len(sorted_contours),
                len(average_coord_list), average_coord_list,
                best_center_average_coords, index, pitch, yaw, curr_fps,
                end_time - start_time))

        if net_table:
            table.putNumber('center_x', best_center_average_coords[0])
            table.putNumber('center_y', best_center_average_coords[1])
            table.putNumber('yaw', yaw)
            table.putNumber('contours', len(sorted_contours))
            table.putNumber('targets', len(average_coord_list))
            table.putNumber('pitch', pitch)
            table.putNumber('fps', curr_fps)
        filtered_contours.clear()
        sorted_contours.clear()
        rectangle_list.clear()
        average_coord_list.clear()

    cap.release()
    cv2.destroyAllWindows()
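vision() reads two module-level mappings, args and data, that the excerpt does not define. Below is an assumed sketch of the wiring it expects; the flag names mirror the args[...] lookups above, while the config file name and its format are illustrative guesses, not from the source.

import argparse
import json

# Hypothetical wiring for the module-level names vision() reads.
parser = argparse.ArgumentParser()
parser.add_argument('--source', default='0')
parser.add_argument('--flip', action='store_true')
parser.add_argument('--rotate', action='store_true')
parser.add_argument('--view', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--threshold', type=int, default=0)
parser.add_argument('--athreshold', type=int, default=0)
parser.add_argument('--fthreshold', type=float, default=0.5)
parser.add_argument('--networktables', action='store_true')
parser.add_argument('--pi', action='store_true')
parser.add_argument('--crash', action='store_true')
args = vars(parser.parse_args())

# Assumed config: camera matrix, HSV bounds, image size, server IP.
with open('config.json') as f:
    data = json.load(f)

if __name__ == '__main__':
    vision()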
Example no. 5
import cv2
import os
import sys
import shutil
import socket
from flask_opencv_streamer.streamer import Streamer

streamer = Streamer(8080, False)
port_getcommand = 9999
port_filesend = 7777
ss = socket.socket()
print('Data_Gathering initiating')
print('[+] Server socket is created.')
receiveno = 0
ss.bind(('', port_getcommand))
print('[+] Socket is bound to {}'.format(port_getcommand))
minW = 100
minH = 100

face_detector = cv2.CascadeClassifier(cv2.data.haarcascades +
                                      "haarcascade_frontalface_default.xml")
font = cv2.FONT_HERSHEY_SIMPLEX
while True:  #socket listen loop start
    ss.listen(3)
    receiveno = receiveno + 1
    print('[+] Waiting for connection no.{}'.format(receiveno))
    con, addr = ss.accept()
    print('[+] Got connection from {}'.format(addr[0]))
    data = con.recv(1024)
    face_id = int(str(data, 'utf8'))
    print("\n [INFO] Initializing face capture. Look the camera and wait ...")
Example no. 6
import os
import cv2
import numpy as np
import tensorflow as tf
import argparse
import sys
from flask_opencv_streamer.streamer import Streamer

# Set up camera constants
IM_WIDTH = 640
IM_HEIGHT = 480

camera_type = 'usb'
port = 5000
require_login = False
streamer = Streamer(port, require_login, stream_res=(640, 480))

# This is needed since the working directory is the object_detection folder.
sys.path.append('..')

# Import utilites
from utils import label_map_util
from utils import visualization_utils as vis_util

# Name of the directory containing the object detection module we're using
MODEL_NAME = 'ssdlite_mobilenet_v2_coco_2018_05_09'

# Grab path to current working directory
CWD_PATH = os.getcwd()

# Path to the frozen detection graph .pb file, which contains the model used for object detection
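The excerpt cuts off here. In the TensorFlow object-detection samples this code follows, the next step joins the checkpoint path; the line below is an assumed continuation, not from the source.

# Assumed continuation: locate the frozen graph inside the model directory.
PATH_TO_CKPT = os.path.join(CWD_PATH, MODEL_NAME, 'frozen_inference_graph.pb')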
Example no. 7
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer/trainer.yml')
cascadePath = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
font = cv2.FONT_HERSHEY_SIMPLEX
# initiate id counter
id = 0
cam = cv2.VideoCapture(0)  # Initialize and start video capture
cam.set(3, 640)  # set video width
cam.set(4, 480)  # set video height
# Define min window size to be recognized as a face
minW = 100
minH = 100

streamer = Streamer(8080, False)  #for web

nowtime = datetime.now()
trainer_updatedday = nowtime.day

print("Camera ON")
while True:
    ret, img = cam.read()
    img = cv2.flip(img, -1)  # Flip vertically
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(int(minW), int(minH)),
    )
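The loop is cut off right after detection. A plausible rest of the iteration using cv2.face's predict() API, which returns a label id and a distance-style confidence (the cutoff value and label text are assumptions):

    # Hypothetical continuation of the loop body.
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        id, confidence = recognizer.predict(gray[y:y + h, x:x + w])
        # LBPH returns a distance: lower means a better match.
        label = str(id) if confidence < 100 else "unknown"
        cv2.putText(img, label, (x + 5, y - 5), font, 1, (255, 255, 255), 2)

    streamer.update_frame(img)
    if not streamer.is_streaming:
        streamer.start_streaming()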
Example no. 8
from flask_opencv_streamer.streamer import Streamer
from time import sleep

s = Streamer(80, True, "logins.txt", ".login")
s.start_streaming()

while True:
    try:
        sleep(1 / 30)
    except KeyboardInterrupt:
        break
Example no. 9
import queue
import threading
import json
import time
import send
import dlib
import cv2
import os
import base64
import pickle
import zmq
from flask_opencv_streamer.streamer import Streamer
# context = zmq.Context()
# footage_socket = context.socket(zmq.PUB)
# footage_socket.connect('tcp://localhost:5555')

stream = Streamer(5555, False, stream_res=(1280, 720))
protoPath = os.path.join("face_detection_model", "deploy.prototxt")
modelPath = os.path.join("face_detection_model",
                         "res10_300x300_ssd_iter_140000.caffemodel")
detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
embedder = cv2.dnn.readNetFromTorch("openface_nn4.small2.v1.t7")
unlocked = False
thumbnail_created = False
recognizer = pickle.loads(open("output/recognizer.pickle", "rb").read())
le = pickle.loads(open("output/le.pickle", "rb").read())
passes = 0
step = 0
datas = queue.Queue(maxsize=0)
temp = queue.Queue(maxsize=0)
attended = {"user_id": ""}
sent = {"sent": False}
Example no. 10
        else:
            mqttc2.publish("RpiToWeb", "no")
        #for x in myresult:
        #    print(x)

        print("Write complete")

        #mydb.commit()
        cur.close()
        mydb.close()

    #except:
    #mydb.rollback()
    #print("We have a problem")

    except Exception as e:  # method 1: catch all exceptions
        # roll back if anything went wrong
        print("Exception occurred:", e)
        mydb.rollback()


while True:
    if message == "go":

        port = 3030
        require_login = False
        stream_res = (400, 300)
        streamer = Streamer(port, require_login, stream_res)
        detect_recognize_face()
        break
Example no. 11
def main(blur_level=(69, 69)):
    streamer = Streamer(3000, False)

    # Generate blank images
    by_type = np.zeros((1080, 1920, 4), np.uint8)

    bg = cv2.imread("./Peeples blank.png")
    key_type = cv2.imread("./key_type.png")

    # Make sure all images have alpha channel
    bg = cv2.cvtColor(bg, cv2.COLOR_BGR2BGRA)  # imread gives BGR, not RGB
    key_type = cv2.cvtColor(key_type, cv2.COLOR_BGR2BGRA)

    # Kafka stuff -- going to start reading this in real-time
    consumer = Consumer({
        "bootstrap.servers": "sckafka1.simcenter.utc.edu",
        "group.id": "gradient-overlay",
    })
    consumer.subscribe(["cuip_vision_events"])
    last_write_time = time.time()

    while True:
        try:
            consumer.poll(1)
            msg = consumer.consume()
            if msg is not None:
                for m in msg:
                    if m.error():
                        continue
                    j = json.loads(m.value())
                    if j["camera_id"] != "mlk-peeples-cam-3":
                        continue
                    current = 0
                    nxt = 1  # renamed from "next" to avoid shadowing the builtin
                    while nxt < (len(j["locations"]) - 1):
                        # draws from the first to the last location's center
                        # on every pass, as in the source
                        cv2.line(
                            by_type,
                            get_center(j["locations"][0]["coords"]),
                            get_center(j["locations"][-1]["coords"]),
                            get_color_from_label(j["label"]),
                            2,
                        )
                        current += 1
                        nxt += 1

            if time.time() - last_write_time >= 15:  # write every 15 seconds
                print("Writing to disk")
                output = cv2.GaussianBlur(by_type.copy(), blur_level, 0)
                output = cv2.add(output, bg)
                rows, cols = key_type.shape[:2]
                output[0:rows, 0:cols] = key_type
                cv2.imwrite("by_type.png", output)
                del output
                last_write_time = time.time()

            streamer.update_frame(
                cv2.add(cv2.GaussianBlur(by_type, blur_level, 0), bg))
            if not streamer.is_streaming:
                streamer.start_streaming()

            time.sleep(1 / 30)
        except KeyboardInterrupt:
            break

    by_type = cv2.GaussianBlur(by_type, blur_level, 0)
    by_type = cv2.add(by_type, bg)
    rows, cols = key_type.shape[:2]
    by_type[0:rows, 0:cols] = key_type
    cv2.imwrite("by_type.png", by_type)
    consumer.close()
    cv2.destroyAllWindows()
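get_center and get_color_from_label are helpers defined elsewhere in the source file. Plausible stand-ins, assuming "coords" is an (x1, y1, x2, y2) bounding box; the palette is purely illustrative:

def get_center(coords):
    # Midpoint of an assumed (x1, y1, x2, y2) bounding box.
    x1, y1, x2, y2 = (int(c) for c in coords)
    return ((x1 + x2) // 2, (y1 + y2) // 2)


def get_color_from_label(label):
    # Hypothetical BGRA palette keyed by detector label.
    palette = {
        "person": (0, 255, 0, 255),
        "car": (255, 0, 0, 255),
    }
    return palette.get(label, (255, 255, 255, 255))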