Example #1
# Streamer import assumed from flask_opencv_streamer; CUDAStreamer is a
# project-local helper whose import is omitted in the original.
from flask_opencv_streamer.streamer import Streamer


def main() -> None:
    """
    The main launcher logic, using a cuda streamer to stream from an IP camera

    Arguments:
        None
    Returns:
        None
    """

    cuda_streamer = CUDAStreamer(
        "rtsp://*****:*****@10.199.51.222/axis-media/media.amp?streamprofile=H264-OpenCV-Optimized",
        1920, 1080)
    streamer = Streamer(3030, False, (1920, 1080), frame_rate=30)

    while True:
        frame = cuda_streamer.get_image()
        streamer.update_frame(frame)
        if not streamer.is_streaming:
            streamer.start_streaming()
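CUDAStreamer is not part of flask_opencv_streamer, so a minimal CPU stand-in is sketched below. It assumes the only contract the launcher relies on is a get_image() method returning a frame (or None); the class name and capture setup are hypothetical.

# Hypothetical CPU stand-in for CUDAStreamer; assumes get_image() -> frame or None.
import cv2

class SimpleRTSPStreamer:
    def __init__(self, url, width, height):
        self.cap = cv2.VideoCapture(url)
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

    def get_image(self):
        ok, frame = self.cap.read()
        return frame if ok else None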
Example #2
from flask_opencv_streamer.streamer import Streamer
from cv2 import VideoCapture, waitKey

port = 5000
require_login = False
streamer = Streamer(port, require_login)

cap = VideoCapture(0)

print('Starting camera capture; hosting the Flask stream on port:', port)

while True:
    success, frame = cap.read()
    streamer.update_frame(frame)
    if not streamer.is_streaming:
        streamer.start_streaming()
    
    waitKey(66)  # ~66 ms between reads (roughly 15 fps)
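Since the library serves a plain MJPEG feed over HTTP (the /video_feed route is visible in Example #3), the stream can be read back with OpenCV. A minimal viewer sketch, assuming the server from Example #2 is running locally:

# Read the MJPEG stream back with OpenCV; the /video_feed route is taken
# from the NetworkTables entry in Example #3.
import cv2

viewer = cv2.VideoCapture("http://localhost:5000/video_feed")
while True:
    ok, frame = viewer.read()
    if not ok:
        break
    cv2.imshow("remote stream", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
viewer.release()
cv2.destroyAllWindows()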
Example #3
def vision():
    src = int(args['source']) if args['source'].isdigit() else args['source']
    flip = args['flip']
    rotate = args['rotate']
    view = args['view']
    debug = args['debug']
    threshold = args['threshold'] if args['threshold'] < 50 else 0
    angle_threshold = args['athreshold'] if 0 < args['athreshold'] < 30 else 0
    filter_threshold = args['fthreshold'] if 0 < args['fthreshold'] <= 0.8 else 0.5
    net_table = args['networktables']
    is_pi = args['pi']
    crash = args['crash']
    window_moved = False
    sequence = False
    frame = []

    def capture_frame(frame, table, value):
        if value:
            cv2.imwrite('/tmp/gv_frame.jpg', frame)
            table.putBoolean('capture_frame', False)

    cap = cv2.VideoCapture(src)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, data['image-width'])
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, data['image-height'])
    port1 = 3030
    streamer = Streamer(port1, False)

    if net_table:
        nt.NetworkTables.initialize(server=data['server-ip'])
        camera_table = nt.NetworkTables.getTable("CameraPublisher")
        sub_table = camera_table.getSubTable("Camera")
        sub_table.getEntry("streams").setStringArray(
            ["mjpg:http://10.18.16.16:3030/video_feed"])
        table = nt.NetworkTables.getTable('SmartDashboard')
        if table:
            print('table OK')
        table.putNumber('center_x', -1)
        table.putNumber('center_y', -1)
        table.putNumber('contours', -1)
        table.putNumber('targets', -1)
        table.putNumber('width', data['image-width'])
        table.putNumber('height', data['image-height'])
        table.putBoolean('capture_frame', False)
        table.addEntryListener(lambda table, key, value, isNew: capture_frame(
            frame, table, value),
                               key='capture_frame')
        # values = {'vision_active': False}
        # table.addEntryListener(value_changed, key='vision_active')

    if debug:
        print(
            '----------------------------------------------------------------')
        print('Current Source: {}'.format(src))
        print('View Flag: {}'.format(view))
        print('Debug Flag: {}'.format(debug))
        print('Threshold Value: {}'.format(threshold))
        print('Angle Threshold Value: {}'.format(angle_threshold))
        print('Network Tables Flag: {}'.format(net_table))
        print(
            '----------------------------------------------------------------\n'
        )

    v_focal_length = data['camera_matrix'][1][1]
    h_focal_length = data['camera_matrix'][0][0]
    lower_color = np.array([
        data['lower-color-list'][0] - threshold, data['lower-color-list'][1],
        data['lower-color-list'][2]
    ])  # HSV to test: 0, 220, 25
    upper_color = np.array([
        data['upper-color-list'][0] + threshold, data['upper-color-list'][1],
        data['upper-color-list'][2]
    ])
    center_coords = (int(data['image-width'] / 2),
                     int(data['image-height'] / 2))
    screen_c_x = data['image-width'] / 2 - 0.5
    screen_c_y = data['image-height'] / 2 - 0.5

    first_read = True
    rectangle_list = []
    sorted_contours = []
    average_coord_list = []
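    # bind append once so the hot loop avoids repeated attribute lookups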
    append = average_coord_list.append

    while True:
        fps = FPS().start()
        if crash:
            raise Exception('Get bamboozled...')
        start_time = time.time()
        biggest_contour_area = -1
        best_center_average_coords = (-1, -1)
        index = -1
        pitch = -999
        yaw = -999

        if view:
            if not first_read:

                key = cv2.waitKey(30) & 0xFF
                if key == ord('q'):
                    break
                if sequence and key != ord(' '):
                    continue

        first_read = False

        _, frame = cap.read()

        if frame is None:
            continue

        if flip:
            frame = cv2.flip(frame, -1)
        if rotate:
            frame = imutils.rotate_bound(frame, 90)
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lower_color, upper_color)
        # find contours from mask
        all_contours, _ = cv2.findContours(mask, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
        # remove super small or super big contours that exist due to light noise/objects
        filtered_contours = [
            c for c in all_contours if 50 < cv2.contourArea(c) < 15000
        ]

        filtered_contours_area = [
            cv2.contourArea(c) for c in all_contours if 50 < cv2.contourArea(c)
        ]

        # find the contour with the biggest area so we can further remove contours created from light noise
        if len(all_contours) > 0:
            biggest_contour_area = max(
                [cv2.contourArea(c) for c in all_contours])
        # create a contour list that removes contours smaller than the biggest * some constant

        filtered_contours = [
            c for c in filtered_contours
            if cv2.contourArea(c) > filter_threshold * biggest_contour_area
        ]

        # sort contours by left to right, top to bottom
        if len(filtered_contours) > 1:
            bounding_boxes = [cv2.boundingRect(c) for c in filtered_contours]
            sorted_contours, _ = zip(
                *sorted(zip(filtered_contours, bounding_boxes),
                        key=lambda b: b[1][0],
                        reverse=False))
            sorted_contours = list(sorted_contours)

        if len(sorted_contours) > 1:
            # gets ((cx, cy), (width, height), angle of rot) for each contour
            rectangle_list = [cv2.minAreaRect(c) for c in sorted_contours]
            for pos, rect in enumerate(rectangle_list):
                if pos == len(rectangle_list) - 1:
                    # pairs are formed with the next rect, so skip the last one
                    continue
                next_rect = rectangle_list[pos + 1]
                if biggest_contour_area < 10000:
                    if -78 - angle_threshold < rect[2] < -74 + angle_threshold:
                        if view:
                            box = np.int0(cv2.boxPoints(rect))
                            cv2.drawContours(frame, [box], 0, (0, 255, 255), 2)
                        # only add the pair if the second rect is at the correct angle
                        if -16 - angle_threshold < next_rect[2] < -12 + angle_threshold:
                            if view:
                                box2 = np.int0(cv2.boxPoints(next_rect))
                                cv2.drawContours(frame, [box2], 0, (0, 0, 255), 2)
                            cx = int((rect[0][0] + next_rect[0][0]) / 2)
                            cy = int((rect[0][1] + next_rect[0][1]) / 2)
                            append((cx, cy))
                else:
                    if view:
                        box = np.int0(cv2.boxPoints(rect))
                        cv2.drawContours(frame, [box], 0, (0, 255, 255), 2)
                        box2 = np.int0(cv2.boxPoints(next_rect))
                        cv2.drawContours(frame, [box2], 0, (255, 255, 0), 2)
                    cx = int((rect[0][0] + next_rect[0][0]) / 2)
                    cy = int((rect[0][1] + next_rect[0][1]) / 2)
                    append((cx, cy))

        if len(average_coord_list) == 1:
            index = 0
            best_center_average_coords = average_coord_list[index]
            yaw = math.degrees(
                math.atan((best_center_average_coords[0] - screen_c_x) /
                          h_focal_length))
            pitch = math.degrees(
                math.atan((best_center_average_coords[1] - screen_c_y) /
                          v_focal_length))
            if view:
                cv2.line(frame, best_center_average_coords, center_coords,
                         (0, 255, 0), 2)
                # zero-length line: draws the target point as a dot
                cv2.line(frame, best_center_average_coords,
                         best_center_average_coords, (255, 0, 0), 5)

        elif len(average_coord_list) > 1:
            # find the c_x closest to the center of the screen
            best_center_average_x = min(
                average_coord_list,
                key=lambda xy: abs(xy[0] - data['image-width'] / 2))[0]
            index = [coord[0] for coord in average_coord_list
                     ].index(best_center_average_x)
            best_center_average_y = average_coord_list[index][1]
            best_center_average_coords = (best_center_average_x,
                                          best_center_average_y)
            yaw = math.degrees(
                math.atan((best_center_average_coords[0] - screen_c_x) /
                          h_focal_length))
            pitch = math.degrees(
                math.atan((best_center_average_coords[1] - screen_c_y) /
                          v_focal_length))
            if view:
                cv2.line(frame, best_center_average_coords, center_coords,
                         (0, 255, 0), 2)
                for coord in average_coord_list:
                    cv2.line(frame, coord, coord, (255, 0, 0), 5)
        new_h = 320
        new_w = 240
        resize = cv2.resize(frame, (new_w, new_h))
        streamer.update_frame(resize)

        if not streamer.is_streaming:
            streamer.start_streaming()
        # if view:
        #     cv2.imshow('Mask', mask)
        #     cv2.imshow('Contour Window', frame)
        #     if not window_moved:
        #         cv2.moveWindow('Mask', 300, 250)
        #         cv2.moveWindow('Contour Window', 1100, 250)
        #         window_moved = True

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        end_time = time.time()

        fps.update()
        fps.stop()
        curr_fps = fps.fps()
        if debug:
            sys.stdout.write("""
=========================================================
Filtered Contour Area: {}
Sorted Contour Area: {}
Biggest Contour Area: {}
Rectangle List: {}
Contours: {}
Targets: {}
Avg_center_list: {}
Best Center Coords: {}
Index: {}
Pitch: {}
Yaw: {}
FPS: {}
Execute time: {}\r""".format(
                filtered_contours_area,
                [cv2.contourArea(contour)
                 for contour in sorted_contours], biggest_contour_area,
                len(rectangle_list), len(sorted_contours),
                len(average_coord_list), average_coord_list,
                best_center_average_coords, index, pitch, yaw, curr_fps,
                end_time - start_time))

        if net_table:
            table.putNumber('center_x', best_center_average_coords[0])
            table.putNumber('center_y', best_center_average_coords[1])
            table.putNumber('yaw', yaw)
            table.putNumber('contours', len(sorted_contours))
            table.putNumber('targets', len(average_coord_list))
            table.putNumber('pitch', pitch)
            table.putNumber('fps', curr_fps)
        filtered_contours.clear()
        sorted_contours.clear()
        rectangle_list.clear()
        average_coord_list.clear()

    cap.release()
    cv2.destroyAllWindows()
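The yaw/pitch values above come from a pinhole-camera relation: the angle to a pixel is the arctangent of its offset from the image center divided by the focal length in pixels from the camera matrix. A standalone worked sketch with hypothetical numbers standing in for data['camera_matrix'] and data['image-width']:

# Worked sketch of the yaw computation; all numbers are assumed.
import math

h_focal_length = 700.0        # fx, in pixels
screen_c_x = 640 / 2 - 0.5    # image center for a 640-px-wide frame
target_x = 400                # detected target center, x
yaw = math.degrees(math.atan((target_x - screen_c_x) / h_focal_length))
print(round(yaw, 2))          # ~6.56 degrees off center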
Example #4

from flask_opencv_streamer.streamer import Streamer
import cv2
from mss import mss
from PIL import Image
import numpy as np

mon = {'top': 0, 'left': 0, 'width': 1920, 'height': 1080}

sct = mss()

port = 3030
require_login = False
streamer = Streamer(port, require_login)

# Open video device 0
video_capture = cv2.VideoCapture(0)

while True:
    _, frame = video_capture.read()
    screenshot = sct.grab(mon)  # sct.get_pixels()/sct.image are the pre-3.0 mss API
    img = Image.frombytes('RGB', screenshot.size, screenshot.rgb)

    # PIL gives RGB; convert to the BGR channel order OpenCV expects
    streamer.update_frame(cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR))

    if not streamer.is_streaming:
        streamer.start_streaming()

    cv2.waitKey(30)
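A PIL-free variant sketch: mss ScreenShot objects convert directly to a BGRA ndarray, which only needs its alpha channel dropped before streaming.

# PIL-free capture sketch; monitor geometry matches `mon` above.
import cv2
import numpy as np
from mss import mss

with mss() as sct:
    frame = np.array(sct.grab({'top': 0, 'left': 0, 'width': 1920, 'height': 1080}))
    frame = cv2.cvtColor(frame, cv2.COLOR_BGRA2BGR)  # drop alpha for encoding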
Example #5
File: cctv.py Project: ypdev26/ELK-Project
class CCTVMain(QMainWindow):
    def __init__(self):
        super().__init__()
        uic.loadUi("QTui/cctv_main.ui", self)  # forward slashes avoid backslash-escape pitfalls
        self.video = None
        self.location = "CCTV 위치"
        self.lat = None
        self.lon = None
        self.CCTVOption = CCTVOption(self)
        self.sensor = 10
        self.fps = 60
        self.count = 0
        self.fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        self.action_setting.triggered.connect(self.exec_setting)

    def start(self):
        self.location = self.CCTVOption.set_location
        if self.location is None:
            QMessageBox.about(self, "정보", "위치를 입력해주세요.")
            return self.exec_setting()

        try:
            self.setCamera = cv2.VideoCapture(0)
            _, self.img_o = self.setCamera.read()
            # VideoCapture frames are BGR, so convert with COLOR_BGR2GRAY
            self.img_o = cv2.cvtColor(self.img_o, cv2.COLOR_BGR2GRAY)
        except Exception:
            QMessageBox.about(self, "Info", "Please check the camera.")
            return

        self.main_record = True
        self.event_record = False
        self.event_switch = False
        self.event_status = False

        self.timer = QTimer()
        self.timer.timeout.connect(self.nextframe)
        self.timer.start(int(1000 / self.fps))  # QTimer.start() takes whole milliseconds

        port = 3030
        require_login = False
        self.streamer = Streamer(port, require_login)

    def stop(self):
        self.main_video.release()
        self.setCamera.release()
        self.imgLabel.setPixmap(QPixmap.fromImage(QImage()))
        self.timer.stop()

    def nextframe(self):
        _, self.cam = self.setCamera.read()
        self.streamer.update_frame(self.cam)
        if not self.streamer.is_streaming:
            self.streamer.start_streaming()
        self.cam = cv2.cvtColor(self.cam, cv2.COLOR_BGR2RGB)
        self.img_p = cv2.cvtColor(self.cam, cv2.COLOR_RGB2GRAY)

        self.write_main()
        self.compare()
        self.img_o = self.img_p.copy()
        # pass the row stride so QImage handles non-contiguous frame buffers
        img = QImage(self.cam, self.cam.shape[1], self.cam.shape[0],
                     self.cam.strides[0], QImage.Format_RGB888)
        pix = QPixmap.fromImage(img)
        self.imgLabel.setPixmap(pix)

    def write_main(self):
        cam = cv2.cvtColor(self.cam, cv2.COLOR_BGR2RGB)
        if self.main_record:
            self.main_record = False
            logdate = datetime.datetime.now().strftime("%y-%m-%d_%H%M%S")
            self.main_video = cv2.VideoWriter(
                "video/MainVideo_" + logdate + ".mp4", self.fourcc, 20,
                (self.cam.shape[1], self.cam.shape[0]))
        # the writer exists from here on, so every frame is recorded
        self.main_video.write(cam)

    def write_event(self):
        self.cam_rec = cv2.cvtColor(self.cam, cv2.COLOR_BGR2RGB)
        if self.event_record and not self.event_switch:
            self.event_record = False
            self.event_switch = True
            self.event_status = True
            logdate = datetime.datetime.now().strftime("%y-%m-%d_%H%M%S")
            self.write_log()
            self.event_video = cv2.VideoWriter(
                "video/event/EventVideo_" + logdate + ".mp4", self.fourcc, 20,
                (self.cam.shape[1], self.cam.shape[0]))
        if self.event_switch:
            self.event_video.write(self.cam_rec)

    def write_log(self):
        date = datetime.datetime.utcnow().isoformat()
        self.lat = self.CCTVOption.set_lat
        self.lon = self.CCTVOption.set_lon
        # context manager closes the log even if the write fails
        with open('log/detect.log', 'a') as log:
            log.write(self.location + " " + self.lat + " " + self.lon + " " +
                      date + " -\n")

    def compare(self):
        self.sensor = self.CCTVOption.set_sensor
        # mean squared error between the previous and current grayscale frames
        value = np.sum(
            (self.img_o.astype("float") - self.img_p.astype("float"))**2)
        value /= float(self.img_o.shape[0] * self.img_o.shape[1])
        if value >= self.sensor:
            self.event_record = True
            self.count = 0
        else:
            self.event_record = False
            if self.event_status:
                self.event_status = False
                self.event_timer()
        self.write_event()

    def event_timer(self):
        timer = threading.Timer(1, self.event_timer)
        timer.start()
        self.count += 1
        if self.count > 9:
            timer.cancel()
            self.event_video.release()
            self.event_switch = False

    def exec_setting(self):
        self.CCTVOption.exec_()
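compare() above implements a mean-squared-error motion trigger between consecutive grayscale frames. A standalone sketch of the same arithmetic, assuming two same-sized grayscale ndarrays:

# MSE motion test as used in compare(); a result above the sensitivity
# threshold flags motion.
import numpy as np

def frame_mse(prev_gray, curr_gray):
    diff = prev_gray.astype("float") - curr_gray.astype("float")
    return np.sum(diff ** 2) / float(prev_gray.shape[0] * prev_gray.shape[1])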
Example #6
        faces = face_detector.detectMultiScale(gray,
                                               scaleFactor=1.3,
                                               minNeighbors=5,
                                               minSize=(int(minW), int(minH)))

        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            count += 1
            # Save the captured image into the datasets folder
            cv2.imwrite(
                "dataset/User." + str(face_id) + '.' + str(count) + ".jpg",
                gray[y:y + h, x:x + w])
            cv2.putText(img, "{} of 30".format(count), (x + 5, y + h - 5),
                        font, 1, (255, 255, 0), 1)
        streamer.update_frame(img)
        if not streamer.is_streaming:
            streamer.start_streaming()

        k = cv2.waitKey(100) & 0xff  # press 'ESC' to exit the video
        if k == 27:
            break
        elif count >= 30:  # take 30 face samples, then stop
            img = cv2.imread("done.jpg", 1)
            cv2.imshow('image', img)
            streamer.update_frame(img)
            break
    # Do a bit of cleanup
    print("\n [+]Shutting down Cam")
    cam.release()
    cv2.destroyAllWindows()
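The fragment above assumes face_detector, cam, minW/minH, font, face_id, count, and streamer already exist. A hypothetical setup sketch using OpenCV's bundled Haar cascade (the 0.1 × frame-size minimum face size is an assumption):

# Hypothetical context for the fragment above; names match what it uses.
import cv2
from flask_opencv_streamer.streamer import Streamer

face_detector = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
cam = cv2.VideoCapture(0)
minW = 0.1 * cam.get(cv2.CAP_PROP_FRAME_WIDTH)   # assumed minimum face size
minH = 0.1 * cam.get(cv2.CAP_PROP_FRAME_HEIGHT)
font = cv2.FONT_HERSHEY_SIMPLEX
face_id = 1        # hypothetical user id for the dataset filenames
count = 0
streamer = Streamer(3030, False)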
Example #7

            cv2.imwrite('fire.png', frame)
            #send_message('fire')
        else:
            # if prediction for FIRE was FALSE, draw RED contour for superpixel
            cv2.drawContours(small_frame, contours, -1, (0, 0, 255), 1)

        # stop the timer and convert to ms. (to see how long processing and display takes)

    stop_t = ((cv2.getTickCount() - start_t) / cv2.getTickFrequency()) * 1000

    # image display and key handling
    #cv2.imshow(windowName, small_frame);

    # wait fps time or less depending on processing time taken (e.g. 1000ms / 25 fps = 40 ms)
    frame_count = frame_count + 1
    streamer.update_frame(small_frame)

    if not streamer.is_streaming:
        streamer.start_streaming()

    # the paced waitKey below already provides the per-frame delay
    key = cv2.waitKey(max(2, frame_time - int(math.ceil(stop_t)))) & 0xFF
    if key == 27:
        keepProcessing = False
    elif key == ord('f'):
        cv2.setWindowProperty(windowName, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

cv2.destroyAllWindows()
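The delay expression budgets each frame: wait the per-frame time minus the measured processing time, floored at 2 ms. A worked sketch with assumed numbers:

# Worked pacing example: 1000 ms / 25 fps = 40 ms budget per frame.
import math

frame_time = 40    # ms per frame at an assumed 25 fps
stop_t = 13.7      # measured processing time in ms
delay = max(2, frame_time - int(math.ceil(stop_t)))
print(delay)       # 26 -> cv2.waitKey(26)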
Example #8
def main(blur_level=(69, 69)):
    streamer = Streamer(3000, False)

    # Generate blank images
    by_type = np.zeros((1080, 1920, 4), np.uint8)

    bg = cv2.imread("./Peeples blank.png")
    key_type = cv2.imread("./key_type.png")

    # Make sure all images have alpha channel
    bg = cv2.cvtColor(bg, cv2.COLOR_RGB2RGBA)
    key_type = cv2.cvtColor(key_type, cv2.COLOR_RGB2RGBA)

    # Kafka stuff -- going to start reading this in real-time
    consumer = Consumer({
        "bootstrap.servers": "sckafka1.simcenter.utc.edu",
        "group.id": "gradient-overlay",
    })
    consumer.subscribe(["cuip_vision_events"])
    last_write_time = time.time()

    while True:
        try:
            # consume() with a timeout replaces the original poll(1), which
            # silently discarded one message per loop iteration
            msg = consumer.consume(num_messages=100, timeout=1.0)
            if msg is not None:
                for m in msg:
                    if m.error():
                        continue
                    j = json.loads(m.value())
                    if j["camera_id"] != "mlk-peeples-cam-3":
                        continue
                    current = 0
                    nxt = 1  # renamed: `next` shadows the built-in
                    while nxt < len(j["locations"]):
                        # draw the segment between consecutive detections;
                        # the original computed per-segment x1..y2 values but
                        # deleted them unused and drew first-to-last instead
                        cv2.line(
                            by_type,
                            get_center(j["locations"][current]["coords"]),
                            get_center(j["locations"][nxt]["coords"]),
                            get_color_from_label(j["label"]),
                            2,
                        )
                        current += 1
                        nxt += 1

            if time.time() - last_write_time >= 15:  # write every 15 seconds
                print("Writing to disk")
                output = cv2.GaussianBlur(by_type.copy(), blur_level, 0)
                output = cv2.add(output, bg)
                rows, cols = key_type.shape[:2]
                output[0:rows, 0:cols] = key_type
                cv2.imwrite("by_type.png", output)
                del output
                last_write_time = time.time()

            streamer.update_frame(
                cv2.add(cv2.GaussianBlur(by_type, blur_level, 0), bg))
            if not streamer.is_streaming:
                streamer.start_streaming()

            time.sleep(1 / 30)
        except KeyboardInterrupt:
            break

    by_type = cv2.GaussianBlur(by_type, blur_level, 0)
    by_type = cv2.add(by_type, bg)
    rows, cols = key_type.shape[:2]
    by_type[0:rows, 0:cols] = key_type
    cv2.imwrite("by_type.png", by_type)
    consumer.close()
    cv2.destroyAllWindows()
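get_center and get_color_from_label are not shown in the fragment. Hypothetical sketches consistent with how they are called, assuming coords is an (x1, y1, x2, y2) bounding box and that labels are object classes:

# Hypothetical helpers; not from the original project.
def get_center(coords):
    x1, y1, x2, y2 = coords  # assumed bounding-box layout
    return (int((x1 + x2) / 2), int((y1 + y2) / 2))

def get_color_from_label(label, default=(0, 255, 0)):
    colors = {"car": (255, 0, 0), "person": (0, 0, 255)}  # assumed labels
    return colors.get(label, default)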