# Example #1
    def __init__(self):
        """Set up the pygame window, the Tello connection, the YOLO model and
        the CSRT tracker, and start the 50 ms update timer."""
        # Init pygame
        pygame.init()

        # Create pygame window
        pygame.display.set_caption("Tello video stream")
        self.screen = pygame.display.set_mode([960, 720])

        # Init Tello object that interacts with the Tello drone
        self.tello = Tello()

        # Drone velocities between -100~100
        self.for_back_velocity = 0
        self.left_right_velocity = 0
        self.up_down_velocity = 0
        self.yaw_velocity = 0
        self.speed = 10
        self.mode = None
        # rc commands are only sent once takeoff has enabled this flag
        self.send_rc_control = False
        self.yolo = Yolo()
        self.yolo.initializeModel()
        # FIX: dropped the redundant "tracker =" alias — the original bound the
        # tracker to a throwaway local as well as the attribute.
        self.tracker = cv2.TrackerCSRT().create()
        self.locked = False
        self.locked_frame = None

        # create update timer: fire USEREVENT + 1 every 50 ms to drive the main loop
        pygame.time.set_timer(USEREVENT + 1, 50)
        logger.info("Game Initialized")
# Example #2
    def playFirstCamera(self, detection_graph, sess, img):
        """Detect and track luggage in the first camera feed.

        Every 16th frame (polling counter ``firstTimes``) the TF detector is
        run; each new detection inside the monitored vertical band [450, 700]
        that is not already covered by a live tracker spawns a CSRT tracker.
        Every frame, all live trackers are updated; objects that reach the end
        region are cropped and queued on ``listWaitToDetecte``, and drifted
        trackers (implausibly large boxes) are retired.

        Args:
            detection_graph: frozen TensorFlow detection graph.
            sess: TF session bound to ``detection_graph``.
            img: current BGR frame; annotated in place.

        Returns:
            The annotated frame.
        """
        # Show the polling counter in the top-left corner.
        cv2.putText(img, str(self.firstTimes), (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                    (0, 0, 255), 2)
        if self.firstTimes != 0:
            self.firstTimes -= 1
        else:
            image_np_expanded = np.expand_dims(img, axis=0)
            height, width, a = img.shape
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Each box represents a part of the image where a particular object was detected.
            boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            # Each score represents the detector's confidence for that object.
            scores = detection_graph.get_tensor_by_name('detection_scores:0')
            classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')
            # Actual detection.
            (boxes, scores, classes, num_detections) = sess.run(
                [boxes, scores, classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})

            # Discard detections below 80% confidence.
            s_boxes = boxes[scores > 0.8]
            for i in range(0, s_boxes.shape[0]):
                # Boxes are normalized [ymin, xmin, ymax, xmax]; scale to pixels.
                ymin = int(s_boxes[i][0] * height)
                xmin = int(s_boxes[i][1] * width)
                ymax = int(s_boxes[i][2] * height)
                xmax = int(s_boxes[i][3] * width)
                # Only accept detections inside the vertical band [450, 700].
                if ymin >= 450 and ymax <= 700:
                    flag = True
                    midx = int(xmin + (xmax - xmin) / 2)
                    midy = int(ymin + (ymax - ymin) / 2)
                    # FIX: distinct loop variable "j" — the original reused "i",
                    # shadowing the detection index.
                    for j in range(len(self.trackersPositionSecond)):
                        # Skip detections whose center is already covered by a tracker.
                        if self.trackersPositionSecond[j][0] < midx < self.trackersPositionSecond[j][2] and \
                                self.trackersPositionSecond[j][1] < midy < \
                                self.trackersPositionSecond[j][3]:
                            flag = False
                            break
                    if flag:
                        tracker = cv2.TrackerCSRT().create()
                        tracker.init(img, (xmin, ymin, xmax - xmin, ymax - ymin))
                        self.trackersSecond.append(tracker)
                        self.trackersPositionSecond.append((xmin, ymin, xmax, ymax))
            self.firstTimes = 15

        # Indices of trackers to retire this frame (set: dedupes the case where
        # one tracker hits both retirement conditions).
        retire = set()
        # Update every live tracker against the current frame.
        for i in range(len(self.trackersSecond)):
            tracker = self.trackersSecond[i]
            ok, bboxes = tracker.update(img)
            if ok:
                self.trackersPositionSecond[i] = (
                    int(bboxes[0]), int(bboxes[1]),
                    int(bboxes[0] + bboxes[2]), int(bboxes[1] + bboxes[3]))
                # Object reached the end region: crop it with a 15 px margin
                # and queue the crop (tagged with camera id 1) for re-detection.
                if int(bboxes[1] + bboxes[3]) < 550:
                    # FIX: clamp at 0 — the original could produce negative
                    # slice indices and crop the wrong region.
                    ymin = max(0, int(bboxes[1]) - 15)
                    xmin = max(0, int(bboxes[0]) - 15)
                    ymax = int(bboxes[1] + bboxes[3]) + 15
                    xmax = int(bboxes[0] + bboxes[2]) + 15
                    part = img[ymin:ymax, xmin:xmax]
                    self.listWaitToDetecte.put((part, 1))
                    retire.add(i)
                # Box grew implausibly large: the tracker has drifted; drop it.
                if int(bboxes[2]) * int(bboxes[3]) >= 150000:
                    print("有行李出现错误,失效")
                    retire.add(i)

        # FIX: pop in DESCENDING index order. The original popped indices in
        # ascending queue order, so every pop shifted the remaining indices and
        # the wrong trackers were removed whenever two or more retired at once.
        for outIndex in sorted(retire, reverse=True):
            self.trackersSecond.pop(outIndex)
            self.trackersPositionSecond.pop(outIndex)

        return img
# Example #3
    def playThirdCamera(self, detection_graph, sess, img):
        """Detect and track luggage in the third camera feed.

        Every 21st frame (polling counter ``thirdTimes``) the TF detector is
        run; each new detection inside the monitored vertical band [370, 680]
        that is not already covered by a live tracker spawns a CSRT tracker.
        Every frame, all live trackers are updated; objects that reach the end
        region are cropped and queued on ``listWaitToDetecte``, and drifted
        trackers (implausibly large boxes) are retired.

        Args:
            detection_graph: frozen TensorFlow detection graph.
            sess: TF session bound to ``detection_graph``.
            img: current BGR frame; annotated in place.

        Returns:
            The annotated frame.
        """
        # Show the polling counter in the top-left corner.
        cv2.putText(img, str(self.thirdTimes), (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                    (0, 0, 255), 2)
        if self.thirdTimes != 0:
            self.thirdTimes -= 1
        else:
            image_np_expanded = np.expand_dims(img, axis=0)
            height, width, a = img.shape
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Each box represents a part of the image where a particular object was detected.
            boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            # Each score represents the detector's confidence for that object.
            scores = detection_graph.get_tensor_by_name('detection_scores:0')
            classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')
            # Actual detection.
            (boxes, scores, classes, num_detections) = sess.run(
                [boxes, scores, classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})

            # Discard detections below 80% confidence.
            s_boxes = boxes[scores > 0.8]

            for i in range(0, s_boxes.shape[0]):
                # Boxes are normalized [ymin, xmin, ymax, xmax]; scale to pixels.
                ymin = int(s_boxes[i][0] * height)
                xmin = int(s_boxes[i][1] * width)
                ymax = int(s_boxes[i][2] * height)
                xmax = int(s_boxes[i][3] * width)
                # Only accept detections inside the vertical band [370, 680].
                if ymin >= 370 and ymax <= 680:
                    flag = True
                    midx = int(xmin + (xmax - xmin) / 2)
                    midy = int(ymin + (ymax - ymin) / 2)
                    # FIX: distinct loop variable "j" — the original reused "i",
                    # shadowing the detection index.
                    for j in range(len(self.trackersPositionThird)):
                        # Skip detections whose center is already covered by a tracker.
                        if self.trackersPositionThird[j][0] < midx < self.trackersPositionThird[j][2] and \
                                self.trackersPositionThird[j][1] < midy < \
                                self.trackersPositionThird[j][3]:
                            flag = False
                            break
                    if flag:
                        # Spawn a fresh tracker for the new detection.
                        tracker = cv2.TrackerCSRT().create()
                        tracker.init(img, (xmin, ymin, xmax - xmin, ymax - ymin))
                        self.trackersThird.append(tracker)
                        self.trackersPositionThird.append((xmin, ymin, xmax, ymax))
            self.thirdTimes = 20

        # Indices of trackers to retire this frame (set: dedupes the case where
        # one tracker hits both retirement conditions).
        retire = set()
        # Update every live tracker against the current frame.
        for i in range(len(self.trackersThird)):
            tracker = self.trackersThird[i]
            ok, bboxes = tracker.update(img)
            if ok:
                self.trackersPositionThird[i] = (
                    int(bboxes[0]), int(bboxes[1]),
                    int(bboxes[0] + bboxes[2]), int(bboxes[1] + bboxes[3]))
                # Object reached the end region: crop it with a 20 px margin
                # and queue the crop (tagged with camera id 3) for re-detection.
                if int(bboxes[1] + bboxes[3]) < 470:
                    # FIX: clamp at 0 — the original could produce negative
                    # slice indices and crop the wrong region.
                    ymin = max(0, int(bboxes[1]) - 20)
                    xmin = max(0, int(bboxes[0]) - 20)
                    ymax = int(bboxes[1] + bboxes[3]) + 20
                    xmax = int(bboxes[0] + bboxes[2]) + 20
                    part = img[ymin:ymax, xmin:xmax]
                    self.listWaitToDetecte.put((part, 3))
                    retire.add(i)
                # Box grew implausibly large: the tracker has drifted; drop it.
                if int(bboxes[2]) * int(bboxes[3]) >= 150000:
                    print("有行李出现错误,失效")
                    retire.add(i)

        # FIX: pop in DESCENDING index order. The original popped indices in
        # ascending queue order, so every pop shifted the remaining indices and
        # the wrong trackers were removed whenever two or more retired at once.
        for outIndex in sorted(retire, reverse=True):
            self.trackersThird.pop(outIndex)
            self.trackersPositionThird.pop(outIndex)
        return img
# Image-processing demo: webcam capture, Haar cascades, YOLO model and a
# CSRT tracker, with output written to "project.avi".

import numpy as np
import cv2
import numpy as np
# NOTE(review): numpy is imported twice and cv2 is imported under two aliases
# ("cv2" and "cv"); both aliases are used below, so they are kept as-is.
import cv2 as cv
from ImageProcessing.yolov3 import Yolo

# Haar cascade classifiers for face/eye detection; the XML files are expected
# in the working directory (an empty classifier is created if they are missing).
face_cascade = cv.CascadeClassifier("haarcascade_frontalface_default.xml")
eye_cascade = cv.CascadeClassifier("haarcascade_eye.xml")

# Capture from the default webcam (device 0).
cap = cv2.VideoCapture(0)
yolo = Yolo()
yolo.initializeModel()
tracker = cv2.TrackerCSRT().create()
# State flags consumed by the main loop below.
tracker_start = False
first_frame = True
# size=
# Video writer: DIVX codec, 15 fps, fixed 1280x720 frame size.
out = cv2.VideoWriter("project.avi", cv2.VideoWriter_fourcc(*"DIVX"), 15,
                      (1280, 720))
while True:

    # Capture frame-by-frame
    ret, frame = cap.read()
    shape = frame.shape
    print(shape)

    fx = 0.5
    fy = 0.5
    # Our operations on the frame come here
    img = cv2.resize(frame, None, fx=0.5, fy=0.5)