Code example #1
0
class VideoProcessorThread:
    """Read frames from a video file, run human detection / pose
    classification on each, and buffer processed frames in a queue.

    NOTE(review): despite the name this class does not subclass
    threading.Thread; ``update()`` is presumably run in a worker thread
    started by the caller -- confirm against call sites.

    Relies on module-level globals not visible in this chunk:
    ``resize_ratio``, ``gray``, ``classify_type``, ``show_size``,
    plus ``cv2``, ``Queue``, ``time`` and ``HumanDetection``.
    """

    def __init__(self, path, queueSize=3000):
        # path: video file to read; queueSize: max number of buffered frames.
        self.path = path
        self.cap = cv2.VideoCapture(path)
        # Flag checked by update(); set via stop() to end the loop.
        self.stopped = False
        # Queue of processed frames handed to the consumer via read().
        self.Q = Queue(maxsize=queueSize)
        # Background subtractor (currently unused by update(); kept for
        # interface compatibility -- other code may reach for self.fgbg).
        self.fgbg = cv2.createBackgroundSubtractorMOG2(history=500,
                                                       varThreshold=200,
                                                       detectShadows=False)
        self.height, self.width = int(self.cap.get(
            cv2.CAP_PROP_FRAME_HEIGHT)), int(
                self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.resize_size = (int(self.width * resize_ratio),
                            int(self.height * resize_ratio))
        self.IP = HumanDetection(self.resize_size)

    def stop(self):
        """Signal the update loop to exit on its next iteration."""
        self.stopped = True

    def more(self):
        """Return True while processed frames remain in the queue."""
        return not self.Q.empty()

    def read(self):
        """Block until the next processed frame is available and return it."""
        return self.Q.get()

    def update(self):
        """Main loop: grab, resize, detect, classify, display and enqueue
        frames until the video ends or stop() is called."""
        self.IP.init()
        while True:
            # Cooperative shutdown requested by the consumer.
            if self.stopped:
                return
            if self.Q.full():
                # FIX: hold the queue's own mutex while touching its
                # internal deque -- clearing it unlocked races with a
                # consumer blocked in Q.get().
                with self.Q.mutex:
                    self.Q.queue.clear()
                continue
            grabbed, frame = self.cap.read()
            if not grabbed:
                # `grabbed` is False at end of file: stop and exit.
                self.stop()
                return
            start = time.time()
            frame = cv2.resize(frame, self.resize_size)
            # FIX: ndarray.copy() instead of copy.deepcopy -- same result
            # for an image array, much cheaper.
            frame2 = frame.copy()
            kps, boxes, kps_score = self.IP.process_img(frame, gray=gray)
            img, img_black = self.IP.visualize()
            # classify_type selects which image (skeleton-only vs raw copy)
            # and which granularity (whole frame vs per-box) to classify.
            if classify_type == 1:
                result = self.IP.classify_whole(img_black, img)
            elif classify_type == 2:
                result = self.IP.classify_whole(frame2, img)
            elif classify_type == 3:
                result = self.IP.classify(img_black, img, boxes)
            elif classify_type == 4:
                result = self.IP.classify(frame2, img, boxes)
            else:
                raise ValueError("Not a right classification type!")

            cv2.imshow("res", cv2.resize(img, show_size))
            cv2.waitKey(2)
            print("time is:", time.time() - start)
            # Hand the (resized) frame to the consumer.
            self.Q.put(frame)
Code example #2
0
class VideoProcessor:
    """Offline video processing: run pose detection/classification on each
    frame, optionally writing an annotated video and per-frame box /
    keypoint text files.

    Relies on module-level globals not visible in this chunk:
    ``config``, ``fourcc``, ``resize_ratio``, ``gray``, ``classify_type``,
    ``show_size``, ``store_size``, ``boxdict2str``, ``kpsdict2str``,
    ``kpsScoredict2str``, ``ImgProcessor`` and ``cv2``.
    """

    def __init__(self, video_path):
        self.cap = cv2.VideoCapture(video_path)
        self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.resize_size = (int(self.width * resize_ratio),
                            int(self.height * resize_ratio))
        self.IP = ImgProcessor(self.resize_size)

        if config.write_video:
            try:
                # Output paths derived from the input path; assumes a
                # 4-char extension like ".avi"/".mp4" -- TODO confirm.
                self.out = cv2.VideoWriter(video_path[:-4] + "_processed.avi",
                                           fourcc, 15,
                                           (self.width, self.height))
                self.res_out = cv2.VideoWriter(
                    video_path[:-4] + "_processed_res.avi", fourcc, 15,
                    (self.width, self.height))
            # FIX: bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit; narrow to Exception and keep the fallback names.
            except Exception:
                self.out = cv2.VideoWriter("output.avi", fourcc, 15,
                                           (self.width, self.height))
                self.res_out = cv2.VideoWriter("output_res.avi", fourcc, 15,
                                               (self.width, self.height))
        if config.write_box:
            box_file = "/".join(video_path.split(
                "/")[:-1]) + "/" + video_path.split("/")[-1][:-4] + "_box.txt"
            self.box_txt = open(box_file, "w")
        if config.write_kps:
            kps_file = "/".join(video_path.split(
                "/")[:-1]) + "/" + video_path.split("/")[-1][:-4] + "_kps.txt"
            kps_score_file = "/".join(
                video_path.split("/")[:-1]) + "/" + video_path.split(
                    "/")[-1][:-4] + "_kps_score.txt"
            self.kps_txt = open(kps_file, "w")
            self.kps_score_txt = open(kps_score_file, "w")

    def process_video(self):
        """Process every frame until end of file, then release all
        capture/writer/file resources."""
        self.IP.init()
        cnt = 0
        while True:
            ret, frame = self.cap.read()
            cnt += 1
            if not ret:
                # End of video: clean up and exit.
                self._close()
                break

            frame = cv2.resize(frame, self.resize_size)
            # FIX: ndarray.copy() instead of copy.deepcopy -- same result
            # for an image array, much cheaper.
            frame2 = frame.copy()
            kps, boxes, kps_score = self.IP.process_img(frame, gray=gray)
            img, img_black = self.IP.visualize()
            # classify_type selects which image (skeleton-only vs raw copy)
            # and which granularity (whole frame vs per-box) to classify.
            if classify_type == 1:
                result = self.IP.classify_whole(img_black, img)
            elif classify_type == 2:
                result = self.IP.classify_whole(frame2, img)
            elif classify_type == 3:
                result = self.IP.classify(img_black, img, boxes)
            elif classify_type == 4:
                result = self.IP.classify(frame2, img, boxes)
            else:
                raise ValueError("Not a right classification type!")

            # One line per frame in the box file (empty when no boxes).
            if config.write_box:
                if boxes is not None:
                    self.box_txt.write(
                        "".join(boxdict2str(k, v) for k, v in boxes.items()))
                self.box_txt.write("\n")

            if kps:
                if config.write_kps:
                    self.kps_txt.write(
                        "".join(kpsdict2str(k, v) for k, v in kps.items()))
                    self.kps_txt.write("\n")
                    self.kps_score_txt.write("".join(
                        kpsScoredict2str(k, v)
                        for k, v in kps_score.items()))
                    self.kps_score_txt.write("\n")
            else:
                if config.write_kps:
                    # Keep line counts aligned with frame numbers.
                    self.kps_txt.write("\n")
                    self.kps_score_txt.write("\n")
                # No detection: fall back to showing the raw frame.
                img = frame

            cv2.imshow("res", cv2.resize(img, show_size))
            cv2.waitKey(1)

            if config.write_video:
                self.out.write(cv2.resize(frame, store_size))

    def _close(self):
        """Release the capture and writers and close the text files.

        FIX: originally ``res_out`` was never released and the box/kps
        text files were never closed, so buffered output could be lost.
        """
        self.cap.release()
        if config.write_video:
            self.out.release()
            self.res_out.release()
        if config.write_box:
            self.box_txt.close()
        if config.write_kps:
            self.kps_txt.close()
            self.kps_score_txt.close()