Example #1
0
class VideoProcessor:
    """Read a video, run human detection on every frame and display the result."""

    def __init__(self, video_path):
        # Open the source video and cache its native resolution.
        self.cap = cv2.VideoCapture(video_path)
        self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        # Detection runs on frames scaled by the module-level resize_ratio.
        self.resize_size = (int(self.width * resize_ratio),
                            int(self.height * resize_ratio))
        self.IP = HumanDetection(self.resize_size)
        if opt.out_video_path:
            # Optional annotated-video output (10 fps, module-level codec/size).
            self.out = cv2.VideoWriter(opt.out_video_path, fourcc, 10, store_size)

    def process_video(self):
        """Loop over all frames: detect, visualize, show, optionally record."""
        frame_idx = 0
        while True:
            ok, raw = self.cap.read()
            frame_idx += 1
            if not ok:
                # End of stream: release capture (and writer, if any) and stop.
                self.cap.release()
                if opt.out_video_path:
                    self.out.release()
                break
            small = cv2.resize(raw, self.resize_size)
            kps, boxes, kps_score = self.IP.process_img(small)
            img, img_black = self.IP.visualize()
            cv2.imshow("res", cv2.resize(img, show_size))
            cv2.waitKey(2)
            if opt.out_video_path:
                self.out.write(cv2.resize(img, store_size))
Example #2
0
 def __init__(self, video_path):
     """Open *video_path*, compute the detection resize and the optional writer."""
     self.cap = cv2.VideoCapture(video_path)
     # Native frame size of the source video.
     self.height, self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(
         self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
     # Detection runs on frames scaled by the module-level resize_ratio.
     self.resize_size = (int(self.width * resize_ratio), int(self.height * resize_ratio))
     self.IP = HumanDetection(self.resize_size)
     if opt.out_video_path:
         # Optional annotated output at 10 fps; fourcc/store_size come from module scope.
         self.out = cv2.VideoWriter(opt.out_video_path, fourcc, 10, store_size)
    def __init__(self, video_path):
        """Open *video_path* and set up the processor plus optional outputs.

        Depending on config flags this creates video writers and text files
        for boxes / keypoints next to the input video.
        """
        self.cap = cv2.VideoCapture(video_path)
        # Native frame size of the source video.
        self.height, self.width = int(self.cap.get(
            cv2.CAP_PROP_FRAME_HEIGHT)), int(
                self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        # Detection runs on frames scaled by the module-level resize_ratio.
        self.resize_size = (int(self.width * resize_ratio),
                            int(self.height * resize_ratio))
        self.IP = ImgProcessor(self.resize_size)

        # Input path minus its 3-char extension; all side-car outputs are
        # written next to the source video.
        stem = video_path[:-4]

        if config.write_video:
            try:
                self.out = cv2.VideoWriter(stem + "_processed.avi",
                                           fourcc, 15,
                                           (self.width, self.height))
                self.res_out = cv2.VideoWriter(stem + "_processed_res.avi",
                                               fourcc, 15,
                                               (self.width, self.height))
            except Exception:
                # Fall back to fixed names in the working directory when the
                # derived path cannot be opened (narrowed from a bare `except:`).
                self.out = cv2.VideoWriter("output.avi", fourcc, 15,
                                           (self.width, self.height))
                self.res_out = cv2.VideoWriter("output_res.avi", fourcc, 15,
                                               (self.width, self.height))
        if config.write_box:
            # NOTE(review): the original assembled this via split("/"), which
            # produced a bogus leading "/" for bare filenames; the stem form
            # is identical for any path that contains "/".
            self.box_txt = open(stem + "_box.txt", "w")
        if config.write_kps:
            self.kps_txt = open(stem + "_kps.txt", "w")
            self.kps_score_txt = open(stem + "_kps_score.txt", "w")
Example #4
0
 def __init__(self, path, queueSize=3000):
     """Open *path* and prepare the frame queue, background subtractor
     and detector (presumably consumed by a reader thread — confirm)."""
     self.path = path
     # self.video = UMatFileVideoStream(self.path, 128).start()
     # self.rgb = cv2.UMat(self.height, self.width, cv2.CV_8UC3)
     self.cap = cv2.VideoCapture(path)
     # Stop flag; nothing in this fragment sets it, so it is presumably
     # toggled by the owning thread loop — verify against the caller.
     self.stopped = False
     # # initialize the queue used to store frames read from
     # # the video file
     self.Q = Queue(maxsize=queueSize)
     # MOG2 background subtraction, shadow detection disabled.
     self.fgbg = cv2.createBackgroundSubtractorMOG2(history=500,
                                                    varThreshold=200,
                                                    detectShadows=False)
     # Native frame size of the source video.
     self.height, self.width = int(self.cap.get(
         cv2.CAP_PROP_FRAME_HEIGHT)), int(
             self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
     # Detection runs on frames scaled by the module-level resize_ratio.
     self.resize_size = (int(self.width * resize_ratio),
                         int(self.height * resize_ratio))
     self.IP = HumanDetection(self.resize_size)
Example #5
0
class VideoProcessorThread:
    """Threaded video reader: ``update()`` runs as the worker loop, pushing
    processed frames into a bounded queue that ``read()``/``more()`` consume."""

    def __init__(self, path, queueSize=3000):
        """Open *path* and prepare the queue, background subtractor and detector."""
        self.path = path
        # self.video = UMatFileVideoStream(self.path, 128).start()
        # self.rgb = cv2.UMat(self.height, self.width, cv2.CV_8UC3)
        self.cap = cv2.VideoCapture(path)
        self.stopped = False
        # # initialize the queue used to store frames read from
        # # the video file
        self.Q = Queue(maxsize=queueSize)
        # MOG2 background subtraction, shadow detection disabled.
        self.fgbg = cv2.createBackgroundSubtractorMOG2(history=500,
                                                       varThreshold=200,
                                                       detectShadows=False)
        # Native frame size of the source video.
        self.height, self.width = int(self.cap.get(
            cv2.CAP_PROP_FRAME_HEIGHT)), int(
                self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        # Detection runs on frames scaled by the module-level resize_ratio.
        self.resize_size = (int(self.width * resize_ratio),
                            int(self.height * resize_ratio))
        self.IP = HumanDetection(self.resize_size)

    def stop(self):
        """Request the update loop to terminate."""
        # indicate that the thread should be stopped
        self.stopped = True

    def more(self):
        """Return True while frames remain in the queue."""
        # return True if there are still frames in the queue
        return self.Q.qsize() > 0

    def read(self):
        """Pop and return the next queued frame (blocks while the queue is empty)."""
        # return next frame in the queue
        return self.Q.get()

    def update(self):
        """Worker loop: read frames, run detection + classification, display
        the result and enqueue the frame until the video ends or stop() is called."""
        # keep looping infinitely
        self.IP.init()
        # IP.object_tracker.init_tracker()
        cnt = 0
        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                return
            # otherwise, ensure the queue has room in it
            if not self.Q.full():
                # read the next frame from the file
                (grabbed, frame) = self.cap.read()
                start = time.time()
                if grabbed:
                    frame = cv2.resize(frame, self.resize_size)
                    frame2 = copy.deepcopy(frame)
                    kps, boxes, kps_score = self.IP.process_img(frame,
                                                                gray=gray)
                    img, img_black = self.IP.visualize()
                    # classify_type selects which image(s) feed the classifier;
                    # `result` is not used afterwards, so classify presumably
                    # has side effects — confirm before removing.
                    if classify_type == 1:
                        result = self.IP.classify_whole(img_black, img)
                    elif classify_type == 2:
                        result = self.IP.classify_whole(frame2, img)
                    elif classify_type == 3:
                        result = self.IP.classify(img_black, img, boxes)
                    elif classify_type == 4:
                        result = self.IP.classify(frame2, img, boxes)
                    else:
                        raise ValueError("Not a right classification type!")

                    cv2.imshow("res", cv2.resize(img, show_size))
                    cv2.waitKey(2)
                    all_time = time.time() - start
                    print("time is:", all_time)
                # if the `grabbed` boolean is `False`, then we have
                # reached the end of the video file
                if not grabbed:
                    self.stop()
                    return
                # add the frame to the queue
                self.Q.put(frame)
            else:
                # NOTE(review): clears the underlying deque without holding the
                # queue's lock — racy if a consumer is active; confirm intent.
                self.Q.queue.clear()
Example #6
0
from src.human_detection import HumanDetection
import cv2
import os
from config.config import img_folder, gray
import numpy as np

IP = HumanDetection()  # module-level detector shared by the script below

if __name__ == '__main__':
    src_folder = img_folder
    # TODO(review): dest_folder is created but nothing is ever saved into it;
    # the commented-out imwrite below was probably the intent — confirm.
    dest_folder = src_folder + "_cut"
    os.makedirs(dest_folder, exist_ok=True)
    cnt = 0
    for img_name in os.listdir(src_folder):
        cnt += 1
        print("Processing pic {}".format(cnt))
        frame = cv2.imread(os.path.join(src_folder, img_name))
        kps, boxes, _ = IP.process_img(frame, gray=gray)
        img_rgb, img_black = IP.visualize()
        # cv2.imwrite(os.path.join(dest_folder, img_name), img)
        frame = img_black
        if boxes is not None:
            for box in boxes:
                # Clamp each detection box to the image bounds before cropping.
                x1, y1, x2, y2 = int(box[0]), int(box[1]), int(box[2]), int(box[3])
                x1 = max(x1, 0)
                y1 = max(y1, 0)
                x2 = min(x2, frame.shape[1])
                y2 = min(y2, frame.shape[0])
                img = np.asarray(frame[y1:y2, x1:x2])
                cv2.imshow("img", img)
                # Bug fix: imshow never renders without waitKey pumping the
                # HighGUI event loop; 1 ms keeps the loop responsive.
                cv2.waitKey(1)
import cv2
from src.human_detection import HumanDetection
import os
from utils.utils import *
from config import config

# Labelling options pulled from the shared config module (names suggest:
# class list, frames per clip, comment text, video folder, label folder —
# verify against config.config).
cls = config.label_cls
frame_length = config.label_frame
comment = config.label_comment
videos = config.label_main_folder
labels = config.label_folder_name

# Shared detector instance plus output settings for the labelling run.
IP = HumanDetection()
store_size = config.size
write = config.write_label_info


class LabelVideo:
    def __init__(self, video_path, label_path):
        self.label_path = label_path.replace("\\", "/")
        self.video_path = video_path.replace("\\", "/")
        self.idbox_cnt = defaultdict(int)
        self.label = defaultdict(list)
        self.cls = {str(idx): label for idx, label in enumerate(cls)}
        self.cls["p"] = "pass"
        self.cls_str = ""
        for k, v in self.cls.items():
            self.cls_str += "{}-->{}, ".format(k,v)
        self.id_record = defaultdict(bool)
        if write:
Example #8
0
from src.human_detection import HumanDetection
import cv2
from config.config import video_process_class, size, save_frame, save_black_img, save_kps_img, save_kps_video, process_gray
import os
from utils.kp_process import KPSProcessor

# Shared detector, output frame size and destination folder for kps videos.
IP = HumanDetection()
store_size = size
dest_folder = "2_kps_video"


class VideoProcessor:
    def __init__(self, video_path, draw_video_path, output_txt_path):
        """Open the input video plus the drawing-video and text outputs."""
        self.cap = cv2.VideoCapture(video_path)
        self.coord = []
        # self.draw_img = draw_video_path
        # XVID-encoded output at 10 fps, module-level store_size.
        self.out = cv2.VideoWriter(draw_video_path,
                                   cv2.VideoWriter_fourcc(*'XVID'), 10,
                                   store_size)
        self.file = open(output_txt_path, "w")
        src_h = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        src_w = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.KPSP = KPSProcessor(src_h, src_w)

    def __write_txt(self):
        for item in self.coord:
            self.file.write(str(item)+"\t")
        self.file.write("\n")

    def process_video(self):
        """Iterate over the video's frames.

        NOTE(review): this body appears truncated in this view — the loop only
        counts and reads frames; the processing/writing steps are not visible.
        """
        cnt = 0
        while True:
            cnt += 1
            # print("Current frame is {}".format(cnt))
            ret, frame = self.cap.read()
class VideoProcessor:
    """Run detection + classification over a video, optionally recording the
    output and writing per-frame box / keypoint text files."""

    def __init__(self, video_path):
        """Open *video_path* and create the optional writers and text files."""
        self.cap = cv2.VideoCapture(video_path)
        # Native frame size of the source video.
        self.height, self.width = int(self.cap.get(
            cv2.CAP_PROP_FRAME_HEIGHT)), int(
                self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        # Detection runs on frames scaled by the module-level resize_ratio.
        self.resize_size = (int(self.width * resize_ratio),
                            int(self.height * resize_ratio))
        self.IP = ImgProcessor(self.resize_size)

        # Input path minus its 3-char extension; side-car outputs are written
        # next to the source video.
        stem = video_path[:-4]

        if config.write_video:
            try:
                self.out = cv2.VideoWriter(stem + "_processed.avi",
                                           fourcc, 15,
                                           (self.width, self.height))
                self.res_out = cv2.VideoWriter(stem + "_processed_res.avi",
                                               fourcc, 15,
                                               (self.width, self.height))
            except Exception:
                # Fall back to fixed names when the derived path cannot be
                # opened (narrowed from a bare `except:`).
                self.out = cv2.VideoWriter("output.avi", fourcc, 15,
                                           (self.width, self.height))
                self.res_out = cv2.VideoWriter("output_res.avi", fourcc, 15,
                                               (self.width, self.height))
        if config.write_box:
            # Was assembled via split("/"), which produced a bogus leading "/"
            # for bare filenames; the stem form is identical for "/" paths.
            self.box_txt = open(stem + "_box.txt", "w")
        if config.write_kps:
            self.kps_txt = open(stem + "_kps.txt", "w")
            self.kps_score_txt = open(stem + "_kps_score.txt", "w")

    def _write_boxes(self, boxes):
        # One line per frame: serialized boxes, or an empty line when none.
        if boxes is not None:
            box_str = ""
            for k, v in boxes.items():
                box_str += boxdict2str(k, v)
            self.box_txt.write(box_str)
        self.box_txt.write("\n")

    def _write_kps(self, kps, kps_score):
        # One line per frame in each file: keypoints and their scores.
        kps_str = ""
        for k, v in kps.items():
            kps_str += kpsdict2str(k, v)
        self.kps_txt.write(kps_str)
        self.kps_txt.write("\n")

        kps_score_str = ""
        for k, v in kps_score.items():
            kps_score_str += kpsScoredict2str(k, v)
        self.kps_score_txt.write(kps_score_str)
        self.kps_score_txt.write("\n")

    def process_video(self):
        """Main loop: read, detect, classify, display and optionally record."""
        self.IP.init()
        cnt = 0
        while True:
            ret, frame = self.cap.read()
            cnt += 1
            if ret:
                frame = cv2.resize(frame, self.resize_size)
                frame2 = copy.deepcopy(frame)
                kps, boxes, kps_score = self.IP.process_img(frame, gray=gray)
                img, img_black = self.IP.visualize()
                # classify_type selects which image(s) feed the classifier;
                # `result` is unused here — classify presumably has side effects.
                if classify_type == 1:
                    result = self.IP.classify_whole(img_black, img)
                elif classify_type == 2:
                    result = self.IP.classify_whole(frame2, img)
                elif classify_type == 3:
                    result = self.IP.classify(img_black, img, boxes)
                elif classify_type == 4:
                    result = self.IP.classify(frame2, img, boxes)
                else:
                    raise ValueError("Not a right classification type!")

                if config.write_box:
                    self._write_boxes(boxes)

                if kps:
                    if config.write_kps:
                        self._write_kps(kps, kps_score)
                else:
                    if config.write_kps:
                        self.kps_txt.write("\n")
                        self.kps_score_txt.write("\n")
                    # Without keypoints fall back to the raw frame for display.
                    img = frame

                cv2.imshow("res", cv2.resize(img, show_size))
                cv2.waitKey(1)

                if config.write_video:
                    # NOTE(review): records the raw resized frame, not the
                    # annotated `img` — confirm this is intended.
                    self.out.write(cv2.resize(frame, store_size))

            else:
                self.cap.release()
                if config.write_video:
                    self.out.release()
                break
Example #10
0
class VideoProcessor:
    """Detect a person in each frame, classify posture and count squats.

    A repetition is counted on the knee-angle transition: a debounced "up"
    phase (angle > 60) followed by a debounced "down" phase.
    """

    def __init__(self, video_path):
        """Open *video_path* and create the optional writers and text files."""
        self.cap = cv2.VideoCapture(video_path)
        # Native frame size of the source video.
        self.height, self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), \
                                  int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        # Detection runs on frames scaled by the module-level resize_ratio.
        self.resize_size = (int(self.width * resize_ratio), int(self.height * resize_ratio))
        self.IP = ImgProcessor(self.resize_size)

        # Input path minus its 3-char extension; outputs live next to it.
        stem = video_path[:-4]

        if write_video:
            try:
                self.out = cv2.VideoWriter(stem + "_processed.avi", fourcc, 15, (self.width, self.height))
                self.res_out = cv2.VideoWriter(stem + "_processed_res.avi", fourcc, 15, (self.width, self.height))
            except Exception:
                # Fall back to fixed names (narrowed from a bare `except:`).
                self.out = cv2.VideoWriter("output.avi", fourcc, 15, (self.width, self.height))
                self.res_out = cv2.VideoWriter("output_res.avi", fourcc, 15, (self.width, self.height))
        if write_box:
            # Was built via split("/"); the stem form avoids a bogus leading
            # "/" for bare filenames and is identical for paths containing "/".
            self.box_txt = open(stem + "_box.txt", "w")
        if write_kps:
            self.kps_txt = open(stem + "_kps.txt", "w")
            self.kps_score_txt = open(stem + "_kps_score.txt", "w")

    def process_video(self):
        """Main loop: detect, annotate, count squats, display and record."""
        cnt = 0
        count = 0        # completed squat repetitions
        up_flag = 0      # 1 once the person has stood long enough
        count_squat = 0  # debounce counter for the "down" phase
        count_up = 0     # debounce counter for the "up" phase
        while True:
            ret, frame = self.cap.read()
            cnt += 1
            if ret:
                # Raw copy for the output video (was deep-copied even when the
                # read failed and frame was None — hoisted inside the guard).
                frame_save = copy.deepcopy(frame)
                kps, boxes, kps_score = self.IP.process_img(frame, gray=gray)
                img, img_black = self.IP.visualize()
                preds = self.IP.classify(frame, img_black, boxes)

                # Draw each prediction at its location. Bug fix: the original
                # loop variable was also named `preds`, clobbering the dict
                # while iterating it.
                for location, pred in preds.items():
                    cv2.putText(img, pred[0], location, cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 100, 255), 3)

                if write_box:
                    # One line per frame: serialized boxes, or empty when none.
                    if boxes is not None:
                        box_str = ""
                        for k, v in boxes.items():
                            box_str += boxdict2str(k, v)
                        self.box_txt.write(box_str)
                    self.box_txt.write("\n")

                if kps:
                    if write_kps:
                        kps_str = ""
                        for k, v in kps.items():
                            kps_str += kpsdict2str(k, v)
                        self.kps_txt.write(kps_str)
                        self.kps_txt.write("\n")

                        kps_score_str = ""
                        for k, v in kps_score.items():
                            kps_score_str += kpsScoredict2str(k, v)
                        self.kps_score_txt.write(kps_score_str)
                        self.kps_score_txt.write("\n")
                else:
                    if write_kps:
                        self.kps_txt.write("\n")
                        self.kps_score_txt.write("\n")
                    # Without keypoints fall back to the raw frame for display.
                    img = frame

                try:
                    key_point = kps[1]  # person id 1 drives the counter
                    if len(img) > 0 and len(key_point) > 0:
                        coord = [key_point[idx] for idx in nece_point]
                        angle = get_angle(coord[1], coord[0], coord[2])
                        if angle > 60:
                            # Standing: decay the squat debounce, build the
                            # "up" debounce; after 5 frames arm the counter.
                            count_squat = 0 if count_squat == 0 else count_squat - 1
                            if count_up < 5:
                                count_up += 1
                            else:
                                count_up = 0
                                up_flag = 1
                        elif up_flag == 1:
                            # Squatting after a confirmed "up": debounce, then
                            # count one rep after 5 consecutive down frames.
                            count_up = 0 if count_up == 0 else count_up - 1
                            if count_squat > 4:
                                count += 1
                                up_flag = 0
                                count_squat = 0
                            else:
                                count_squat += 1
                except Exception:
                    # Person id 1 absent or keypoints incomplete: skip the
                    # counter for this frame (narrowed from a bare `except:`).
                    pass

                cv2.putText(img, "Count: {}".format(count), (50, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 5)
                cv2.imshow("res", cv2.resize(img, show_size))
                cv2.imshow("res_black", cv2.resize(img_black, show_size))
                cv2.waitKey(1)
                if write_video:
                    self.out.write(frame_save)
                    try:
                        self.res_out.write(img)
                    except Exception:
                        # Recording the annotated stream is best-effort.
                        pass
            else:
                self.cap.release()
                if write_video:
                    self.out.release()
                break

    def locate(self, kps):
        """Identity hook kept for API compatibility: returns *kps* unchanged."""
        return kps