Example #1
    def init(self):
        # Load Communicator
        if self.server_address != "":
            try:
                self.communicator = Communicator()
                print(self.server_address, self.cam_address)
                self.communicator.load_client(self.server_address, self.cam_address, self.final_result_pool)
                self.communicator_thread = threading.Thread(target=self.communicator.run)
            except Exception:
                return False, PrintLog.e("Socket communicator failed to load.")

        # Load Object Detector
        try:
            self.object_detector = YOLOv4(self.frame_info_pool, self.object_detection_result_pool,
                                          self.final_result_pool, model=self.object_detection_model,
                                          dataset=self.object_detection_dataset,
                                          score_threshold=self.score_threshold,
                                          nms_threshold=self.nms_threshold)
        except Exception:
            return False, PrintLog.e("Object detector failed to load.")

        # Load Event Detector
        try:
            self.event_manager = EventManager(self.object_detection_result_pool, self.final_result_pool)
            self.event_manager_thread = threading.Thread(target=self.event_manager.run)
        except Exception:
            return False, PrintLog.e("Event detector failed to load.")

        # Load Decoder Manager
        try:
            self.decoder_manager = DecoderManager(self.analysis_fps, self.cam_address, self.frame_info_pool)
            self.decoder_thread = threading.Thread(target=self.decoder_manager.run)
        except Exception:
            return False, PrintLog.e("Decoder manager failed to load.")

        PrintLog.i("Edge analysis module is successfully loaded.")
        return True
Example #2
def extract_event_results(event_model_names, event_dir, video_name,
                          event_detectors, event_results):
    event_csv_file_path = os.path.join(event_dir,
                                       video_name.split(".mp4")[0] + ".csv")
    with open(event_csv_file_path, "w") as event_file:
        csv_writer = csv.writer(event_file)
        if isinstance(event_model_names, list):
            event_names = event_model_names
        elif event_model_names == "all":
            event_names = [
                "assault", "falldown", "kidnapping", "obstacle", "tailing",
                "wanderer"
            ]
        else:
            event_names = [event_model_names]
        name = [""]
        for i, event_detector in enumerate(event_detectors):
            name.append(event_detector.model_name)

        csv_writer.writerow(name)

        for event_result in event_results:
            row = ["{:>10}".format(event_result["frame_number"])]
            for i, event_name in enumerate(event_names):
                if event_result["event_result"][event_name]:
                    row.append(i + 1)
                else:
                    row.append("")
            csv_writer.writerow(row)
    PrintLog.i("Event result file is successfully extracted.(path: {})".format(
        event_csv_file_path))
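For reference, a minimal usage sketch of extract_event_results. The import path and the _Detector stub are assumptions; the input entries mirror the event_result dicts that run_detection() builds in Example #10.

import os
# from extract_results import extract_event_results  # import path is an assumption

# Each entry carries a frame number and a per-event boolean, as produced by run_detection().
event_results = [
    {"frame_number": 30, "event_result": {"assault": False, "falldown": True}},
    {"frame_number": 60, "event_result": {"assault": True, "falldown": False}},
]

class _Detector:
    # Hypothetical stand-in exposing the model_name attribute used for the CSV header.
    def __init__(self, name):
        self.model_name = name

os.makedirs("./results", exist_ok=True)
extract_event_results(event_model_names=["assault", "falldown"],
                      event_dir="./results",
                      video_name="sample.mp4",
                      event_detectors=[_Detector("assault"), _Detector("falldown")],
                      event_results=event_results)
# -> ./results/sample.csv: one column per detector, a mark on each frame where the event fired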
Example #3
    def __init__(self, frame_info_pool=None, detection_result_pool=None, final_result_pool=None,
                 model='yolov4-416', dataset='obstacle', score_threshold=0.5, nms_threshold=0.3):
        """
        :param model: model name, e.g. 'yolov4-416'; the suffix sets the input dimensions
        :param dataset: 'coco' uses 80 classes, any other dataset uses 15 classes
        """
        self.results = dict()
        self.model_name = model
        self.dataset = dataset
        self.frame_info_pool = frame_info_pool
        self.detection_result_pool = detection_result_pool
        self.final_result_pool = final_result_pool
        self.score_threshold = score_threshold
        self.nms_threshold = nms_threshold
        if dataset == "coco":
            category_num = 80
        else:
            category_num = 15
        self.cls_dict = get_cls_dict(category_num)

        yolo_dim = model.split('-')[-1]
        if 'x' in yolo_dim:
            dim_split = yolo_dim.split('x')
            if len(dim_split) != 2:
                raise SystemExit('ERROR: bad yolo_dim (%s)!' % yolo_dim)
            w, h = int(dim_split[0]), int(dim_split[1])
        else:
            h = w = int(yolo_dim)
        if h % 32 != 0 or w % 32 != 0:
            raise SystemExit('ERROR: bad yolo_dim (%s)!' % yolo_dim)

        self.model = TrtYOLO(model, (h, w), category_num)

        PrintLog.i("Object detection model is loaded - {}\t{}".format(model, dataset))
Example #4
    def __init__(self, analysis_fps, cam_address, frame_info_pool):
        self.cam_address = self.address_spliter(cam_address)
        self.frame_info_pool = frame_info_pool
        self.analysis_fps = analysis_fps
        self.decoder = None
        PrintLog.i("Decoder loading is started")
        if len(self.cam_address) == 1:
            self.decoder = CvSingleDecoder(self.cam_address[0], analysis_fps)
        else:
            self.decoder = CvMultipleDecoder(self.cam_address, analysis_fps)
        PrintLog.i("Decoder is loaded - {}".format(cam_address))
Example #5
def run_ffmpeg(video_path, extract_fps, frame_dir):
    try:
        command = "ffmpeg -y -hide_banner -loglevel panic -i {} -vsync 2 -q:v 0 -vf fps={} {}/%04d.jpg".format(
            video_path, extract_fps, frame_dir)
        os.system(command)
        frame_path_list = sorted(os.listdir(frame_dir))
        PrintLog.i(
            "Frame extraction is successfully completed (path: {}, framecount: {})"
            .format(frame_dir, len(frame_path_list)))
        return frame_path_list
    except Exception:
        PrintLog.e("Frame extraction failed")
        exit(1)
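A minimal usage sketch for run_ffmpeg. The file names are assumptions; note that ffmpeg does not create the output directory, so it has to exist before the call.

import os
# from extract_frames import run_ffmpeg  # import path is an assumption

frame_dir = "./frames"
os.makedirs(frame_dir, exist_ok=True)    # ffmpeg will not create this directory itself
frame_path_list = run_ffmpeg("sample.mp4", extract_fps=20, frame_dir=frame_dir)
print(frame_path_list[:3])               # e.g. ['0001.jpg', '0002.jpg', '0003.jpg']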
Example #6
    def run(self):
        PrintLog.i("Frame Number\tTimestamp")
        while True:
            if len(self.frame_info_pool) > 0:
                frame_info = self.frame_info_pool.pop(0)
                result = self.inference_by_image(frame_info["frame"])
                detection_result = dict()
                detection_result["cam_address"] = frame_info["cam_address"]
                detection_result["timestamp"] = frame_info["timestamp"]
                detection_result["frame_number"] = frame_info["frame_number"]
                detection_result["results"] = []
                detection_result["results"].append(
                    {"detection_result": result})
                self.detection_result_pool.append(detection_result)
Example #7
def load_video(video_path, extract_fps):
    capture = cv2.VideoCapture(video_path)
    frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(round(capture.get(cv2.CAP_PROP_FPS)))

    PrintLog.i("Extract information\n"
               "\tvideo path: {}\n"
               "\tvideo fps: {}\n"
               "\tvideo framecount: {}\n"
               "\textract fps: {}\n"
               "\textract frame number: {}".format(
                   video_path, fps, frame_count, extract_fps,
                   int(frame_count / (fps / extract_fps))))
    return capture, frame_count, fps
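If frames are read directly from the capture returned by load_video instead of going through ffmpeg, a simple stride over the video fps approximates the extract rate. A sketch, assuming the video file and the import path:

# from extract_frames import load_video  # import path is an assumption

capture, frame_count, fps = load_video("sample.mp4", extract_fps=5)
step = max(int(round(fps / 5)), 1)       # take every step-th frame to approximate 5 fps
frame_number = 0
while True:
    ret, frame = capture.read()
    if not ret:
        break
    frame_number += 1
    if frame_number % step == 0:
        pass                             # hand the frame to the detector here
capture.release()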
Example #8
    def __init__(self, object_detection_result_pool, final_result_pool):
        self.event_detectors = []
        self.final_result_pool = final_result_pool
        self.object_detection_result_pool = object_detection_result_pool
        assault_detection_model = AssaultEvent()
        PrintLog.i("{} model is loaded".format(
            assault_detection_model.model_name))

        wander_detection_model = WandererEvent()
        PrintLog.i("{} model is loaded".format(
            wander_detection_model.model_name))

        falldown_detection_model = FalldownEvent()
        PrintLog.i("{} model is loaded".format(
            falldown_detection_model.model_name))

        obstacle_model = ObstacleEvent()
        PrintLog.i("{} model is loaded".format(obstacle_model.model_name))

        kidnapping_model = KidnappingEvent()
        PrintLog.i("{} model is loaded".format(kidnapping_model.model_name))

        tailing_model = TailingEvent()
        PrintLog.i("{} model is loaded".format(tailing_model.model_name))

        reid_model = ReidEvent()
        PrintLog.i("{} model is loaded".format(reid_model.model_name))

        self.event_detectors.append(assault_detection_model)
        self.event_detectors.append(wander_detection_model)
        self.event_detectors.append(falldown_detection_model)
        self.event_detectors.append(obstacle_model)
        self.event_detectors.append(kidnapping_model)
        self.event_detectors.append(tailing_model)
        self.event_detectors.append(reid_model)
Example #9
def load_models(od_model_name="yolov4-416",
                score_threshold=0.5,
                nms_threshold=0.3,
                event_model_names="all"):
    if od_model_name == "yolov4-416":
        od_model = YOLOv4(model=od_model_name,
                          score_threshold=score_threshold,
                          nms_threshold=nms_threshold)
    else:
        od_model = YOLOv4(model=od_model_name,
                          score_threshold=score_threshold,
                          nms_threshold=nms_threshold)

    PrintLog.i("YOLO detector information\n"
               "\tmodel name: {}\n"
               "\tscore threshold: {}\n"
               "\tnms threshold: {}".format(od_model_name, score_threshold,
                                            nms_threshold))

    event_detectors = []
    PrintLog.i("{} model is loaded".format("Object detection(yolov4)"))

    splited_model_names = split_model_names(event_model_names)

    if event_model_names == "all" or "assault" in splited_model_names:
        assault_detection_model = AssaultEvent()
        event_detectors.append(assault_detection_model)
        PrintLog.i("{} model is loaded".format(
            assault_detection_model.model_name))

    if event_model_names == "all" or "falldown" in splited_model_names:
        falldown_detection_model = FalldownEvent()
        event_detectors.append(falldown_detection_model)
        PrintLog.i("{} model is loaded".format(
            falldown_detection_model.model_name))

    if event_model_names == "all" or "obstacle" in splited_model_names:
        obstacle_detection_model = ObstacleEvent()
        event_detectors.append(obstacle_detection_model)
        PrintLog.i("{} model is loaded".format(
            obstacle_detection_model.model_name))

    if event_model_names == "all" or "kidnapping" in splited_model_names:
        kidnapping_detection_model = KidnappingEvent()
        event_detectors.append(kidnapping_detection_model)
        PrintLog.i("{} model is loaded".format(
            kidnapping_detection_model.model_name))

    if event_model_names == "all" or "tailing" in splited_model_names:
        tailing_detection_model = TailingEvent()
        event_detectors.append(tailing_detection_model)
        PrintLog.i("{} model is loaded".format(
            tailing_detection_model.model_name))

    if event_model_names == "all" or "wanderer" in splited_model_names:
        wanderer_detection_model = WandererEvent()
        event_detectors.append(wanderer_detection_model)
        PrintLog.i("{} model is loaded".format(
            wanderer_detection_model.model_name))

    return od_model, event_detectors
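A minimal call sketch for load_models; the returned pair is what run_detection() in Example #10 consumes. The argument values and the import path are assumptions.

# from detection import load_models  # import path is an assumption

od_model, event_detectors = load_models(od_model_name="yolov4-416",
                                        score_threshold=0.5,
                                        nms_threshold=0.3,
                                        event_model_names="all")
print([detector.model_name for detector in event_detectors])   # loaded event detectors, in order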
Example #10
def run_detection(od_model, event_detectors, frame_dir, frame_path_list,
                  fram_bbox_dir, json_dir, bbox_video_path):
    fourcc = cv2.VideoWriter_fourcc(*'DIVX')
    video_writer = cv2.VideoWriter(bbox_video_path, fourcc, 20, (640, 360))

    frame_number = 0
    event_results = []
    cls_dict = get_cls_dict(15)
    bbox_visualization = BBoxVisualization(cls_dict)
    for i, frame_name in enumerate(frame_path_list):
        frame_number += 1
        frame = cv2.imread(os.path.join(frame_dir, frame_name))

        results = od_model.inference_by_image(frame)

        frame_bbox = bbox_visualization.draw_bboxes(frame, results)
        cv2.imwrite(os.path.join(fram_bbox_dir, frame_name), frame_bbox)
        video_writer.write(frame_bbox)

        dict_result = dict()
        dict_result["image_path"] = os.path.join(frame_dir, frame_name)
        dict_result["cam_address"] = video_path
        dict_result["module"] = od_model_name
        dict_result["frame_number"] = int(frame_number / extract_fps * fps)
        dict_result["timestamp"] = str(
            convert_framenumber2timestamp(frame_number / extract_fps * fps,
                                          fps))
        dict_result["results"] = []
        dict_result["results"].append({"detection_result": results})

        event_result = dict()
        event_result["cam_address"] = video_path
        event_result["frame_number"] = int(frame_number / extract_fps * fps)
        event_result["timestamp"] = str(
            convert_framenumber2timestamp(frame_number / extract_fps * fps,
                                          fps))
        event_result["event_result"] = dict()

        for event_detector in event_detectors:
            event_result["event_result"][
                event_detector.model_name] = event_detector.inference(
                    frame, dict_result)
        event_results.append(event_result)
        print(
            "\rframe number: {:>6}/{}\t/ extract frame number: {:>6}\t/ timestamp: {:>6}"
            .format(
                frame_number, len(frame_path_list),
                int(frame_number / extract_fps * fps),
                str(
                    convert_framenumber2timestamp(
                        frame_number / extract_fps * fps, fps))),
            end='')

        json_result_path = os.path.join(json_dir,
                                        frame_name.split(".jpg")[0] + ".json")
        with open(json_result_path, "w") as json_result_file:
            json.dump(dict_result, json_result_file, indent=4)
    video_writer.release()
    print()
    PrintLog.i("Extraction is successfully completed(framecount: {})".format(
        frame_number))
    if os.path.exists(bbox_video_path):
        PrintLog.i("BBox video is successfully generated(path: {})".format(
            bbox_video_path))
    else:
        PrintLog.i("BBox video is failed to generated.")
    return event_results
Example #11
    video_path = option.video_path
    video_name = video_path.split("/")[-1]
    extract_fps = option.fps
    score_threshold = option.score_threshold
    nms_threshold = option.nms_threshold
    od_model_name = option.od_model_name
    event_model_names = option.event_model
    result_dir = option.result_dir
    bbox_video_path = os.path.join(result_dir,
                                   video_name.split(".mp4")[0] + "_bbox.avi")
    PrintLog.i("Argument Info:\n"
               "\tinput video path: {}\n"
               "\textract fps: {}\n"
               "\tscore threshold: {}\n"
               "\tnms threshold: {}\n"
               "\tobject detection model name: {}\n"
               "\tevent model names: {}\n"
               "\tresult directory path: {}\n"
               "\tbbox video path: {}".format(video_path, extract_fps,
                                              score_threshold, nms_threshold,
                                              od_model_name, event_model_names,
                                              result_dir, bbox_video_path))

    # Load Video
    capture, frame_count, fps = load_video(video_path, extract_fps)

    # Load Object Detection & Event Detection models
    od_model, event_detectors = load_models(
        od_model_name,
        score_threshold=score_threshold,
        nms_threshold=nms_threshold,
        event_model_names=event_model_names)
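The excerpt stops after load_models(). One plausible continuation, sketched under the assumption that the helpers from Examples #2, #5, #7 and #10 are in scope and that the directory names below (which are not taken from the source) are acceptable:

import os

# Hypothetical directory layout under result_dir; the names are assumptions.
frame_dir = os.path.join(result_dir, "frames")
fram_bbox_dir = os.path.join(result_dir, "frames_bbox")
json_dir = os.path.join(result_dir, "json")
for directory in (frame_dir, fram_bbox_dir, json_dir):
    os.makedirs(directory, exist_ok=True)

# Extract frames, run object/event detection on them, then dump the per-event CSV.
frame_path_list = run_ffmpeg(video_path, extract_fps, frame_dir)
event_results = run_detection(od_model, event_detectors, frame_dir, frame_path_list,
                              fram_bbox_dir, json_dir, bbox_video_path)
extract_event_results(event_model_names, event_dir=result_dir, video_name=video_name,
                      event_detectors=event_detectors, event_results=event_results)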