Example #1
    def load_model(self):
        # Resolve the currently selected .onnx file, log it, and derive
        # the detector's expected input dimensions from the file name.
        self.model = self.get_current_model()
        log("Loading ONNX - {}".format(self.model))
        self.dimensions = self.get_dimensions(self.model)
        # Severity 4 = fatal only; silences onnxruntime's warning output.
        ort.set_default_logger_severity(4)
        self.detector = ort.InferenceSession(self.model)
        self.input_name = self.detector.get_inputs()[0].name
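
A hedged usage sketch for the session loaded above, where detector, input_name and dimensions stand in for self.detector, self.input_name and self.dimensions; the NCHW float32 layout is an assumption based on typical Ultraface ONNX models, not taken from this code:

    import numpy as np

    w, h = dimensions  # (width, height), as returned by get_dimensions()
    dummy = np.zeros((1, 3, h, w), dtype=np.float32)  # assumed NCHW input
    outputs = detector.run(None, {input_name: dummy})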
Example #2
    def add_filter(self, name):
        # Look the filter class up by name and instantiate it;
        # returns False when the name is unknown, True otherwise.
        class_ = self.filter_manager.get_class(name)
        if class_ is None:
            log("No class {}".format(name))
            return False
        log(name)
        self.filters.append(class_())
        return True
Example #3
    def main(self):
        self.run()
        # everything below this return is unreachable; the inline key loop
        # appears to have been superseded by self.run()
        return

        input_help = "`, G, g, i, o, h, Q"
        log("Input thread started")
        input_lock = False
        while True:
            c = click.getchar()
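            # the backtick toggles input_lock; while locked, other keys are ignored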
            if c == '`':
                input_lock = not input_lock
                log("Input Lock? {}".format(input_lock))
            else:
                if input_lock:
                    pass
                elif c == 'G':
                    self.app.cascade_detector.next_classifier('frontalface')
                elif c == 'g':
                    self.app.cascade_detector.next_classifier('profileface')
                elif c == 'i':
                    self.app.input.add_interval(-1)
                elif c == 'o':
                    self.app.input.add_interval(1)
                elif c == 'h':
                    log(input_help)
                elif c == 'Q':
                    log("Exit")
                    exit(0)
                else:
                    log(c)
Example #4
def get_available_cameras(api):
    idx = 0
    cameras = []

    consecutive = 0
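    # probe camera indices in order, giving up after LIMIT_CONSECUTIVE
    # consecutive indices fail to open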
    while consecutive < LIMIT_CONSECUTIVE:
        camera = cv2.VideoCapture(idx, apiPreference=api)
        if not camera.isOpened():
            consecutive += 1
            log("{} - fail".format(idx))
        else:
            read, img = camera.read()
            if read:
                consecutive = 0
                if log_verbose():
                    camera_width = camera.get(cv2.CAP_PROP_FRAME_WIDTH)
                    camera_height = camera.get(cv2.CAP_PROP_FRAME_HEIGHT)
                    camera_fps = camera.get(cv2.CAP_PROP_FPS)
                    log("[{}] {} x {} @ {}fps".format(idx, camera_width,
                                                      camera_height,
                                                      camera_fps))
                cameras.append(idx)
            camera.release()
        idx += 1
    return cameras
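
A minimal usage sketch, mirroring how run() in Example #11 calls this helper (cv2.CAP_DSHOW is OpenCV's DirectShow backend constant; msg() is this project's own output helper):

    cameras = get_available_cameras(cv2.CAP_DSHOW)
    msg("Available cameras")
    msg(cameras)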
Example #5
    def add(self, filter_class, name=None):
        # Register a filter class under an explicit name, or fall back
        # to the class's own __name__, and keep one live instance of it.
        if name is None:
            log(filter_class.__name__)
            self.classes[filter_class.__name__] = filter_class
        else:
            log(name)
            self.classes[name] = filter_class
        self.list.append(filter_class())
Example #6
    def process(self, frame, frame_idx):
        detect_width = int(frame.shape[1]) * self.scale // 100
        detect_height = int(frame.shape[0]) * self.scale // 100
        dim = (detect_width, detect_height)
        frame_small = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
        self.time_measurements.mark("resize")
        gray = cv2.cvtColor(frame_small, cv2.COLOR_BGR2GRAY)
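        # try the frontal-face cascade first, then the profile cascade,
        # then the profile cascade on a mirrored frame for the other side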
        name = 'frontalface'
        scaled_detections = self.classifiers[name].detectMultiScale(gray, 1.1, 4)
        detections = []
        mirrored = False
        if len(scaled_detections) == 0:
            name = 'profileface'
            scaled_detections = self.classifiers[name].detectMultiScale(gray, 1.1, 4)
            # try second profile
            if len(scaled_detections) == 0:
                scaled_detections = self.classifiers[name].detectMultiScale(cv2.flip(gray, 1), 1.1, 4)
                mirrored = True
                if len(scaled_detections):
                    log("profileface - mirrored")
            else:
                log("profileface")
        else:
            log("frontalface")
        self.time_measurements.mark("detect")

        if len(scaled_detections):
            if mirrored:
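                # map boxes from the mirrored, downscaled frame back to
                # original-frame coordinates (note the negated width)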
                for (x, y, w, h) in scaled_detections:
                    detections.append((frame.shape[1] - (x * 100 // self.scale),
                                      y * 100 // self.scale,
                                      0 - (w * 100 // self.scale),
                                      h * 100 // self.scale,
                                      name, self.colors[name]))
            else:
                for (x, y, w, h) in scaled_detections:
                    detections.append((
                        x * 100 // self.scale,
                        y * 100 // self.scale,
                        w * 100 // self.scale,
                        h * 100 // self.scale,
                        name, self.colors[name]))

        if len(detections):
            self.detected(frame_idx)
            self.bounding_boxes = detections
            if log_verbose():
                for (x, y, w, h, name, color) in self.bounding_boxes:
                    log("[{}] {} {}x{} {}x{} @ {}x{}".format(
                        frame_idx, name, x, y, (x + w), (y + h), frame.shape[1], frame.shape[0]))
Example #7
    def get_class(self, name):
        log(name)
        if type(name) is type:
            # 'name' is a class object here, so look it up by its own
            # __name__ (type(name).__name__ would always be "type")
            return self.classes.get(name.__name__)
        else:
            return self.classes.get(name)
Example #8
    def put(self, item):
        # Non-blocking enqueue: drop the item (and log) when the bounded
        # queue is full, rather than stalling the producer.
        try:
            self.input.put(item, block=False)
        except queue.Full:
            log("queue full")
Example #9
    def detected(self, idx):
        log("idx: {}".format(idx))
        # record the index of the last frame with a detection, if a
        # shared FrameState has been attached
        if self.frame_state is not None:
            log("updating")
            self.frame_state.set_detect_idx(idx)
Example #10
    def get_dimensions(path):
        log(path)
        # the input width is encoded in the file name (e.g. "...-320.onnx");
        # the height is assumed to be 3/4 of the width (4:3 aspect ratio)
        width = int(
            re.match(r"^.*[-_](?P<width>[0-9]+)\.onnx$", path).group('width'))
        return width, (width // 4) * 3
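
For example, a file named version-RFB-320.onnx would match the pattern above and yield (320, 240).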
Example #11
    def run(self):
        if self.args.verbose:
            set_log_level(LogLevel.VERBOSE)

        log_file(self.args.log)

        # Windows capture backends: Media Foundation when requested,
        # otherwise DirectShow
        camera_api = cv2.CAP_MSMF if self.args.capture_msmf else cv2.CAP_DSHOW

        if self.args.list:
            cameras = get_available_cameras(camera_api)
            msg("Available cameras")
            msg(cameras)
            return 0

        log(self.args)

        self.frame_state = FrameState()

        if self.args.yolo and os.path.isdir(self.args.yolo):
            self.yolo_detector = YoloV3Detector()
            self.yolo_detector.setup(self.args.yolo)
            self.detectors.append(self.yolo_detector)

        if self.args.haar_cascades and os.path.isdir(self.args.haar_cascades):
            self.cascade_detector = CascadeClassifierDetector()
            self.cascade_detector.add_key('frontalface', (0, 0, 255))
            self.cascade_detector.add_key('profileface', (0, 0, 255))
            # self.cascade_detector.add_key('smile', (255, 0, 0))
            # self.cascade_detector.add_key('cat', (0, 255, 0))
            self.cascade_detector.setup(self.args.haar_cascades)
            self.detectors.append(self.cascade_detector)

        if self.args.onnx and os.path.isdir(self.args.onnx):
            self.onnx_detector = UltrafaceOnnxDetector()
            self.onnx_detector.setup(self.args.onnx)
            self.detectors.append(self.onnx_detector)

        self.filter_manager = FilterManager()
        add_default_filters(self.filter_manager)

        self.input_thread = TuiThread()

        self.threads.append(self.input_thread.create_thread())

        # every detector shares the same FrameState and runs on its own thread
        for detector in self.detectors:
            detector.set_frame_state(self.frame_state)
            self.threads.append(detector.create_thread())

        open_player = self.args.open_player

        if self.args.file:
            self.input = FileInput(frame_state=self.frame_state,
                                   filter_manager=self.filter_manager,
                                   file_path=self.args.file,
                                   loop=True,
                                   open_player=open_player)
        else:
            self.input = CameraInput(frame_state=self.frame_state,
                                     filter_manager=self.filter_manager,
                                     capture_idx=self.args.capture,
                                     capture_api=camera_api,
                                     open_player=open_player)
        self.input.detectors = self.detectors

        self.threads.append(self.input.create_thread(name="FrameProcessing"))

        # TODO: enable ui thread once there is any ui
        # self.ui_thread = UiThread()
        # self.threads.append(self.ui_thread.create_thread())

        for thread in self.threads:
            log("Starting thread: \"{}\"".format(thread.getName()))
            thread.start()

        # simple watchdog
        while True:
            time.sleep(5)
            any_dead = False
            for thread in self.threads:
                if not thread.is_alive():
                    log("Thread(\"{}\") - died".format(thread.getName()))
                    any_dead = True
            if any_dead:
                # respawn or die
                return -1
Example #12
    def create_classifier(self, key):
        # instantiate the currently selected cascade file for this key,
        # or store None when no file was found
        if len(self.classifier_files[key][1]):
            log("loading classifier {} from {}".format(
                key, self.classifier_files[key][1][self.get_idx(key)]))
            self.classifiers[key] = cv2.CascadeClassifier(
                self.classifier_files[key][1][self.get_idx(key)])
        else:
            self.classifiers[key] = None
Example #13
    def process(self, frame, frame_idx):
        # NOTE: the network is re-read from disk on every frame; hoisting
        # this load into setup() would avoid the repeated cost
        log("Loading YOLO")
        net = cv2.dnn.readNetFromDarknet(
            os.path.join(self.path, "yolov3.cfg"),
            os.path.join(self.path, "yolov3.weights"))
        net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)

        ln = net.getLayerNames()
        log("unconnected layers")
        # getUnconnectedOutLayers() returns plain indices in OpenCV >= 4.5.4
        # but 1-element arrays in older releases; np.ravel() handles both
        out_idxs = np.ravel(net.getUnconnectedOutLayers())
        for i in out_idxs:
            log(i)
            log(ln[i - 1])
        ln = [ln[i - 1] for i in out_idxs]

        LABELS = open(os.path.join(self.path,
                                   'coco.names')).read().strip().split("\n")
        COLORS = np.random.randint(0,
                                   255,
                                   size=(len(LABELS), 3),
                                   dtype="uint8")

        (H, W) = frame.shape[:2]

        # YOLOv3 expects a 416x416 blob, scaled to [0, 1], with R and B swapped
        blob = cv2.dnn.blobFromImage(frame,
                                     1 / 255.0, (416, 416),
                                     swapRB=True,
                                     crop=False)
        net.setInput(blob)
        start = time.time()
        layer_outputs = net.forward(ln)
        end = time.time()
        log("YOLO processing took {:.6f} seconds".format(end - start))
        boxes = []
        confidences = []
        class_ids = []
        detections = []
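        # each YOLO detection row is [cx, cy, w, h, objectness, class scores...]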
        for output in layer_outputs:
            for detection in output:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]

                if confidence > self.confidence:
                    box = detection[0:4] * np.array([W, H, W, H])
                    (centerX, centerY, width, height) = box.astype("int")

                    x = int(centerX - (width / 2))
                    y = int(centerY - (height / 2))
                    boxes.append([x, y, int(width), int(height)])
                    confidences.append(float(confidence))
                    class_ids.append(class_id)

        # non-maximum suppression collapses overlapping candidate boxes
        idxs = cv2.dnn.NMSBoxes(boxes, confidences, self.confidence,
                                self.threshold)
        if len(idxs) > 0:
            for i in idxs.flatten():
                # look up label/color by the detection's class id rather
                # than by its position in the NMS result
                class_id = class_ids[i]
                color = [int(c) for c in COLORS[class_id]]
                detections.append(
                    (boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3],
                     "{}: {:.4f}".format(LABELS[class_id], confidences[i]),
                     color))

        if len(detections):
            self.detected(frame_idx)
            self.bounding_boxes = detections