def detect(self, frame, **kwargs):
    """Run 2D pupil detection on *frame* and return a basic pupil datum.

    The datum carries normalized position, diameter, confidence, timestamp,
    and the detected 2D ellipse parameters.
    """
    # Translate the ROI plugin's bounds into the detector's Roi type.
    detector_roi = Roi(*self.g_pool.roi.bounds)

    # A color image is only needed when the algorithm overlay is displayed.
    if self.g_pool.display_mode == "algorithm":
        overlay_img = frame.bgr
    else:
        overlay_img = None

    result = self.detector_2d.detect(
        gray_img=frame.gray,
        color_img=overlay_img,
        roi=detector_roi,
    )

    norm_pos = normalize(
        result["location"], (frame.width, frame.height), flip_y=True
    )

    # Assemble the shared pupil-datum skeleton...
    datum = self.create_pupil_datum(
        norm_pos=norm_pos,
        diameter=result["diameter"],
        confidence=result["confidence"],
        timestamp=frame.timestamp,
    )

    # ...then attach the detector's 2D ellipse estimate.
    datum["ellipse"] = {
        key: result["ellipse"][key] for key in ("axes", "angle", "center")
    }
    return datum
def detect(self, frame):
    """Run 2D pupil detection on *frame*.

    Returns the detector's result dict enriched with pupil-datum fields
    (norm_pos, timestamp, topic, id, method), or None when the current
    ROI does not fit inside the frame dimensions.
    """
    roi = Roi(*self.g_pool.u_r.get()[:4])
    # Guard clause: reject ROIs that fall outside the frame.
    if (
        not 0 <= roi.x_min <= roi.x_max < frame.width
        or not 0 <= roi.y_min <= roi.y_max < frame.height
    ):
        # TODO: Invalid ROIs can occur when switching camera resolutions,
        # because we adjust the roi only after all plugin recent_events()
        # have been called. Optimally we make a plugin out of the ROI and
        # call its recent_events() immediately after the backend, before
        # the detection.
        # Lazy %-args: the message is only formatted if debug logging is on.
        logger.debug(
            "Invalid Roi %s for img %sx%s!", roi, frame.width, frame.height
        )
        return None

    # A color image is only needed when the algorithm overlay is displayed.
    debug_img = frame.bgr if self.g_pool.display_mode == "algorithm" else None
    result = self.detector_2d.detect(
        gray_img=frame.gray,
        color_img=debug_img,
        roi=roi,
    )

    eye_id = self.g_pool.eye_id
    result["norm_pos"] = normalize(
        result["location"], (frame.width, frame.height), flip_y=True
    )
    result["timestamp"] = frame.timestamp
    result["topic"] = f"pupil.{eye_id}"
    result["id"] = eye_id
    result["method"] = "2d c++"
    return result
def process(self, img):
    """Detect the pupil in BGR image *img*, draw overlays in place, and
    update the tracked state (``self.pos``, ``self.bbox``, ``self.countdown``).

    Returns *img* with overlays drawn, or None when *img* is None.
    """
    if img is None:
        return

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    timestamp = uvc.get_time_monotonic()

    # Restrict detection to the last known bounding box, if any.
    roi = None
    if self.bbox is not None:
        xmin, ymin, w, h = self.bbox
        roi = Roi(xmin, ymin, xmin + w, ymin + h)

    result = self.detector.detect(gray, timestamp, roi=roi)

    if result["model_confidence"] > 0.25:
        # Visualize the fitted eyeball projection as a light-red ellipse.
        sphere = result["projected_sphere"]
        self.__draw_ellipse(sphere, img, (255, 120, 120), 1)

    if result["confidence"] > 0.5:
        normal = np.array(result["circle_3d"]["normal"])
        self.bbox = self.__get_bbox(result, img)
        self.__draw_tracking_info(result, img)
        # Store the gaze normal plus a monotonic timestamp for staleness checks.
        self.pos = np.array([normal[0], normal[1], normal[2], time.monotonic()])
        self.countdown = 5  # frames of grace before dropping the track
    else:
        self.countdown -= 1
        if self.countdown <= 0:
            # Lost the pupil for too long: reset the tracking state.
            self.pos = None
            self.bbox = None
    return img
def detect(self, frame):
    """Run the 2D detector on *frame* and return an enriched result dict."""
    # Current user ROI, in the detector's Roi type.
    detector_roi = Roi(*self.g_pool.u_r.get()[:4])
    result = self.detector_2d.detect(
        gray_img=frame.gray,
        color_img=frame.bgr,
        roi=detector_roi,
    )

    eye_id = self.g_pool.eye_id
    result["norm_pos"] = normalize(
        result["location"], (frame.width, frame.height), flip_y=True
    )
    result["timestamp"] = frame.timestamp
    result["topic"] = f"pupil.{eye_id}"
    result["id"] = eye_id
    result["method"] = "2d c++"
    return result
def detect(self, frame, **kwargs):
    """Run 2D detection; the result topic carries this plugin's identifier."""
    # Map the ROI plugin's bounds onto the detector's Roi type.
    detector_roi = Roi(*self.g_pool.roi.bounds)

    # A color image is only needed for the algorithm overlay display mode.
    if self.g_pool.display_mode == "algorithm":
        overlay = frame.bgr
    else:
        overlay = None

    result = self.detector_2d.detect(
        gray_img=frame.gray,
        color_img=overlay,
        roi=detector_roi,
    )

    eye_id = self.g_pool.eye_id
    result["norm_pos"] = normalize(
        result["location"], (frame.width, frame.height), flip_y=True
    )
    result["timestamp"] = frame.timestamp
    result["topic"] = f"pupil.{eye_id}.{self.identifier}"
    result["id"] = eye_id
    result["method"] = "2d c++"
    return result
def detect(self, frame):
    """Run the 3D detector on *frame* and return an enriched result dict."""
    # Map the ROI plugin's bounds onto the detector's Roi type.
    detector_roi = Roi(*self.g_pool.roi.bounds)

    # A color image is only needed for the algorithm overlay display mode.
    if self.g_pool.display_mode == "algorithm":
        overlay = frame.bgr
    else:
        overlay = None

    result = self.detector_3d.detect(
        gray_img=frame.gray,
        timestamp=frame.timestamp,
        color_img=overlay,
        roi=detector_roi,
        debug=self.is_debug_window_open,
    )

    eye_id = self.g_pool.eye_id
    result["norm_pos"] = normalize(
        result["location"], (frame.width, frame.height), flip_y=True
    )
    result["topic"] = f"pupil.{eye_id}"
    result["id"] = eye_id
    result["method"] = "3d c++"
    return result
def detect(self, frame, **kwargs):
    """Run 3D pupil detection on *frame*.

    An optional ``internal_raw_2d_data`` kwarg lets the 3D detector reuse a
    previously computed 2D detection instead of running its own.
    """
    self._process_focal_length_changes()
    # convert roi-plugin to detector roi
    roi = Roi(*self.g_pool.roi.bounds)
    # A color image is only needed for the algorithm overlay display mode.
    debug_img = frame.bgr if self.g_pool.display_mode == "algorithm" else None
    result = self.detector_3d.detect(
        gray_img=frame.gray,
        timestamp=frame.timestamp,
        color_img=debug_img,
        roi=roi,
        debug=self.is_debug_window_open,
        internal_raw_2d_data=kwargs.get("internal_raw_2d_data", None),
    )

    eye_id = self.g_pool.eye_id
    result["norm_pos"] = normalize(
        result["location"], (frame.width, frame.height), flip_y=True
    )
    result["topic"] = f"pupil.{eye_id}.{self.identifier}"
    result["id"] = eye_id
    result["method"] = "3d c++"
    return result
def detect(self, frame, **kwargs):
    """Run 3D detection; the result topic carries this plugin's identifier."""
    self._process_focal_length_changes()
    # Map the ROI plugin's bounds onto the detector's Roi type.
    detector_roi = Roi(*self.g_pool.roi.bounds)

    # A color image is only needed for the algorithm overlay display mode.
    if self.g_pool.display_mode == "algorithm":
        overlay = frame.bgr
    else:
        overlay = None

    # Reuse an externally supplied 2D detection when one was passed in.
    raw_2d = kwargs.get("internal_raw_2d_data")
    result = self.detector_3d.detect(
        gray_img=frame.gray,
        timestamp=frame.timestamp,
        color_img=overlay,
        roi=detector_roi,
        debug=self.is_debug_window_open,
        internal_raw_2d_data=raw_2d,
    )

    eye_id = self.g_pool.eye_id
    result["norm_pos"] = normalize(
        result["location"], (frame.width, frame.height), flip_y=True
    )
    result["topic"] = f"pupil.{eye_id}.{self.identifier}"
    result["id"] = eye_id
    result["method"] = "3d c++"
    return result