def calibrate(self):
    """Run one step of the projector/camera calibration cycle.

    Steps cyclically through the actions declared in ``CALIBRATION_CYCLE``
    (a sequence of ``(action, duration)`` pairs): learning the camera
    background, projecting a blob pattern, clearing it, and finally
    locating the projected pattern in the camera image, handing the
    detected blob centers to ``_finish_calibration``.

    Does nothing until a source frame is available.

    Raises:
        ValueError: if ``_calibration_action`` returns an unknown action.
    """
    if self.source.value is None:
        return
    # Advance the cycle counter, wrapping at the summed duration of all
    # configured actions.
    self._calibration_cycle_i = (self._calibration_cycle_i + 1) % sum(
        [n for action, n in self.CALIBRATION_CYCLE])
    action = self._calibration_action()
    if action == 'learn_background':
        # Feed the current camera frame into the background model; the
        # learning rate spreads adaptation over the configured duration.
        self._background.apply(
            self.source.value.image,
            learningRate=1 / self.CALIBRATION_LEARNING_DURATION)
    elif action == 'wait':
        pass
    elif action == 'draw_pattern':
        pattern = self._draw_pattern()
        self._push_pattern_to_projector(pattern)
    elif action == 'clear_pattern':
        # Black canvas — CANVAS_SIZE is (width, height) while numpy wants
        # (rows, cols), hence the reversal — with a mid-gray frame drawn
        # around the border (thickness = the pattern grid size).
        pattern = numpy.zeros(tuple(reversed(self.CANVAS_SIZE)),
                              dtype=numpy.uint8)
        cv2.rectangle(pattern, (0, 0), self.CANVAS_SIZE, 128,
                      self.CALIBRATION_PATTERN_GRID_SIZE)
        self._push_pattern_to_projector(pattern)
    elif action == 'calibrate_pattern':
        # Foreground mask = current frame vs. the learned background
        # (learningRate=0 keeps the model frozen), then an opening and a
        # small blur to clean up the blobs before grid detection.
        mask = self._background.apply(self.source.value.image,
                                      learningRate=0)
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, self.KERNEL)
        mask = cv2.blur(mask, (5, 5))
        blob_detector = self._create_blob_detector()
        found, centers = cv2.findCirclesGrid(
            mask, self._calibration_pattern_size,
            cv2.CALIB_CB_ASYMMETRIC_GRID + cv2.CALIB_CB_CLUSTERING,
            blob_detector, None)
        if self.debug:
            # Debug view: raw blobs in red plus the (possibly partial)
            # detected grid drawn over a copy of the camera image.
            output = self.source.value.image.copy()
            blobs = blob_detector.detect(mask)
            output = cv2.drawKeypoints(
                output, blobs, numpy.array([]), (0, 0, 255),
                cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
            cv2.drawChessboardCorners(output,
                                      self._calibration_pattern_size,
                                      centers, found)
            self.debug.push(Frame(mask, 'projector.calibration.mask'))
            self.debug.push(Frame(output, 'projector.calibration.debug'))
        if found:
            self._finish_calibration(centers)
    else:
        raise ValueError(f'Unknown action {action}')
async def process(self) -> None:
    """Draw a white box around each already-detected person and publish."""
    annotated = self.input.value.image.copy()
    detections = self.people.value
    if detections is not None:
        for x, y, w, h in detections:
            cv2.rectangle(annotated, (x, y), (x + w, y + h),
                          (255, 255, 255), 2)
    frame = Frame(annotated, source=self.id)
    self.output.push(frame)
    self.debug_output.push(frame)
def _draw_overlay(self):
    """Warp the current overlay into projector space and publish it."""
    overlay = self.overlay.value
    if not overlay:
        return
    warped = cv2.warpPerspective(overlay.image,
                                 self._calibrator.transformation_matrix,
                                 self._calibrator.CANVAS_SIZE)
    self.projector.push(Frame(warped, 'projector'))
async def process(self) -> None:
    """Run the blocking detector off the event loop, annotate, publish.

    Note: the input is re-read after the await, so the boxes are drawn on
    whatever frame is current once detection finishes.
    """
    people, _weights = await run_in_executor(self.detect,
                                             self.input.value.image)
    annotated = self.input.value.image.copy()
    for x, y, w, h in people:
        cv2.rectangle(annotated, (x, y), (x + w, y + h),
                      (255, 255, 255), 2)
    frame = Frame(annotated, source=self.id)
    self.output.push(frame)
    self.debug_output.push(frame)
async def process(self) -> None:
    """Detect people with the HOG detector, box them, and publish."""
    people, _weights = self.hog.detectMultiScale(self.input.value.image,
                                                 scale=1.01)
    annotated = self.input.value.image.copy()
    for x, y, w, h in people:
        cv2.rectangle(annotated, (x, y), (x + w, y + h),
                      (255, 255, 255), 2)
    frame = Frame(annotated, source=self.id)
    self.output.push(frame)
    self.debug_output.push(frame)
async def process(self):
    """'Terminator vision': light-gray edges, red tint, face boxes."""
    if not self.source.value:
        return
    image = self.source.value.image.copy()
    # Paint detected edges light gray before tinting so they stay visible.
    edges = cv2.Canny(image, 100, 200)
    image[edges > 0] = np.array([200, 200, 200])
    tinted = cv2.transform(image, self.TERMINATOR_MAT)
    faces = self.faces.value
    if faces is not None:
        for x, y, w, h in faces:
            cv2.rectangle(tinted, (x, y), (x + w, y + h),
                          (255, 255, 255), 2)
    frame = Frame(tinted, 'Terminator')
    self.output.push(frame)
    self.debug_output.push(frame)
async def process(self):
    """Paint with the mouse onto a persistent canvas and composite it.

    Left button draws in ``COLOR``; right button erases by drawing in
    ``BG_COLOR``. Any other event (outside the drawable regions, or no
    button held) breaks the current stroke. The canvas is composited over
    the source frame wherever it is non-black, and also published on its
    own as the overlay.

    Fix/cleanup: the left- and right-button branches were duplicated
    except for the stroke color; they now share one drawing path.
    """
    if not self.source.value:
        return
    if self.clear_canvas.updated:
        self._canvas = None
    if self._canvas is None:
        # Lazily (re)create a black canvas matching the source frame.
        self._canvas = np.zeros(self.source.value.image.shape,
                                dtype='uint8')
    for e in self.mouse_movement.values:
        in_draw_region = e.region.name in ['display', 'VideoSource0']
        if in_draw_region and e.buttons[MouseButton.LEFT]:
            color = self.COLOR        # draw
        elif in_draw_region and e.buttons[MouseButton.RIGHT]:
            color = self.BG_COLOR     # erase
        else:
            # Stroke broken: forget the anchor so the next press starts
            # a fresh line instead of connecting across the gap.
            self._last_position = None
            continue
        if self._last_position:
            cv2.line(self._canvas, self._last_position,
                     e.restored_position, color, self.THICKNESS)
        self._last_position = e.restored_position
    # Composite: canvas pixels that are non-black in any channel win.
    output = self.source.value.image.copy()
    mask = (self._canvas > 0).any(-1)
    output[mask, :] = self._canvas[mask, :]
    output_frame = Frame(output, 'display')
    self.output.push(output_frame)
    self.debug.push(output_frame)
    overlay_frame = Frame(self._canvas, 'overlay')
    self.overlay.push(overlay_frame)
    self.debug.push(overlay_frame)
async def process(self):
    """Box each person, highlighting the one under the mouse pointer."""
    image = self.source.value.image.copy()
    people = self.people.value
    if people:
        for person in people:
            hovered = self._contains_mouse_pointer(person)
            color = (self.HIGHLIGHTED_RECT_COLOR if hovered
                     else self.RECT_COLOR)
            cv2.rectangle(image, (person.x, person.y),
                          (person.x + person.w, person.y + person.h),
                          color, self.RECT_THICKNESS)
    frame = Frame(image, 'display')
    self.output.push(frame)
    self.debug.push(frame)
async def process(self) -> None:
    """Apply keyboard-toggleable horizontal/vertical flips and publish.

    Each active toggle event inverts the corresponding flag; both flips
    are independent and may be enabled at the same time.
    """
    for event in self.keyboard.values:
        if not event.active:
            continue
        if event.action == 'toggle_horizontal_flip':
            self._horizontal_flip_enabled = not self._horizontal_flip_enabled
        if event.action == 'toggle_vertical_flip':
            self._vertical_flip_enabled = not self._vertical_flip_enabled
    image = self.input.value.image
    if self._horizontal_flip_enabled:
        image = cv2.flip(image, 1)
    if self._vertical_flip_enabled:
        image = cv2.flip(image, 0)
    frame = Frame(image, source=self.id)
    self.output.push(frame)
    self.debug_output.push(frame)
def _push_pattern_to_projector(self, pattern):
    """Publish a calibration pattern to the projector (and debug, if on)."""
    pattern_frame = Frame(pattern, 'projector')
    self.projector.push(pattern_frame)
    if self.debug:
        self.debug.push(pattern_frame)
async def process(self) -> None:
    """Mirror the input frame horizontally and publish it."""
    mirrored = cv2.flip(self.input.value.image, 1)
    frame = Frame(mirrored, source=self.id)
    self.output.push(frame)
    self.debug_output.push(frame)
async def process(self) -> None:
    """Flip the input frame vertically and publish it."""
    upside_down = cv2.flip(self.input.value.image, 0)
    self.output.push(Frame(upside_down, 'flipped'))