class ItemSource(BareComponent):

    def __init__(self):
        self.output = Output('item_source')

    def push_item(self, item):
        self.output.push(item)

class SampleSource(BareComponent):

    def __init__(self):
        self.output = Output('sample')

    def push(self, value):
        self.output.push(value)
Example #3
class SquareFilter(EventDrivenComponent):
    def __init__(self, input_, output):
        self.input = Latest(input_, trigger=True)
        self.output = Output(output)

    async def process(self):
        self.output.push(self.input.value**2)

class LabelSource(BareComponent):

    def __init__(self):
        self.output = Output('label')

    def push_label(self, label: str):
        self.output.push(label)
Example #5
class PaintDisplay(OpenCvDisplay):
    def __init__(self, source):
        super().__init__(source)
        self.mouse_event: Buffer[MouseEvent] = Buffer(
            EventBasedMouseHandler.MOUSE_EVENT)
        self.reset_calibration = Output(
            ProjectorDriver2d.RESET_CALIBRATION_TRIGGER)
        self.clear_canvas = Output('trigger.clear_canvas')

        self._menu = Menu([
            Label('Menu'),
            Button('Calibrate', self.calibrate),
            Button('Clear Canvas', self.clear),
        ], position=(1, 0))

    def calibrate(self):
        self.reset_calibration.push(None)

    def clear(self):
        self.clear_canvas.push(None)

    def draw(self, surface: pygame.Surface) -> List[MouseRegion]:
        regions = super().draw(surface)
        self._menu.handle_mouse_events(self.mouse_event.values)
        regions += self._menu.draw(surface)
        return regions

class Labeler(EventDrivenComponent):

    def __init__(self):
        self.item_source = Buffer('item_source', trigger=True)
        self.label = Latest('label')
        self.output = Output('sink')

    async def process(self):
        for item in self.item_source.values:
            self.output.push(item + self.label.value)

class SquareFilter(IteratingComponent):
    target_fps = 10

    def __init__(self, input_, output):
        self.input = Latest(input_)
        self.output = Output(output)

    async def process(self):
        if self.input.updated:
            self.output.push(self.input.value**2)
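
The two SquareFilter variants above differ only in scheduling: the event-driven one runs once per triggering input event, while the iterating one runs at a fixed rate and checks `updated` itself. A minimal plain-asyncio sketch of the two loop styles (this is not async2v code; the queue, dict and sentinel are illustrative stand-ins for the Buffer and Latest fields):

import asyncio


async def event_driven(queue: asyncio.Queue) -> None:
    # Runs once per incoming value, like the EventDrivenComponent variant with trigger=True
    while True:
        value = await queue.get()
        if value is None:  # sentinel so the sketch terminates
            return
        print('event-driven:', value ** 2)


async def iterating(state: dict, fps: int = 10, cycles: int = 20) -> None:
    # Runs at a fixed rate and only acts when the input changed,
    # like the IteratingComponent variant guarded by self.input.updated
    last = None
    for _ in range(cycles):
        if state.get('value') is not None and state['value'] != last:
            last = state['value']
            print('iterating:', last ** 2)
        await asyncio.sleep(1 / fps)


async def main() -> None:
    queue, state = asyncio.Queue(), {}
    for v in (2, 3, 4):
        await queue.put(v)  # the queue keeps every value (Buffer-like)
        state['value'] = v  # the dict keeps only the latest value (Latest-like)
    await queue.put(None)
    await asyncio.gather(event_driven(queue), iterating(state))


asyncio.run(main())
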
Example #8
class SampleSubComponent(SubComponent):
    def __init__(self):
        self.input = Buffer('sample', trigger=True)
        self.output = Output('sample')
        self.log = []

    def push(self, value):
        self.output.push(value)

    def do_something(self):
        self.log += self.input.values

class TriggerComponent(IteratingComponent):
    def __init__(self) -> None:
        super().__init__()
        self.output = Output('trigger')

    @property
    def target_fps(self) -> int:
        return 1

    async def process(self) -> None:
        self.output.push(None)

class SampleSource(IteratingComponent):
    target_fps = 10

    def __init__(self, data, name='sample'):
        self.output = Output(name)
        self.data = data.copy()

    async def process(self):
        try:
            self.output.push(self.data.pop(0))
        except IndexError:
            self.logger.warning('End of data reached')
Example #11
class MyKeyboardHandler(KeyboardHandler):
    ACTIONS = [
        Action('enter', ['RETURN']),
    ]

    def __init__(self, layout: KeyboardLayout):
        super().__init__(layout)
        self.state = Output('state')
        self.name = Output('name')
        self._name = 'World'

    def key_down(self, action: str) -> None:
        self.state.push('input')
        self.capture_text('name', self._name)

    def key_up(self, action: str) -> None:
        pass

    def process(self) -> None:
        pass

    def text_capture_completed(self, capture_id: str, text: str):
        self._name = text
        self.name.push(self._name)
        self.state.push('hello')

    def text_capture_update(self, capture_id: str, text: str):
        self.name.push(text)

class FaceDetector(EventDrivenComponent):
    FACE_CASCADE = os.path.join(cv2.data.haarcascades, 'haarcascade_frontalface_default.xml')

    def __init__(self):
        self.source: Latest[Frame] = Latest('source', trigger=True)
        self.face_cascade = cv2.CascadeClassifier(self.FACE_CASCADE)
        self.output = Output('faces')

    async def process(self):
        if not self.source.value:
            return
        faces = await run_in_executor(self._detect_faces, self.source.value.image)
        self.output.push(faces)

    def _detect_faces(self, image):
        return self.face_cascade.detectMultiScale(image, 1.3, 5)
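
The detection itself is plain OpenCV; `run_in_executor` only keeps the blocking cascade call off the event loop. The same Haar-cascade call as a standalone sketch (the image paths are placeholders):

import os

import cv2

cascade_path = os.path.join(cv2.data.haarcascades,
                            'haarcascade_frontalface_default.xml')
face_cascade = cv2.CascadeClassifier(cascade_path)

image = cv2.imread('input.jpg')  # placeholder input
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # cascades expect a single-channel image
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)

for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imwrite('faces.jpg', image)  # placeholder output
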
Example #13
class GameController(EventDrivenComponent):
    def __init__(self, game):
        self.game = game
        self.node = 'start'
        self.selection = 0

        self.keyboard = Buffer(EventBasedKeyboardHandler.KEYBOARD_EVENT,
                               trigger=True)  # type: Buffer[KeyboardEvent]

        self.text = Output('text')
        self.choices = Output('choices')

    async def setup(self):
        self._publish_state()

    async def process(self):
        changed = False
        choices = self.game[self.node]['choices']
        for event in self.keyboard.values:
            if event.active:
                if event.action == 'up' and self.selection > 0:
                    self.selection -= 1
                    changed = True
                elif event.action == 'down' and self.selection + 1 < len(
                        choices):
                    self.selection += 1
                    changed = True
                elif event.action == 'choose' and len(choices):
                    choice = choices[self.selection]
                    self.node = choice['goto']
                    self.selection = 0
                    changed = True

        if changed:
            self._publish_state()

    def _publish_state(self):
        node = self.game[self.node]
        self.text.push(node['text'])
        choices = [{
            'text': choice['text'],
            'selected': i == self.selection
        } for i, choice in enumerate(node['choices'])]
        self.choices.push(choices)
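
The controller only assumes a nested-dict game graph starting at a 'start' node: every node carries a `text` and a list of `choices`, and every choice names its destination node via `goto`. A minimal graph of that shape (the contents are made up):

game = {
    'start': {
        'text': 'You stand at a crossroads.',
        'choices': [
            {'text': 'Go left', 'goto': 'cave'},
            {'text': 'Stay here', 'goto': 'start'},
        ],
    },
    'cave': {
        'text': 'It is pitch dark.',
        'choices': [
            {'text': 'Go back', 'goto': 'start'},
        ],
    },
}
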
Example #14
class PaintController(EventDrivenComponent):
    COLOR = (0, 255, 0)
    BG_COLOR = (0, 0, 0)
    THICKNESS = 4

    def __init__(self):
        self.source = Latest('source', trigger=True)  # type: Latest[Frame]
        self.output = Output('display')
        self.overlay = Output('overlay')
        self.mouse_movement = Buffer(
            EventBasedMouseHandler.MOUSE_MOVEMENT,
            trigger=True)  # type: Buffer[MouseMovement]
        self.clear_canvas = Latest('trigger.clear_canvas', trigger=True)
        self.debug = Output(event.OPENCV_FRAME_EVENT)
        self._canvas = None
        self._last_position = None

    async def process(self):
        if not self.source.value:
            return

        if self.clear_canvas.updated:
            self._canvas = None

        if self._canvas is None:
            self._canvas = np.zeros(self.source.value.image.shape,
                                    dtype='uint8')

        for e in self.mouse_movement.values:
            if (e.region.name in ['display', 'VideoSource0']
                    and e.buttons[MouseButton.LEFT]):
                if self._last_position:
                    cv2.line(self._canvas, self._last_position,
                             e.restored_position, self.COLOR, self.THICKNESS)
                self._last_position = e.restored_position
            elif (e.region.name in ['display', 'VideoSource0']
                  and e.buttons[MouseButton.RIGHT]):
                if self._last_position:
                    cv2.line(self._canvas, self._last_position,
                             e.restored_position, self.BG_COLOR,
                             self.THICKNESS)
                self._last_position = e.restored_position
            else:
                self._last_position = None

        output = self.source.value.image.copy()
        mask = (self._canvas > 0).any(-1)
        output[mask, :] = self._canvas[mask, :]
        output_frame = Frame(output, 'display')
        self.output.push(output_frame)
        self.debug.push(output_frame)
        overlay_frame = Frame(self._canvas, 'overlay')
        self.overlay.push(overlay_frame)
        self.debug.push(overlay_frame)
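
The compositing at the end is a pure NumPy operation: any canvas pixel that has ever been painted (non-zero in at least one channel) overwrites the corresponding camera pixel. A standalone sketch of just that masking step:

import numpy as np

frame = np.full((4, 4, 3), 50, dtype='uint8')  # stand-in for the camera image
canvas = np.zeros_like(frame)
canvas[1:3, 1:3] = (0, 255, 0)                 # a painted green square

mask = (canvas > 0).any(-1)                    # True wherever any channel was painted
composite = frame.copy()
composite[mask, :] = canvas[mask, :]           # painted pixels replace camera pixels

print(mask.astype(int))
print(composite[1, 1], composite[0, 0])        # [  0 255   0] [50 50 50]
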
Example #15
class PersonDetector(EventDrivenComponent):
    def __init__(self):
        self.source = Latest('source', trigger=True)  # type: Latest[Frame]
        self._hog = cv2.HOGDescriptor()
        self._hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
        self.output = Output('people')

    async def process(self):
        rects, weights = await run_in_executor(self._detect)
        people = [
            Person(*rect, weight) for rect, weight in zip(rects, weights)
        ]
        self.output.push(people)

    def _detect(self):
        step_size = int(self.source.value.width / 200)
        scale = 1.5
        padding = 16
        return self._hog.detectMultiScale(self.source.value.image,
                                          winStride=(step_size, step_size),
                                          scale=scale,
                                          padding=(padding, padding),
                                          useMeanshiftGrouping=True)
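
Underneath, this is OpenCV's default HOG + linear SVM people detector; the component just derives its window stride from the frame width and uses mean-shift grouping. The same call as a standalone sketch (the image paths are placeholders):

import cv2

hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

image = cv2.imread('people.jpg')  # placeholder input
rects, weights = hog.detectMultiScale(image,
                                      winStride=(8, 8),
                                      padding=(16, 16),
                                      scale=1.05,
                                      useMeanshiftGrouping=True)

for (x, y, w, h) in rects:
    cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
cv2.imwrite('people_out.jpg', image)  # placeholder output
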
Example #16
class PersonDisplay(EventDrivenComponent):
    RECT_COLOR = (255, 0, 0)
    HIGHLIGHTED_RECT_COLOR = (255, 255, 0)
    RECT_THICKNESS = 2

    def __init__(self):
        self.source = Latest('source', trigger=True)  # type: Latest[Frame]
        self.people = Latest('people')  # type: Latest[List[Person]]
        self.output = Output('display')
        self.debug = Output(event.OPENCV_FRAME_EVENT)
        self.mouse = Buffer(
            EventBasedMouseHandler.MOUSE_EVENT)  # type: Buffer[MouseEvent]
        self.mouse_move = LatestBy(
            EventBasedMouseHandler.MOUSE_MOVEMENT,
            lambda m: m.region.name)  # type: LatestBy[MouseMovement]

    async def process(self):
        image = self.source.value.image.copy()

        if self.people.value:
            for p in self.people.value:
                if self._contains_mouse_pointer(p):
                    color = self.HIGHLIGHTED_RECT_COLOR
                else:
                    color = self.RECT_COLOR
                cv2.rectangle(image, (p.x, p.y), (p.x + p.w, p.y + p.h), color,
                              self.RECT_THICKNESS)

        frame = Frame(image, 'display')
        self.output.push(frame)
        self.debug.push(frame)

    def _contains_mouse_pointer(self, person: Person) -> bool:
        if not self.mouse_move.value_dict.get('display', None):
            return False
        x, y = self.mouse_move.value_dict['display'].restored_position
        return person.x <= x <= person.x + person.w and person.y <= y <= person.y + person.h

class EventSource(BareComponent):
    def __init__(self):
        self.output = Output('sample')

    def trigger(self):
        self.output.push(None)
Example #18
class EventBasedKeyboardHandler(KeyboardHandler):
    """
    Abstract event based keyboard handler base class

    This keyboard handler emits events containing `KeyboardEvent` payload on the event key `KEYBOARD_EVENT`, when a key
    bound to a configured `Action` is pressed or released.

    Text capture can be triggered by sending an event containing a `capture_id` to the key `CAPTURE_TEXT_TRIGGER`.
    The results of this text capture flow are emitted via events containing `CaptureTextEvent` payload on the event key
    `CAPTURE_TEXT_EVENT`.

    Example:

    ::

        class MyKeyboardHandler(EventBasedKeyboardHandler):
            ACTIONS = [
                Action('up', ['UP', 'w']),
                Action('down', ['DOWN', 's']),
                Action('left', ['LEFT', 'a']),
                Action('right', ['RIGHT', 'd']),
            ]

    """

    CAPTURE_TEXT_TRIGGER: str = 'async2v.keyboard.trigger.capture'
    """
    :type: str
    """

    CAPTURE_TEXT_EVENT: str = 'async2v.keyboard.text'
    """
    :type: str
    """

    KEYBOARD_EVENT: str = 'async2v.keyboard.action'
    """
    :type: str
    """

    def __init__(self, layout: KeyboardLayout):
        """
        :param layout: Use the layout created by the `layout_from_args` method of the `KeyboardConfigurator` created
            for that concrete `KeyboardHandler`
        """
        super().__init__(layout)
        self.keyboard = Output(self.KEYBOARD_EVENT)
        self.text = Output(self.CAPTURE_TEXT_EVENT)
        self.capture_trigger = Latest(self.CAPTURE_TEXT_TRIGGER)

    def key_down(self, action: str) -> None:
        self.keyboard.push(KeyboardEvent(action, True))

    def key_up(self, action: str) -> None:
        self.keyboard.push(KeyboardEvent(action, False))

    def process(self) -> None:
        if self.capture_trigger.updated:
            self.capture_text(self.capture_trigger.value)

    def text_capture_update(self, capture_id: str, text: str):
        self.text.push(CaptureTextEvent(capture_id, text, complete=False))

    def text_capture_completed(self, capture_id: str, text: str):
        self.text.push(CaptureTextEvent(capture_id, text, complete=True))
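
A consumer of the text-capture flow only needs the two event keys defined above: push a `capture_id` to `CAPTURE_TEXT_TRIGGER`, then read the resulting `CaptureTextEvent` payloads from `CAPTURE_TEXT_EVENT`. A hedged sketch using only field types and payload attributes that appear elsewhere in these examples (the component name and the 'greeting' output key are made up):

class NamePrompt(EventDrivenComponent):

    def __init__(self):
        self.trigger = Output(EventBasedKeyboardHandler.CAPTURE_TEXT_TRIGGER)
        self.captured = Buffer(EventBasedKeyboardHandler.CAPTURE_TEXT_EVENT,
                               trigger=True)  # type: Buffer[CaptureTextEvent]
        self.greeting = Output('greeting')  # made-up output key

    async def setup(self):
        # Ask the keyboard handler to start capturing text under the id 'name'
        self.trigger.push('name')

    async def process(self):
        for e in self.captured.values:
            if e.complete:
                self.greeting.push(f'Hello, {e.text}!')
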
Example #19
class EventBasedMouseHandler(MouseHandler):
    """
    Event based mouse handler

    This mouse handler emits events containing `MouseEvent` payload on the event key `MOUSE_EVENT` and
    events containing `MouseMovement` payload on the event key `MOUSE_MOVEMENT`.

    Supports the `MouseRegion` concept explained above.
    """

    MOUSE_EVENT: str = 'async2v.mouse.event'
    """
    :type: str
    """

    MOUSE_MOVEMENT: str = 'async2v.mouse.movement'
    """
    :type: str
    """
    def __init__(self):
        self._regions: List[MouseRegion] = []
        self.event = Output(self.MOUSE_EVENT)
        self.movement = Output(self.MOUSE_MOVEMENT)
        self._last_region: Optional[MouseRegion] = None
        self._last_position = (-1, -1)

    def push_regions(self, regions: List[MouseRegion]):
        self._regions: List[MouseRegion] = regions
        self._get_region()

    def _get_region(self,
                    position: Tuple[int, int] = None) -> Optional[MouseRegion]:
        if position:
            self._last_position = position
        for region in reversed(self._regions):
            if region.rect.collidepoint(self._last_position[0],
                                        self._last_position[1]):
                if not self._last_region or region.name != self._last_region.name:
                    if self._last_region:
                        self.event.push(
                            MouseEvent(self._last_region, self._last_position,
                                       MouseEventType.LEAVE))
                    self.event.push(
                        MouseEvent(region, self._last_position,
                                   MouseEventType.ENTER))
                    self._last_region = region
                return region
        if self._last_position != (-1, -1):
            self.logger.warning(
                f'Position {self._last_position} is not in any region')
        return None

    def push_button_down(self, position: Tuple[int, int], button: int):
        region = self._get_region(position)
        button = MouseButton(button)
        if region:
            self.event.push(
                MouseEvent(region,
                           position,
                           MouseEventType.DOWN,
                           button=button))

    def push_button_up(self, position: Tuple[int, int], button: int):
        region = self._get_region(position)
        button = MouseButton(button)
        if region:
            self.event.push(
                MouseEvent(region, position, MouseEventType.UP, button=button))

    def push_movement(self, position: Tuple[int, int], rel: Tuple[int, int],
                      buttons: Tuple[int, int, int]):
        region = self._get_region(position)
        buttons = {MouseButton(i + 1): buttons[i] > 0 for i in range(3)}
        if region:
            self.movement.push(MouseMovement(region, position, rel, buttons))
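
`_get_region` walks the region list in reverse so that the most recently pushed (topmost) region wins, and the hit test itself is `pygame.Rect.collidepoint`. A standalone sketch of that resolution order (the region names and rectangles are made up):

import pygame

regions = [
    ('background', pygame.Rect(0, 0, 640, 480)),
    ('menu', pygame.Rect(500, 0, 140, 200)),  # pushed last, so it is on top
]


def topmost_region(position):
    for name, rect in reversed(regions):
        if rect.collidepoint(position):
            return name
    return None


print(topmost_region((550, 50)))   # 'menu' shadows 'background'
print(topmost_region((100, 100)))  # 'background'
print(topmost_region((700, 10)))   # None: outside every region
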
Example #20
class ProjectorDriver2d(EventDrivenComponent, ContainerMixin):
    """
    Draw overlays into the real world with a projector

    This component automatically calibrates a projector that projects into the field of view of a camera.
    Overlays can then be drawn in coordinates of the camera image. Calibration is performed automatically on start.
    It can also be triggered later by sending an arbitrary event to the key stored in `RESET_CALIBRATION_TRIGGER`.
    """

    #: Event key for reset command input. Events on this key trigger a new projector calibration.
    RESET_CALIBRATION_TRIGGER = 'async2v.projector.trigger.reset_calibration'

    @staticmethod
    def configurator() -> ProjectorDriver2dConfigurator:
        """
        Convenience method to create a matching configurator
        """
        return ProjectorDriver2dConfigurator()

    def __init__(self,
                 source: str,
                 overlay: str,
                 projector: str = 'projector',
                 config: ProjectorDriver2dConfiguration = ProjectorDriver2dConfiguration()):
        """
        :param source: Key of the video stream input. Needs to provide events with `Frame` payload.
        :param overlay: Key of the overlay input. Needs to provide events with `Frame` payload.
        :param projector: Key of projector output. `Frame` events are pushed to this output.
        :param config: Can be generated via `ProjectorDriver2dConfigurator`
        """
        self._calibrator = _ProjectorCalibrator2d(source, projector,
                                                  config.debug)
        super().__init__([self._calibrator])
        self.overlay: Latest[Frame] = Latest(overlay, trigger=True)
        self.projector = Output(projector)
        self.reset = Latest(self.RESET_CALIBRATION_TRIGGER, trigger=True)
        self._calibration_started = False

    async def process(self):
        if self.reset.updated:
            self._calibrator.reset()

        if self._calibrator.needs_calibration():
            if not self._calibration_started:
                self._calibration_started = True
                self.logger.info('Starting projector calibration')
            self._calibrator.calibrate()
        elif self._calibration_started:
            self.logger.info('Finished projector calibration')
            self._calibration_started = False
            self._draw_overlay()
        elif self.overlay.updated:
            self._draw_overlay()

    def _draw_overlay(self):
        if not self.overlay.value:
            return

        canvas = cv2.warpPerspective(self.overlay.value.image,
                                     self._calibrator.transformation_matrix,
                                     self._calibrator.CANVAS_SIZE)
        canvas_frame = Frame(canvas, 'projector')
        self.projector.push(canvas_frame)
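
`_draw_overlay` is essentially one `cv2.warpPerspective` call: the homography found during calibration maps camera coordinates onto the projector canvas, so an overlay drawn in camera space ends up at the matching physical spot. A standalone sketch with a made-up transformation matrix:

import cv2
import numpy as np

CANVAS_SIZE = (1200, 900)  # projector canvas as (width, height)

overlay = np.zeros((480, 640, 3), dtype=np.uint8)  # overlay drawn in camera coordinates
cv2.circle(overlay, (320, 240), 40, (0, 255, 0), -1)

# Illustrative camera-to-projector homography; in the component it comes from the calibrator
matrix = np.array([[1.5, 0.0, 100.0],
                   [0.0, 1.5, 80.0],
                   [0.0, 0.0, 1.0]], dtype='float32')

canvas = cv2.warpPerspective(overlay, matrix, CANVAS_SIZE)
print(canvas.shape)  # (900, 1200, 3)
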
Example #21
class _ProjectorCalibrator2d(SubComponent):
    KERNEL = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

    CANVAS_SIZE = (1200, 900)  # TODO make this configurable in a sane way
    CALIBRATION_LEARNING_DURATION = 10
    CALIBRATION_PATTERN_GRID_SIZE = 100
    CALIBRATION_CYCLE = [
        ('learn_background', CALIBRATION_LEARNING_DURATION),
        ('draw_pattern', 1),
        ('wait', 5),
        ('calibrate_pattern', 5),
        ('clear_pattern', 1),
        ('wait', 5),
    ]

    def __init__(self, source, projector, debug):
        self._background: cv2.BackgroundSubtractor = (
            cv2.createBackgroundSubtractorMOG2())
        self._calibration_cycle_i = 0
        self._initialize_pattern()
        self.source: Latest[Frame] = Latest(source, trigger=True)
        self.projector = Output(projector)
        if debug:
            self.debug = Output(event.OPENCV_FRAME_EVENT)
        else:
            self.debug = None
        self._last_calibrated = 0
        self._transformation_matrix = None

    def reset(self) -> None:
        self._transformation_matrix = None

    def needs_calibration(self) -> bool:
        return self._transformation_matrix is None

    @property
    def transformation_matrix(self):
        return self._transformation_matrix

    def calibrate(self):
        if self.source.value is None:
            return
        self._calibration_cycle_i = (self._calibration_cycle_i + 1) % sum(
            [n for action, n in self.CALIBRATION_CYCLE])
        action = self._calibration_action()
        if action == 'learn_background':
            self._background.apply(self.source.value.image,
                                   learningRate=1 /
                                   self.CALIBRATION_LEARNING_DURATION)
        elif action == 'wait':
            pass
        elif action == 'draw_pattern':
            pattern = self._draw_pattern()
            self._push_pattern_to_projector(pattern)
        elif action == 'clear_pattern':
            pattern = numpy.zeros(tuple(reversed(self.CANVAS_SIZE)),
                                  dtype=numpy.uint8)
            cv2.rectangle(pattern, (0, 0), self.CANVAS_SIZE, 128,
                          self.CALIBRATION_PATTERN_GRID_SIZE)
            self._push_pattern_to_projector(pattern)
        elif action == 'calibrate_pattern':
            mask = self._background.apply(self.source.value.image,
                                          learningRate=0)
            mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, self.KERNEL)
            mask = cv2.blur(mask, (5, 5))
            blob_detector = self._create_blob_detector()
            found, centers = cv2.findCirclesGrid(
                mask, self._calibration_pattern_size,
                cv2.CALIB_CB_ASYMMETRIC_GRID + cv2.CALIB_CB_CLUSTERING,
                blob_detector, None)
            if self.debug:
                output = self.source.value.image.copy()
                blobs = blob_detector.detect(mask)
                output = cv2.drawKeypoints(
                    output, blobs, numpy.array([]), (0, 0, 255),
                    cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
                cv2.drawChessboardCorners(output,
                                          self._calibration_pattern_size,
                                          centers, found)
                self.debug.push(Frame(mask, 'projector.calibration.mask'))
                self.debug.push(Frame(output, 'projector.calibration.debug'))
            if found:
                self._finish_calibration(centers)
        else:
            raise ValueError(f'Unknown action {action}')

    def _draw_pattern(self):
        overlay = numpy.zeros(tuple(reversed(self.CANVAS_SIZE)),
                              dtype=numpy.uint8)
        cv2.rectangle(overlay, (0, 0), self.CANVAS_SIZE, 128,
                      self.CALIBRATION_PATTERN_GRID_SIZE)
        r = int(self.CALIBRATION_PATTERN_GRID_SIZE * 0.2)
        for pos in self._calibration_pattern:
            cv2.circle(overlay, pos, r, (255, 255, 255), -1)
        return overlay

    def _initialize_pattern(self):
        x0 = int(self.CANVAS_SIZE[0] * 0.25)
        y0 = int(self.CANVAS_SIZE[1] * 0.25)
        a0 = self.CALIBRATION_PATTERN_GRID_SIZE
        a1 = numpy.sqrt(3) / 2 * a0
        n_x = int(self.CANVAS_SIZE[0] * 0.25 / a0) * 2 + 1
        n_y = int(self.CANVAS_SIZE[1] * 0.25 / a1) * 2 + 1

        circles = []
        for j in range(n_y):
            for i in range(n_x):
                x = int(x0 + i * a0 + (j % 2) * 0.5 * a0)
                y = int(y0 + j * a1)
                circles.append((x, y))

        self._calibration_pattern = circles
        self._calibration_pattern_size = (n_x, n_y)

    def _calibration_action(self):
        n = 0
        for action, duration in self.CALIBRATION_CYCLE:
            if n <= self._calibration_cycle_i < n + duration:
                return action
            n += duration
        raise RuntimeError(
            f'Invalid calibration state {self._calibration_cycle_i}')

    def _create_blob_detector(self):
        input_size = min(self.source.value.width, self.source.value.height)
        min_distance = int(input_size / 40)
        params = cv2.SimpleBlobDetector_Params()
        params.blobColor = 255
        params.filterByConvexity = False
        params.filterByArea = True
        params.filterByCircularity = True
        params.filterByColor = True
        params.filterByInertia = False
        params.minDistBetweenBlobs = min_distance
        params.minCircularity = 0.7
        params.minArea = int((min_distance * 0.1)**2)
        return cv2.SimpleBlobDetector_create(params)

    def _finish_calibration(self, detected_centers):
        canvas_centers = numpy.array(self._calibration_pattern,
                                     dtype='float32')
        self._transformation_matrix, _ = cv2.findHomography(
            detected_centers, canvas_centers)
        self._last_calibrated = time.time()
        self._calibration_cycle_i = 0

    def _push_pattern_to_projector(self, pattern):
        frame = Frame(pattern, 'projector')
        self.projector.push(frame)
        if self.debug:
            self.debug.push(frame)
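
The calibration reduces to a point-correspondence problem: the circle centers detected in the camera image are matched against the known pattern positions on the canvas, and `cv2.findHomography` fits the transformation between the two point sets. A standalone sketch of that final step with synthetic coordinates:

import cv2
import numpy as np

# Known circle positions on the projector canvas (what _draw_pattern drew)
canvas_centers = np.array([[300, 225], [400, 225], [350, 311],
                           [300, 397], [400, 397]], dtype='float32')

# Where the camera saw those circles (synthetic values for this sketch)
detected_centers = np.array([[210, 160], [270, 162], [242, 215],
                             [212, 270], [272, 272]], dtype='float32')

# Homography mapping camera coordinates onto the projector canvas
matrix, _ = cv2.findHomography(detected_centers, canvas_centers)
print(matrix.round(2))
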
Example #22
class VideoSource(IteratingComponent):
    """
    OpenCV video source component

    Reads from a video file or a camera at a given framerate.
    The frames are pushed to the provided event key wrapped in `Frame` objects.
    Supports full command line configuration via `VideoSourceConfigurator`.
    """

    @staticmethod
    def configurator() -> VideoSourceConfigurator:
        """
        Convenience method to create a matching configurator
        """
        return VideoSourceConfigurator()

    def __init__(self, config: VideoSourceConfig, key: str = 'source'):
        """
        :param config: Can be generated via `VideoSourceConfigurator`
        :param key: Event key of video output
        """
        self._path = config.path
        self._target_fps = config.fps
        self.output = Output(key)
        self.debug_output = Output(event.OPENCV_FRAME_EVENT)
        self._executor = ThreadPoolExecutor(max_workers=1)
        self._resolution = config.resolution
        self._resolution_verified = False
        self._capture: cv2.VideoCapture = None

    @property
    def target_fps(self) -> int:
        return self._target_fps

    @property
    def graph_colors(self) -> Tuple[str, str]:
        return '#8080F0', '#FBFBFF'

    async def setup(self):
        await asyncio.get_event_loop().run_in_executor(self._executor, self._create_capture)

    def _create_capture(self):
        self._capture = cv2.VideoCapture(self._path)
        if self._resolution:
            self._capture.set(cv2.CAP_PROP_FRAME_WIDTH, self._resolution[0])
            self._capture.set(cv2.CAP_PROP_FRAME_HEIGHT, self._resolution[1])

    async def process(self):
        ret, image = await asyncio.get_event_loop().run_in_executor(self._executor, self._capture.read)
        if ret:
            frame = Frame(image, self.id)
            if not self._resolution_verified:
                self._verify_resolution(frame)
            self.output.push(frame)
            self.debug_output.push(frame)
        else:
            self.logger.info('Could not read frame, assuming end of file')
            self.shutdown()

    def _verify_resolution(self, frame: Frame):
        if self._resolution:
            w, h = self._resolution
            if frame.width != w or frame.height != h:
                raise ValueError(f'Expected resolution {w}x{h}, got {frame.width}x{frame.height}')
        self._resolution_verified = True
        self.logger.info(f'Source resolution {frame.width}x{frame.height}')

    async def cleanup(self):
        await asyncio.get_event_loop().run_in_executor(self._executor, self._release_capture)

    def _release_capture(self):
        self._capture.release()
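
Stripped of the component plumbing, the source is a standard `cv2.VideoCapture` loop; the single-worker executor only keeps the blocking `read()` call off the asyncio event loop. A minimal synchronous sketch (the camera index and resolution are assumptions):

import cv2

capture = cv2.VideoCapture(0)  # camera index or a video file path
capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

try:
    while True:
        ret, image = capture.read()
        if not ret:  # read error or end of file
            break
        cv2.imshow('source', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    capture.release()
    cv2.destroyAllWindows()
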