def wait_for_match(image, timeout_secs=10, consecutive_matches=1,
                   match_parameters=None, region=Region.ALL, frames=None):
    """Search for an image in the device-under-test's video stream.

    :param image: The image to search for. See `match`.

    :type timeout_secs: int or float or None
    :param timeout_secs:
        A timeout in seconds. This function will raise `MatchTimeout` if no
        match is found within this time.

    :param int consecutive_matches:
        Forces this function to wait for several consecutive frames with a
        match found at the same x,y position. Increase ``consecutive_matches``
        to avoid false positives due to noise, or to wait for a moving
        selection to stop moving.

    :param match_parameters: See `match`.
    :param region: See `match`.

    :type frames: Iterator[stbt.Frame]
    :param frames: An iterable of video-frames to analyse. Defaults to
        ``stbt.frames()``.

    :returns: `MatchResult` when the image is found.
    :raises: `MatchTimeout` if no match is found after ``timeout_secs``
        seconds.
    """
    if match_parameters is None:
        match_parameters = MatchParameters()

    if frames is None:
        import stbt_core
        frames = stbt_core.frames(timeout_secs=timeout_secs)
    else:
        frames = limit_time(frames, timeout_secs)

    match_count = 0
    last_pos = Position(0, 0)
    image = _load_image(image)
    debug("Searching for " + image.friendly_name)
    # `res` starts as None so that an empty `frames` iterable raises
    # MatchTimeout (with no frame) rather than a NameError on the final raise.
    res = None
    for frame in frames:
        res = match(image, match_parameters=match_parameters, region=region,
                    frame=frame)
        if res.match and (match_count == 0 or res.position == last_pos):
            match_count += 1
        else:
            match_count = 0
        last_pos = res.position
        if match_count == consecutive_matches:
            debug("Matched " + image.friendly_name)
            return res

    raise MatchTimeout(None if res is None else res.frame,
                       image.friendly_name, timeout_secs)
def wait_for_motion(
        timeout_secs=10, consecutive_frames=None,
        noise_threshold=None, mask=None, region=Region.ALL, frames=None):
    """Wait until motion (pixel differences between successive frames) is
    detected in the device-under-test's video stream.

    :type timeout_secs: int or float or None
    :param timeout_secs:
        A timeout in seconds. This function will raise `MotionTimeout` if no
        motion is detected within this time.

    :type consecutive_frames: int or str
    :param consecutive_frames:
        Considers the video stream to have motion if there were differences
        between the specified number of consecutive frames. Either a positive
        integer, or a string "x/y" meaning "x frames with motion out of a
        sliding window of y frames". Defaults to "10/20"; the default can be
        overridden with ``consecutive_frames`` in the ``[motion]`` section of
        :ref:`.stbt.conf`.

    :param float noise_threshold: See `detect_motion`.
    :param mask: See `detect_motion`.
    :param region: See `detect_motion`.
    :param frames: See `detect_motion`.

    :returns: `MotionResult` when motion is detected; its ``time`` and
        ``frame`` attributes correspond to the first frame in which motion
        was detected.
    :raises: `MotionTimeout` if no motion is detected after ``timeout_secs``
        seconds.
    """
    if frames is None:
        import stbt_core
        frames = stbt_core.frames()

    if consecutive_frames is None:
        consecutive_frames = get_config('motion', 'consecutive_frames')

    # Parse "x/y" (x frames with motion out of a window of y), or a plain
    # integer (x out of x).
    numerator, slash, denominator = str(consecutive_frames).partition('/')
    motion_frames = int(numerator)
    considered_frames = int(denominator) if slash else motion_frames

    if motion_frames > considered_frames:
        raise ConfigurationError(
            "`motion_frames` exceeds `considered_frames`")

    debug("Waiting for %d out of %d frames with motion" % (
        motion_frames, considered_frames))

    if mask is not None:
        mask = load_image(mask, cv2.IMREAD_GRAYSCALE)
        debug("Using mask %s" % (mask.relative_filename or "<Image>"))

    # Sliding window of the last `considered_frames` MotionResults, plus a
    # running count of how many of them had motion.
    window = deque(maxlen=considered_frames)
    hits = 0
    last_frame = None
    for res in detect_motion(
            timeout_secs, noise_threshold, mask, region, frames):
        hits += bool(res)
        if len(window) == window.maxlen:
            hits -= bool(window.popleft())
        window.append(res)
        if hits >= motion_frames:
            debug("Motion detected.")
            # Return the earliest motion result in the window: that is when
            # the motion actually started.
            for earlier in window:
                if earlier:
                    return earlier
            assert False, ("Logic error in wait_for_motion: This code "
                           "should never be reached")
        last_frame = res.frame

    raise MotionTimeout(last_frame,
                        None if mask is None else mask.relative_filename,
                        timeout_secs)
def detect_motion(timeout_secs=10, noise_threshold=None, mask=None,
                  region=Region.ALL, frames=None):
    """Generator that yields a sequence of one `MotionResult` for each frame
    processed from the device-under-test's video stream.

    The `MotionResult` indicates whether any motion was detected.

    Use it in a ``for`` loop like this::

        for motionresult in stbt.detect_motion():
            ...

    In most cases you should use `wait_for_motion` instead.

    :type timeout_secs: int or float or None
    :param timeout_secs:
        A timeout in seconds. After this timeout the iterator will be
        exhausted. That is, a ``for`` loop like
        ``for m in detect_motion(timeout_secs=10)`` will terminate after 10
        seconds. If ``timeout_secs`` is ``None`` then the iterator will yield
        frames forever. Note that you can stop iterating (for example with
        ``break``) at any time.

    :param float noise_threshold:
        The amount of noise to ignore. This is only useful with noisy
        analogue video sources. Valid values range from 0 (all differences
        are considered noise; a value of 0 will never report motion) to 1.0
        (any difference is considered motion).

        This defaults to 0.84. You can override the global default value by
        setting ``noise_threshold`` in the ``[motion]`` section of
        :ref:`.stbt.conf`.

    :type mask: str or `numpy.ndarray`
    :param mask:
        A black & white image that specifies which part of the image to
        search for motion. White pixels select the area to analyse; black
        pixels select the area to ignore. The mask must be the same size as
        the video frame.

        This can be a string (a filename that will be resolved as per
        `load_image`) or a single-channel image in OpenCV format.

    :type region: `Region`
    :param region: Only analyze the specified region of the video frame.

        If you specify both ``region`` and ``mask``, the mask must be the
        same size as the region.

    :type frames: Iterator[stbt.Frame]
    :param frames: An iterable of video-frames to analyse. Defaults to
        ``stbt.frames()``.
    """
    if frames is None:
        import stbt_core
        frames = stbt_core.frames()
    frames = limit_time(frames, timeout_secs)  # pylint: disable=redefined-variable-type

    if noise_threshold is None:
        noise_threshold = get_config(
            'motion', 'noise_threshold', type_=float)

    debug("Searching for motion")

    if mask is not None:
        mask = load_image(mask, cv2.IMREAD_GRAYSCALE)
        debug("Using mask %s" % (mask.relative_filename or "<Image>"))

    try:
        frame = next(frames)
    except StopIteration:
        return

    region = Region.intersect(_image_region(frame), region)
    previous_frame_gray = cv2.cvtColor(crop(frame, region),
                                       cv2.COLOR_BGR2GRAY)
    if (mask is not None and
            mask.shape[:2] != previous_frame_gray.shape[:2]):
        # Fix: the original used `repr(mask.relative_filename) or "<Image>"`,
        # but `repr(None)` is the truthy string "None" so the "<Image>"
        # fallback was unreachable.
        raise ValueError(
            "The dimensions of the mask %s %s don't match the video frame %s"
            % (repr(mask.relative_filename) if mask.relative_filename
               else "<Image>",
               mask.shape, previous_frame_gray.shape))

    for frame in frames:
        imglog = ImageLogger("detect_motion", region=region)
        imglog.imwrite("source", frame)
        imglog.set(roi=region, noise_threshold=noise_threshold)

        frame_gray = cv2.cvtColor(crop(frame, region), cv2.COLOR_BGR2GRAY)
        imglog.imwrite("gray", frame_gray)
        imglog.imwrite("previous_frame_gray", previous_frame_gray)

        absdiff = cv2.absdiff(frame_gray, previous_frame_gray)
        imglog.imwrite("absdiff", absdiff)

        if mask is not None:
            absdiff = cv2.bitwise_and(absdiff, mask)
            imglog.imwrite("mask", mask)
            imglog.imwrite("absdiff_masked", absdiff)

        _, thresholded = cv2.threshold(
            absdiff, int((1 - noise_threshold) * 255), 255,
            cv2.THRESH_BINARY)
        eroded = cv2.erode(
            thresholded,
            cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
        imglog.imwrite("absdiff_threshold", thresholded)
        imglog.imwrite("absdiff_threshold_erode", eroded)

        out_region = pixel_bounding_box(eroded)
        if out_region:
            # Undo cv2.erode above:
            out_region = out_region.extend(x=-1, y=-1)
            # Undo crop:
            out_region = out_region.translate(region)

        motion = bool(out_region)
        if motion:
            # Only update the comparison frame if it's different to the
            # previous one. This makes `detect_motion` more sensitive to
            # slow motion because the differences between frames 1 and 2
            # might be small and the differences between frames 2 and 3
            # might be small but we'd see the difference by looking between
            # 1 and 3.
            previous_frame_gray = frame_gray

        result = MotionResult(getattr(frame, "time", None), motion,
                              out_region, frame)
        draw_on(frame, result, label="detect_motion()")
        debug("%s found: %s" % (
            "Motion" if motion else "No motion", str(result)))
        _log_motion_image_debug(imglog, result)
        yield result
def detect_motion(timeout_secs=10, noise_threshold=None, mask=None,
                  region=Region.ALL, frames=None):
    """Generator yielding one `MotionResult` per frame processed from the
    device-under-test's video stream; each result says whether that frame
    differed from the previous one.

    Use it in a ``for`` loop like this::

        for motionresult in stbt.detect_motion():
            ...

    In most cases you should use `wait_for_motion` instead.

    NOTE(review): this file contains an earlier ``detect_motion`` definition
    that this one shadows — confirm which is intended.

    :type timeout_secs: int or float or None
    :param timeout_secs:
        A timeout in seconds. After this timeout the iterator will be
        exhausted. That is, ``for m in detect_motion(timeout_secs=10)`` will
        terminate after 10 seconds. If ``timeout_secs`` is ``None`` the
        iterator yields frames forever; you can still stop iterating (for
        example with ``break``) at any time.

    :param float noise_threshold:
        The amount of noise to ignore. Only useful with noisy analogue video
        sources. Valid values range from 0 (all differences are considered
        noise; never reports motion) to 1.0 (any difference is considered
        motion). Defaults to 0.84; the default can be overridden with
        ``noise_threshold`` in the ``[motion]`` section of :ref:`.stbt.conf`.

    :type mask: str or `numpy.ndarray`
    :param mask:
        A black & white image selecting which part of the frame to analyse:
        white pixels are analysed, black pixels are ignored. Either a
        filename (resolved as per `load_image`) or a single-channel OpenCV
        image. If ``region`` is given the mask must match the region's size;
        otherwise it must match the frame's size.

    :type region: `Region`
    :param region: Only analyze the specified region of the video frame.

    :type frames: Iterator[stbt.Frame]
    :param frames: An iterable of video-frames to analyse. Defaults to
        ``stbt.frames()``.
    """
    if frames is None:
        import stbt_core
        frames = stbt_core.frames()
    frames = limit_time(frames, timeout_secs)  # pylint: disable=redefined-variable-type

    debug("Searching for motion")

    if mask is not None:
        mask = load_image(mask, cv2.IMREAD_GRAYSCALE)
        debug("Using mask %s" % (mask.relative_filename or "<Image>"))

    # The first frame only seeds the differ; results start from the second
    # frame.
    try:
        first_frame = next(frames)
    except StopIteration:
        return
    differ = MotionDiff(first_frame, region, mask,
                        noise_threshold=noise_threshold)

    for current in frames:
        result = differ.diff(current)
        draw_on(current, result, label="detect_motion()")
        status = "Motion" if result.motion else "No motion"
        debug("%s found: %s" % (status, str(result)))
        yield result