def _create_reference_png(filename):
    # Throw away some frames to let everything settle
    pop_with_progress(stbt.frames(), 50)

    average = None
    for frame in pop_with_progress(stbt.frames(), FRAME_AVERAGE_COUNT):
        if average is None:
            average = numpy.zeros(shape=frame[0].shape, dtype=numpy.uint16)
        average += frame[0]
    average //= FRAME_AVERAGE_COUNT  # floor division keeps the uint16 dtype
    cv2.imwrite(filename, numpy.array(average, dtype=numpy.uint8))
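# A hedged usage sketch -- the filename and the constant below are
# illustrative assumptions; the real FRAME_AVERAGE_COUNT is defined elsewhere
# in the test module. Averaging many frames suppresses per-frame capture
# noise before the result is saved as a reference image.
FRAME_AVERAGE_COUNT = 16
_create_reference_png("reference-colours.png")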
def analyse_colours_video(number=None):
    """RGB!"""
    errors_in_a_row = 0
    n = 0
    qrscanner = QRScanner()
    for frame, _ in stbt.frames():
        if number is not None and n >= number:
            return
        n = n + 1

        # The colour is written above and below the rectangle because we want
        # to be sure that the top of the colour box is from the same frame as
        # the bottom.
        codes = qrscanner.read_qr_codes(frame)

        if (len(codes) == 4 and re.match('#[0-9a-f]{6}', codes[0])
                and all(c == codes[0] for c in codes)):
            colour_hex = codes[0]
            desired = numpy.array((
                int(colour_hex[1:3], 16),
                int(colour_hex[3:5], 16),
                int(colour_hex[5:7], 16)))
            colour = cv2.mean(frame[240:480, 520:760])
            colour = (colour[2], colour[1], colour[0])
            yield (n, desired, colour)
            errors_in_a_row = 0
        else:
            errors_in_a_row += 1
            if errors_in_a_row > 50:
                raise RuntimeError(
                    "Failed to find hexidecimal colour description")
def await_blank(brightness):
    for frame, _ in stbt.frames(10):
        grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        min_, max_, _, _ = cv2.minMaxLoc(grayscale)
        contrast = max_ - min_
        if contrast < 100 and abs(numpy.median(frame) - brightness) < 100:
            break
    else:
        sys.stderr.write(
            "WARNING: Did not detect blank frame of brightness %i\n"
            % brightness)
def wait_for_match(image, timeout_secs=10, consecutive_matches=1,
                   match_parameters=None, region=Region.ALL, frames=None):
    """Search for an image in the device-under-test's video stream.

    :param image: The image to search for. See `match`.

    :type timeout_secs: int or float or None
    :param timeout_secs:
        A timeout in seconds. This function will raise `MatchTimeout` if no
        match is found within this time.

    :param int consecutive_matches:
        Forces this function to wait for several consecutive frames with a
        match found at the same x,y position. Increase ``consecutive_matches``
        to avoid false positives due to noise, or to wait for a moving
        selection to stop moving.

    :param match_parameters: See `match`.
    :param region: See `match`.

    :type frames: Iterator[stbt.Frame]
    :param frames: An iterable of video-frames to analyse. Defaults to
        ``stbt.frames()``.

    :returns: `MatchResult` when the image is found.
    :raises: `MatchTimeout` if no match is found after ``timeout_secs`` seconds.
    """
    if match_parameters is None:
        match_parameters = MatchParameters()

    if frames is None:
        import stbt
        frames = stbt.frames(timeout_secs=timeout_secs)
    else:
        frames = limit_time(frames, timeout_secs)

    match_count = 0
    last_pos = Position(0, 0)
    image = _load_image(image)
    debug("Searching for " + image.friendly_name)
    for frame in frames:
        res = match(image, match_parameters=match_parameters,
                    region=region, frame=frame)
        if res.match and (match_count == 0 or res.position == last_pos):
            match_count += 1
        else:
            match_count = 0
        last_pos = res.position
        if match_count == consecutive_matches:
            debug("Matched " + image.friendly_name)
            return res

    raise MatchTimeout(res.frame, image.friendly_name, timeout_secs)  # pylint:disable=undefined-loop-variable
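# A hedged usage sketch -- the key name and reference image are assumptions.
# Requiring several consecutive matches guards against a menu that is still
# animating into place.
stbt.press("KEY_EPG")
try:
    result = wait_for_match("guide-title.png", timeout_secs=10,
                            consecutive_matches=3)
    debug("Guide visible at position %r" % (result.position,))
except MatchTimeout:
    stbt.press("KEY_BACK")
    raise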
def test_measure_latency():
    data = numpy.ndarray(shape=(NUM_SAMPLES,), dtype=RECORD)

    for n, (frame, _) in enumerate(stbt.frames(50)):
        if n >= NUM_SAMPLES:
            break
        stbt_receive_time = numpy.array(
            [(frame.time, time.time())], dtype=OWN_TIMESTAMPS)
        timestamps = read_timestamps(frame)
        data[n] = merge_arrays([timestamps, stbt_receive_time], flatten=True,
                               usemask=False)

    # Sometimes we'll lose a row but no biggie
    numpy.savetxt("latency-test.txt", data[:n])
def measure_channel_change_time(key, mask):
    import google

    # start timer after key press
    stbt.press(key)
    start_time = time.time()

    assert stbt.wait_until(lambda: stbt.is_screen_black(mask=mask)), \
        "Screen never went black"

    for frame, _ in stbt.frames(timeout_secs=30):
        if not stbt.is_screen_black(mask=mask, frame=frame):
            end_time = frame.time
            break
    else:
        assert False, "Channel change didn't complete after 30s"

    google.GoogleSheet().record_measurement(
        start_time, "channel_change", {"duration": end_time - start_time})
def detect_match(image,
                 timeout_secs=10,
                 match_parameters=None,
                 region=Region.ALL,
                 frames=None):
    """Generator that yields a sequence of one `MatchResult` for each frame
    processed from the device-under-test's video stream.

    :param image: See `match`.

    :type timeout_secs: int or float or None
    :param timeout_secs:
        A timeout in seconds. After this timeout the iterator will be exhausted.
        If ``timeout_secs`` is ``None`` then the iterator will yield frames
        forever. Note that you can stop iterating (for example with ``break``)
        at any time.

    :param match_parameters: See `match`.
    :param region: See `match`.

    :type frames: Iterator[stbt.Frame]
    :param frames: An iterable of video-frames to analyse. Defaults to
        ``stbt.frames()``.
    """
    if frames is None:
        import stbt
        frames = stbt.frames(timeout_secs=timeout_secs)
    else:
        frames = limit_time(frames, timeout_secs)

    template = _load_image(image)

    debug("Searching for " + template.friendly_name)

    for frame in frames:
        result = match(template,
                       frame=frame,
                       match_parameters=match_parameters,
                       region=region)
        draw_on(frame,
                result,
                label="match(%r)" % os.path.basename(template.friendly_name))
        yield result
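# A hedged consumption sketch: because detect_match yields one MatchResult
# per frame, you can wait for something to *disappear*, which wait_for_match
# can't express directly. The image name is an assumption.
for result in detect_match("loading-spinner.png", timeout_secs=30):
    if not result.match:
        break
else:
    raise RuntimeError("Loading spinner still visible after 30 seconds")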
def analyse_colours_video(number=None):
    """RGB!"""
    errors_in_a_row = 0
    n = 0
    for frame, _ in stbt.frames():
        if number is not None and n >= number:
            return
        colour_hex = ""
        n = n + 1

        def read_hex(region, frame_=frame):
            return stbt.ocr(
                frame_,
                region,
                stbt.OcrMode.SINGLE_LINE,
                tesseract_config={"tessedit_char_whitelist": "#0123456789abcdef"},
                tesseract_user_patterns=["#\n\n\n\n\n\n"],
            ).replace(" ", "")

        # The colour is written above and below the rectangle because we want
        # to be sure that the top of the colour box is from the same frame as
        # the bottom.
        colour_hex = read_hex(stbt.Region(490, 100, 300, 70))
        colour_hex_bottom = read_hex(stbt.Region(490, 550, 300, 70))

        if (
            len(colour_hex) >= 7
            and colour_hex[0] == "#"
            and all(c in string.hexdigits for c in colour_hex[1:7])
            and colour_hex == colour_hex_bottom
        ):
            desired = numpy.array((int(colour_hex[1:3], 16),
                                   int(colour_hex[3:5], 16),
                                   int(colour_hex[5:7], 16)))
            colour = cv2.mean(frame[240:480, 520:760])
            colour = (colour[2], colour[1], colour[0])
            yield (n, desired, colour)
            errors_in_a_row = 0
        else:
            errors_in_a_row += 1
            if errors_in_a_row > 50:
                raise RuntimeError("Failed to find hexidecimal colour description")
def detect_motion(timeout_secs=10, noise_threshold=None, mask=None,
                  region=Region.ALL, frames=None):
    """Generator that yields a sequence of one `MotionResult` for each frame
    processed from the device-under-test's video stream.

    The `MotionResult` indicates whether any motion was detected -- that is,
    any difference between two consecutive frames.

    Use it in a ``for`` loop like this::

        for motionresult in stbt.detect_motion():
            ...

    In most cases you should use `wait_for_motion` instead.

    :type timeout_secs: int or float or None
    :param timeout_secs:
        A timeout in seconds. After this timeout the iterator will be exhausted.
        That is, a ``for`` loop like ``for m in detect_motion(timeout_secs=10)``
        will terminate after 10 seconds. If ``timeout_secs`` is ``None`` then
        the iterator will yield frames forever. Note that you can stop
        iterating (for example with ``break``) at any time.

    :param float noise_threshold:
        The amount of noise to ignore. This is only useful with noisy analogue
        video sources. Valid values range from 0 (all differences are
        considered noise; a value of 0 will never report motion) to 1.0 (any
        difference is considered motion).

        This defaults to 0.84. You can override the global default value by
        setting ``noise_threshold`` in the ``[motion]`` section of
        :ref:`.stbt.conf`.

    :type mask: str or `numpy.ndarray`
    :param mask:
        A black & white image that specifies which part of the image to search
        for motion. White pixels select the area to analyse; black pixels select
        the area to ignore. The mask must be the same size as the video frame.

        This can be a string (a filename that will be resolved as per
        `load_image`) or a single-channel image in OpenCV format.

    :type region: `Region`
    :param region:
        Only analyze the specified region of the video frame.

        If you specify both ``region`` and ``mask``, the mask must be the same
        size as the region.

    :type frames: Iterator[stbt.Frame]
    :param frames: An iterable of video-frames to analyse. Defaults to
        ``stbt.frames()``.

    | Added in v28: The ``region`` parameter.
    | Added in v29: The ``frames`` parameter.
    """
    if frames is None:
        import stbt
        frames = stbt.frames()

    frames = limit_time(frames, timeout_secs)  # pylint: disable=redefined-variable-type

    if noise_threshold is None:
        noise_threshold = get_config(
            'motion', 'noise_threshold', type_=float)

    debug("Searching for motion")

    if mask is None:
        mask = _ImageFromUser(None, None, None)
    else:
        mask = _load_image(mask, cv2.IMREAD_GRAYSCALE)
        debug("Using mask %s" % mask.friendly_name)

    frame = next(frames)

    region = Region.intersect(_image_region(frame), region)

    previous_frame_gray = cv2.cvtColor(crop(frame, region),
                                       cv2.COLOR_BGR2GRAY)
    if (mask.image is not None and
            mask.image.shape[:2] != previous_frame_gray.shape[:2]):
        raise ValueError(
            "The dimensions of the mask '%s' %s don't match the "
            "video frame %s" % (
                mask.friendly_name, mask.image.shape,
                previous_frame_gray.shape))

    for frame in frames:
        imglog = ImageLogger("detect_motion", region=region)
        imglog.imwrite("source", frame)
        imglog.set(roi=region, noise_threshold=noise_threshold)

        frame_gray = cv2.cvtColor(crop(frame, region), cv2.COLOR_BGR2GRAY)
        imglog.imwrite("gray", frame_gray)
        imglog.imwrite("previous_frame_gray", previous_frame_gray)

        absdiff = cv2.absdiff(frame_gray, previous_frame_gray)
        previous_frame_gray = frame_gray
        imglog.imwrite("absdiff", absdiff)

        if mask.image is not None:
            absdiff = cv2.bitwise_and(absdiff, mask.image)
            imglog.imwrite("mask", mask.image)
            imglog.imwrite("absdiff_masked", absdiff)

        _, thresholded = cv2.threshold(
            absdiff, int((1 - noise_threshold) * 255), 255,
            cv2.THRESH_BINARY)
        eroded = cv2.erode(
            thresholded,
            cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
        imglog.imwrite("absdiff_threshold", thresholded)
        imglog.imwrite("absdiff_threshold_erode", eroded)

        out_region = pixel_bounding_box(eroded)
        if out_region:
            # Undo cv2.erode above:
            out_region = out_region.extend(x=-1, y=-1)
            # Undo crop:
            out_region = out_region.translate(region.x, region.y)

        motion = bool(out_region)

        result = MotionResult(getattr(frame, "time", None), motion,
                              out_region, frame)
        draw_on(frame, result, label="detect_motion()")
        debug("%s found: %s" % (
            "Motion" if motion else "No motion", str(result)))
        _log_motion_image_debug(imglog, result)
        yield result
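# A hedged consumption sketch: a MotionResult is truthy when motion was
# detected (wait_for_motion below relies on this), so the generator composes
# naturally with ordinary Python iteration.
results = list(detect_motion(timeout_secs=10))
debug("Motion in %d of %d frames" % (
    sum(1 for r in results if r), len(results)))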
def wait_for_motion(
        timeout_secs=10, consecutive_frames=None,
        noise_threshold=None, mask=None, region=Region.ALL, frames=None):
    """Search for motion in the device-under-test's video stream.

    "Motion" is difference in pixel values between two consecutive frames.

    :type timeout_secs: int or float or None
    :param timeout_secs:
        A timeout in seconds. This function will raise `MotionTimeout` if no
        motion is detected within this time.

    :type consecutive_frames: int or str
    :param consecutive_frames:
        Considers the video stream to have motion if there were differences
        between the specified number of consecutive frames. This can be:

        * a positive integer value, or
        * a string in the form "x/y", where "x" is the number of frames with
          motion detected out of a sliding window of "y" frames.

        This defaults to "10/20". You can override the global default value by
        setting ``consecutive_frames`` in the ``[motion]`` section of
        :ref:`.stbt.conf`.

    :param float noise_threshold: See `detect_motion`.

    :param mask: See `detect_motion`.

    :param region: See `detect_motion`.

    :param frames: See `detect_motion`.

    :returns: `MotionResult` when motion is detected. The MotionResult's
        ``time`` and ``frame`` attributes correspond to the first frame in
        which motion was detected.
    :raises: `MotionTimeout` if no motion is detected after ``timeout_secs``
        seconds.

    | Added in v28: The ``region`` parameter.
    | Added in v29: The ``frames`` parameter.
    """
    if frames is None:
        import stbt
        frames = stbt.frames()

    if consecutive_frames is None:
        consecutive_frames = get_config('motion', 'consecutive_frames')

    consecutive_frames = str(consecutive_frames)
    if '/' in consecutive_frames:
        motion_frames = int(consecutive_frames.split('/')[0])
        considered_frames = int(consecutive_frames.split('/')[1])
    else:
        motion_frames = int(consecutive_frames)
        considered_frames = int(consecutive_frames)

    if motion_frames > considered_frames:
        raise ConfigurationError(
            "`motion_frames` exceeds `considered_frames`")

    debug("Waiting for %d out of %d frames with motion" % (
        motion_frames, considered_frames))

    if mask is None:
        mask = _ImageFromUser(None, None, None)
    else:
        mask = _load_image(mask, cv2.IMREAD_GRAYSCALE)
        debug("Using mask %s" % mask.friendly_name)

    matches = deque(maxlen=considered_frames)
    motion_count = 0
    last_frame = None
    for res in detect_motion(
            timeout_secs, noise_threshold, mask, region, frames):
        motion_count += bool(res)
        if len(matches) == matches.maxlen:
            motion_count -= bool(matches.popleft())
        matches.append(res)
        if motion_count >= motion_frames:
            debug("Motion detected.")
            # We want to return the first True motion result as this is when
            # the motion actually started.
            for result in matches:
                if result:
                    return result
            assert False, ("Logic error in wait_for_motion: This code "
                           "should never be reached")
        last_frame = res.frame

    raise MotionTimeout(last_frame, mask.friendly_name, timeout_secs)
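# A hedged usage sketch -- the key name is an assumption. Start playback and
# verify that video motion begins; the returned result's `time` is the first
# frame in which motion was detected.
stbt.press("KEY_PLAY")
try:
    first = wait_for_motion(timeout_secs=20, consecutive_frames="10/20")
    debug("Motion started at t=%s in %r" % (first.time, first.region))
except MotionTimeout:
    raise RuntimeError("Playback didn't start within 20 seconds")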