Example #1
def _find_batteries(im):
    blue = extract_color(im, 216 / 2, (100, 255), (50, 150))
    green = extract_color(im, 105 / 2, (200, 255), (200, 255))
    color = blue + green
    structuring_element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 15))
    # structuring_element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    color = cv2.morphologyEx(color, cv2.MORPH_CLOSE, structuring_element1)
    # color = cv2.morphologyEx(color, cv2.MORPH_OPEN, structuring_element2)

    # color = scale(color, 0.25)
    # im = scale(im, 0.25)
    # show(color)

    contours, _ = cv2.findContours(color.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # show(get_drawn_contours(color, contours, True))
    contours = [contour_bounding_box_for_contour(c) for c in contours]
    height, width = im.shape[:2]

    def is_large_enough(contour):
        x, y, w, h = cv2.boundingRect(contour)
        return w >= width * 0.1 and h >= height * 0.1

    contours = [c for c in contours if is_large_enough(c)]

    return [four_point_transform(im, c) for c in contours]
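Every snippet on this page calls an extract_color(im, hue, saturation, value) helper that isn't included here. As a rough sketch (an assumption about its behaviour, not the project's actual implementation), it presumably converts the BGR frame to HSV and thresholds it with cv2.inRange, accepting either a single OpenCV hue (0-179) with a small tolerance or an explicit (low, high) tuple, plus (low, high) ranges for saturation and value:

import cv2
import numpy as np

def extract_color(im, hue, saturation, value, hue_tolerance=5):
    # Hypothetical sketch: binary mask of pixels inside the given HSV ranges.
    hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    if isinstance(hue, tuple):
        hue_low, hue_high = hue
    else:
        hue_low, hue_high = max(0, hue - hue_tolerance), min(179, hue + hue_tolerance)
    lower = (hue_low, saturation[0], value[0])
    upper = (hue_high, saturation[1], value[1])
    return cv2.inRange(hsv, lower, upper)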
Example #2
def get_connections(im):
    """
    Returns a list of tuples, each with (color, destination, (to_click_x, to_click_y)).
    The list is ordered from top to bottom.
    """
    blue = extract_color(im, 115, (100, 255), (0, 255))
    red = extract_color(im, 5, (100, 255), (0, 255))
    black = extract_color(im, (0, 255), (0, 255), (0, 5))
    colors = (
        (Color.BLUE, blue),
        (Color.BLACK, black),
        (Color.RED, red),
    )

    mask = blue.copy()
    height, width = mask.shape

    rows = (0.33, 0.48, 0.62)
    cols = (0.30, 0.55)
    row_pxs = [int(row_percent * height) for row_percent in rows]
    start_col_px, end_col_px = [int(col_percent * width) for col_percent in cols]

    def _get_output_for_connection(color, start_row_idx, end_row_idx):
        """Returns a tuple with (color, destination, (to_click_x, to_click_y))"""
        # Use the start_row to find a point to click to cut the wire.
        to_click = (start_col_px, row_pxs[start_row_idx])
        # Figure out which letter we're connecting to based on the end row.
        destination = ORDERED_DESTINATIONS[end_row_idx]
        return color, destination, to_click

    output = []
    for start_row, start_row_px in enumerate(row_pxs):
        current_output = None
        for end_row, end_row_px in enumerate(row_pxs):
            # Reset the mask
            mask[:] = 0
            # Draw a line where the connection should be on the mask
            cv2.line(mask, (start_col_px, start_row_px), (end_col_px, end_row_px), 255, 15)
            # Convert the mask to be usable in a masked_array
            bool_mask = np.invert(mask.astype(bool))
            # Only consider this a wire if the mask is sufficiently filled in (66% threshold)
            activation_threshold = bool_mask.sum() * .66

            for color, color_mat in colors:
                activated_amount = np.ma.masked_array(color_mat, mask=bool_mask).sum()
                if activated_amount > activation_threshold:
                    new_output = _get_output_for_connection(color, start_row, end_row)
                    new_output_and_activated = (new_output, activated_amount)
                    if current_output is None:
                        current_output = new_output_and_activated
                    else:
                        current_output = sorted((current_output, new_output_and_activated), key=lambda x: x[1])[-1]
                # combined = np.zeros_like(im)
                # combined[:, :, 0] = color_mat
                # combined[:, :, 2] = mask
                # show(combined)
        if current_output is not None:
            print(current_output)
            output.append(current_output[0])
    return output
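The nested loops above test every (start, end) pairing by drawing the candidate connection as a thick line on a scratch mask and keeping the colour whose pixels best cover that line. A standalone illustration of the same idea, written as a plain coverage fraction rather than the masked-array sum used above (all values below are made up for the demonstration):

import cv2
import numpy as np

# Stand-in for one of the binary masks returned by extract_color.
color_mat = np.zeros((100, 200), dtype=np.uint8)
cv2.line(color_mat, (30, 33), (110, 62), 255, 12)

# Draw the candidate connection on a blank mask of the same size.
candidate = np.zeros_like(color_mat)
cv2.line(candidate, (30, 33), (110, 62), 255, 15)

# Fraction of the candidate line covered by the colour mask.
overlap = np.count_nonzero(cv2.bitwise_and(color_mat, candidate))
coverage = overlap / float(np.count_nonzero(candidate))
print(coverage > 0.66)  # True here, so this start/end pair would be kept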
Example #3
def _get_button_images(im):
    im_mono = extract_color(im, (0, 180), (0, 120), (0, 120))
    contours, hierarchy = cv2.findContours(im_mono, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = [cv2.approxPolyDP(c, 6, True) for c in contours]
    midpoint_y = im.shape[1] / 2
    contours = [c for c in contours if len(c) == 4 and cv2.boundingRect(c)[1] > midpoint_y and cv2.isContourConvex(c)]
    button_box = sorted(contours, key=cv2.contourArea)[-1]
    button_box = button_box.reshape((4, 2))
    button_im = four_point_transform(im, button_box)

    button_im_mono = extract_color(button_im, 18, (50, 100), (175, 255))
    contours, hierarchy = cv2.findContours(button_im_mono, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # There are 4 buttons, which will be the largest 4 contours
    contours = sorted(contours, key=cv2.contourArea)[-4:]
    contour_rects = [cv2.boundingRect(c) for c in contours]
    # Sort from left to right
    contour_rects = sorted(contour_rects, key=lambda rect: rect[0])

    buttons = []
    for x, y, w, h in contour_rects:
        button = four_point_transform(button_im, np.array(((x, y), (x + w, y), (x, y + h), (x + w, y + h))), -6)
        button = extract_color(button, 18, (50, 100), (175, 255))
        buttons.append(button)

    return buttons
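Most of these examples warp a detected quadrilateral into a flat image with four_point_transform. The project's own version (including its extra margin / margin_percent arguments) isn't shown; a minimal sketch along the lines of the widely used perspective-warp helper, offered as an assumption:

import cv2
import numpy as np

def four_point_transform(im, points):
    # Hypothetical sketch, margin handling omitted: warp the quadrilateral
    # described by four (x, y) points into an axis-aligned rectangle.
    pts = np.array(points, dtype=np.float32).reshape(4, 2)
    # Order the corners: top-left, top-right, bottom-right, bottom-left.
    s = pts.sum(axis=1)
    d = np.diff(pts, axis=1).ravel()
    tl, br = pts[np.argmin(s)], pts[np.argmax(s)]
    tr, bl = pts[np.argmin(d)], pts[np.argmax(d)]
    src = np.array([tl, tr, br, bl], dtype=np.float32)
    width = int(max(np.linalg.norm(br - bl), np.linalg.norm(tr - tl)))
    height = int(max(np.linalg.norm(tr - br), np.linalg.norm(tl - bl)))
    dst = np.array([(0, 0), (width - 1, 0), (width - 1, height - 1), (0, height - 1)],
                   dtype=np.float32)
    matrix = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(im, matrix, (width, height))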
Example #4
def _get_button_images(im):
    im_mono = extract_color(im, (0, 180), (0, 120), (0, 120))
    contours, hierarchy = cv2.findContours(im_mono, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    contours = [cv2.approxPolyDP(c, 6, True) for c in contours]
    midpoint_y = im.shape[1] / 2
    contours = [
        c for c in contours if len(c) == 4
        and cv2.boundingRect(c)[1] > midpoint_y and cv2.isContourConvex(c)
    ]
    button_box = sorted(contours, key=cv2.contourArea)[-1]
    button_box = button_box.reshape((4, 2))
    button_im = four_point_transform(im, button_box)

    button_im_mono = extract_color(button_im, 18, (50, 100), (175, 255))
    contours, hierarchy = cv2.findContours(button_im_mono, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    # There are 4 buttons, which will be the largest 4 contours
    contours = sorted(contours, key=cv2.contourArea)[-4:]
    contour_rects = [cv2.boundingRect(c) for c in contours]
    # Sort from left to right
    contour_rects = sorted(contour_rects, key=lambda rect: rect[0])

    buttons = []
    for x, y, w, h in contour_rects:
        button = four_point_transform(
            button_im,
            np.array(((x, y), (x + w, y), (x, y + h), (x + w, y + h))), -6)
        button = extract_color(button, 18, (50, 100), (175, 255))
        buttons.append(button)

    return buttons
Example #5
def _get_button_text(im, tesseract):
    im = get_subset(im, _TEXT_X_PERCENTS, _TEXT_Y_PERCENTS)
    black_text = extract_color(im, (0, 255), (0, 255), (0, 65))
    white_text = extract_color(im, (0, 255), (0, 40), (230, 255))
    if black_text.any():
        text_image = black_text
    else:
        assert white_text.any(), "Neither black nor white text has any pixels."
        text_image = white_text

    tesseract.SetImage(Image.fromarray(text_image))
    word = tesseract.GetUTF8Text().strip()
    # OCR misreads a lot; just take the label with the closest edit distance
    return sorted(ButtonLabel, key=lambda s: editdistance.eval(s.value, word))[0]
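get_subset and the *_X_PERCENTS / *_Y_PERCENTS constants are also not part of the snippet. Judging from calls such as get_subset(im, (80, 95), (5, 20)) elsewhere on this page, it appears to crop by percentage boundaries; a guess at its behaviour:

def get_subset(im, x_percents, y_percents):
    # Hypothetical sketch: crop using (low, high) percentage boundaries along x and y.
    height, width = im.shape[:2]
    x1, x2 = [int(width * p / 100.0) for p in x_percents]
    y1, y2 = [int(height * p / 100.0) for p in y_percents]
    return im[y1:y2, x1:x2]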
Example #6
def _get_button_text(im, tesseract):
    im = get_subset(im, _TEXT_X_PERCENTS, _TEXT_Y_PERCENTS)
    black_text = extract_color(im, (0, 255), (0, 255), (0, 65))
    white_text = extract_color(im, (0, 255), (0, 40), (230, 255))
    if black_text.any():
        text_image = black_text
    else:
        assert white_text.any(
        ), "Neither black nor white text has any pixels."
        text_image = white_text

    tesseract.SetImage(Image.fromarray(text_image))
    word = tesseract.GetUTF8Text().strip()
    # OCR misreads a lot; just take the label with the closest edit distance
    return sorted(ButtonLabel,
                  key=lambda s: editdistance.eval(s.value, word))[0]
Example #7
def _get_screen_image(im):
    im_mono = extract_color(im, 76, (50, 150), (50, 150))
    # show(im_mono)
    contours, hierarchy = cv2.findContours(im_mono, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)

    midpoint_y = im.shape[1] * 2 / 3
    contours_filtered = []
    for contour in contours:
        # contour = cv2.approxPolyDP(contour, 2, True)
        x, y, w, h = cv2.boundingRect(contour)
        # if y + h < midpoint_y and cv2.isContourConvex(contour):
        if y + h < midpoint_y:
            contours_filtered.append(contour)

    contours = sorted(contours_filtered, key=cv2.contourArea)[-4:]
    boxes = [cv2.boundingRect(c) for c in contours]
    x1 = min(x for x, y, w, h in boxes)
    y1 = min(y for x, y, w, h in boxes)
    x2 = max(x + w for x, y, w, h in boxes)
    y2 = max(y + h for x, y, w, h in boxes)
    points = np.array(((x1, y1), (x1, y2), (x2, y1), (x2, y2)))
    screen = four_point_transform(im, points)
    screen_mono = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
    _, screen_mono = cv2.threshold(screen_mono, 245, 255, cv2.THRESH_BINARY)

    return screen_mono
Example #8
def get_button_locations(im):
    mono = extract_color(im, 120, (0, 100), (0, 100))

    contours, hierarchy = cv2.findContours(mono, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = [cv2.approxPolyDP(contour, 6, True) for contour in contours]

    height, width = im.shape[:2]

    def is_in_middle_quarter_vertically_or_horizontally(contour_to_check):
        x, y, w, h = cv2.boundingRect(contour_to_check)
        return (y > height / 4 and y + h < height * 3 / 4) or \
               (x > width / 4 and x + w < width * 3 / 4)

    contours = [c for c in contours if is_in_middle_quarter_vertically_or_horizontally(c)]
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:4]

    assert len(contours) == 4, "Expected to find 4 buttons, found %s" % len(contours)
    centers = []
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        centers.append((x + (w / 2), y + (h / 2)))
    x_sort = sorted(centers, key=lambda center: center[0])
    y_sort = sorted(centers, key=lambda center: center[1])
    top = y_sort[0]
    bottom = y_sort[-1]
    left = x_sort[0]
    right = x_sort[-1]

    assert top != bottom != left != right, "Expected each point to be different"

    return top, right, bottom, left
Example #9
def _get_screen_image(im):
    im_mono = extract_color(im, 76, (50, 150), (50, 150))
    # show(im_mono)
    contours, hierarchy = cv2.findContours(im_mono, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    midpoint_y = im.shape[1] * 2 / 3
    contours_filtered = []
    for contour in contours:
        # contour = cv2.approxPolyDP(contour, 2, True)
        x, y, w, h = cv2.boundingRect(contour)
        # if y + h < midpoint_y and cv2.isContourConvex(contour):
        if y + h < midpoint_y:
            contours_filtered.append(contour)

    contours = sorted(contours_filtered, key=cv2.contourArea)[-4:]
    boxes = [cv2.boundingRect(c) for c in contours]
    x1 = min(x for x, y, w, h in boxes)
    y1 = min(y for x, y, w, h in boxes)
    x2 = max(x + w for x, y, w, h in boxes)
    y2 = max(y + h for x, y, w, h in boxes)
    points = np.array(((x1, y1), (x1, y2), (x2, y1), (x2, y2)))
    screen = four_point_transform(im, points)
    screen_mono = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
    _, screen_mono = cv2.threshold(screen_mono, 245, 255, cv2.THRESH_BINARY)

    return screen_mono
Example #10
def _get_leds_are_lit(im):
    leds_are_lit = []
    for i in range(len(_TOP_X_BOUNDARIES) - 1):
        led = get_subset(im, _TOP_X_BOUNDARIES[i:i + 2], _LED_Y_BOUNDARIES)
        lit_led = extract_color(led, 51 / 2, (40, 90), (220, 255))
        leds_are_lit.append(lit_led.any())
        # show(lit_led)
    return leds_are_lit
Example #11
def _get_wire_positions(im,
                        wire_color,
                        hue,
                        saturation=(150, 255),
                        value=(100, 255)):
    color = extract_color(im, hue, saturation, value)
    contours = get_contours(color, close_and_open=False)
    return [(get_center_for_contour(c), wire_color) for c in contours]
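get_contours (with its close_and_open flag) is another helper these snippets assume. A plausible sketch, mirroring the explicit morphology-plus-findContours sequence spelled out in _find_batteries in Example #1:

import cv2

def get_contours(mono, close_and_open=True):
    # Hypothetical sketch: external contours of a binary mask, optionally
    # cleaned up first with a morphological close followed by an open.
    if close_and_open:
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
        mono = cv2.morphologyEx(mono, cv2.MORPH_CLOSE, kernel)
        mono = cv2.morphologyEx(mono, cv2.MORPH_OPEN, kernel)
    contours, _ = cv2.findContours(mono, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return contours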
Example #12
def _get_leds_are_lit(im):
    leds_are_lit = []
    for i in range(len(_TOP_X_BOUNDARIES) - 1):
        led = get_subset(im, _TOP_X_BOUNDARIES[i:i + 2], _LED_Y_BOUNDARIES)
        lit_led = extract_color(led, 51 / 2, (40, 90), (220, 255))
        leds_are_lit.append(lit_led.any())
        # show(lit_led)
    return leds_are_lit
Example #13
def _get_button_locations(im):
    im_mono = extract_color(im, 18, (50, 100), (175, 255))
    contours, hierarchy = cv2.findContours(im_mono, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # There are four buttons, which will be the largest four contours
    contours = sorted(contours, key=cv2.contourArea)[-4:]
    centers = [get_center_for_contour(c) for c in contours]
    # Sort them from left to right
    centers = sorted(centers, key=lambda center: center[0])
    return centers
Example #14
def get_down_button(im):
    im_mono = extract_color(im, 18, (50, 100), (175, 255))
    contours, hierarchy = cv2.findContours(im_mono, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # There are two buttons, which will be the largest two contours
    contours = sorted(contours, key=cv2.contourArea)[-2:]
    centers = [get_center_for_contour(c) for c in contours]
    # We want the lower one on screen, which is the larger y value
    centers = sorted(centers, key=lambda center: center[1])
    return centers[-1]
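get_center_for_contour is likewise not shown. Since get_button_locations elsewhere on this page derives centres from bounding rectangles, it is probably something along these lines (an assumption):

import cv2

def get_center_for_contour(contour):
    # Hypothetical sketch: centre of the contour's upright bounding box.
    x, y, w, h = cv2.boundingRect(contour)
    return x + w // 2, y + h // 2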
Example #15
def _get_button_locations(im):
    im_mono = extract_color(im, 18, (50, 100), (175, 255))
    contours, hierarchy = cv2.findContours(im_mono, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    # There are four buttons, which will be the largest four contours
    contours = sorted(contours, key=cv2.contourArea)[-4:]
    centers = [get_center_for_contour(c) for c in contours]
    # Sort them from left to right
    centers = sorted(centers, key=lambda center: center[0])
    return centers
Example #16
def get_strip_color(im):
    im = get_subset(im, _STRIP_X_PERCENTS, _STRIP_Y_PERCENTS)
    colors = [color for mat, color in (
        (extract_color(im, (0, 180), (0, 5), (190, 255)), StripColor.WHITE),
        (extract_color_2(im, 50, 94, 84), StripColor.YELLOW),
        (extract_color_2(im, 0, 82, 76), StripColor.RED),
        (extract_color_2(im, 218, 85, 79), StripColor.BLUE),
    ) if mat.any()]
    assert len(colors) == 1, "Strip does not look like one color"
    return colors[0]
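get_strip_color also calls an extract_color_2(im, hue, saturation, value) variant whose arguments read like a colour-picker HSV triple (hue 0-360, saturation and value 0-100) rather than ranges. A speculative sketch of such a helper, matching pixels within a fixed tolerance of the target colour (the tolerance values are illustrative only, and hue wrap-around near red is not handled):

import cv2
import numpy as np

def extract_color_2(im, hue_degrees, saturation_percent, value_percent):
    # Hypothetical sketch: convert a colour-picker style HSV target to OpenCV's
    # 0-179 / 0-255 scales and match pixels within a tolerance around it.
    target = np.array([hue_degrees / 2.0,
                       saturation_percent * 255.0 / 100.0,
                       value_percent * 255.0 / 100.0])
    tolerance = np.array([10.0, 60.0, 60.0])
    lower = np.clip(target - tolerance, 0, 255).astype(np.uint8)
    upper = np.clip(target + tolerance, 0, 255).astype(np.uint8)
    hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    return cv2.inRange(hsv, lower, upper)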
Example #17
def get_down_button(im):
    im_mono = extract_color(im, 18, (50, 100), (175, 255))
    contours, hierarchy = cv2.findContours(im_mono, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    # There are two buttons, which will be the largest two contours
    contours = sorted(contours, key=cv2.contourArea)[-2:]
    centers = [get_center_for_contour(c) for c in contours]
    # We want the lower one on screen, which is the larger y value
    centers = sorted(centers, key=lambda center: center[1])
    return centers[-1]
Example #18
def _extract_side(im, is_bottom):
    if is_bottom:
        color = extract_color(im, 32 / 2, (120, 255), (100, 220))
    else:
        color = extract_color(im, 32 / 2, (100, 255), (100, 255))

    structuring_element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
    structuring_element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    color = cv2.morphologyEx(color, cv2.MORPH_CLOSE, structuring_element1)
    color = cv2.morphologyEx(color, cv2.MORPH_OPEN, structuring_element2)

    # show(im)
    # show(color)

    height, width = color.shape[:2]
    contours, _ = cv2.findContours(color.copy(), cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    # contours = [cv2.approxPolyDP(c, 0.03 * cv2.arcLength(c, True), True) for c in contours]
    contours = [
        cv2.approxPolyDP(c, 0.015 * cv2.arcLength(c, True), True)
        for c in contours
    ]
    # show(get_drawn_contours(c2,  contours, True))
    # for c in contours:
    #     print len(c)
    #     show(get_drawn_contours(c2, [c], True))
    contours = [c for c in contours if len(c) == 4]
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:2]
    a, b = contours
    points = list(a) + list(b)

    tl = point_closest_to(points, 0, 0)
    tr = point_closest_to(points, width, 0)
    bl = point_closest_to(points, 0, height)
    br = point_closest_to(points, width, height)

    contour = np.array([tl, tr, br, bl])
    contour = contour.reshape((4, 2))

    # show(get_drawn_contours(c2, [contour], True))

    return four_point_transform(im, contour, margin_percent=5)
Example #19
def _get_has_stars(im):
    has_stars = []
    for i in range(len(_BOTTOM_X_BOUNDARIES) - 1):
        star = get_subset(im, _BOTTOM_X_BOUNDARIES[i:i + 2], _STAR_Y_BOUNDARIES)
        has_star = extract_color(star, 33 / 2, (75, 125), (0, 70))
        # show(has_star)
        w, h = get_dimens(star)
        star_ratio = float(cv2.countNonZero(has_star)) / (w * h)
        # print star_ratio
        has_stars.append(star_ratio > _STAR_RATIO_THRESHOLD)
    return has_stars
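get_dimens appears throughout; from how its result is used (transposing when the height exceeds the width, computing w * h areas), it most likely returns (width, height):

def get_dimens(im):
    # Hypothetical sketch: (width, height) of an image, regardless of channel count.
    height, width = im.shape[:2]
    return width, height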
Example #20
def get_clock_time_from_full_screenshot(full_screenshot,
                                        current_module_position,
                                        screenshot_helper):
    clock_im = _get_clock_image_from_full_screenshot(full_screenshot,
                                                     current_module_position,
                                                     screenshot_helper)
    assert clock_im is not None, "Unable to find clock"
    clock_bg = extract_color(clock_im, (0, 180), (0, 255), (0, 50))

    contours = [
        c for c in simplify_contours(get_contours(clock_bg), 0.001)
        if len(c) == 4
    ]
    contour = max(contours, key=cv2.contourArea)

    display = four_point_transform(clock_im, contour)

    digits_im = extract_color(display, 0, (250, 255), (250, 255))

    digit_images = [
        get_subset(digits_im,
                   _CLOCK_DIGIT_X_PERCENTS[digit_index:digit_index + 2],
                   (0, 100))
        for digit_index in range(len(_CLOCK_DIGIT_X_PERCENTS) - 1)
    ]
    # Determine if there is a colon or period separator.
    is_colon = get_subset(digit_images[2], (0, 100), (0, 50)).any()
    if is_colon:
        minutes_images = digit_images[:2]
        seconds_images = digit_images[-2:]
    else:
        minutes_images = []
        seconds_images = digit_images[:2]

    minutes_digits = [_get_digit_from_image(im) for im in minutes_images]
    seconds_digits = [_get_digit_from_image(im) for im in seconds_images]

    if not minutes_digits:
        minutes_digits = [0]

    return minutes_digits, seconds_digits
Example #21
def _get_has_stars(im):
    has_stars = []
    for i in range(len(_BOTTOM_X_BOUNDARIES) - 1):
        star = get_subset(im, _BOTTOM_X_BOUNDARIES[i:i + 2],
                          _STAR_Y_BOUNDARIES)
        has_star = extract_color(star, 33 / 2, (75, 125), (0, 70))
        # show(has_star)
        w, h = get_dimens(star)
        star_ratio = float(cv2.countNonZero(has_star)) / (w * h)
        # print star_ratio
        has_stars.append(star_ratio > _STAR_RATIO_THRESHOLD)
    return has_stars
Example #22
def get_strip_color(im):
    im = get_subset(im, _STRIP_X_PERCENTS, _STRIP_Y_PERCENTS)
    colors = [
        color for mat, color in (
            (extract_color(im, (0, 180), (0, 5), (190, 255)),
             StripColor.WHITE),
            (extract_color_2(im, 50, 94, 84), StripColor.YELLOW),
            (extract_color_2(im, 0, 82, 76), StripColor.RED),
            (extract_color_2(im, 218, 85, 79), StripColor.BLUE),
        ) if mat.any()
    ]
    assert len(colors) == 1, "Strip does not look like one color"
    return colors[0]
Example #23
def _extract_side(im, is_bottom):
    if is_bottom:
        color = extract_color(im, 32 / 2, (120, 255), (100, 220))
    else:
        color = extract_color(im, 32 / 2, (100, 255), (100, 255))

    structuring_element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
    structuring_element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    color = cv2.morphologyEx(color, cv2.MORPH_CLOSE, structuring_element1)
    color = cv2.morphologyEx(color, cv2.MORPH_OPEN, structuring_element2)

    # show(im)
    # show(color)

    height, width = color.shape[:2]
    contours, _ = cv2.findContours(color.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # contours = [cv2.approxPolyDP(c, 0.03 * cv2.arcLength(c, True), True) for c in contours]
    contours = [cv2.approxPolyDP(c, 0.015 * cv2.arcLength(c, True), True) for c in contours]
    # show(get_drawn_contours(c2,  contours, True))
    # for c in contours:
    #     print len(c)
    #     show(get_drawn_contours(c2, [c], True))
    contours = [c for c in contours if len(c) == 4]
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:2]
    a, b = contours
    points = list(a) + list(b)

    tl = point_closest_to(points, 0, 0)
    tr = point_closest_to(points, width, 0)
    bl = point_closest_to(points, 0, height)
    br = point_closest_to(points, width, height)

    contour = np.array([tl, tr, br, bl])
    contour = contour.reshape((4, 2))

    # show(get_drawn_contours(c2, [contour], True))

    return four_point_transform(im, contour, margin_percent=5)
Example #24
def get_has_parallel_port_for_side(side):
    side_w, side_h = get_dimens(side)
    if side_h > side_w:
        side = cv2.transpose(side)
        side_w, side_h = get_dimens(side)
    color = extract_color(side, 337 / 2, (75, 175), (175, 255))
    contours = sorted(get_contours(color), key=cv2.contourArea)
    if not contours:
        return False
    contour = contours[-1]
    x, y, w, h = cv2.boundingRect(contour)
    width_percent = float(w) / side_w
    height_percent = float(h) / side_h
    return width_percent > _WIDTH_THRESHOLD and height_percent > _HEIGHT_THRESHOLD
Example #25
def get_has_parallel_port_for_side(side):
    side_w, side_h = get_dimens(side)
    if side_h > side_w:
        side = cv2.transpose(side)
        side_w, side_h = get_dimens(side)
    color = extract_color(side, 337 / 2, (75, 175), (175, 255))
    contours = sorted(get_contours(color), key=cv2.contourArea)
    if not contours:
        return False
    contour = contours[-1]
    x, y, w, h = cv2.boundingRect(contour)
    width_percent = float(w) / side_w
    height_percent = float(h) / side_h
    return width_percent > _WIDTH_THRESHOLD and height_percent > _HEIGHT_THRESHOLD
Example #26
def _get_count_for_subsection(battery):
    b_height, b_width = battery.shape[:2]
    if b_height > b_width:
        b_height, b_width = b_width, b_height
        # Rotate battery to be left-right oriented
        battery = cv2.transpose(battery)

    blue = extract_color(battery, 216 / 2, (100, 255), (50, 150))

    cropped = blue[b_height/3:2*b_height/3, b_width/3:2*b_width/3]

    if cropped.any():
        return 2
    else:
        return 1
Example #27
def get_clock_time_from_full_screenshot(full_screenshot,
                                        current_module_position,
                                        screenshot_helper):
    clock_im = _get_clock_image_from_full_screenshot(full_screenshot,
                                                     current_module_position,
                                                     screenshot_helper)
    assert clock_im is not None, "Unable to find clock"
    clock_bg = extract_color(clock_im, (0, 180), (0, 255), (0, 50))

    contours = [c for c in simplify_contours(get_contours(clock_bg), 0.001) if len(c) == 4]
    contour = max(contours, key=cv2.contourArea)

    display = four_point_transform(clock_im, contour)

    digits_im = extract_color(display, 0, (250, 255), (250, 255))

    digit_images = [
        get_subset(digits_im, _CLOCK_DIGIT_X_PERCENTS[digit_index:digit_index + 2], (0, 100))
        for digit_index in range(len(_CLOCK_DIGIT_X_PERCENTS) - 1)
    ]
    # Determine if there is a colon or period separator.
    is_colon = get_subset(digit_images[2], (0, 100), (0, 50)).any()
    if is_colon:
        minutes_images = digit_images[:2]
        seconds_images = digit_images[-2:]
    else:
        minutes_images = []
        seconds_images = digit_images[:2]

    minutes_digits = [_get_digit_from_image(im) for im in minutes_images]
    seconds_digits = [_get_digit_from_image(im) for im in seconds_images]

    if not minutes_digits:
        minutes_digits = [0]

    return minutes_digits, seconds_digits
Example #28
def _get_largest_contour_area_ratio_for_color(im, color):
    hue_out_of_360 = {
        LitSquare.YELLOW: 60,
        LitSquare.BLUE: 193,
        LitSquare.RED: 19,
        LitSquare.GREEN: 149,
    }[color]

    color_mat = extract_color(im, hue_out_of_360 / 2, (100, 255), (225, 255))
    contour_areas = [cv2.contourArea(c) for c in get_contours(color_mat)]
    if contour_areas:
        largest_area = sorted(contour_areas)[-1]
    else:
        largest_area = 0

    w, h = get_dimens(im)
    im_area = w * h
    area_ratio = float(largest_area) / im_area
    return area_ratio, color
Example #29
def _get_largest_contour_area_ratio_for_color(im, color):
    hue_out_of_360 = {
        LitSquare.YELLOW: 60,
        LitSquare.BLUE: 193,
        LitSquare.RED: 19,
        LitSquare.GREEN: 149,
    }[color]

    color_mat = extract_color(im, hue_out_of_360/2, (100, 255), (225, 255))
    contour_areas = [cv2.contourArea(c) for c in get_contours(color_mat)]
    if contour_areas:
        largest_area = sorted(contour_areas)[-1]
    else:
        largest_area = 0

    w, h = get_dimens(im)
    im_area = w * h
    area_ratio = float(largest_area) / im_area
    return area_ratio, color
Example #30
def find_arrows(im):
    mono = extract_color(im, 120, (50, 100), (50, 100))

    contours, hierarchy = cv2.findContours(mono, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = [cv2.approxPolyDP(contour, 6, True) for contour in contours]

    one_quarter_height = im.shape[0] / 4
    three_quarter_height = im.shape[0] * 3 / 4

    def is_in_middle_quarter_vertically(contour_to_check):
        x, y, w, h = cv2.boundingRect(contour_to_check)
        return y > one_quarter_height and y + h < three_quarter_height

    contours = [c for c in contours if cv2.isContourConvex(c) and is_in_middle_quarter_vertically(c)]
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:2]

    centers = [get_center_for_contour(c) for c in contours]
    centers = sorted(centers, key=lambda center: center[0])

    return centers
Example #31
def get_button_locations(im):
    mono = extract_color(im, 120, (0, 100), (0, 100))

    contours, hierarchy = cv2.findContours(mono, cv2.RETR_LIST,
                                           cv2.CHAIN_APPROX_SIMPLE)
    contours = [cv2.approxPolyDP(contour, 6, True) for contour in contours]

    height, width = im.shape[:2]

    def is_in_middle_quarter_vertically_or_horizontally(contour_to_check):
        x, y, w, h = cv2.boundingRect(contour_to_check)
        return (y > height / 4 and y + h < height * 3 / 4) or \
               (x > width / 4 and x + w < width * 3 / 4)

    contours = [
        c for c in contours
        if is_in_middle_quarter_vertically_or_horizontally(c)
    ]
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:4]

    assert len(
        contours) == 4, "Expected to find 4 buttons, found %s" % len(contours)
    centers = []
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        centers.append((x + (w / 2), y + (h / 2)))
    x_sort = sorted(centers, key=lambda center: center[0])
    y_sort = sorted(centers, key=lambda center: center[1])
    top = y_sort[0]
    bottom = y_sort[-1]
    left = x_sort[0]
    right = x_sort[-1]

    assert top != bottom != left != right, "Expected each point to be different"

    return top, right, bottom, left
Example #32
def _get_indicator_images_and_light_statuses(im):
    red = extract_color(im, 0, (50, 200), (50, 200))
    # show(red)
    w_total, h_total = get_dimens(im)
    w_threshold = int(_INDICATOR_WIDTH_PERCENT_THRESHOLD * w_total)
    h_threshold = int(_INDICATOR_HEIGHT_PERCENT_THRESHOLD * h_total)

    def is_indicator_big_enough(contour):
        _, _, contour_w, contour_h = cv2.boundingRect(contour)
        return contour_w > w_threshold and contour_h > h_threshold

    contours = [
        contour_bounding_box_for_contour(c) for c in get_contours(red)
        if is_indicator_big_enough(c)
    ]
    indicators = [four_point_transform(im, c) for c in contours]
    indicators_and_lights = []
    for indicator in indicators:
        w, h = get_dimens(indicator)
        if w < h:
            # Rotate 90 degrees so it's horizontal
            indicator = rotate_image_clockwise(indicator)
            w, h = get_dimens(indicator)

        # Check if light is on left or right, flip accordingly
        light_width_threshold = w * _LIGHT_WIDTH_THRESHOLD
        light_height_threshold = h * _LIGHT_HEIGHT_THRESHOLD
        light_on = extract_color(indicator, (0, 180), (0, 0), (255, 255))
        light_off = extract_color(indicator, (0, 180), (0, 40), (0, 50))

        # show(light_on)
        # show(light_off)

        def is_light_big_enough(contour):
            _, _, contour_w, contour_h = cv2.boundingRect(contour)
            return contour_w > light_width_threshold and contour_h > light_height_threshold

        light_on_contours = [
            contour_bounding_box_for_contour(c) for c in get_contours(light_on)
            if is_light_big_enough(c)
        ]
        light_off_contours = [
            contour_bounding_box_for_contour(c)
            for c in get_contours(light_off) if is_light_big_enough(c)
        ]
        assert len(light_on_contours) + len(light_off_contours) == 1, \
            "Expected to find exactly one light on the indicator"

        if light_on_contours:
            light_is_on = True
            light_contour = light_on_contours[0]
        else:
            light_is_on = False
            light_contour = light_off_contours[0]

        light_x, _ = get_center_for_contour(light_contour)
        if light_x > (w / 2.0):
            # Light is on the wrong side, need to flip 180
            indicator = rotate_image_180(indicator)

        indicators_and_lights.append((indicator, light_is_on))
    return indicators_and_lights
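_find_batteries and the indicator code above pass contours through contour_bounding_box_for_contour before warping them, so it presumably replaces an arbitrary contour with the four corners of its upright bounding box, roughly:

import cv2
import numpy as np

def contour_bounding_box_for_contour(contour):
    # Hypothetical sketch: the contour's upright bounding box as four (x, y) corners.
    x, y, w, h = cv2.boundingRect(contour)
    return np.array([(x, y), (x + w, y), (x + w, y + h), (x, y + h)])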
Example #33
def get_connections(im):
    """
    Returns a list of tuples, each with (color, destination, (to_click_x, to_click_y)).
    The list is ordered from top to bottom.
    """
    blue = extract_color(im, 115, (100, 255), (0, 255))
    red = extract_color(im, 5, (100, 255), (0, 255))
    black = extract_color(im, (0, 255), (0, 255), (0, 5))
    colors = (
        (Color.BLUE, blue),
        (Color.BLACK, black),
        (Color.RED, red),
    )

    mask = blue.copy()
    height, width = mask.shape

    rows = (0.33, 0.48, 0.62)
    cols = (0.30, 0.55)
    row_pxs = [int(row_percent * height) for row_percent in rows]
    start_col_px, end_col_px = [
        int(col_percent * width) for col_percent in cols
    ]

    def _get_output_for_connection(color, start_row_idx, end_row_idx):
        """Returns a tuple with (color, destination, (to_click_x, to_click_y))"""
        # Use the start_row to find a point to click to cut the wire.
        to_click = (start_col_px, row_pxs[start_row_idx])
        # Figure out which letter we're connecting to based on the end row.
        destination = ORDERED_DESTINATIONS[end_row_idx]
        return color, destination, to_click

    output = []
    for start_row, start_row_px in enumerate(row_pxs):
        current_output = None
        for end_row, end_row_px in enumerate(row_pxs):
            # Reset the mask
            mask[:] = 0
            # Draw a line where the connection should be on the mask
            cv2.line(mask, (start_col_px, start_row_px),
                     (end_col_px, end_row_px), 255, 15)
            # Convert the mask to be usable in a masked_array
            bool_mask = np.invert(mask.astype(bool))
            # Only consider this a wire if the mask is sufficiently filled in (66% threshold)
            activation_threshold = bool_mask.sum() * .66

            for color, color_mat in colors:
                activated_amount = np.ma.masked_array(color_mat,
                                                      mask=bool_mask).sum()
                if activated_amount > activation_threshold:
                    new_output = _get_output_for_connection(
                        color, start_row, end_row)
                    new_output_and_activated = (new_output, activated_amount)
                    if current_output is None:
                        current_output = new_output_and_activated
                    else:
                        current_output = sorted(
                            (current_output, new_output_and_activated),
                            key=lambda x: x[1])[-1]
                # combined = np.zeros_like(im)
                # combined[:, :, 0] = color_mat
                # combined[:, :, 2] = mask
                # show(combined)
        if current_output is not None:
            print(current_output)
            output.append(current_output[0])
    return output
Example #34
def _get_wire_color_and_mat_or_none(wire, hue, saturation, value, color):
    mat = extract_color(wire, hue, saturation, value)
    if mat.any():
        return color, mat
    else:
        return None
Example #35
def _get_indicator_text(indicator, tesseract):
    indicator = get_subset(indicator, (40, 100), (0, 100))
    color = extract_color(indicator, 47/2, (0, 50), (190, 240))
    tesseract.SetImage(Image.fromarray(color))
    return tesseract.GetUTF8Text().strip()
Example #36
def _get_wire_color_and_mat_or_none(wire, hue, saturation, value, color):
    mat = extract_color(wire, hue, saturation, value)
    if mat.any():
        return color, mat
    else:
        return None
Example #37
def _get_indicator_images_and_light_statuses(im):
    red = extract_color(im, 0, (50, 200), (50, 200))
    # show(red)
    w_total, h_total = get_dimens(im)
    w_threshold = int(_INDICATOR_WIDTH_PERCENT_THRESHOLD * w_total)
    h_threshold = int(_INDICATOR_HEIGHT_PERCENT_THRESHOLD * h_total)

    def is_indicator_big_enough(contour):
        _, _, contour_w, contour_h = cv2.boundingRect(contour)
        return contour_w > w_threshold and contour_h > h_threshold

    contours = [
        contour_bounding_box_for_contour(c) for c in get_contours(red)
        if is_indicator_big_enough(c)
    ]
    indicators = [four_point_transform(im, c) for c in contours]
    indicators_and_lights = []
    for indicator in indicators:
        w, h = get_dimens(indicator)
        if w < h:
            # Rotate 90 degrees so it's horizontal
            indicator = rotate_image_clockwise(indicator)
            w, h = get_dimens(indicator)

        # Check if light is on left or right, flip accordingly
        light_width_threshold = w * _LIGHT_WIDTH_THRESHOLD
        light_height_threshold = h * _LIGHT_HEIGHT_THRESHOLD
        light_on = extract_color(indicator, (0, 180), (0, 0), (255, 255))
        light_off = extract_color(indicator, (0, 180), (0, 40), (0, 50))

        # show(light_on)
        # show(light_off)

        def is_light_big_enough(contour):
            _, _, contour_w, contour_h = cv2.boundingRect(contour)
            return contour_w > light_width_threshold and contour_h > light_height_threshold

        light_on_contours = [
            contour_bounding_box_for_contour(c) for c in get_contours(light_on)
            if is_light_big_enough(c)
        ]
        light_off_contours = [
            contour_bounding_box_for_contour(c) for c in get_contours(light_off)
            if is_light_big_enough(c)
        ]
        assert len(light_on_contours) + len(light_off_contours) == 1, \
            "Expected to find exactly one light on the indicator"

        if light_on_contours:
            light_is_on = True
            light_contour = light_on_contours[0]
        else:
            light_is_on = False
            light_contour = light_off_contours[0]

        light_x, _ = get_center_for_contour(light_contour)
        if light_x > (w / 2.0):
            # Light is on the wrong side, need to flip 180
            indicator = rotate_image_180(indicator)

        indicators_and_lights.append((indicator, light_is_on))
    return indicators_and_lights
Example #38
def get_is_done(im):
    im = get_subset(im, (80, 95), (5, 20))
    color = extract_color(im, 128/2, (225, 255), (150, 255))
    return cv2.countNonZero(color) != 0
Example #39
def _get_cleaned_up_text_subsection(im):
    # type: (np.array) -> Optional[np.array]
    red1 = extract_color(im, (0, 6), (200, 255), (100, 150))
    red2 = extract_color(im, (176, 180), (200, 255), (100, 150))
    red = red1 + red2
    color = extract_color(im, 45 / 2, (20, 50), (200, 255))
    # show(red, .25)
    # show(yellow, .25)
    # im = scale(im, .25)
    # color = scale(color, .25)
    red_contour = _get_box_for_largest_rect_contour(red)
    text_contour = _get_box_for_largest_rect_contour(color)
    if red_contour is None or text_contour is None:
        # if red_contour is not None:
        #     print "RED"
        #     show(get_drawn_contours(red, [red_contour], True))
        #     show(color)
        # if text_contour is not None:
        #     print "TEXT"
        #     show(get_drawn_contours(color, [text_contour], True))
        #     show(red)

        # if not (red_contour is None and text_contour is None):
            # show(red)
            # show(color)
        # assert red_contour is None and text_contour is None, \
        #     "Error parsing serial number, didn't find one of the text or its label."
        return None
    
    red_center = get_center_for_contour(red_contour)
    text_center = get_center_for_contour(text_contour)
    text_subsection = four_point_transform(im, text_contour)
    # show(get_drawn_contours(color, text_contour, True), .25)
    # show(get_drawn_contours(red, red_contour, True), .25)
    # show(text_subsection)
    height, width = im.shape[:2]
    # Rotation logic from http://stackoverflow.com/a/5912847/3000133
    if height > width:
        # Determine if red is left or right of text
        if text_center[0] < red_center[0]:
            text_subsection = rotate_image_counter_clockwise(text_subsection)
        else:
            # Rotate clockwise 90
            text_subsection = rotate_image_clockwise(text_subsection)
    else:
        if text_center[1] > red_center[1]:
            # We're fine
            pass
        else:
            # Rotate 180
            text_subsection = rotate_image_180(text_subsection)

    # show(get_drawn_contours(im, [text_contour], True))
    # show(text_subsection)
    text_subsection_gray = cv2.cvtColor(text_subsection, cv2.COLOR_BGR2GRAY)
    # show(text_subsection_gray)
    _, text_threshold = cv2.threshold(text_subsection_gray, 50, 255, 0)
    text_threshold = 255 - text_threshold
    # show(text_threshold)
    height, width = text_threshold.shape[:2]
    text_threshold[:height / 10, :] = 0
    text_threshold[9 * height / 10:, :] = 0
    return text_threshold
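The rotation helpers used above (rotate_image_clockwise, rotate_image_counter_clockwise, rotate_image_180) are not included in the snippet. Following the transpose-and-flip approach from the Stack Overflow answer referenced in the comment, they could be as simple as this (again an assumption):

import cv2

def rotate_image_clockwise(im):
    # Transpose, then flip horizontally: 90 degrees clockwise.
    return cv2.flip(cv2.transpose(im), 1)

def rotate_image_counter_clockwise(im):
    # Transpose, then flip vertically: 90 degrees counter-clockwise.
    return cv2.flip(cv2.transpose(im), 0)

def rotate_image_180(im):
    # Flip around both axes: 180 degrees.
    return cv2.flip(im, -1)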
Example #40
def _get_wire_positions(im, wire_color, hue, saturation=(150, 255), value=(100, 255)):
    color = extract_color(im, hue, saturation, value)
    contours = get_contours(color, close_and_open=False)
    return [(get_center_for_contour(c), wire_color) for c in contours]
Example #41
def _get_cleaned_up_text_subsection(im):
    # type: (np.array) -> Optional[np.array]
    red1 = extract_color(im, (0, 6), (200, 255), (100, 150))
    red2 = extract_color(im, (176, 180), (200, 255), (100, 150))
    red = red1 + red2
    color = extract_color(im, 45 / 2, (20, 50), (200, 255))
    # show(red, .25)
    # show(yellow, .25)
    # im = scale(im, .25)
    # color = scale(color, .25)
    red_contour = _get_box_for_largest_rect_contour(red)
    text_contour = _get_box_for_largest_rect_contour(color)
    if red_contour is None or text_contour is None:
        # if red_contour is not None:
        #     print "RED"
        #     show(get_drawn_contours(red, [red_contour], True))
        #     show(color)
        # if text_contour is not None:
        #     print "TEXT"
        #     show(get_drawn_contours(color, [text_contour], True))
        #     show(red)

        # if not (red_contour is None and text_contour is None):
        # show(red)
        # show(color)
        # assert red_contour is None and text_contour is None, \
        #     "Error parsing serial number, didn't find one of the text or its label."
        return None

    red_center = get_center_for_contour(red_contour)
    text_center = get_center_for_contour(text_contour)
    text_subsection = four_point_transform(im, text_contour)
    # show(get_drawn_contours(color, text_contour, True), .25)
    # show(get_drawn_contours(red, red_contour, True), .25)
    # show(text_subsection)
    height, width = im.shape[:2]
    # Rotation logic from http://stackoverflow.com/a/5912847/3000133
    if height > width:
        # Determine if red is left or right of text
        if text_center[0] < red_center[0]:
            text_subsection = rotate_image_counter_clockwise(text_subsection)
        else:
            # Rotate clockwise 90
            text_subsection = rotate_image_clockwise(text_subsection)
    else:
        if text_center[1] > red_center[1]:
            # We're fine
            pass
        else:
            # Rotate 180
            text_subsection = rotate_image_180(text_subsection)

    # show(get_drawn_contours(im, [text_contour], True))
    # show(text_subsection)
    text_subsection_gray = cv2.cvtColor(text_subsection, cv2.COLOR_BGR2GRAY)
    # show(text_subsection_gray)
    _, text_threshold = cv2.threshold(text_subsection_gray, 50, 255, 0)
    text_threshold = 255 - text_threshold
    # show(text_threshold)
    height, width = text_threshold.shape[:2]
    text_threshold[:height / 10, :] = 0
    text_threshold[9 * height / 10:, :] = 0
    return text_threshold
Example #42
def _get_indicator_text(indicator, tesseract):
    indicator = get_subset(indicator, (40, 100), (0, 100))
    color = extract_color(indicator, 47 / 2, (0, 50), (190, 240))
    tesseract.SetImage(Image.fromarray(color))
    return tesseract.GetUTF8Text().strip()
Example #43
def get_is_done(im):
    im = get_subset(im, (80, 95), (5, 20))
    color = extract_color(im, 128 / 2, (225, 255), (150, 255))
    return cv2.countNonZero(color) != 0