def split_edge_candidate_clusters(img, mask, keep_edges):
    from refiner.image_processing.draw import draw_corners_on_image
    from refiner.clustering.dbscan import dbscan_with_masked_image
    if keep_edges is None:
        return keep_edges
    masked_image = get_grayscaled_image(img.copy()) / 255
    masked_image[mask == 0] = 0
    mask_size = int(np.sum(masked_image))
    remove_keys = []
    new_edges = []
    for key, val in keep_edges.items():
        edge_size = get_n_unique_rows(val)
        if edge_size / mask_size > 0.05:
            logger.debug(
                "Splitting edge since it is big: {}% of total mask".format(round(100 * val.shape[0] / mask_size, 0)))
            img_keep_edges = draw_corners_on_image(val, get_colored_image(np.zeros_like(img)))
            corners = get_harris_corners(img_keep_edges)
            img_keep_edges = draw_corners_on_image(corners.tolist(), img_keep_edges, tuple([0, 0, 0]), radius=25)
            clustered_edges = dbscan_with_masked_image(get_grayscaled_image(img_keep_edges), eps=cfg.clustering_eps,
                                                       min_samples=cfg.clustering_min_sample)
            if len(clustered_edges) > 1:
                clustered_edges = filter_edges(masked_image * 255, clustered_edges)
                new_edges.append(clustered_edges)
                remove_keys.append(key)

    for key in remove_keys:
        del keep_edges[key]
    for i, edge in enumerate(new_edges):
        for key, val in edge.items():
            keep_edges['split_{}_{}'.format(i, key)] = val
    return keep_edges
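The helpers get_grayscaled_image, get_colored_image, and get_n_unique_rows are used throughout these examples but are not shown here; the sketch below is an assumption about their behaviour, not the refiner package's actual implementation.

import cv2
import numpy as np

def get_grayscaled_image(img):
    # Assumed: collapse a BGR image to a single channel; pass grayscale input through.
    if img.ndim == 3:
        return cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_BGR2GRAY)
    return img

def get_colored_image(img):
    # Assumed: promote a single-channel image to 3-channel BGR.
    if img.ndim == 2:
        return cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_GRAY2BGR)
    return img

def get_n_unique_rows(points):
    # Assumed: count distinct (x, y) rows in an N x 2 point array.
    return np.unique(points, axis=0).shape[0]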
Example #2
def filter_edges(masked_image, edges):
    # Compute mask size
    mask = get_grayscaled_image(masked_image) / 255
    mask_size = int(np.sum(mask))

    remove_keys = []
    for key, val in edges.items():
        # Compute edge size
        edge_size = get_n_unique_rows(val)
        if edge_size / mask_size < 0.01:  # remove small ones
            remove_keys.append(key)
        else:  # remove "centered ones" since we are looking for lines
            median = np.median(val, axis=0)
            dists = np.linalg.norm(val - median, axis=1)
            var = np.var(dists)
            threshold = (mask.shape[0] + mask.shape[1]) / 25
            if var < threshold:
                logger.debug(
                    "Removed edges due to small variance in distribution {} < {}"
                    .format(round(var, 2), round(threshold, 2)))
                remove_keys.append(key)

    for key in remove_keys:
        del edges[key]
    return edges
Example #3
def get_edge_candidate_clusters_from_mask(image, mask, n_mask, ksize,
                                          output_directory):
    from refiner.clustering.dbscan import dbscan_with_masked_image
    from refiner.image_processing.draw import draw_masks_on_image
    if ksize % 2 == 0:
        raise ValueError("Kernel size must be odd")

    img_masked = draw_masks_on_image(image, [(mask * 255).astype(np.uint8)])
    if cfg.visualization_dict['widened_contour']:
        cv2.imwrite(
            os.path.join(
                output_directory,
                "mask_{}_around_edges.jpg".format(str(n_mask).zfill(2))),
            img_masked)
    image[mask == 0] = 0
    image = get_grayscaled_image(image)
    clustered_edges = dbscan_with_masked_image(
        image, eps=cfg.clustering_eps, min_samples=cfg.clustering_min_sample)
    if len(clustered_edges) > 50:
        logger.debug("First run not successful (found {} edges)".format(
            len(clustered_edges)))
        clustered_edges = dbscan_with_masked_image(
            image,
            eps=cfg.clustering_eps * 2,
            min_samples=cfg.clustering_min_sample)
    clustered_edges = filter_edges(image, clustered_edges)
    return clustered_edges
Example #4
def canny_edge_detection(img, threshold1=100, threshold2=200, adaptive=True):
    imgray = get_grayscaled_image(img)
    if adaptive:
        threshold2, _ = cv2.threshold(imgray, 0, 255,
                                      cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        threshold1 = 0.4 * threshold2
    edges = cv2.Canny(imgray, threshold1, threshold2)
    return edges
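A minimal usage sketch, assuming a BGR input image; the file paths are illustrative only.

import cv2

img = cv2.imread("example.jpg")  # illustrative path
edges_otsu = canny_edge_detection(img)  # thresholds derived from Otsu's method
edges_fixed = canny_edge_detection(img, threshold1=50, threshold2=150, adaptive=False)
cv2.imwrite("edges_otsu.jpg", edges_otsu)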
Example #5
def get_lines_from_hough_transformation(image, mask, cluster_lines, output_directory=None):
    from refiner.hough_transform.hough_transformation import detect_lines
    if output_directory is not None and not os.path.exists(output_directory):
        os.mkdir(output_directory)
    prob = False
    img_masked = get_grayscaled_image(image.copy())
    img_masked[mask == 0] = 0
    lines = detect_lines(img_masked, probabilistic=prob, cluster_lines=cluster_lines,
                         output_directory=output_directory)
    return lines
Example #6
def draw_contour_around_points_no_mask(points, mask_shape, color=255):
    mask = np.zeros(mask_shape)
    if isinstance(points, list):
        if len(points) == 0:
            # No points: return an empty mask of the requested shape
            return np.zeros(mask_shape)
        points = np.array(points)
    mask = cv2.drawContours(mask, [points], -1, (color,), thickness=-1)
    mask = get_grayscaled_image(mask)
    return mask
def fast_corner_detection(img):
    # https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_fast/py_fast.html
    fast = cv2.FastFeatureDetector_create()
    img = get_grayscaled_image(img)

    # find and draw the keypoints
    kp = fast.detect(img, None)
    img2 = cv2.drawKeypoints(img, kp, color=(255, 0, 0), outImage=img)
    return img2, [
        tuple((int(keypoint.pt[0]), int(keypoint.pt[1]))) for keypoint in kp
    ]
def compute_refined_mask_and_lines(img_org, img_edges, mask, dir_results, i):
    mask_extract_contour = compute_widened_contour_line_around_mask(
        mask, ksize=cfg.mask_size)
    logger.info("Computing lines with Hough Transformation")
    # Hough transform
    lines_hough = get_lines_from_hough_transformation(
        img_edges,
        mask_extract_contour,
        cluster_lines=True,
        output_directory=os.path.join(dir_results, cfg.dir_result_details))
    # DBSCAN clustering
    logger.info("Computing lines with clustering")
    lines_clustering = get_lines_from_clustering(img_edges,
                                                 mask_extract_contour,
                                                 mask,
                                                 i,
                                                 ksize=cfg.mask_size,
                                                 output_directory=os.path.join(
                                                     dir_results,
                                                     cfg.dir_result_details))
    lines = lines_clustering + lines_hough
    if len(lines) == 0:
        # use planercnn mask as fallback
        return mask, None

    # Find best lines
    logger.info(f"Find best lines in {len(lines)} detected lines")
    lines = cluster_lines_together_and_choose_best(
        lines, get_grayscaled_image(img_edges.copy()))
    lines = find_line_end_points_detailed(
        get_grayscaled_image(img_edges.copy()), lines)
    if lines is not None and len(lines) > 0:
        points = np.array(sum(lines, [])).reshape(-1, 2)
        logger.info(f"Refining mask along the best {points.shape[0]} points")
        refined_mask = adjust_mask_with_points(img_org, points, mask)
        if refined_mask is not None:
            save_image_with_refined_mask(img_edges, refined_mask, points, i,
                                         dir_results)
            return refined_mask, lines
    return None, None
Example #9
def find_line_masked_image(equation, image, n_mask, n_edge, output_directory):
    # mask the line across image
    line_mask = np.zeros_like(get_grayscaled_image(image))
    # cv2.line expects integer pixel coordinates, so cast the fitted y-values to int
    y_start = int(np.ravel(equation(np.array([0]).reshape(1, -1)))[0])
    y_end = int(np.ravel(equation(np.array([image.shape[1]]).reshape(1, -1)))[0])
    pt_start = (0, image.shape[0] - y_start)
    pt_end = (image.shape[1], image.shape[0] - y_end)
    line_mask = cv2.line(line_mask,
                         pt_start,
                         pt_end,
                         (255,),
                         thickness=15)
    # Overlay line_mask and image
    masked_img = cv2.bitwise_and(image, image, mask=line_mask)
    if cfg.visualization_dict['mask_folder']:
        dir_mask = os.path.join(output_directory,
                                "mask_{}".format(str(n_mask).zfill(2)))
        if not os.path.exists(dir_mask):
            os.mkdir(dir_mask)
        cv2.imwrite(os.path.join(dir_mask, "line_" + str(n_edge) + ".jpg"),
                    masked_img)
    return get_grayscaled_image(masked_img)
Example #10
def draw_convex_hull_around_points_no_mask(points, mask_shape, color=255):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        mask = np.zeros(mask_shape)
        if isinstance(points, list):
            if len(points) == 0:
                return np.zeros(mask_shape)
            else:
                points = np.array(points)
        convex_hull = cv2.convexHull(points.astype(np.float32))  # need float input
        convex_hull = convex_hull.astype(np.int32).reshape(-1, 2)  # fill needs int input
        mask = cv2.fillConvexPoly(get_colored_image(mask), convex_hull, tuple([color, color, color]))
        mask = get_grayscaled_image(mask)
        return mask
def harris_corner_detection(img):
    img_gray = get_grayscaled_image(img)
    corners = cv2.cornerHarris(img_gray, 2, 3, 0.04)
    corners2 = cv2.dilate(corners, None, iterations=3)
    img[corners2 > 0.01 * corners2.max()] = [255, 0, 0]
    return img
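split_edge_candidate_clusters above calls get_harris_corners, which is not shown in this section. The sketch below is an assumption about what it returns (an N x 2 array of corner coordinates), not the refiner package's actual implementation.

import cv2
import numpy as np

def get_harris_corners(img, block_size=2, ksize=3, k=0.04, rel_threshold=0.01):
    # Assumed: return the (x, y) coordinates of strong Harris corner responses.
    img_gray = get_grayscaled_image(img)
    response = cv2.cornerHarris(np.float32(img_gray), block_size, ksize, k)
    ys, xs = np.where(response > rel_threshold * response.max())
    return np.stack([xs, ys], axis=1)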
Example #12
def draw_convex_hull_around_points(points, mask, color=255):
    convex_hull = cv2.convexHull(points.astype(np.float32))  # need float input
    convex_hull = convex_hull.astype(np.int32).reshape(-1, 2)  # fill needs int input
    mask = cv2.fillConvexPoly(get_colored_image(mask), convex_hull, tuple([color, color, color]))
    mask = get_grayscaled_image(mask)
    return mask
Example #13
def enlarge_contours(image, ksize):
    image = get_grayscaled_image(image)
    mask = cv2.GaussianBlur(image, (ksize, ksize), 0)  # enlarge mask
    image[mask > 0] = 255
    return get_colored_image(image)
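A minimal usage sketch chaining the edge detector above with enlarge_contours; note that ksize must be odd for cv2.GaussianBlur. The file path is illustrative.

import cv2

edge_map = canny_edge_detection(cv2.imread("example.jpg"))  # single-channel edge map
widened = enlarge_contours(edge_map, ksize=15)  # ksize must be odd
cv2.imwrite("widened_edges.jpg", widened)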