def debug_blocks(img, points, block_dims, threshold_function):
    '''
  To be used for debugging. Saves images of blocks that throw errors,
  and an additional image showing how the blocks are distributed.
  
  '''
    from debug import Debug
    from numpy.ma import MaskedArray
    from skimage.draw import line, circle
    from skimage.color import gray2rgb

    bad_block_points = []

    for center in points:
        block = get_block(img, center, block_dims)
        try:
            if isinstance(block, MaskedArray):
                threshold_function(block.compressed())
            else:
                threshold_function(block)

        except Exception as e:
            print "threshold block error"
            print e
            bad_block_points.append(center)
            Debug.save_image("threshold", "bad_block_" + str(center), block)
def create_image_cube(img, sigma_list, axis):
    gaussian_blurs = [gaussian_filter1d(img, s, axis=axis) for s in sigma_list]
    num_scales = len(gaussian_blurs) - 1
    image_cube = np.zeros((img.shape[0], img.shape[1], num_scales))
    for i in range(num_scales):
        image_cube[:, :, i] = gaussian_blurs[i] - gaussian_blurs[i + 1]
        Debug.save_image("ridges", "image_cube-" + pad(i), image_cube[:, :, i])
    return image_cube
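# Usage sketch (an assumption, not from the original source): build a
# difference-of-gaussians cube along the row axis from a grayscale float
# image. create_sigma_list is the helper used by find_ridges below and is
# assumed to return a geometric progression of sigmas.
def _example_image_cube(img):
    # 7 sigmas (roughly 0.7071 * 1.9**k for k = 0..6) give 6 difference layers
    sigma_list = create_sigma_list(0.7071, 1.9, np.arange(7))
    dog_cube = create_image_cube(img, sigma_list, axis=0)
    assert dog_cube.shape == (img.shape[0], img.shape[1], 6)
    return dog_cube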
def get_box_lines(boundary, image=None):
    height, width = boundary.shape
    [half_width, half_height] = np.floor([0.5 * width,
                                          0.5 * height]).astype(int)

    timeStart("split image")
    image_regions = {
        "left": boundary[0:height, 0:half_width],
        "right": boundary[0:height, half_width:width],
        "top": boundary[0:half_height, 0:width],
        "bottom": boundary[half_height:height, 0:width]
    }
    timeEnd("split image")

    timeStart("get hough lines")
    hough_lines = {
        "left":
        np.array(
            get_hough_lines(image_regions["left"], min_angle=-10,
                            max_angle=10)),
        "right":
        np.array(
            get_hough_lines(image_regions["right"],
                            min_angle=-10,
                            max_angle=10)),
        "top":
        np.array(
            get_hough_lines(image_regions["top"],
                            min_angle=-120,
                            max_angle=-70)),
        "bottom":
        np.array(
            get_hough_lines(image_regions["bottom"],
                            min_angle=-120,
                            max_angle=-70))
    }
    timeEnd("get hough lines")

    hough_lines["bottom"] += [0, half_height]
    hough_lines["right"] += [half_width, 0]

    print "found these hough lines:"
    print hough_lines

    if Debug.active:
        image = gray2rgb(boundary)
        line_coords = [
            skidraw.line(line[0][1], line[0][0], line[1][1], line[1][0])
            for line in hough_lines.itervalues()
        ]
        for line in line_coords:
            rr, cc = line
            mask = (rr >= 0) & (rr < image.shape[0]) & (cc >= 0) & (
                cc < image.shape[1])
            image[rr[mask], cc[mask]] = [255, 0, 0]
        Debug.save_image("roi", "hough_lines", image)

    return hough_lines
def get_boundary(grayscale_image, scale=1):
    timeStart("threshold image")
    black_and_white_image = otsu_threshold_image(grayscale_image)
    timeEnd("threshold image")

    Debug.save_image("roi", "black_and_white_image", black_and_white_image)

    timeStart("morphological open image")
    filter_element_opening = disk(PARAMS["trace-width"](scale))
    opened_image = cv2.morphologyEx(
        255 * black_and_white_image.astype(np.uint8), cv2.MORPH_OPEN,
        filter_element_opening)
    timeEnd("morphological open image")

    Debug.save_image("roi", "opened_image", opened_image)

    timeStart("invert image")
    opened_image = np.invert(opened_image)
    timeEnd("invert image")

    timeStart("segment image into connected regions")
    labeled_components, num_components = label(opened_image)
    timeEnd("segment image into connected regions")

    timeStart("calculate region areas")
    # Have to cast to np.intp with ndarray.astype because of a numpy bug
    # see: https://github.com/numpy/numpy/pull/4366
    areas = np.bincount(labeled_components.flatten().astype(np.intp))[1:]
    timeEnd("calculate region areas")

    timeStart("calculate region boundaries")
    image_boundaries = find_boundaries(labeled_components,
                                       connectivity=1,
                                       mode="inner",
                                       background=0)
    timeEnd("calculate region boundaries")

    Debug.save_image("roi", "image_boundaries", image_boundaries)

    timeStart("mask region of interest")
    largest_component_id = np.argmax(areas) + 1
    region_of_interest_mask = (labeled_components != largest_component_id)
    region_of_interest_boundary = np.copy(image_boundaries)
    region_of_interest_boundary[region_of_interest_mask] = 0
    timeEnd("mask region of interest")

    Debug.save_image("roi", "region_of_interest_boundary",
                     region_of_interest_boundary)

    return region_of_interest_boundary
def create_exclusion_cube(img, image_cube, dark_pixels, convex_pixels, axis,
                          convex_threshold):

    timeStart("get slopes")
    slopes = get_slopes(img, axis=axis)
    timeEnd("get slopes")

    Debug.save_image("ridges", "slopes", slopes)

    exclusion_cube = np.zeros(image_cube.shape, dtype=bool)
    exclusion_cube[:,:,0] = dark_pixels | convex_pixels | slopes \
                          | (image_cube[:,:,0] < -convex_threshold)

    Debug.save_image("ridges", "exclusion_cube_base", exclusion_cube[:, :, 0])

    num_scales = image_cube.shape[2]
    for i in range(1, num_scales):
        # each layer of the exclusion cube contains the previous layer
        # plus all convex pixels in the current image_cube layer
        exclusion_cube[:,:,i] = (exclusion_cube[:,:,i-1] \
                                | (image_cube[:,:,i] < -convex_threshold))
        Debug.save_image("ridges", "exclusion_cube-" + pad(i),
                         exclusion_cube[:, :, i])

    return exclusion_cube
def analyze_image(in_file,
                  out_dir,
                  stats_file=False,
                  scale=1,
                  debug_dir=False,
                  fix_seed=False):
    from lib.dir import ensure_dir_exists
    from lib.debug import Debug
    from lib.stats_recorder import Record

    if debug_dir:
        Debug.set_directory(debug_dir)

    if fix_seed:
        Debug.set_seed(1234567890)

    if stats_file:
        Record.activate()

    ensure_dir_exists(out_dir)

    from lib.timer import timeStart, timeEnd

    from lib.load_image import get_grayscale_image, image_as_float
    from skimage.morphology import medial_axis
    from lib.roi_detection import get_roi, corners_to_geojson
    from lib.polygon_mask import mask_image
    from lib.meanline_detection import detect_meanlines, meanlines_to_geojson
    from lib.threshold import flatten_background
    from lib.ridge_detection import find_ridges
    from lib.binarization import binary_image
    from lib.intersection_detection import find_intersections
    from lib.trace_segmentation import get_segments, segments_to_geojson
    from lib.geojson_io import save_features, save_json
    from lib.utilities import encode_labeled_image_as_rgb
    from scipy import misc
    import numpy as np

    paths = {
        "roi": out_dir + "/roi.json",
        "meanlines": out_dir + "/meanlines.json",
        "intersections": out_dir + "/intersections.json",
        "intersections_raster": out_dir + "/intersections_raster.png",
        "segments": out_dir + "/segments.json",
        "segment_regions": out_dir + "/segment_regions.png",
        "segment_assignments": out_dir + "/segment_assignments.json"
    }

    timeStart("get all metadata")

    timeStart("read image")
    img_gray = image_as_float(get_grayscale_image(in_file))
    timeEnd("read image")

    print "\n--ROI--"
    timeStart("get region of interest")
    corners = get_roi(img_gray, scale=scale)
    timeEnd("get region of interest")

    timeStart("convert roi to geojson")
    corners_as_geojson = corners_to_geojson(corners)
    timeEnd("convert roi to geojson")

    timeStart("saving roi as geojson")
    save_features(corners_as_geojson, paths["roi"])
    timeEnd("saving roi as geojson")

    print "\n--MASK IMAGE--"
    roi_polygon = corners_as_geojson["geometry"]["coordinates"][0]

    timeStart("mask image")
    masked_image = mask_image(img_gray, roi_polygon)
    timeEnd("mask image")

    Debug.save_image("main", "masked_image", masked_image.filled(0))

    if Record.active:
        non_masked_values = 255 * masked_image.compressed()
        bins = np.arange(257)
        image_hist, _ = np.histogram(non_masked_values, bins=bins)
        Record.record("roi_intensity_hist", image_hist.tolist())

    print "\n--MEANLINES--"
    meanlines = detect_meanlines(masked_image, corners, scale=scale)

    timeStart("convert meanlines to geojson")
    meanlines_as_geojson = meanlines_to_geojson(meanlines)
    timeEnd("convert meanlines to geojson")

    timeStart("saving meanlines as geojson")
    save_features(meanlines_as_geojson, paths["meanlines"])
    timeEnd("saving meanlines as geojson")

    print "\n--FLATTEN BACKGROUND--"
    img_dark_removed, background = \
      flatten_background(masked_image, prob_background=0.95,
                         return_background=True, img_gray=img_gray)

    Debug.save_image("main", "flattened_background", img_dark_removed)

    masked_image = None

    print "\n--RIDGES--"
    timeStart("get horizontal and vertical ridges")
    ridges_h, ridges_v = find_ridges(img_dark_removed, background)
    ridges = ridges_h | ridges_v
    timeEnd("get horizontal and vertical ridges")

    print "\n--THRESHOLDING--"
    timeStart("get binary image")
    img_bin = binary_image(img_dark_removed,
                           markers_trace=ridges,
                           markers_background=background)
    timeEnd("get binary image")

    img_dark_removed = None
    background = None

    print "\n--SKELETONIZE--"
    timeStart("get medial axis skeleton and distance transform")
    img_skel, dist = medial_axis(img_bin, return_distance=True)
    timeEnd("get medial axis skeleton and distance transform")

    Debug.save_image("skeletonize", "skeleton", img_skel)

    print "\n--INTERSECTIONS--"
    intersections = find_intersections(img_bin, img_skel, dist, figure=False)

    timeStart("convert to geojson")
    intersection_json = intersections.asGeoJSON()
    timeEnd("convert to geojson")

    timeStart("saving intersections as geojson")
    save_features(intersection_json, paths["intersections"])
    timeEnd("saving intersections as geojson")

    timeStart("convert to image")
    intersection_image = intersections.asImage()
    timeEnd("convert to image")

    Debug.save_image("intersections", "intersections", intersection_image)
    timeStart("save intersections raster")
    misc.imsave(paths["intersections_raster"], intersection_image)
    timeEnd("save intersections raster")

    print "\n--SEGMENTS--"
    timeStart("get segments")
    segments, labeled_regions = \
      get_segments(img_gray, img_bin, img_skel, dist, intersection_image,
                   ridges_h, ridges_v, figure=True)
    timeEnd("get segments")

    timeStart("encode labels as rgb values")
    rgb_segments = encode_labeled_image_as_rgb(labeled_regions)
    timeEnd("encode labels as rgb values")

    timeStart("save segment regions")
    misc.imsave(paths["segment_regions"], rgb_segments)
    timeEnd("save segment regions")

    timeStart("convert centerlines to geojson")
    segments_as_geojson = segments_to_geojson(segments)
    timeEnd("convert centerlines to geojson")

    timeStart("saving centerlines as geojson")
    save_features(segments_as_geojson, paths["segments"])
    timeEnd("saving centerlines as geojson")

    #return (img_gray, ridges, img_bin, intersections, img_seg)
    # return segments
    # detect center lines

    # connect segments

    # output data

    time_elapsed = timeEnd("get all metadata")

    Record.record("time_elapsed", float("%.2f" % time_elapsed))

    if (stats_file):
        Record.export_as_json(stats_file)

    # TODO: refactor this into some sort of status module.
    # For now, since this is our only problematic status,
    # it's hard to know what to generalize. Eventually
    # we might want to flag several different statuses
    # for specific conditions.
    max_segments_reasonable = 11000
    if (len(segments) > max_segments_reasonable):
        print "STATUS>>>problematic<<<"
    else:
        print "STATUS>>>complete<<<"
def get_corners(lines, image=None):
    timeStart("find intersections")
    corners = {
        "top_left": seg_intersect(lines["top"], lines["left"]),
        "top_right": seg_intersect(lines["top"], lines["right"]),
        "bottom_left": seg_intersect(lines["bottom"], lines["left"]),
        "bottom_right": seg_intersect(lines["bottom"], lines["right"])
    }

    # turn corners into tuples of the form (x, y), where x and y are integers
    corners = {
        corner_name: tuple(coord.astype(int))
        for corner_name, coord in corners.iteritems()
    }
    timeEnd("find intersections")

    if Debug.active:
        image_copy = np.copy(image)
        inner_circles = {
            corner_name: skidraw.circle(corner[1],
                                        corner[0],
                                        10,
                                        shape=image.shape)
            for corner_name, corner in corners.iteritems()
        }
        outer_circles = {
            corner_name: skidraw.circle(corner[1],
                                        corner[0],
                                        50,
                                        shape=image.shape)
            for corner_name, corner in corners.iteritems()
        }
        for corner_name in inner_circles:
            image_copy[outer_circles[corner_name]] = 0.0
            image_copy[inner_circles[corner_name]] = 1.0
        Debug.save_image("roi", "roi_corners", image_copy)

    if Record.active:
        from lib.utilities import poly_area2D
        from lib.quality_control import points_to_rho_theta

        corners_clockwise = [
            corners["top_left"], corners["top_right"], corners["bottom_right"],
            corners["bottom_left"]
        ]
        roi_area = poly_area2D(corners_clockwise)
        _, roi_angle_top = points_to_rho_theta(corners["top_left"],
                                               corners["top_right"])
        _, roi_angle_bottom = points_to_rho_theta(corners["bottom_right"],
                                                  corners["bottom_left"])
        _, roi_angle_left = points_to_rho_theta(corners["top_left"],
                                                corners["bottom_left"])
        _, roi_angle_right = points_to_rho_theta(corners["bottom_right"],
                                                 corners["top_right"])

        Record.record("roi_area", roi_area)
        Record.record("roi_angle_top", float("%.4f" % roi_angle_top))
        Record.record("roi_angle_bottom", float("%.4f" % roi_angle_bottom))
        Record.record("roi_angle_left", float("%.4f" % roi_angle_left))
        Record.record("roi_angle_right", float("%.4f" % roi_angle_right))

    return corners
def flatten_background(img,
                       prob_background=1,
                       num_blocks=None,
                       block_dims=None,
                       return_background=False,
                       img_gray=None):
    '''
  Finds the pixel intensity at every location in the image below which the
  pixel is likely part of the dark background. Pixels darker than this
  threshold are replaced by the value of the threshold at their location,
  which eliminates unusually dark regions.

  Parameters
  ------------
  img : 2-D numpy array
    The grayscale image. Can be either floats on the interval [0,1] or
    ints on the interval [0,255].
  prob_background : float, optional
    The minimum estimated probability that a pixel below the threshold
    is part of the background. Must be <= 1. Lower numbers result in
    higher thresholds.
  num_blocks : int, optional
    The number of blocks of the image to use to create the threshold.
  block_dims : tuple or numpy array, optional
    The dimensions of the rectangular blocks. Dimensions should be less
    than the dimensions of the image. If left unspecified, the blocks will
    be squares with area approximately equal to two times the area of the
    image divided by num_blocks.

  Returns
  --------
  flattened : 2-D numpy array
    An image equal to the input grayscale image wherever pixels are brighter
    than the background threshold, and equal to the threshold everywhere else.
  background : 2-D numpy array of bools
    A mask of the pixels considered background (dark pixels, local minima of
    img_gray, and masked regions). Only returned when return_background is True.
  '''
    # Default number of blocks assumes 500x500 blocks are a good size
    if num_blocks is None:
        num_blocks = int(np.ceil(2 * img.size / 250000))
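        # Illustrative arithmetic (numbers not from the source): a 5000 x 4000
        # image has img.size == 20,000,000, so num_blocks == ceil(2 * 2e7 / 250000)
        # == 160; the factor of 2 matches the docstring's note that the blocks
        # cover roughly twice the image area.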

    timeStart("calculate background threshold with %s blocks" % num_blocks)
    get_background_thresh = make_background_thresh_fun(prob_background)
    background_level = threshold(img,
                                 get_background_thresh,
                                 num_blocks,
                                 block_dims,
                                 smoothing=0.003)
    timeEnd("calculate background threshold with %s blocks" % num_blocks)

    timeStart("select dark pixels")
    dark_pixels = img < background_level
    timeEnd("select dark pixels")

    timeStart("raise dark pixels")
    flattened = np.where(dark_pixels, background_level, img)
    timeEnd("raise dark pixels")

    local_min_gray = local_min(img_gray)
    Debug.save_image("threshold", "local_min_gray", local_min_gray)

    timeStart("union dark pixels, minima, and mask regions")
    background = dark_pixels | local_min_gray | img.mask
    timeEnd("union dark pixels, minima, and mask regions")
    Debug.save_image("threshold", "background", background)
    '''
  background_with_mask barely differs from background, but it
  somehow differs enough to cause the pipeline to find different
  numbers of intersections and segments. The differences seem to be
  at the boundary of the mask, so I suspect they only result in
  segments that will end up being deleted anyway.

  TODO: Come back and check this after adding automated deletion
  of segments that pass outside the ROI (assuming this isn't
  already a thing).

  bennlich 9/21

  '''
    # local_min_mask = local_min(img)
    # Debug.save_image("threshold", "local_min_mask", local_min_mask)

    # timeStart("union dark pixels, minima, and mask regions")
    # background_with_mask = dark_pixels | local_min_mask | img.mask
    # timeEnd("union dark pixels, minima, and mask regions")
    # Debug.save_image("threshold", "background_with_mask", background_with_mask)

    if return_background is False:
        return flattened
    else:
        return (flattened, background)
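# Usage sketch mirroring the call in analyze_image above (the argument names
# are placeholders): masked_img is a numpy masked array of grayscale floats
# in [0, 1] and gray is the unmasked grayscale image.
def _example_flatten_background(masked_img, gray):
    # raise unusually dark regions to the local background level and also get
    # a mask of background pixels (dark pixels, local minima, masked regions)
    flattened, background = flatten_background(masked_img,
                                               prob_background=0.95,
                                               return_background=True,
                                               img_gray=gray)
    return flattened, background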
    block_line_coords = [[
        line(*corners[0] + corners[1]),
        line(*corners[1] + corners[2]),
        line(*corners[2] + corners[3]),
        line(*corners[3] + corners[0])
    ] for corners in block_corners]

    for block in block_line_coords:
        for line_coords in block:
            rr, cc = line_coords
            mask = (rr >= 0) & (rr < debug_image.shape[0]) & (cc >= 0) & (
                cc < debug_image.shape[1])
            debug_image[rr[mask], cc[mask]] = [1.0, 0, 0]

    for center in points:
        rr, cc = circle(center[0], center[1], 20)
        debug_image[rr, cc] = [1.0, 0, 0]

    Debug.save_image("threshold", "threshold_blocks", debug_image)


def get_block(img, center, block_dims):
    '''
  Returns the rectangular subarray of **img** centered at **center**, with
  dimensions at most equal to **block_dims**.

  Parameters
  -----------
  img : 2-D numpy array
  center : tuple or numpy array
    The coordinates of the center of the block. Should be integer-valued.
  block_dims : tuple or numpy array
    The dimensions of the block. If **center** is too close to the edge of
    **img**, the returned block will have dimensions smaller than
    **block_dims**.
  '''
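# get_block's body is not shown above. The function below is a minimal sketch
# of the behavior its docstring describes (an assumption, not the original
# implementation): clip the block to the image bounds so that blocks near an
# edge come back smaller than block_dims.
def _get_block_sketch(img, center, block_dims):
    import numpy as np
    half = np.asarray(block_dims) // 2
    center = np.asarray(center)
    # clip each side of the block to the image independently
    top, left = np.maximum(center - half, 0)
    bottom, right = np.minimum(center + half, np.asarray(img.shape))
    return img[top:bottom, left:right]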
def analyze_image(in_file,
                  out_dir,
                  stats_file=False,
                  scale=1,
                  debug_dir=False,
                  fix_seed=False):
    from lib.dir import ensure_dir_exists
    from lib.debug import Debug
    from lib.stats_recorder import Record

    if debug_dir:
        Debug.set_directory(debug_dir)

    if fix_seed:
        Debug.set_seed(1234567890)

    if stats_file:
        Record.activate()

    ensure_dir_exists(out_dir)

    from lib.timer import timeStart, timeEnd

    from lib.load_image import get_grayscale_image, image_as_float
    from lib.roi_detection import get_roi, corners_to_geojson
    from lib.polygon_mask import mask_image
    from lib.meanline_detection import detect_meanlines, meanlines_to_geojson
    from lib.geojson_io import save_features

    paths = {
        "roi": out_dir + "/roi.json",
        "meanlines": out_dir + "/meanlines.json",
        "intersections": out_dir + "/intersections.json",
        "segments": out_dir + "/segments.json",
        "segment_assignments": out_dir + "/segment_assignments.json"
    }

    timeStart("get roi and meanlines")

    timeStart("read image")
    img_gray = image_as_float(get_grayscale_image(in_file))
    timeEnd("read image")

    print "\n--ROI--"
    timeStart("get region of interest")
    corners = get_roi(img_gray, scale=scale)
    timeEnd("get region of interest")

    timeStart("convert roi to geojson")
    corners_as_geojson = corners_to_geojson(corners)
    timeEnd("convert roi to geojson")

    timeStart("saving roi as geojson")
    save_features(corners_as_geojson, paths["roi"])
    timeEnd("saving roi as geojson")

    print "\n--MASK IMAGE--"
    roi_polygon = corners_as_geojson["geometry"]["coordinates"][0]

    timeStart("mask image")
    masked_image = mask_image(img_gray, roi_polygon)
    timeEnd("mask image")

    Debug.save_image("main", "masked_image", masked_image.filled(0))

    print "\n--MEANLINES--"
    meanlines = detect_meanlines(masked_image, corners, scale=scale)

    timeStart("convert meanlines to geojson")
    meanlines_as_geojson = meanlines_to_geojson(meanlines)
    timeEnd("convert meanlines to geojson")

    timeStart("saving meanlines as geojson")
    save_features(meanlines_as_geojson, paths["meanlines"])
    timeEnd("saving meanlines as geojson")

    timeEnd("get roi and meanlines")

    if (stats_file):
        Record.export_as_json(stats_file)
def get_segments(img_gray,
                 img_bin,
                 img_skel,
                 dist,
                 img_intersections,
                 ridges_h,
                 ridges_v,
                 figure=False):
    timeStart("canny edge detection")
    image_canny = canny(img_gray)
    timeEnd("canny edge detection")

    Debug.save_image("segments", "edges", image_canny)

    # Strange: just noticed that none of the below had
    # ever been getting used for as long as this file
    # has existed.

    # timeStart("fill canny corners")
    # filled_corners_canny = fill_corners(image_canny)
    # timeEnd("fill canny corners")

    # Debug.save_image("segments", "edges_with_corners", filled_corners_canny)

    # timeStart("subtract canny corners from image")
    # img_bin = img_bin & (~ filled_corners_canny)
    # timeEnd("subtract canny corners from image")

    # Debug.save_image("segments", "binary_image_minus_edges", img_bin)

    timeStart("sobel filter")
    image_sobel = sobel(img_gray)
    timeEnd("sobel filter")

    Debug.save_image("segments", "slopes", image_sobel)

    timeStart("otsu threshold")
    steep_slopes = image_sobel > threshold_otsu(image_sobel)
    timeEnd("otsu threshold")

    Debug.save_image("segments", "steep_slopes", steep_slopes)

    timeStart("binary erosion")
    steep_slopes = binary_erosion(steep_slopes, square(3, dtype=bool))
    timeEnd("binary erosion")

    Debug.save_image("segments", "eroded_steep_slopes", steep_slopes)

    timeStart("subtract regions from skeleton")
    segments_bin = (img_skel & (~img_intersections) & (~image_canny) &
                    (~steep_slopes))
    timeEnd("subtract regions from skeleton")

    Debug.save_image(
        "segments",
        "skeleton_minus_intersections_minus_edges_minus_steep_slopes",
        segments_bin)

    timeStart("reverse medial axis")
    # regrow segment regions from segment skeletons
    rmat = reverse_medial_axis(segments_bin, dist)
    timeEnd("reverse medial axis")

    Debug.save_image("segments", "reverse_medial_axis", rmat)

    # maybe, instead of running medial_axis again, do nearest-neighbor interp
    timeStart("get distance transform")
    # the pixels values of rmat_dist correspond to the distance
    # between each pixel and its nearest foreground/background boundary
    _, rmat_dist = medial_axis(rmat, return_distance=True)
    timeEnd("get distance transform")

    Debug.save_image("segments", "distance_transform", rmat_dist)

    timeStart("label segment skeletons")
    image_segments, num_segments = label(segments_bin, np.ones((3, 3)))
    timeEnd("label segment skeletons")

    Debug.save_image("segments", "labeled_skeleton", image_segments)

    timeStart("watershed")
    image_segments = watershed(-rmat_dist, image_segments, mask=rmat)
    timeEnd("watershed")

    Debug.save_image("segments", "watershed", image_segments)

    # subtract 1 for the background segment
    num_traces = num_segments - 1
    print "found %s segments" % num_traces
    Record.record("num_segments", num_traces)

    segments = img_seg_to_seg_objects(image_segments, num_segments, ridges_h,
                                      ridges_v, img_gray)

    if Debug.active:
        from lib.segment_coloring import gray2prism
        # try to assign different gray values to neighboring segments
        traces_colored = (image_segments + num_traces *
                          (image_segments % 4)) / float(4 * num_traces)
        # store a background mask
        background = traces_colored == 0
        background = np.dstack((background, background, background))
        # convert gray values to colors
        traces_colored = gray2prism(traces_colored)
        # make background pixels black
        traces_colored[background] = 0
        Debug.save_image("segments", "segment_regions", traces_colored)

    if Record.active:
        timeStart("calculate histograms of segment sizes")
        # last bin includes both left and right boundaries, so add 1 to right-most bin
        segment_bins = np.arange(num_segments + 1)
        segment_sizes, _ = np.histogram(image_segments.flatten(),
                                        bins=segment_bins)

        # trim off the "background" segment
        segment_sizes = segment_sizes[1:]

        max_segment_size = np.amax(segment_sizes)
        # there are no size 0 segments
        segment_size_bins = np.arange(1, max_segment_size + 1)
        hist_of_sizes, _ = np.histogram(segment_sizes, bins=segment_size_bins)

        Record.record("segment_region_hist", hist_of_sizes.tolist())
        timeEnd("calculate histograms of segment sizes")

        timeStart("calculate histogram of centerlines")
        centerlines = [
            seg.center_line for seg in segments.itervalues()
            if seg.has_center_line
        ]
        Record.record("num_segments_with_centerlines", len(centerlines))

        centerline_lengths = map(lambda line: len(line.coords), centerlines)

        max_centerline_length = np.amax(centerline_lengths)
        # there are no size 0 centerlines
        centerline_bins = np.arange(1, max_centerline_length + 1)
        hist_of_centerlines, _ = np.histogram(centerline_lengths,
                                              bins=centerline_bins)
        Record.record("segment_centerline_hist", hist_of_centerlines.tolist())
        timeEnd("calculate histogram of centerlines")

    if figure == False:
        return segments
    else:
        return (segments, image_segments)
def find_ridges(img,
                dark_pixels,
                min_sigma=0.7071,
                max_sigma=30,
                sigma_ratio=1.9,
                min_ridge_length=15,
                low_threshold=0.002,
                high_threshold=0.006,
                convex_threshold=0.00015,
                figures=True):
    '''
  The values for min_sigma, max_sigma, and sigma_ratio are hardcoded,
  but they ought to be a function of the scale parameter. They're related
  to the minimum and maximum expected trace width in pixels.

  If max_sigma is too small, the algorithm misses ridges of thick traces.
  Need to do more thinking about how this function works.

  '''
    # num_scales is the number of scales at which to compute a difference of gaussians

    # the following line in words: the number of times you need to multiply
    # min_sigma by sigma_ratio to get max_sigma
    num_scales = int(log(float(max_sigma) / min_sigma, sigma_ratio)) + 1

    # a geometric progression of standard deviations for gaussian kernels
    sigma_list = create_sigma_list(min_sigma, sigma_ratio,
                                   np.arange(num_scales + 1))
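    # Worked example with the default parameters (illustrative, not from the
    # source): log(30 / 0.7071, 1.9) is about 5.84, so num_scales == 6 and
    # sigma_list holds 7 sigmas, roughly 0.71, 1.34, 2.55, 4.85, 9.22, 17.5,
    # and 33.3 -- a geometric ladder spanning the expected trace widths.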

    # convex_pixels is an image of regions with positive second derivative
    timeStart("get convex pixels")
    convex_pixels = get_convex_pixels(img, convex_threshold)
    timeEnd("get convex pixels")

    Debug.save_image("ridges", "convex_pixels", convex_pixels)

    timeStart("find horizontal ridges")
    footprint_h = np.ones((3, 1, 3), dtype=bool)
    ridges_h, max_values_h, max_scales_h = \
        extract_ridge_data(img, sobel_axis=1, dog_axis=0,
                           footprint=footprint_h, dark_pixels=dark_pixels,
                           convex_pixels=convex_pixels, sigma_list=sigma_list,
                           convex_threshold=convex_threshold, low_threshold=low_threshold)
    timeEnd("find horizontal ridges")

    timeStart("find vertical ridges")
    footprint_v = np.ones((1, 3, 3), dtype=bool)
    ridges_v, max_values_v, max_scales_v = \
        extract_ridge_data(img, sobel_axis=0, dog_axis=1,
                           footprint=footprint_v, dark_pixels=dark_pixels,
                           convex_pixels=convex_pixels, sigma_list=sigma_list,
                           convex_threshold=convex_threshold, low_threshold=low_threshold)
    timeEnd("find vertical ridges")

    # Horizontal ridges need to be prominent
    ridges_h = ridges_h & (max_values_h >= high_threshold)

    # Vertical ridges need to either be prominent or highly connected
    ridges_v = (ridges_v &
                ((max_values_v >= high_threshold) | remove_small_objects(
                    ridges_v, min_ridge_length, connectivity=2)))

    timeStart("aggregate information about maxima of horizontal ridges")
    sigmas_h = create_sigma_list(min_sigma, sigma_ratio, max_scales_h)
    ridge_data_h = compile_ridge_data(sigmas_h, ridges_h, max_values_h)
    timeEnd("aggregate information about maxima of horizontal ridges")

    timeStart("prioritize horizontal regions")
    horizontal_regions = get_ridge_region_horiz(ridge_data_h, img.shape)
    ridges_v = ridges_v & (horizontal_regions == 0)
    timeEnd("prioritize horizontal regions")

    Debug.save_image("ridges", "vertical_ridges", ridges_v)
    Debug.save_image("ridges", "horizontal_ridges", ridges_h)

    if figures == True:
        return (ridges_h, ridges_v)
    else:
        # Aggregate information about maxima of vertical ridges
        sigmas_v = create_sigma_list(min_sigma, sigma_ratio, max_scales_v)
        ridge_data_v = compile_ridge_data(sigmas_v, ridges_v, max_values_v)
        return (ridge_data_h, ridge_data_v)
def get_convex_pixels(img, convex_threshold):
    laplacian = gaussian_laplace(img, sigma=2)
    Debug.save_image("ridges", "gaussian_laplace", laplacian)
    return laplacian > convex_threshold