Code Example #1
def create_exclusion_cube(img, image_cube, dark_pixels, convex_pixels, axis,
                          convex_threshold):

    timeStart("get slopes")
    slopes = get_slopes(img, axis=axis)
    timeEnd("get slopes")

    Debug.save_image("ridges", "slopes", slopes)

    exclusion_cube = np.zeros(image_cube.shape, dtype=bool)
    exclusion_cube[:,:,0] = dark_pixels | convex_pixels | slopes \
                          | (image_cube[:,:,0] < -convex_threshold)

    Debug.save_image("ridges", "exclusion_cube_base", exclusion_cube[:, :, 0])

    num_scales = image_cube.shape[2]
    for i in range(1, num_scales):
        # each layer of the exclusion cube contains the previous layer
        # plus all convex pixels in the current image_cube layer
        exclusion_cube[:,:,i] = (exclusion_cube[:,:,i-1] \
                                | (image_cube[:,:,i] < -convex_threshold))
        Debug.save_image("ridges", "exclusion_cube-" + pad(i),
                         exclusion_cube[:, :, i])

    return exclusion_cube
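
The loop above builds each layer of the exclusion cube as the union of the previous layer and the newly excluded (too-convex) pixels at the current scale, so exclusions only ever grow with scale. A minimal sketch of that cumulative-OR pattern on hypothetical data:

import numpy as np

convex_threshold = 0.00015
image_cube = np.random.randn(4, 4, 3) * 0.001       # hypothetical DoG responses
base_exclusions = np.zeros((4, 4), dtype=bool)       # stand-in for dark/convex/slope pixels

exclusion_cube = np.zeros(image_cube.shape, dtype=bool)
exclusion_cube[:, :, 0] = base_exclusions | (image_cube[:, :, 0] < -convex_threshold)
for i in range(1, image_cube.shape[2]):
    exclusion_cube[:, :, i] = exclusion_cube[:, :, i - 1] \
                              | (image_cube[:, :, i] < -convex_threshold)

# every layer contains the previous one, so exclusions are monotone in scale
assert np.all(exclusion_cube[:, :, 2] >= exclusion_cube[:, :, 1])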
Code Example #2
def get_meanlines(in_file, out_file, roi_file, scale=1, debug_dir=False):
    from lib.debug import Debug
    if debug_dir:
        Debug.set_directory(debug_dir)

    from lib.timer import timeStart, timeEnd
    from lib.load_image import get_image
    from lib.geojson_io import get_features, save_features
    from lib.polygon_mask import mask_image
    from lib.meanline_detection import detect_meanlines, meanlines_to_geojson

    timeStart("get meanlines")

    timeStart("read image")
    image = get_image(in_file)
    timeEnd("read image")

    roi_polygon = get_features(roi_file)["geometry"]["coordinates"][0]

    timeStart("mask image")
    masked_image = mask_image(image, roi_polygon)
    timeEnd("mask image")

    meanlines = detect_meanlines(masked_image, scale=scale)

    timeStart("convert to geojson")
    meanlines_as_geojson = meanlines_to_geojson(meanlines)
    timeEnd("convert to geojson")

    timeStart("saving as geojson")
    save_features(meanlines_as_geojson, out_file)
    timeEnd("saving as geojson")

    timeEnd("get meanlines")
Code Example #3
def local_min(image, min_distance=2):
    if np.amax(image) <= 1:
        image = np.uint8(image * 255)

    selem = np.ones((2 * min_distance + 1, 2 * min_distance + 1))

    timeStart("morphological erosion")
    img = erosion(image, selem)
    timeEnd("morphological erosion")

    return image == img
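
The final comparison marks a pixel as a local minimum when it equals the grayscale erosion (the neighborhood minimum) of the image. A small self-contained check on a hypothetical image with a single dark pit:

import numpy as np
from skimage.morphology import erosion

image = np.full((7, 7), 100, dtype=np.uint8)
image[3, 3] = 10                      # one dark pit in an otherwise flat image

selem = np.ones((5, 5))               # min_distance=2 -> (2*2+1)-square neighborhood
eroded = erosion(image, selem)        # each pixel becomes its neighborhood minimum

local_minima = image == eroded
print(local_minima[3, 3])             # True: the pit equals its neighborhood minimum
print(local_minima[3, 4])             # False: its neighbors do not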
Code Example #4
def extract_ridge_data(img, sobel_axis, dog_axis, footprint, dark_pixels,
                       convex_pixels, sigma_list, convex_threshold,
                       low_threshold):
    '''
  Returns
  -------
  ridges : 2D boolean array
    True at every pixel considered to be a ridge.
  max_values : 2D float array
    The maximum values across all sigma scales of image_cube.
  max_scales : 2D int array
    The scales at which the image_cube took on those maximum values.

  '''
    num_scales = len(sigma_list) - 1

    timeStart("create difference of gaussian image cube at %s scales" %
              num_scales)
    image_cube = create_image_cube(img, sigma_list, axis=dog_axis)
    timeEnd("create difference of gaussian image cube at %s scales" %
            num_scales)

    timeStart("create exclusion cube")
    exclusion = create_exclusion_cube(img,
                                      image_cube=image_cube,
                                      dark_pixels=dark_pixels,
                                      convex_pixels=convex_pixels,
                                      axis=sobel_axis,
                                      convex_threshold=convex_threshold)
    timeEnd("create exclusion cube")

    timeStart("find image cube maxima")
    maxima = find_valid_maxima(image_cube, footprint, exclusion, low_threshold)
    timeEnd("find image cube maxima")

    # set all non-maxima points in image_cube to 0
    timeStart("suppress non-maxima")
    image_cube[~maxima] = 0
    timeEnd("suppress non-maxima")

    timeStart("collapse cubes")
    # ridges is a 2D boolean array that is True wherever
    # maxima is True at one or more scales
    ridges = np.amax(maxima, axis=-1)
    max_values = np.amax(image_cube, axis=-1)
    max_scales = np.argmax(image_cube, axis=-1)
    timeEnd("collapse cubes")

    return ridges, max_values, max_scales
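
The "collapse cubes" step reduces the 3-D scale stack to 2-D maps with amax and argmax along the scale axis. A tiny hypothetical cube makes the bookkeeping concrete:

import numpy as np

cube = np.zeros((2, 2, 3))
cube[0, 0, 1] = 0.5          # pixel (0, 0) responds most strongly at scale 1
cube[1, 1, 2] = 0.9          # pixel (1, 1) responds most strongly at scale 2
maxima = cube > 0            # stand-in for find_valid_maxima

ridges = np.amax(maxima, axis=-1)        # True wherever any scale has a maximum
max_values = np.amax(cube, axis=-1)      # strongest response per pixel
max_scales = np.argmax(cube, axis=-1)    # scale index of that response

print(ridges)        # True at (0, 0) and (1, 1) only
print(max_scales)    # 1 at (0, 0), 2 at (1, 1), 0 elsewhere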
Code Example #5
def get_endpoint_data(features):
    """
  Given a parsed GeoJson of segment data, it returns a raw data dictionary
  of the start and endpoints of all segments as well as averages and std deviations
  on the y axis.
  """
    all_x = []
    all_y = []
    ids = []
    average_y = []
    std_deviation_y = []
    startpoints = []
    endpoints = []

    timeStart("get coordinates")
    for feature in features["features"]:
        coordinates = np.array(
            feature["geometry"]["coordinates"]
        )  # turn the list of coords into a fancy 2D numpy array
        all_x.append(
            coordinates[:, 0]
        )  # numpy arrays are indexed [row, column], so [:, 0] means "all rows, 0th column"
        all_y.append(coordinates[:, 1])
        ids.append(feature["id"])
    timeEnd("get coordinates")

    for y_coords in all_y:
        average_y.append(np.mean(y_coords))
        std_deviation_y.append(np.std(y_coords))

    for x_coords, y_coords in zip(all_x, all_y):
        startpoints.append([x_coords[0], y_coords[0]])
        endpoints.append([x_coords[-1], y_coords[-1]])

    return {
        "ids": ids,
        "startpoints": startpoints,
        "endpoints": endpoints,
        "average_y": average_y,
        "std_deviation_y": std_deviation_y
    }
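
A usage sketch with a hypothetical two-segment FeatureCollection, showing the expected shape of the input and output (coordinates are [x, y] pairs, as in GeoJSON):

features = {
    "features": [
        {"id": 1, "geometry": {"coordinates": [[0, 10], [5, 12], [9, 11]]}},
        {"id": 2, "geometry": {"coordinates": [[1, 20], [8, 22]]}},
    ]
}
data = get_endpoint_data(features)
print(data["startpoints"])   # [[0, 10], [1, 20]]
print(data["endpoints"])     # [[9, 11], [8, 22]]
print(data["average_y"])     # [11.0, 21.0]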
Code Example #6
def get_intersections(in_file, out_file, roi_file, debug_dir=False):
    if debug_dir:
        from lib.dir import ensure_dir_exists
        ensure_dir_exists(debug_dir)

    from lib.timer import timeStart, timeEnd
    from lib.intersection_detection import find_intersections
    from lib.load_image import get_image
    from lib.load_geojson import get_features
    from lib.polygon_mask import mask_image
    from lib.geojson_io import save_features
    from skimage.io import imsave

    timeStart("get intersections")

    timeStart("read image")
    grayscale_image = get_image(in_file)
    timeEnd("read image")

    roi_polygon = get_features(roi_file)["geometry"]["coordinates"][0]

    timeStart("mask image")
    masked_image = mask_image(grayscale_image, roi_polygon)
    timeEnd("mask image")

    intersections = find_intersections(masked_image.filled(False),
                                       figure=False)

    timeStart("saving to " + out_file)
    intersections_as_geojson = intersections.asGeoJSON()
    save_features(intersections_as_geojson, out_file)
    timeEnd("saving to " + out_file)

    if debug_dir:
        debug_filepath = debug_dir + "/intersections.png"
        timeStart("saving to " + debug_filepath)
        intersections_as_image = intersections.asImage().astype(float)
        imsave(debug_filepath, intersections_as_image)
        timeEnd("saving to " + debug_filepath)

    timeEnd("get intersections")
Code Example #7
def get_roi(in_file, out_file, scale=1, debug_dir=False):
  if debug_dir:
    from lib.dir import ensure_dir_exists
    ensure_dir_exists(debug_dir)

  from lib.timer import timeStart, timeEnd
  from lib.load_image import get_image
  from lib.roi_detection import get_roi, corners_to_geojson
  from lib.geojson_io import save_features

  timeStart("ROI")

  timeStart("read image")
  image = get_image(in_file)
  timeEnd("read image")

  corners = get_roi(image, scale=scale)

  timeStart("convert to geojson")
  corners_as_geojson = corners_to_geojson(corners)
  timeEnd("convert to geojson")

  if debug_dir:
    from lib.polygon_mask import mask_image
    from scipy import misc
    roi_polygon = corners_as_geojson["geometry"]["coordinates"][0]
    timeStart("mask image")
    masked_image = mask_image(image, roi_polygon)
    timeEnd("mask image")
    misc.imsave(debug_dir+"/masked_image.png", masked_image.filled(0))

  if out_file:
    timeStart("saving as geojson")
    save_features(corners_as_geojson, out_file)
    timeEnd("saving as geojson")
  else:
    print corners_as_geojson

  timeEnd("ROI")
Code Example #8
def binary_image(image,
                 markers_trace=None,
                 markers_background=None,
                 min_trace_size=6,
                 min_background_size=4):
    '''
  Creates a binary image.

  Parameters
  ------------
  image : numpy array
    Can either be a color (3-D) or grayscale (2-D) image.

  Returns
  ---------
  image_bin : 2-D Boolean numpy array
    A 2-D array with the same shape as the input image. Foreground pixels
    are True, and background pixels are False.
  '''
    if image.ndim != 2:
        image = color.rgb2gray(image)
    if markers_background is None:
        markers_background = get_background_markers(image)
    if markers_trace is None:
        markers_trace = get_trace_markers(image, markers_background)

    timeStart("watershed segmentation")
    image_bin = watershed_segmentation(image, markers_trace,
                                       markers_background)
    timeEnd("watershed segmentation")
    #image_bin = image_bin & (~ fill_corners(canny(image)))

    timeStart("remove small segments and edges")
    image_bin = remove_small_segments_and_edges(image_bin, min_trace_size,
                                                min_background_size)
    timeEnd("remove small segments and edges")
    return image_bin
Code Example #9
def resize_image(in_file, out_file, scale):
  timeStart("load image")
  image = get_image(in_file)
  timeEnd("load image")

  timeStart("resize image")
  resized_image = imresize(image, scale)
  timeEnd("resize image")

  timeStart("save image")
  imsave(out_file, resized_image)
  timeEnd("save image")
Code Example #10
def get_thresholded_image(in_file, out_file):
    from lib.timer import timeStart, timeEnd
    from lib.otsu_threshold_image import otsu_threshold_image
    from lib.load_image import get_image
    from scipy import misc

    timeStart("read image")
    grayscale_image = get_image(in_file)
    timeEnd("read image")

    timeStart("threshold image")
    thresholded_image = otsu_threshold_image(grayscale_image)
    timeEnd("threshold image")

    timeStart("save image")
    misc.imsave(out_file, thresholded_image)
    timeEnd("save image")
Code Example #11
def get_segments(in_file, out_file, intersections_file):
    from lib.timer import timeStart, timeEnd
    from lib.load_image import get_image
    from lib.load_geojson import get_features
    from lib.segment_detection import get_segments
    from lib.segment_detection import save_segments_as_geojson

    timeStart("get segments")

    timeStart("read image")
    image = get_image(in_file)
    timeEnd("read image")

    intersections = get_features(intersections_file)

    timeStart("calculate segments")
    segments = get_segments(image, intersections)
    timeEnd("calculate segments")

    save_segments_as_geojson(segments, out_file)
    timeEnd("get segments")
Code Example #12
def flatten_background(img,
                       prob_background=1,
                       num_blocks=None,
                       block_dims=None,
                       return_background=False,
                       img_gray=None):
    '''
  Finds, at every location in the image, the pixel intensity below which a
  pixel is likely part of the dark background. Pixels darker than this
  threshold are replaced by the threshold value at their location, which
  eliminates unusually dark regions.

  Parameters
  ------------
  img : 2-D numpy array
    The grayscale image. Can be either floats on the interval [0,1] or
    ints on the interval [0,255].
  prob_background : float, optional
    The minimum (estimated) probability that a pixel below the threshold
    is part of the background. Must be <= 1. Lower numbers result in
    higher thresholds.
  num_blocks : int, optional
    The number of blocks of the image to use to create the threshold.
  block_dims : tuple or numpy array
    The dimensions of the rectangular blocks. Dimensions should be less
    than the dimensions of the image. If left unspecified, the blocks will
    be squares with area approximately equal to two times the area of the
    image, divided by num_blocks.

  Returns
  --------
  flattened : 2-D numpy array
    An image equal to the input grayscale image wherever pixels are brighter
    than the background threshold, and equal to the threshold everywhere else.
  background : 2-D numpy array of bools
    True at pixels considered background (dark pixels, local grayscale minima,
    and masked regions). Only returned when return_background is True.
  '''
    # Default number of blocks assumes 500x500 blocks are a good size
    if num_blocks is None:
        num_blocks = int(np.ceil(2.0 * img.size / 250000))

    timeStart("calculate background threshold with %s blocks" % num_blocks)
    get_background_thresh = make_background_thresh_fun(prob_background)
    background_level = threshold(img,
                                 get_background_thresh,
                                 num_blocks,
                                 block_dims,
                                 smoothing=0.003)
    timeEnd("calculate background threshold with %s blocks" % num_blocks)

    timeStart("select dark pixels")
    dark_pixels = img < background_level
    timeEnd("select dark pixels")

    timeStart("raise dark pixels")
    flattened = np.where(dark_pixels, background_level, img)
    timeEnd("raise dark pixels")

    local_min_gray = local_min(img_gray)
    Debug.save_image("threshold", "local_min_gray", local_min_gray)

    timeStart("union dark pixels, minima, and mask regions")
    background = dark_pixels | local_min_gray | img.mask
    timeEnd("union dark pixels, minima, and mask regions")
    Debug.save_image("threshold", "background", background)
    '''
  background_with_mask barely differs from background, but it
  somehow differs enough to cause the pipeline to find different
  numbers of intersections and segments. The differences seem to be
  at the boundary of the mask, so I suspect they only result in
  segments that will end up being deleted anyway.

  TODO: Come back and check this after adding automated deletion
  of segments that pass outside the ROI (assuming this isn't
  already a thing).

  bennlich 9/21

  '''
    # local_min_mask = local_min(img)
    # Debug.save_image("threshold", "local_min_mask", local_min_mask)

    # timeStart("union dark pixels, minima, and mask regions")
    # background_with_mask = dark_pixels | local_min_mask | img.mask
    # timeEnd("union dark pixels, minima, and mask regions")
    # Debug.save_image("threshold", "background_with_mask", background_with_mask)

    if return_background is False:
        return flattened
    else:
        return (flattened, background)
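
The core of the flattening is the "raise dark pixels" step: np.where substitutes the local background level wherever the image falls below it. A minimal numeric sketch with made-up values:

import numpy as np

img = np.array([[0.10, 0.60],
                [0.05, 0.80]])
background_level = np.full((2, 2), 0.20)         # a flat threshold, for illustration

dark_pixels = img < background_level             # True at the two dark pixels
flattened = np.where(dark_pixels, background_level, img)
print(flattened)                                 # dark pixels raised to 0.2, others unchanged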
Code Example #13
def analyze_image(in_file,
                  out_dir,
                  stats_file=False,
                  scale=1,
                  debug_dir=False,
                  fix_seed=False):
    from lib.dir import ensure_dir_exists
    from lib.debug import Debug
    from lib.stats_recorder import Record

    if debug_dir:
        Debug.set_directory(debug_dir)

    if fix_seed:
        Debug.set_seed(1234567890)

    if stats_file:
        Record.activate()

    ensure_dir_exists(out_dir)

    from lib.timer import timeStart, timeEnd

    from lib.load_image import get_grayscale_image, image_as_float
    from skimage.morphology import medial_axis
    from lib.roi_detection import get_roi, corners_to_geojson
    from lib.polygon_mask import mask_image
    from lib.meanline_detection import detect_meanlines, meanlines_to_geojson
    from lib.threshold import flatten_background
    from lib.ridge_detection import find_ridges
    from lib.binarization import binary_image
    from lib.intersection_detection import find_intersections
    from lib.trace_segmentation import get_segments, segments_to_geojson
    from lib.geojson_io import save_features, save_json
    from lib.utilities import encode_labeled_image_as_rgb
    from scipy import misc
    import numpy as np

    paths = {
        "roi": out_dir + "/roi.json",
        "meanlines": out_dir + "/meanlines.json",
        "intersections": out_dir + "/intersections.json",
        "intersections_raster": out_dir + "/intersections_raster.png",
        "segments": out_dir + "/segments.json",
        "segment_regions": out_dir + "/segment_regions.png",
        "segment_assignments": out_dir + "/segment_assignments.json"
    }

    timeStart("get all metadata")

    timeStart("read image")
    img_gray = image_as_float(get_grayscale_image(in_file))
    timeEnd("read image")

    print "\n--ROI--"
    timeStart("get region of interest")
    corners = get_roi(img_gray, scale=scale)
    timeEnd("get region of interest")

    timeStart("convert roi to geojson")
    corners_as_geojson = corners_to_geojson(corners)
    timeEnd("convert roi to geojson")

    timeStart("saving roi as geojson")
    save_features(corners_as_geojson, paths["roi"])
    timeEnd("saving roi as geojson")

    print "\n--MASK IMAGE--"
    roi_polygon = corners_as_geojson["geometry"]["coordinates"][0]

    timeStart("mask image")
    masked_image = mask_image(img_gray, roi_polygon)
    timeEnd("mask image")

    Debug.save_image("main", "masked_image", masked_image.filled(0))

    if Record.active:
        non_masked_values = 255 * masked_image.compressed()
        bins = np.arange(257)
        image_hist, _ = np.histogram(non_masked_values, bins=bins)
        Record.record("roi_intensity_hist", image_hist.tolist())

    print "\n--MEANLINES--"
    meanlines = detect_meanlines(masked_image, corners, scale=scale)

    timeStart("convert meanlines to geojson")
    meanlines_as_geojson = meanlines_to_geojson(meanlines)
    timeEnd("convert meanlines to geojson")

    timeStart("saving meanlines as geojson")
    save_features(meanlines_as_geojson, paths["meanlines"])
    timeEnd("saving meanlines as geojson")

    print "\n--FLATTEN BACKGROUND--"
    img_dark_removed, background = \
      flatten_background(masked_image, prob_background=0.95,
                         return_background=True, img_gray=img_gray)

    Debug.save_image("main", "flattened_background", img_dark_removed)

    masked_image = None

    print "\n--RIDGES--"
    timeStart("get horizontal and vertical ridges")
    ridges_h, ridges_v = find_ridges(img_dark_removed, background)
    ridges = ridges_h | ridges_v
    timeEnd("get horizontal and vertical ridges")

    print "\n--THRESHOLDING--"
    timeStart("get binary image")
    img_bin = binary_image(img_dark_removed,
                           markers_trace=ridges,
                           markers_background=background)
    timeEnd("get binary image")

    img_dark_removed = None
    background = None

    print "\n--SKELETONIZE--"
    timeStart("get medial axis skeleton and distance transform")
    img_skel, dist = medial_axis(img_bin, return_distance=True)
    timeEnd("get medial axis skeleton and distance transform")

    Debug.save_image("skeletonize", "skeleton", img_skel)

    print "\n--INTERSECTIONS--"
    intersections = find_intersections(img_bin, img_skel, dist, figure=False)

    timeStart("convert to geojson")
    intersection_json = intersections.asGeoJSON()
    timeEnd("convert to geojson")

    timeStart("saving intersections as geojson")
    save_features(intersection_json, paths["intersections"])
    timeEnd("saving intersections as geojson")

    timeStart("convert to image")
    intersection_image = intersections.asImage()
    timeEnd("convert to image")

    Debug.save_image("intersections", "intersections", intersection_image)
    timeStart("save intersections raster")
    misc.imsave(paths["intersections_raster"], intersection_image)
    timeEnd("save intersections raster")

    print "\n--SEGMENTS--"
    timeStart("get segments")
    segments, labeled_regions = \
      get_segments(img_gray, img_bin, img_skel, dist, intersection_image,
                   ridges_h, ridges_v, figure=True)
    timeEnd("get segments")

    timeStart("encode labels as rgb values")
    rgb_segments = encode_labeled_image_as_rgb(labeled_regions)
    timeEnd("encode labels as rgb values")

    timeStart("save segment regions")
    misc.imsave(paths["segment_regions"], rgb_segments)
    timeEnd("save segment regions")

    timeStart("convert centerlines to geojson")
    segments_as_geojson = segments_to_geojson(segments)
    timeEnd("convert centerlines to geojson")

    timeStart("saving centerlines as geojson")
    save_features(segments_as_geojson, paths["segments"])
    timeEnd("saving centerlines as geojson")

    #return (img_gray, ridges, img_bin, intersections, img_seg)
    # return segments
    # detect center lines

    # connect segments

    # output data

    time_elapsed = timeEnd("get all metadata")

    Record.record("time_elapsed", float("%.2f" % time_elapsed))

    if (stats_file):
        Record.export_as_json(stats_file)

    # TODO: refactor this into some sort of status module.
    # For now, since this is our only problematic status,
    # it's hard to know what to generalize. Eventually
    # we might want to flag several different statuses
    # for specific conditions.
    max_segments_reasonable = 11000
    if (len(segments) > max_segments_reasonable):
        print "STATUS>>>problematic<<<"
    else:
        print "STATUS>>>complete<<<"
Code Example #14
def find_ridges(img,
                dark_pixels,
                min_sigma=0.7071,
                max_sigma=30,
                sigma_ratio=1.9,
                min_ridge_length=15,
                low_threshold=0.002,
                high_threshold=0.006,
                convex_threshold=0.00015,
                figures=True):
    '''
  The values for min_sigma, max_sigma, and sigma_ratio are hardcoded,
  but they ought to be a function of the scale parameter. They're related
  to the minimum and maximum expected trace width in pixels.

  If max_sigma is too small, the algorithm misses ridges of thick traces.
  Need to do more thinking about how this function works.

  '''
    # num_scales is the number of scales at which to compute a difference of gaussians

    # the following line in words: the number of times you need to multiply
    # min_sigma by sigma_ratio to get max_sigma
    num_scales = int(log(float(max_sigma) / min_sigma, sigma_ratio)) + 1

    # a geometric progression of standard deviations for gaussian kernels
    sigma_list = create_sigma_list(min_sigma, sigma_ratio,
                                   np.arange(num_scales + 1))

    # convex_pixels is an image of regions with positive second derivative
    timeStart("get convex pixels")
    convex_pixels = get_convex_pixels(img, convex_threshold)
    timeEnd("get convex pixels")

    Debug.save_image("ridges", "convex_pixels", convex_pixels)

    timeStart("find horizontal ridges")
    footprint_h = np.ones((3, 1, 3), dtype=bool)
    ridges_h, max_values_h, max_scales_h = \
        extract_ridge_data(img, sobel_axis=1, dog_axis=0,
                           footprint=footprint_h, dark_pixels=dark_pixels,
                           convex_pixels=convex_pixels, sigma_list=sigma_list,
                           convex_threshold=convex_threshold, low_threshold=low_threshold)
    timeEnd("find horizontal ridges")

    timeStart("find vertical ridges")
    footprint_v = np.ones((1, 3, 3), dtype=bool)
    ridges_v, max_values_v, max_scales_v = \
        extract_ridge_data(img, sobel_axis=0, dog_axis=1,
                           footprint=footprint_v, dark_pixels=dark_pixels,
                           convex_pixels=convex_pixels, sigma_list=sigma_list,
                           convex_threshold=convex_threshold, low_threshold=low_threshold)
    timeEnd("find vertical ridges")

    # Horizontal ridges need to be prominent
    ridges_h = ridges_h & (max_values_h >= high_threshold)

    # Vertical ridges need to either be prominent or highly connected
    ridges_v = (ridges_v &
                ((max_values_v >= high_threshold) | remove_small_objects(
                    ridges_v, min_ridge_length, connectivity=2)))

    timeStart("aggregate information about maxima of horizontal ridges")
    sigmas_h = create_sigma_list(min_sigma, sigma_ratio, max_scales_h)
    ridge_data_h = compile_ridge_data(sigmas_h, ridges_h, max_values_h)
    timeEnd("aggregate information about maxima of horizontal ridges")

    timeStart("prioritize horizontal regions")
    horizontal_regions = get_ridge_region_horiz(ridge_data_h, img.shape)
    ridges_v = ridges_v & (horizontal_regions == 0)
    timeEnd("prioritize horizontal regions")

    Debug.save_image("ridges", "vertical_ridges", ridges_v)
    Debug.save_image("ridges", "horizontal_ridges", ridges_h)

    if figures == True:
        return (ridges_h, ridges_v)
    else:
        # Aggregate information about maxima of vertical ridges
        sigmas_v = create_sigma_list(min_sigma, sigma_ratio, max_scales_v)
        ridge_data_v = compile_ridge_data(sigmas_v, ridges_v, max_values_v)
        return (ridge_data_h, ridge_data_v)
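
With the default parameters, the scale bookkeeping described in the comments above works out as follows. This sketch assumes create_sigma_list builds the plain geometric progression min_sigma * sigma_ratio**k:

from math import log
import numpy as np

min_sigma, max_sigma, sigma_ratio = 0.7071, 30, 1.9
num_scales = int(log(float(max_sigma) / min_sigma, sigma_ratio)) + 1
print(num_scales)                 # 6

# assumed implementation of the geometric progression of gaussian sigmas;
# one extra entry is needed for the final difference-of-gaussians pair
sigma_list = min_sigma * sigma_ratio ** np.arange(num_scales + 1)
print(np.round(sigma_list, 3))    # approx. [0.707 1.343 2.553 4.85 9.215 17.508 33.266]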
Code Example #15
def img_seg_to_seg_objects(img_seg, num_segments, ridges_h, ridges_v,
                           img_gray):
    '''
  Creates segment objects from an array of labeled pixels.

  Parameters
  ------------
  img_seg : 2-D numpy array of ints
    An array with each pixel labeled according to its segment.

  Returns
  --------
  segments : dict of segment objects
    A dict mapping each segment index to its trace segment object.
  '''

    # segment_coordinates becomes a list of segments, where
    # each segment is a list of all of its pixel coordinates
    segment_coordinates = [[] for i in xrange(num_segments)]

    timeStart("get segment coordinates")
    it = np.nditer(img_seg, flags=['multi_index'])
    while not it.finished:
        if it[0] == 0:
            it.iternext()
            continue

        segment_idx = int(it[0] - 1)
        segment_coordinates[segment_idx].append(np.array(it.multi_index))
        it.iternext()

    # if the number of segments is small relative
    # to the size of the image, the commented algorithm below
    # is faster than that above

    # timeStart("nonzero")
    # segment_coords = np.nonzero(img_seg)
    # timeEnd("nonzero")

    # coords = np.column_stack(segment_coords)

    # timeStart("nzvals")
    # nzvals = img_seg[segment_coords[0], segment_coords[1]]
    # timeEnd("nzvals")

    # timeStart("index coords")
    # segment_coordinates = [ coords[nzvals == k] for k in range(1, num_segments + 1) ]
    # timeEnd("index coords")

    timeEnd("get segment coordinates")

    segments = {}
    timeStart("create segment objects")
    for (segment_idx, pixel_coords) in enumerate(segment_coordinates):
        segment_id = segment_idx + 1
        pixel_coords = np.array(pixel_coords)
        values = get_image_values(img_gray, pixel_coords)
        ridge_line = get_ridge_line(ridges_h, ridges_v, pixel_coords)

        new_segment = segment(coords=pixel_coords,
                              values=np.array(values),
                              id=segment_id,
                              ridge_line=ridge_line)

        if (new_segment.has_center_line):
            center_line_values = get_image_values(
                img_gray, new_segment.center_line.coords)
            new_segment.add_center_line_values(center_line_values)

        segments[segment_idx] = new_segment
    timeEnd("create segment objects")

    return segments
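
The nditer walk above can be hard to picture; this is the same multi_index grouping on a hypothetical 2x3 label image (label 0 is background):

import numpy as np

img_seg = np.array([[0, 1, 1],
                    [2, 0, 1]])
num_segments = 2
segment_coordinates = [[] for _ in range(num_segments)]

it = np.nditer(img_seg, flags=['multi_index'])
while not it.finished:
    if it[0] != 0:                                    # skip background pixels
        segment_coordinates[int(it[0]) - 1].append(it.multi_index)
    it.iternext()

print(segment_coordinates)    # [[(0, 1), (0, 2), (1, 2)], [(1, 0)]]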
Code Example #16
def get_segments(img_gray,
                 img_bin,
                 img_skel,
                 dist,
                 img_intersections,
                 ridges_h,
                 ridges_v,
                 figure=False):
    timeStart("canny edge detection")
    image_canny = canny(img_gray)
    timeEnd("canny edge detection")

    Debug.save_image("segments", "edges", image_canny)

    # Strange: just noticed that none of the code below has ever
    # been used for as long as this file has existed.

    # timeStart("fill canny corners")
    # filled_corners_canny = fill_corners(image_canny)
    # timeEnd("fill canny corners")

    # Debug.save_image("segments", "edges_with_corners", filled_corners_canny)

    # timeStart("subtract canny corners from image")
    # img_bin = img_bin & (~ filled_corners_canny)
    # timeEnd("subtract canny corners from image")

    # Debug.save_image("segments", "binary_image_minus_edges", img_bin)

    timeStart("sobel filter")
    image_sobel = sobel(img_gray)
    timeEnd("sobel filter")

    Debug.save_image("segments", "slopes", image_sobel)

    timeStart("otsu threshold")
    steep_slopes = image_sobel > threshold_otsu(image_sobel)
    timeEnd("otsu threshold")

    Debug.save_image("segments", "steep_slopes", steep_slopes)

    timeStart("binary erosion")
    steep_slopes = binary_erosion(steep_slopes, square(3, dtype=bool))
    timeEnd("binary erosion")

    Debug.save_image("segments", "eroded_steep_slopes", steep_slopes)

    timeStart("subtract regions from skeleton")
    segments_bin = (img_skel & (~img_intersections) & (~image_canny) &
                    (~steep_slopes))
    timeEnd("subtract regions from skeleton")

    Debug.save_image(
        "segments",
        "skeleton_minus_intersections_minus_edges_minus_steep_slopes",
        segments_bin)

    timeStart("reverse medial axis")
    # regrow segment regions from segment skeletons
    rmat = reverse_medial_axis(segments_bin, dist)
    timeEnd("reverse medial axis")

    Debug.save_image("segments", "reverse_medial_axis", rmat)

    # maybe, instead of running medial_axis again, do nearest-neighbor interp
    timeStart("get distance transform")
    # the pixels values of rmat_dist correspond to the distance
    # between each pixel and its nearest foreground/background boundary
    _, rmat_dist = medial_axis(rmat, return_distance=True)
    timeEnd("get distance transform")

    Debug.save_image("segments", "distance_transform", rmat_dist)

    timeStart("label segment skeletons")
    image_segments, num_segments = label(segments_bin, np.ones((3, 3)))
    timeEnd("label segment skeletons")

    Debug.save_image("segments", "labeled_skeleton", image_segments)

    timeStart("watershed")
    image_segments = watershed(-rmat_dist, image_segments, mask=rmat)
    timeEnd("watershed")

    Debug.save_image("segments", "watershed", image_segments)

    # subtract 1 for the background segment
    num_traces = num_segments - 1
    print "found %s segments" % num_traces
    Record.record("num_segments", num_traces)

    segments = img_seg_to_seg_objects(image_segments, num_segments, ridges_h,
                                      ridges_v, img_gray)

    if Debug.active:
        from lib.segment_coloring import gray2prism
        # try to assign different gray values to neighboring segments
        traces_colored = (image_segments + num_traces *
                          (image_segments % 4)) / float(4 * num_traces)
        # store a background mask
        background = traces_colored == 0
        background = np.dstack((background, background, background))
        # convert gray values to colors
        traces_colored = gray2prism(traces_colored)
        # make background pixels black
        traces_colored[background] = 0
        Debug.save_image("segments", "segment_regions", traces_colored)

    if Record.active:
        timeStart("calculate histograms of segment sizes")
        # last bin includes both left and right boundaries, so add 1 to right-most bin
        segment_bins = np.arange(num_segments + 1)
        segment_sizes, _ = np.histogram(image_segments.flatten(),
                                        bins=segment_bins)

        # trim off the "background" segment
        segment_sizes = segment_sizes[1:]

        max_segment_size = np.amax(segment_sizes)
        # there are no size 0 segments
        segment_size_bins = np.arange(1, max_segment_size + 1)
        hist_of_sizes, _ = np.histogram(segment_sizes, bins=segment_size_bins)

        Record.record("segment_region_hist", hist_of_sizes.tolist())
        timeEnd("calculate histograms of segment sizes")

        timeStart("calculate histogram of centerlines")
        centerlines = [
            seg.center_line for seg in segments.itervalues()
            if seg.has_center_line
        ]
        Record.record("num_segments_with_centerlines", len(centerlines))

        centerline_lengths = map(lambda line: len(line.coords), centerlines)

        max_centerline_length = np.amax(centerline_lengths)
        # there are no size 0 centerlines
        centerline_bins = np.arange(1, max_centerline_length + 1)
        hist_of_centerlines, _ = np.histogram(centerline_lengths,
                                              bins=centerline_bins)
        Record.record("segment_centerline_hist", hist_of_centerlines.tolist())
        timeEnd("calculate histogram of centerlines")

    if figure == False:
        return segments
    else:
        return (segments, image_segments)
Code Example #17
def analyze_image(in_file,
                  out_dir,
                  stats_file=False,
                  scale=1,
                  debug_dir=False,
                  fix_seed=False):
    from lib.dir import ensure_dir_exists
    from lib.debug import Debug
    from lib.stats_recorder import Record

    if debug_dir:
        Debug.set_directory(debug_dir)

    if fix_seed:
        Debug.set_seed(1234567890)

    if stats_file:
        Record.activate()

    ensure_dir_exists(out_dir)

    from lib.timer import timeStart, timeEnd

    from lib.load_image import get_grayscale_image, image_as_float
    from lib.roi_detection import get_roi, corners_to_geojson
    from lib.polygon_mask import mask_image
    from lib.meanline_detection import detect_meanlines, meanlines_to_geojson
    from lib.geojson_io import save_features

    paths = {
        "roi": out_dir + "/roi.json",
        "meanlines": out_dir + "/meanlines.json",
        "intersections": out_dir + "/intersections.json",
        "segments": out_dir + "/segments.json",
        "segment_assignments": out_dir + "/segment_assignments.json"
    }

    timeStart("get roi and meanlines")

    timeStart("read image")
    img_gray = image_as_float(get_grayscale_image(in_file))
    timeEnd("read image")

    print "\n--ROI--"
    timeStart("get region of interest")
    corners = get_roi(img_gray, scale=scale)
    timeEnd("get region of interest")

    timeStart("convert roi to geojson")
    corners_as_geojson = corners_to_geojson(corners)
    timeEnd("convert roi to geojson")

    timeStart("saving roi as geojson")
    save_features(corners_as_geojson, paths["roi"])
    timeEnd("saving roi as geojson")

    print "\n--MASK IMAGE--"
    roi_polygon = corners_as_geojson["geometry"]["coordinates"][0]

    timeStart("mask image")
    masked_image = mask_image(img_gray, roi_polygon)
    timeEnd("mask image")

    Debug.save_image("main", "masked_image", masked_image.filled(0))

    print "\n--MEANLINES--"
    meanlines = detect_meanlines(masked_image, corners, scale=scale)

    timeStart("convert meanlines to geojson")
    meanlines_as_geojson = meanlines_to_geojson(meanlines)
    timeEnd("convert meanlines to geojson")

    timeStart("saving meanlines as geojson")
    save_features(meanlines_as_geojson, paths["meanlines"])
    timeEnd("saving meanlines as geojson")

    timeEnd("get roi and meanlines")

    if (stats_file):
        Record.export_as_json(stats_file)
Code Example #18
def threshold(img,
              threshold_function,
              num_blocks,
              block_dims=None,
              smoothing=0.003):
    '''
  Get a smoothly varying threshold from an image by applying the threshold
  function to multiple randomly positioned blocks of the image and using
  a 2-D smoothing spline to set the threshold across the image.

  Parameters
  ------------
  img : 2-D numpy array
    The grayscale image.
  threshold_function : a function
    The threshold function should take a grayscale image as an input and
    output an int or float.
  num_blocks : int
    The number of blocks within the image to which to apply the threshold
    function. A higher number will provide better coverage across the
    image but will take longer to evaluate.
  block_dims : tuple or numpy array, optional
    The dimensions of the rectangular blocks. Dimensions should be less
    than the dimensions of the image. If left unspecified, the blocks will
    be squares with area approximately equal to two times the area of the
    image, divided by num_blocks.
  smoothing : float, optional
    A parameter to adjust the smoothness of the 2-D smoothing spline. A
    higher number increases the smoothness of the output. An input of zero
    is equivalent to interpolation.

  Returns
  ---------
  th_new : 2-D numpy array
    The threshold. The array is the same shape as the original input image.
  '''
    img_dims = img.shape
    if num_blocks >= 16:
        spline_order = 3
    else:
        spline_order = int(np.sqrt(num_blocks) - 1)
    if spline_order == 0:
        return (np.ones_like(img) * threshold_function(img))

    if (type(img) is MaskedArray):
        mask = img.mask
    else:
        mask = np.zeros(img_dims, dtype=bool)

    candidate_coords = np.transpose(np.nonzero(~mask))

    if block_dims is None:
        block_dim = int(round(np.sqrt(2 * img.size / num_blocks)))
        block_dims = (block_dim, block_dim)

    timeStart("select block centers")
    points = best_candidate_sample(candidate_coords, num_blocks)
    timeEnd("select block centers")

    def get_threshold_for_block(center):
        block = get_block(img, center, block_dims)
        if (type(block) is MaskedArray):
            return threshold_function(block.compressed())
        else:
            return threshold_function(block)

    timeStart("calculate thresholds for blocks of size %s" % block_dim)
    thresholds = np.asarray(
        [get_threshold_for_block(center) for center in points])
    timeEnd("calculate thresholds for blocks of size %s" % block_dim)

    timeStart("fit 2-D spline")
    # Maybe consider using lower-order spline for large images
    # (if large indices create problems for cubic functions)
    fit = spline2d(points[:, 0],
                   points[:, 1],
                   thresholds,
                   bbox=[0, img_dims[0], 0, img_dims[1]],
                   kx=spline_order,
                   ky=spline_order,
                   s=num_blocks * smoothing)
    th_new = fit(x=np.arange(img_dims[0]), y=np.arange(img_dims[1]))
    th_new = fix_border(th_new, points)
    timeEnd("fit 2-D spline")
    return th_new
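
spline2d is called with the same argument names as scipy.interpolate.SmoothBivariateSpline (x, y, z, bbox, kx, ky, s), so here is a sketch of the surface-fitting idea under that assumption, with made-up block centers and thresholds:

import numpy as np
from scipy.interpolate import SmoothBivariateSpline

rng = np.random.RandomState(0)
rows, cols = 200, 300
num_blocks = 25
points = np.column_stack([rng.randint(0, rows, num_blocks),
                          rng.randint(0, cols, num_blocks)])    # hypothetical block centers
thresholds = 0.2 + 0.001 * points[:, 0]                         # made-up per-block thresholds

fit = SmoothBivariateSpline(points[:, 0], points[:, 1], thresholds,
                            bbox=[0, rows, 0, cols],
                            kx=3, ky=3, s=num_blocks * 0.003)
threshold_surface = fit(np.arange(rows), np.arange(cols))       # evaluated on the full grid
print(threshold_surface.shape)                                  # (200, 300)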