Example #1
def segment_skeleton(skel_img, mask=None):
    """ Segment a skeleton image into pieces

        Inputs:
        skel_img      = Skeletonized image
        mask          = (Optional) binary mask for debugging. If provided, debug image will be overlaid on the mask.

        Returns:
        segmented_img   = Segmented debugging image
        segment_objects = List of contours

        :param skel_img: numpy.ndarray
        :param mask: numpy.ndarray
        :return segmented_img: numpy.ndarray
        :return segment_objects: list
        """

    # Store debug
    debug = params.debug
    params.debug = None

    # Find branch points
    bp = find_branch_pts(skel_img)
    bp = dilate(bp, 3, 1)

    # Subtract from the skeleton so that leaves are no longer connected
    segments = image_subtract(skel_img, bp)

    # Gather contours of leaves
    segment_objects, _ = find_objects(segments, segments)

    # Color each segment a different color
    rand_color = color_palette(len(segment_objects))

    if mask is None:
        segmented_img = skel_img.copy()
    else:
        segmented_img = mask.copy()

    segmented_img = cv2.cvtColor(segmented_img, cv2.COLOR_GRAY2RGB)
    for i, cnt in enumerate(segment_objects):
        cv2.drawContours(segmented_img, segment_objects, i, rand_color[i], params.line_thickness, lineType=8)

    # Reset debug mode
    params.debug = debug
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(segmented_img, os.path.join(params.debug_outdir, str(params.device) + '_segmented.png'))
    elif params.debug == 'plot':
        plot_image(segmented_img)

    return segmented_img, segment_objects
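
# A minimal usage sketch (an assumption, not part of the original example):
# `plant_mask` is a hypothetical binary plant mask read from disk, and
# pcv.morphology.skeletonize is PlantCV's public skeletonization function.
from plantcv import plantcv as pcv
import cv2

plant_mask = cv2.imread('plant_mask.png', cv2.IMREAD_GRAYSCALE)  # hypothetical mask file
skeleton = pcv.morphology.skeletonize(mask=plant_mask)
segmented_img, segment_objects = segment_skeleton(skeleton, mask=plant_mask)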
Example #2
def main():
    # Get options
    args = options()

    # Set variables
    pcv.params.debug = args.debug  # Replace the hard-coded debug with the debug flag
    pcv.params.debug_outdir = args.outdir  # set output directory

    ### Main pipeline ###

    # Read image (readimage mode defaults to native but if image is RGBA then specify mode='rgb')
    img, path, filename = pcv.readimage(args.image, mode='rgb')

    # Read reference image for colour correction (currently unused)
    #ref_img, ref_path, ref_filename = pcv.readimage(
    #    "/home/leonard/Dropbox/2020-01_LAC_phenotyping/images/top/renamed/20200128_2.jpg",
    #    mode="rgb")

    # Find colour cards
    #df, start, space = pcv.transform.find_color_card(rgb_img=ref_img)
    #ref_mask = pcv.transform.create_color_card_mask(rgb_img=ref_img, radius=10, start_coord=start, spacing=space, ncols=4, nrows=6)

    df, start, space = pcv.transform.find_color_card(rgb_img=img)
    img_mask = pcv.transform.create_color_card_mask(rgb_img=img,
                                                    radius=10,
                                                    start_coord=start,
                                                    spacing=space,
                                                    ncols=4,
                                                    nrows=6)

    output_directory = "."

    # Correct colour (currently unused)
    #target_matrix, source_matrix, transformation_matrix, corrected_img = pcv.transform.correct_color(ref_img, ref_mask, img, img_mask, output_directory)

    # Check that the colour correction worked (source~target should be strictly linear)
    #pcv.transform.quick_color_check(source_matrix = source_matrix, target_matrix = target_matrix, num_chips = 24)

    # Write the spacing of the colour card to file as size marker
    with open(os.path.join(path, 'output/size_marker_trays.csv'), 'a') as f:
        writer = csv.writer(f)
        writer.writerow([filename, space[0]])

    ### Crop tray ###

    # Define a bounding rectangle around the colour card
    x_cc, y_cc, w_cc, h_cc = cv2.boundingRect(img_mask)
    x_cc = int(round(x_cc - 0.3 * w_cc))
    y_cc = int(round(y_cc - 0.3 * h_cc))
    h_cc = int(round(h_cc * 1.6))
    w_cc = int(round(w_cc * 1.6))

    # Crop out colour card
    start_point = (x_cc, y_cc)
    end_point = (x_cc + w_cc, y_cc + h_cc)
    colour = (0, 0, 0)
    thickness = -1
    card_crop_img = cv2.rectangle(img, start_point, end_point, colour,
                                  thickness)

    # Convert RGB to HSV and extract the value channel
    v = pcv.rgb2gray_hsv(card_crop_img, "v")

    # Threshold the value image
    v_thresh = pcv.threshold.binary(
        v, 100, 255, "light"
    )  # start threshold at 150 with bright corner-markers, 100 without

    # Fill out bright imperfections (siliques and other dirt on the background)
    v_thresh = pcv.fill(
        v_thresh, 100)  # fill at 500 with bright corner-markers, 100 without

    # Create bounding rectangle around the tray
    x, y, w, h = cv2.boundingRect(v_thresh)

    # Crop image to tray
    #crop_img = card_crop_img[y:y+h, x:x+int(w - (w * 0.03))] # crop extra 3% from right because of tray labels
    crop_img = card_crop_img[y:y + h, x:x + w]  # crop symmetrically

    # Save cropped image for quality control
    pcv.print_image(crop_img,
                    filename=path + "/output/" + "cropped" + filename + ".png")

    ### Threshold plants ###

    # Threshold the green-magenta (a), blue-yellow (b), and hue channels
    a_thresh, _ = pcv.threshold.custom_range(img=crop_img,
                                             lower_thresh=[0, 0, 0],
                                             upper_thresh=[255, 108, 255],
                                             channel='LAB')
    b_thresh, _ = pcv.threshold.custom_range(img=crop_img,
                                             lower_thresh=[0, 0, 135],
                                             upper_thresh=[255, 255, 255],
                                             channel='LAB')
    h_thresh, _ = pcv.threshold.custom_range(img=crop_img,
                                             lower_thresh=[35, 0, 0],
                                             upper_thresh=[70, 255, 255],
                                             channel='HSV')

    # Join the thresholds (AND)
    ab = pcv.logical_and(b_thresh, a_thresh)
    abh = pcv.logical_and(ab, h_thresh)

    # Fill small objects depending on expected plant size based on DPG (make sure to take the correct file suffix jpg/JPG/jpeg...)
    match = re.search(r"(\d+).(\d)\.jpg$", filename)

    if int(match.group(1)) < 10:
        abh_clean = pcv.fill(abh, 50)
        print("50")
    elif int(match.group(1)) < 15:
        abh_clean = pcv.fill(abh, 200)
        print("200")
    else:
        abh_clean = pcv.fill(abh, 500)
        print("500")

    # Dilate to close broken borders
    abh_dilated = pcv.dilate(abh_clean, 3, 1)

    # Close holes
    # abh_fill = pcv.fill_holes(abh_dilated) # silly -- removed
    abh_fill = abh_dilated

    # Apply mask (for VIS images, mask_color=white)
    masked = pcv.apply_mask(crop_img, abh_fill, "white")

    # Save masked image for quality control
    pcv.print_image(masked,
                    filename=path + "/output/" + "masked" + filename + ".png")

    ### Filter and group contours ###

    # Identify objects
    id_objects, obj_hierarchy = pcv.find_objects(crop_img, abh_fill)

    # Create bounding box with margins to avoid border artifacts
    roi_y = 0 + crop_img.shape[0] * 0.05
    roi_x = 0 + crop_img.shape[0] * 0.05
    roi_h = crop_img.shape[0] - (crop_img.shape[0] * 0.1)
    roi_w = crop_img.shape[1] - (crop_img.shape[0] * 0.1)
    roi_contour, roi_hierarchy = pcv.roi.rectangle(crop_img, roi_y, roi_x,
                                                   roi_h, roi_w)

    # Keep all objects in the bounding box
    roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img=crop_img,
        roi_type='partial',
        roi_contour=roi_contour,
        roi_hierarchy=roi_hierarchy,
        object_contour=id_objects,
        obj_hierarchy=obj_hierarchy)

    # Cluster the objects by plant
    clusters, contours, hierarchies = pcv.cluster_contours(
        crop_img, roi_objects, roi_obj_hierarchy, 3, 5)

    # Split image into single plants
    out = args.outdir
    #output_path, imgs, masks = pcv.cluster_contour_splitimg(crop_img,
    #                                                        clusters,
    #                                                        contours,
    #                                                        hierarchies,
    #                                                        out,
    #                                                        file = filename)

    ### Analysis ###

    # Approximate the position of the top left plant as grid start
    coord_y = int(
        round(((crop_img.shape[0] / 3) * 0.5) + (crop_img.shape[0] * 0.025)))
    coord_x = int(
        round(((crop_img.shape[1] / 5) * 0.5) + (crop_img.shape[1] * 0.025)))

    # Set the ROI spacing relative to image dimensions
    spc_y = int((round(crop_img.shape[0] - (crop_img.shape[0] * 0.05)) / 3))
    spc_x = int((round(crop_img.shape[1] - (crop_img.shape[1] * 0.05)) / 5))

    # Set the ROI radius relative to image width
    if int(match.group(1)) < 16:
        r = int(round(crop_img.shape[1] / 12.5))
    else:
        r = int(round(crop_img.shape[1] / 20))

    # Make a grid of ROIs at the expected positions of plants
    # This allows for gaps due to dead/not germinated plants, without messing up the plant numbering
    imgs, masks = pcv.roi.multi(img=crop_img,
                                nrows=3,
                                ncols=5,
                                coord=(coord_x, coord_y),
                                radius=r,
                                spacing=(spc_x, spc_y))

    # Loop through the ROIs in the grid
    for i in range(0, len(imgs)):
        # Find objects within the ROI
        filtered_contours, filtered_hierarchy, filtered_mask, filtered_area = pcv.roi_objects(
            img=crop_img,
            roi_type="partial",
            roi_contour=imgs[i],
            roi_hierarchy=masks[i],
            object_contour=id_objects,
            obj_hierarchy=obj_hierarchy)
        # Continue only if not empty
        if len(filtered_contours) > 0:
            # Combine objects within each ROI
            plant_contour, plant_mask = pcv.object_composition(
                img=crop_img,
                contours=filtered_contours,
                hierarchy=filtered_hierarchy)

            # Analyse the shape of each plant
            analysis_images = pcv.analyze_object(img=crop_img,
                                                 obj=plant_contour,
                                                 mask=plant_mask)

            pcv.print_image(analysis_images,
                            filename=path + "/output/" + filename + "_" +
                            str(i) + "_analysed.png")

            # Determine color properties
            color_images = pcv.analyze_color(crop_img, plant_mask, "hsv")

            # Watershed plant area to count leaves (computationally intensive, use when needed)
            #watershed_images = pcv.watershed_segmentation(crop_img, plant_mask, 15)

            # Print out a .json file with the analysis data for the plant
            pcv.outputs.save_results(filename=path + "/" + filename + "_" +
                                     str(i) + '.json')

            # Clear the measurements stored globally into the Outputs class
            pcv.outputs.clear()
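
# If run as a script, a standard entry-point guard (an assumption; the
# original snippet ends with main()'s body) would invoke the workflow:
if __name__ == '__main__':
    main()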
Example #3
def canny_edge_detect(img, mask=None, sigma=1.0, low_thresh=None, high_thresh=None, thickness=1,
                      mask_color=None, use_quantiles=False):
    """Edge filter an image using the Canny algorithm.

    Inputs:
    img           = RGB or grayscale image data
    mask          = Mask to limit the application of Canny to a certain area, takes a binary img. (OPTIONAL)
    sigma         = Standard deviation of the Gaussian filter
    low_thresh    = Lower bound for hysteresis thresholding (linking edges). If None (default) then low_thresh is set to
                    10% of the image's max (OPTIONAL)
    high_thresh   = Upper bound for hysteresis thresholding (linking edges). If None (default) then high_thresh is set
                    to 20% of the image's max (OPTIONAL)
    thickness     = How thick the edges should appear, default thickness=1 (OPTIONAL)
    mask_color    = Color of the mask provided; either None (default), 'white', or 'black'
    use_quantiles = Default is False, if True then treat low_thresh and high_thresh as quantiles of the edge magnitude
                    image, rather than the absolute edge magnitude values. If True then thresholds range is [0,1].
                    (OPTIONAL)

    Returns:
    bin_img      = Thresholded, binary image

    :param img: numpy.ndarray
    :param mask: numpy.ndarray
    :param sigma: float
    :param low_thresh: float
    :param high_thresh: float
    :param thickness: int
    :param mask_color: str
    :param use_quantiles: bool
    :return bin_img: numpy.ndarray

    Reference: Canny, J., A Computational Approach To Edge Detection, IEEE Trans.
    Pattern Analysis and Machine Intelligence, 8:679-714, 1986
    Originally part of CellProfiler, code licensed under both GPL and BSD licenses.
    Website: http://www.cellprofiler.org
    Copyright (c) 2003-2009 Massachusetts Institute of Technology
    Copyright (c) 2009-2011 Broad Institute
    All rights reserved.
    Original author: Lee Kamentsky
    """

    params.device += 1

    # Check if the image is grayscale; if color img then make it grayscale
    dimensions = np.shape(img)
    if len(dimensions) == 3:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # skimage needs a bool mask
    if mask is not None:
        if mask_color.upper() == 'WHITE':
            mask = np.array(mask, bool)
        elif mask_color.upper() == 'BLACK':
            mask = cv2.bitwise_not(mask)
            mask = np.array(mask, bool)
        else:
            fatal_error('Mask was provided but mask_color ' + str(mask_color) + ' is not "white" or "black"!')

    # Run Canny edge detection on the grayscale image
    bool_img = feature.canny(img, sigma, low_thresh, high_thresh, mask, use_quantiles)

    # Skimage returns a bool image so convert it
    bin_img = np.copy(bool_img.astype(np.uint8) * 255)

    # Adjust line thickness
    if thickness != 1:
        debug = params.debug
        params.debug = None
        bin_img = dilate(bin_img, thickness, 1)
        params.debug = debug

    # Print or plot the binary image
    if params.debug == 'print':
        print_image(bin_img, os.path.join(params.debug_outdir, (str(params.device) + '_canny_edge_detect.png')))
    elif params.debug == 'plot':
        plot_image(bin_img, cmap='gray')

    return bin_img
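
# A brief usage sketch (the input file and parameter values are assumptions;
# with low_thresh/high_thresh left as None they default to 10%/20% of the
# image max, per the docstring above):
gray = cv2.imread('plant.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input image
edges = canny_edge_detect(gray, sigma=2.0, thickness=2)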
def check_cycles(skel_img):
    """ Check for cycles in a skeleton image
    Inputs:
    skel_img     = Skeletonized image

    Returns:
    cycle_img    = Image with cycles identified

    :param skel_img: numpy.ndarray
    :return cycle_img: numpy.ndarray
    """

    # Create the mask needed for cv2.floodFill, must be larger than the image
    h, w = skel_img.shape[:2]
    mask = np.zeros((h + 2, w + 2), np.uint8)

    # Copy the skeleton since cv2.floodFill will draw on it
    skel_copy = skel_img.copy()
    cv2.floodFill(skel_copy, mask=mask, seedPoint=(0, 0), newVal=255)

    # Invert so the holes are white and background black
    just_cycles = cv2.bitwise_not(skel_copy)

    # Store debug
    debug = params.debug
    params.debug = None

    # Erode slightly so that cv2.findContours doesn't think diagonal pixels are separate contours
    just_cycles = erode(just_cycles, 2, 1)

    # Use pcv.find_objects to turn plots of holes into countable contours
    cycle_objects, cycle_hierarchies = find_objects(just_cycles, just_cycles)

    # Count the number of holes
    num_cycles = len(cycle_objects)

    # Make debugging image
    cycle_img = skel_img.copy()
    cycle_img = dilate(cycle_img, params.line_thickness, 1)
    cycle_img = cv2.cvtColor(cycle_img, cv2.COLOR_GRAY2RGB)
    if num_cycles > 0:
        # Get a new color scale
        rand_color = color_palette(num=num_cycles, saved=False)
        for i, cnt in enumerate(cycle_objects):
            cv2.drawContours(cycle_img,
                             cycle_objects,
                             i,
                             rand_color[i],
                             params.line_thickness,
                             lineType=8,
                             hierarchy=cycle_hierarchies)

    # Store Cycle Data
    outputs.add_observation(variable='num_cycles',
                            trait='number of cycles',
                            method='plantcv.plantcv.morphology.check_cycles',
                            scale='none',
                            datatype=int,
                            value=int(num_cycles),
                            label='none')

    # Reset debug mode
    params.debug = debug
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(
            cycle_img,
            os.path.join(params.debug_outdir,
                         str(params.device) + '_cycles.png'))
    elif params.debug == 'plot':
        plot_image(cycle_img)

    return cycle_img
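
# The cycle count lands in the global Outputs class; a retrieval sketch
# (`skeleton` is a hypothetical skeletonized image; the key name follows the
# add_observation call above):
cycle_img = check_cycles(skeleton)
num_cycles = outputs.observations['num_cycles']['value']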
def segment_insertion_angle(skel_img, segmented_img, leaf_objects,
                            stem_objects, size):
    """ Find leaf insertion angles in degrees of skeleton segments. Fit a linear regression line to the stem.
        Use `size` pixels of the portion of each leaf next to the stem to fit a second linear regression line,
        and calculate the angle between the two lines per leaf object.

        Inputs:
        skel_img         = Skeletonized image
        segmented_img    = Segmented image to plot slope lines and intersection angles on
        leaf_objects     = List of leaf segments
        stem_objects     = List of stem segments
        size             = Size of inner leaf used to calculate slope lines

        Returns:
        labeled_img      = Debugging image with angles labeled

        :param skel_img: numpy.ndarray
        :param segmented_img: numpy.ndarray
        :param leaf_objects: list
        :param stem_objects: list
        :param size: int
        :return labeled_img: numpy.ndarray
        """

    # Store debug
    debug = params.debug
    params.debug = None

    rows, cols = segmented_img.shape[:2]
    labeled_img = segmented_img.copy()
    segment_slopes = []
    insertion_segments = []
    insertion_hierarchies = []
    intersection_angles = []
    label_coord_x = []
    label_coord_y = []
    valid_segment = []

    # Create a list of tip tuples to use for sorting
    tips = find_tips(skel_img)
    tip_objects, tip_hierarchies = find_objects(tips, tips)
    tip_tuples = []
    for i, cnt in enumerate(tip_objects):
        tip_tuples.append((cnt[0][0][0], cnt[0][0][1]))

    for i, cnt in enumerate(leaf_objects):
        # Draw leaf objects
        find_segment_tangents = np.zeros(segmented_img.shape[:2], np.uint8)
        cv2.drawContours(find_segment_tangents,
                         leaf_objects,
                         i,
                         255,
                         1,
                         lineType=8)

        # Prune back ends of leaves
        pruned_segment = _iterative_prune(find_segment_tangents, size)

        # Segment ends are the portions pruned off
        segment_ends = find_segment_tangents - pruned_segment
        segment_end_obj, segment_end_hierarchy = find_objects(
            segment_ends, segment_ends)
        is_insertion_segment = []

        if len(segment_end_obj) != 2:
            print("Size too large, contour with ID#", i,
                  "got pruned away completely.")
        else:
            # The contour can have insertion angle calculated
            valid_segment.append(cnt)

            # Determine if a segment is leaf end or leaf insertion segment
            for j, obj in enumerate(segment_end_obj):

                segment_plot = np.zeros(segmented_img.shape[:2], np.uint8)
                cv2.drawContours(segment_plot, obj, -1, 255, 1, lineType=8)
                overlap_img = logical_and(segment_plot, tips)

                # If none of the tips are within a segment_end then it's an insertion segment
                if np.sum(overlap_img) == 0:
                    insertion_segments.append(segment_end_obj[j])
                    insertion_hierarchies.append(segment_end_hierarchy[0][j])

            # Store coordinates for labels
            label_coord_x.append(leaf_objects[i][0][0][0])
            label_coord_y.append(leaf_objects[i][0][0][1])

    rand_color = color_palette(len(valid_segment))

    for i, cnt in enumerate(valid_segment):
        cv2.drawContours(labeled_img,
                         valid_segment,
                         i,
                         rand_color[i],
                         params.line_thickness,
                         lineType=8)

    # Plot stem segments
    stem_img = np.zeros(segmented_img.shape[:2], np.uint8)
    cv2.drawContours(stem_img, stem_objects, -1, 255, 2, lineType=8)
    branch_pts = find_branch_pts(skel_img)
    stem_img = stem_img + branch_pts
    stem_img = closing(stem_img)
    combined_stem, combined_stem_hier = find_objects(stem_img, stem_img)

    # Make sure stem objects are a single contour
    loop_count = 0
    while len(combined_stem) > 1 and loop_count < 50:
        loop_count += 1
        stem_img = dilate(stem_img, 2, 1)
        stem_img = closing(stem_img)
        combined_stem, combined_stem_hier = find_objects(stem_img, stem_img)

    if loop_count == 50:
        fatal_error('Unable to combine stem objects.')

    # Find slope of the stem
    [vx, vy, x, y] = cv2.fitLine(combined_stem[0], cv2.DIST_L2, 0, 0.01, 0.01)
    stem_slope = -vy / vx
    stem_slope = stem_slope[0]
    lefty = int((-x * vy / vx) + y)
    righty = int(((cols - x) * vy / vx) + y)
    cv2.line(labeled_img, (cols - 1, righty), (0, lefty), (150, 150, 150), 3)

    for t, segment in enumerate(insertion_segments):
        # Find line fit to each segment
        [vx, vy, x, y] = cv2.fitLine(segment, cv2.DIST_L2, 0, 0.01, 0.01)
        slope = -vy / vx
        left_list = int((-x * vy / vx) + y)
        right_list = int(((cols - x) * vy / vx) + y)
        segment_slopes.append(slope[0])

        # Draw slope lines if possible
        if slope > 1000000 or slope < -1000000:
            print("Slope of contour with ID#", t, "is", slope,
                  "and cannot be plotted.")
        else:
            cv2.line(labeled_img, (cols - 1, right_list), (0, left_list),
                     rand_color[t], 1)

        # Store intersection angles between insertion segment and stem line
        intersection_angle = _slope_to_intesect_angle(slope[0], stem_slope)
        # Function measures clockwise but we want the acute angle between stem and leaf insertion
        if intersection_angle > 90:
            intersection_angle = 180 - intersection_angle
        intersection_angles.append(intersection_angle)

    segment_ids = []

    for i, cnt in enumerate(insertion_segments):
        # Label slope lines
        w = label_coord_x[i]
        h = label_coord_y[i]
        text = "{:.2f}".format(intersection_angles[i])
        cv2.putText(img=labeled_img,
                    text=text,
                    org=(w, h),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=params.text_size,
                    color=(150, 150, 150),
                    thickness=params.text_thickness)
        segment_ids.append(i)

    outputs.add_observation(
        variable='segment_insertion_angle',
        trait='segment insertion angle',
        method='plantcv.plantcv.morphology.segment_insertion_angle',
        scale='degrees',
        datatype=list,
        value=intersection_angles,
        label=segment_ids)

    # Reset debug mode
    params.debug = debug
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(
            labeled_img,
            os.path.join(params.debug_outdir,
                         str(params.device) + '_segment_insertion_angles.png'))
    elif params.debug == 'plot':
        plot_image(labeled_img)

    return labeled_img
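
# A usage sketch; all inputs here are assumptions. In PlantCV workflows the
# leaf/stem segment lists typically come from pcv.morphology.segment_sort:
labeled = segment_insertion_angle(skeleton, segmented_img,
                                  leaf_objects, stem_objects, size=20)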
# (The beginning of this example was truncated; the binary-threshold call is
# reconstructed from its remaining arguments. `gray_img` is a placeholder for
# the grayscale input whose name was lost.)
img_binary = pcv.threshold.binary(gray_img=gray_img,
                                  threshold=120,
                                  max_value=255,
                                  object_type='dark')

# Inputs:
#    bin_img  = binary image. img will be returned after filling
#    size     = minimum object area size in pixels (integer)
fill_image = pcv.fill(bin_img=img_binary, size=10)

# Dilate so that you don't lose leaves (just in case)

# Inputs:
#    gray_img = input image
#    ksize    = integer, kernel size
#    i        = iterations, i.e. number of consecutive filtering passes
dilated = pcv.dilate(gray_img=fill_image, ksize=2, i=1)

# Inputs:
#    img  = image that the objects will be overlayed
#    mask = what is used for object detection
id_objects, obj_hierarchy = pcv.find_objects(img=img1, mask=dilated)

# Define region of interest (ROI)

# Inputs:
#    img   = An RGB or grayscale image to plot the ROI on.
#    x     = The x-coordinate of the upper left corner of the rectangle.
#    y     = The y-coordinate of the upper left corner of the rectangle.
#    h     = The height of the rectangle.
#    w     = The width of the rectangle.
#   roi_contour, roi_hierarchy = pcv.roi.rectangle(img=img1, x=5, y=90, h=200, w=390)
Example #7
def main():
    # Import the image file
    path = r'Image test\capture (2).jpg'
    imgraw, path, img_filename = pcv.readimage(path, mode='native')

    nilaiTerang = np.average(imgraw)

    if nilaiTerang < 50:
        pcv.fatal_error("Night Image")

    rotateimg = pcv.rotate(imgraw, -3, True)
    imgraw = rotateimg

    bersih1 = pcv.white_balance(imgraw)

    hitamputih = pcv.rgb2gray_lab(bersih1, channel='a')

    img_binary = pcv.threshold.binary(hitamputih,
                                      threshold=110,
                                      max_value=255,
                                      object_type='dark')

    fill_image = pcv.fill(bin_img=img_binary, size=10)
    dilated = pcv.dilate(gray_img=fill_image, ksize=6, i=1)
    id_objects, obj_hierarchy = pcv.find_objects(img=imgraw, mask=dilated)
    roi_contour, roi_hierarchy = pcv.roi.rectangle(img=imgraw,
                                                   x=280,
                                                   y=96,
                                                   h=1104,
                                                   w=1246)
    print(type(roi_contour))
    print(type(roi_hierarchy))
    print(roi_hierarchy)
    print(roi_contour)
    roicontour = cv2.drawContours(imgraw, roi_contour, -1, (0, 0, 255), 3)
    #cv2.rectangle(imgraw, roi_contour[0], roi_contour[3])

    roi_obj, hier, kept_mask, obj_area = pcv.roi_objects(
        img=imgraw,
        roi_contour=roi_contour,
        roi_hierarchy=roi_hierarchy,
        object_contour=id_objects,
        obj_hierarchy=obj_hierarchy,
        roi_type='partial')
    cnt_i, contours, hierarchies = pcv.cluster_contours(img=imgraw,
                                                        roi_objects=roi_obj,
                                                        roi_obj_hierarchy=hier,
                                                        nrow=4,
                                                        ncol=3)
    clustered_image = pcv.visualize.clustered_contours(
        img=imgraw,
        grouped_contour_indices=cnt_i,
        roi_objects=roi_obj,
        roi_obj_hierarchy=hier)
    obj, mask = pcv.object_composition(imgraw, roi_obj, hier)
    hasil = pcv.analyze_object(imgraw, obj, mask)
    pcv.print_image(imgraw, r'Image test\Result\wel.jpg')
    pcv.print_image(clustered_image, r'Image test\Result\clustred.jpg')
    pcv.print_image(hitamputih, r'Image test\Result\Bersihe.jpg')
    pcv.print_image(dilated, r'Image test\Result\dilated.jpg')
    pcv.print_image(hasil, r'Image test\Result\hasil.jpg')
    plantHasil = pcv.outputs.observations['area']
    data1 = pcv.outputs.observations['area']['value']
    print(data1)
    print(plantHasil)
Example #8
def find_branch_pts(skel_img, mask=None):
    """
    The branching algorithm was inspired by Jean-Patrick Pommier: https://gist.github.com/jeanpat/5712699
    Inputs:
    skel_img    = Skeletonized image
    mask        = (Optional) binary mask for debugging. If provided, debug image will be overlaid on the mask.

    Returns:
    branch_pts_img = Image with just branch points, rest 0

    :param skel_img: numpy.ndarray
    :param mask: numpy.ndarray
    :return branch_pts_img: numpy.ndarray
    """

    ### In a kernel: 1 values line up with 255s, -1s line up with 0s, and 0s correspond to don't care ###
    # T like branch points
    t1 = np.array([[-1, 1, -1], [1, 1, 1], [-1, -1, -1]])
    t2 = np.array([[1, -1, 1], [-1, 1, -1], [1, -1, -1]])
    t3 = np.rot90(t1)
    t4 = np.rot90(t2)
    t5 = np.rot90(t3)
    t6 = np.rot90(t4)
    t7 = np.rot90(t5)
    t8 = np.rot90(t6)

    # Y like branch points
    y1 = np.array([[1, -1, 1], [0, 1, 0], [0, 1, 0]])
    y2 = np.array([[-1, 1, -1], [1, 1, 0], [-1, 0, 1]])
    y3 = np.rot90(y1)
    y4 = np.rot90(y2)
    y5 = np.rot90(y3)
    y6 = np.rot90(y4)
    y7 = np.rot90(y5)
    y8 = np.rot90(y6)
    kernels = [t1, t2, t3, t4, t5, t6, t7, t8, y1, y2, y3, y4, y5, y6, y7, y8]

    branch_pts_img = np.zeros(skel_img.shape[:2], dtype=int)

    # Store branch points
    for kernel in kernels:
        branch_pts_img = np.logical_or(
            cv2.morphologyEx(skel_img,
                             op=cv2.MORPH_HITMISS,
                             kernel=kernel,
                             borderType=cv2.BORDER_CONSTANT,
                             borderValue=0), branch_pts_img)

    # Switch type to uint8 rather than bool
    branch_pts_img = branch_pts_img.astype(np.uint8) * 255

    # Store debug
    debug = params.debug
    params.debug = None

    # Make debugging image
    if mask is None:
        dilated_skel = dilate(skel_img, params.line_thickness, 1)
        branch_plot = cv2.cvtColor(dilated_skel, cv2.COLOR_GRAY2RGB)
    else:
        # Make debugging image on mask
        mask_copy = mask.copy()
        branch_plot = cv2.cvtColor(mask_copy, cv2.COLOR_GRAY2RGB)
        skel_obj, skel_hier = find_objects(skel_img, skel_img)
        cv2.drawContours(branch_plot,
                         skel_obj,
                         -1, (150, 150, 150),
                         params.line_thickness,
                         lineType=8,
                         hierarchy=skel_hier)

    branch_objects, _ = find_objects(branch_pts_img, branch_pts_img)
    for i in branch_objects:
        x, y = i.ravel()[:2]
        cv2.circle(branch_plot, (x, y), params.line_thickness, (255, 0, 255),
                   -1)

    # Reset debug mode
    params.debug = debug
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(
            branch_plot,
            os.path.join(params.debug_outdir,
                         str(params.device) + '_branch_pts.png'))
    elif params.debug == 'plot':
        plot_image(branch_plot)

    return branch_pts_img
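
# A usage sketch (assumptions: `skeleton` is a skeletonized image and
# `plant_mask` the binary mask it came from):
branch_pts = find_branch_pts(skeleton, mask=plant_mask)  # 255 at branch points, 0 elsewhere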
Example #9
def join_segments_helper(L, Lines, combinedLines, segments2Join, max_scale,
                         pixelList):
    mask = Lines
    length = len(segments2Join)
    LabelNum = False
    minX = np.inf
    minY = np.inf
    maxX = 0
    maxY = 0
    for i in range(len(segments2Join)):
        X = pixelList[segments2Join[i]].coords
        minX = min(np.amin(X[:, 0]), minX)
        minY = min(np.amin(X[:, 1]), minY)
        maxX = max(np.amax(X[:, 0]), maxX)
        maxY = max(np.amax(X[:, 1]), maxY)
    # tested
    CroppedLines = Lines[minY:maxY + 1, minX:maxX + 1]

    for i in range(length - 1):
        bw1 = CroppedLines == pixelList[segments2Join[i]].label
        bw2 = CroppedLines == pixelList[segments2Join[i + 1]].label
        # TODO talk to baret, no quasi-euclidean transformation available https://www.mathworks.com/help/images/ref/bwdist.html
        D1 = ndimage.distance_transform_edt(bw1 == 0)
        D2 = ndimage.distance_transform_edt(bw2 == 0)
        D = np.round((D1 + D2) * 32) / 32
        paths = matlabic_minima(D)
        paths = skeletonize(paths)

        tempMask = dilate(paths, ksize=10, i=1)

        mask = np.full(L.shape, False)

        mask[minY:maxY + 1, minX:maxX + 1] = tempMask
        AdjacentIndices = np.unique(combinedLines[mask])

        rows_to_remove = np.argwhere(AdjacentIndices == 0)
        mask_for_removal = np.ones(AdjacentIndices.shape)
        mask_for_removal[rows_to_remove] = False
        AdjacentIndices = AdjacentIndices[mask_for_removal.astype(bool)]

        if (len(AdjacentIndices) > 1) or (not np.any(np.logical_and(mask, L))):
            LabelNum = False
        else:
            mask = np.logical_and(mask, np.logical_not(combinedLines))
            bw1 = Lines == pixelList[segments2Join[i]].label
            bw2 = Lines == pixelList[segments2Join[i + 1]].label
            # Attempt grayscale reconstruction; retry once on failure
            try:
                t = reconstruction(np.logical_or(bw1, bw2),
                                   np.logical_or(
                                       combinedLines == AdjacentIndices, mask),
                                   method='dilation')
            except Exception:
                t = reconstruction(np.logical_or(bw1, bw2),
                                   np.logical_or(
                                       combinedLines == AdjacentIndices, mask),
                                   method='dilation')
            fitting = approximate_using_piecewise_linear_pca(
                t.astype(int), 1, [], 0)
            if fitting[0] < 0.8 * max_scale:
                LabelNum = AdjacentIndices
            else:
                LabelNum = False
    return mask, LabelNum
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)

    pcv.params.debug = args.debug  #set debug mode

    # STEP 1: white balance (for comparison across images)
    # inputs:
    #   img = image object, RGB colorspace
    #   roi = region for white reference (position of ColorChecker Passport)
    img1 = pcv.white_balance(img, roi=(910, 3555, 30, 30))

    # STEP 2: Mask out color card and stake
    # inputs:
    #   img = grayscale image ('a' channel)
    #   p1 = (x,y) coordinates for top left corner of rectangle
    #   p2 = (x,y) coordinates for bottom right corner of rectangle
    #   color = color to make the mask (white here to match background)
    masked, binary, contours, hierarchy = pcv.rectangle_mask(img1, (0, 2000),
                                                             (1300, 4000),
                                                             color="white")
    masked2, binary, contours, hierarchy = pcv.rectangle_mask(masked,
                                                              (0, 3600),
                                                              (4000, 4000),
                                                              color="white")

    # STEP 3: Convert from RGB colorspace to LAB colorspace
    # Keep the lightness channel (note: the code extracts channel 'l' here,
    # even though the variable is named a)
    # inputs:
    #   img = image object, RGB colorspace
    #   channel = color subchannel ('l' = lightness, 'a' = green-magenta, 'b' = blue-yellow)
    a = pcv.rgb2gray_lab(masked2, 'l')

    # STEP 4: Set a binary threshold on the grayscale channel image
    # inputs:
    #   img = img object, grayscale
    #   threshold = threshold value (0-255) - need to adjust this
    #   max_value = value to apply above threshold (255 = white)
    #   object_type = light or dark
    img_binary = pcv.threshold.binary(a, 118, 255, object_type="dark")

    # STEP 5: Apply median blur to binary image (reduces noise)
    # inputs:
    #   img = img object, binary
    #   ksize = kernel size, either an integer or a tuple, e.g. (5,5)
    blur_image = pcv.median_blur(img_binary, 10)

    # STEP 6: Fill small objects (speckles)
    # inputs:
    #   img = img object, binary
    #   size = minimum object area size in pixels
    fill_image1 = pcv.fill(blur_image, 150000)

    # STEP 7: Invert image to fill gaps
    # inputs:
    #   img = img object, binary
    inv_image = pcv.invert(fill_image1)
    # rerun fill on inverted image
    inv_fill = pcv.fill(inv_image, 25000)
    # invert image again
    fill_image = pcv.invert(inv_fill)

    # STEP 8: Dilate to avoid losing detail
    # inputs:
    #   img = img object, binary
    #   ksize = kernel size
    #   i = iterations (number of consecutive filtering passes)
    dilated = pcv.dilate(fill_image, 2, 1)

    # STEP 9: Find objects (contours: black-white boundaries)
    # inputs:
    #   img = img object, RGB colorspace
    #   mask = binary image used for object detection
    id_objects, obj_hierarchy = pcv.find_objects(img1, dilated)

    # STEP 10: Define region of interest (ROI)
    # inputs:
    #   img = img object to overlay ROI
    #   x = x-coordinate of upper left corner for rectangle
    #   y = y-coordinate of upper left corner for rectangle
    #   h = height of rectangle
    #   w = width of rectangle
    roi_contour, roi_hierarchy = pcv.roi.rectangle(img=img1,
                                                   x=20,
                                                   y=10,
                                                   h=3000,
                                                   w=3000)

    # STEP 11: Keep objects that overlap with the ROI
    # inputs:
    #   img = img where selected objects will be displayed
    #   roi_type = options are 'cutto', 'partial' (objects are partially inside roi), or 'largest' (keep only the largest object)
    #   roi_contour = contour of roi, output from 'view and adjust roi' function (STEP 10)
    #   roi_hierarchy = hierarchy of roi, output from 'view and adjust roi' function (STEP 10)
    #   object_contour = contours of objects, output from 'identifying objects' function (STEP 9)
    #   obj_hierarchy = hierarchy of objects, output from 'identifying objects' function (STEP 9)
    roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img1, 'partial', roi_contour, roi_hierarchy, id_objects, obj_hierarchy)

    # STEP 12: Cluster multiple contours in an image based on user input of rows/columns
    # inputs:
    #   img = img object (RGB colorspace)
    #   roi_objects = object contours in an image that will be clustered (output from STEP 11)
    #   roi_obj_hierarchy = object hierarchy (also from STEP 11)
    #   nrow = number of rows for clustering (desired rows in image even if no leaf present in all)
    #   ncol = number of columns to cluster (desired columns in image even if no leaf present in all)
    clusters_i, contours, hierarchies = pcv.cluster_contours(
        img1, roi_objects, roi_obj_hierarchy, 3, 3)

    # STEP 13: select and split clustered contours to export into multiple images
    # also checks if number of inputted filenames matches number of clustered contours
    # if no filenames, objects are numbered in order
    # inputs:
    #   img = masked RGB image
    #   grouped_contour_indexes = indexes of clustered contours, output of 'cluster_contours' (STEP 12)
    #   contours = contours of cluster, output of 'cluster_contours' (STEP 12)
    #   hierarchy = object hierarchy (from STEP 12)
    #   outdir = directory to export output images
    #   file = name of input image to use as basename (uses filename from 'readimage')
    #   filenames = (optional) txt file with list of filenames ordered from top to bottom/left to right

    # Set global debug behavior to None (default), "print" (to file), or "plot" (Jupyter Notebooks or X11)
    pcv.params.debug = None
    out = args.outdir
    # names = args.names

    output_path, imgs, masks = pcv.cluster_contour_splitimg(img1,
                                                            clusters_i,
                                                            contours,
                                                            hierarchies,
                                                            out,
                                                            file=filename,
                                                            filenames=None)
Example #11
def segment_skeleton(skel_img, mask=None):
    """ Segment a skeleton image into pieces

        Inputs:
        skel_img      = Skeletonized image
        mask          = (Optional) binary mask for debugging. If provided, debug image will be overlaid on the mask.

        Returns:
        segmented_img = Segmented debugging image
        segment_objects     = List of contours
        segment_hierarchies = Contour hierarchy list

        :param skel_img: numpy.ndarray
        :param mask: numpy.ndarray
        :return segmented_img: numpy.ndarray
        :return segment_objects: list
        "return segment_hierarchies: numpy.ndarray
        """

    # Store debug
    debug = params.debug
    params.debug = None

    # Find branch points
    bp = find_branch_pts(skel_img)
    bp = dilate(bp, 3, 1)

    # Subtract from the skeleton so that leaves are no longer connected
    segments = image_subtract(skel_img, bp)

    # Gather contours of leaves
    segment_objects, segment_hierarchies = find_objects(segments, segments)

    # Color each segment a different color
    rand_color = color_palette(len(segment_objects))

    if mask is None:
        segmented_img = skel_img.copy()
    else:
        segmented_img = mask.copy()

    segmented_img = cv2.cvtColor(segmented_img, cv2.COLOR_GRAY2RGB)
    for i, cnt in enumerate(segment_objects):
        cv2.drawContours(segmented_img,
                         segment_objects,
                         i,
                         rand_color[i],
                         params.line_thickness,
                         lineType=8,
                         hierarchy=segment_hierarchies)

    # Reset debug mode
    params.debug = debug
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(
            segmented_img,
            os.path.join(params.debug_outdir,
                         str(params.device) + '_segmented.png'))
    elif params.debug == 'plot':
        plot_image(segmented_img)

    return segmented_img, segment_objects, segment_hierarchies
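
# Usage sketch for this variant, which also returns the contour hierarchy
# (`skeleton` and `plant_mask` are hypothetical inputs, as in Example #1):
segmented_img, objects, hierarchies = segment_skeleton(skeleton, mask=plant_mask)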
Example #12
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)

    debug = args.debug

    # Pipeline step
    device = 0

    # Step 1: Check if this is a night image. For some of these datasets, images were
    # captured at night, even if nothing is visible. To make sure that images were not
    # taken at night, we check that the image isn't mostly dark (0=black, 255=white).
    # If it is a night image, a fatal error is raised and the pipeline stops.

    if np.average(img) < 50:
        pcv.fatal_error("Night Image")

    # Step 2: Normalize the white color so you can later
    # compare color between images.
    # Inputs:
    # device = device number. Used to count steps in the workflow
    # img = image object, RGB colorspace
    # debug = None, print, or plot. Print = save to file, Plot = print to screen.
    # roi = region for white reference, if none uses the whole image,
    # otherwise (x position, y position, box width, box height)

    #white balance image based on white toughspot
    device, img1 = pcv.white_balance(device, img, debug, roi=white_balance_roi)
    # img1 = img

    # Step 3: Rotate the image

    # device, rotate_img = pcv.rotate(img1, -1, device, debug)

    #Step 4: Shift image. This step is important for clustering later on.
    # For this image it also allows you to push the green raspberry pi camera
    # out of the image. This step might not be necessary for all images.
    # The resulting image is the same size as the original.
    # Input:
    # img = image object
    # device = device number. Used to count steps in the workflow
    # number = integer, number of pixels to move image
    # side = direction to move from "top", "bottom", "right","left"
    # debug = None, print, or plot. Print = save to file, Plot = print to screen.

    # device, shift1 = pcv.shift_img(img1, device, 300, 'top', debug)
    # img1 = shift1

    # STEP 5: Convert image from RGB colorspace to LAB colorspace
    # Keep only the green-magenta channel (grayscale)
    # Inputs:
    #    img     = image object, RGB colorspace
    #    channel = color subchannel (l = lightness, a = green-magenta , b = blue-yellow)
    #    device  = device number. Used to count steps in the workflow
    #    debug   = None, print, or plot. Print = save to file, Plot = print to screen.
    device, a = pcv.rgb2gray_lab(img1, 'a', device, debug)

    # STEP 6: Set a binary threshold on the green-magenta channel image
    # Inputs:
    #    img         = img object, grayscale
    #    threshold   = threshold value (0-255)
    #    maxValue    = value to apply above threshold (usually 255 = white)
    #    object_type = light or dark
    #                  - If object is light then standard thresholding is done
    #                  - If object is dark then inverse thresholding is done
    #    device      = device number. Used to count steps in the pipeline
    #    debug       = None, print, or plot. Print = save to file, Plot = print to screen.
    device, img_binary = pcv.binary_threshold(a, darkness_threshold, 255,
                                              'dark', device, debug)
    #                                            ^
    #                                            |
    #                                           adjust this value

    # STEP 7: Fill in small objects (speckles)
    # Inputs:
    #    img    = image object, grayscale. img will be returned after filling
    #    mask   = image object, grayscale. This image will be used to identify contours
    #    size   = minimum object area size in pixels (integer)
    #    device = device number. Used to count steps in the pipeline
    #    debug  = None, print, or plot. Print = save to file, Plot = print to screen.
    mask = np.copy(img_binary)
    device, fill_image = pcv.fill(img_binary, mask, minimum_object_area_pixels,
                                  device, debug)
    #                                               ^
    #                                               |
    #                                               adjust this value

    # STEP 8: Dilate so that you don't lose leaves (just in case)
    # Inputs:
    #    img     = input image
    #    kernel  = integer
    #    i       = iterations, i.e. number of consecutive filtering passes
    #    device  = device number. Used to count steps in the pipeline
    #    debug   = None, print, or plot. Print = save to file, Plot = print to screen.

    device, dilated = pcv.dilate(fill_image, 1, 1, device, debug)

    # STEP 9: Find objects (contours: black-white boundaries)
    # Inputs:
    #    img       = image that the objects will be overlayed
    #    mask      = what is used for object detection
    #    device    = device number.  Used to count steps in the pipeline
    #    debug     = None, print, or plot. Print = save to file, Plot = print to screen.
    device, id_objects, obj_hierarchy = pcv.find_objects(
        img1, dilated, device, debug)

    # STEP 10: Define region of interest (ROI)
    # Inputs:
    #    img       = img to overlay roi
    #    roi       = default (None) or user input ROI image, object area should be white and background should be black,
    #                has not been optimized for more than one ROI
    #    roi_input = type of file roi_base is, either 'binary', 'rgb', or 'default' (no ROI inputted)
    #    shape     = desired shape of final roi, either 'rectangle' or 'circle', if  user inputs rectangular roi but chooses
    #                'circle' for shape then a circle is fitted around rectangular roi (and vice versa)
    #    device    = device number.  Used to count steps in the pipeline
    #    debug     = None, print, or plot. Print = save to file, Plot = print to screen.
    #    adjust    = either 'True' or 'False', if 'True' allows user to adjust ROI
    #    x_adj     = adjust center along x axis
    #    y_adj     = adjust center along y axis
    #    w_adj     = adjust width
    #    h_adj     = adjust height
    # x=0, y=560, h=4040-560, w=3456
    roi_contour, roi_hierarchy = pcv.roi.rectangle(**total_region_of_interest,
                                                   img=img1)
    # device, roi, roi_hierarchy = pcv.define_roi(img1, 'rectangle', device, None, 'default', debug, False,
    #                                             0, 0, 0, 0)
    #                                            ^                ^
    #                                            |________________|
    #                                            adjust these four values

    # STEP 11: Keep objects that overlap with the ROI
    # Inputs:
    #    img            = img to display kept objects
    #    roi_type       = 'cutto' or 'partial' (for partially inside)
    #    roi_contour    = contour of roi, output from "View and Adjust ROI" function
    #    roi_hierarchy  = hierarchy of roi, output from "View and Adjust ROI" function
    #    object_contour = contours of objects, output from "Identifying Objects" function
    #    obj_hierarchy  = hierarchy of objects, output from "Identifying Objects" function
    #    device         = device number.  Used to count steps in the pipeline
    #    debug          = None, print, or plot. Print = save to file, Plot = print to screen.
    device, roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img1, 'partial', roi_contour, roi_hierarchy, id_objects, obj_hierarchy,
        device, debug)

    # print(obj_area)

    #Step 12: This function take a image with multiple contours and
    # clusters them based on user input of rows and columns

    #Inputs:
    #    img - An RGB image array
    #    roi_objects - object contours in an image that are needed to be clustered.
    #    nrow - number of rows to cluster (the approximate number of desired rows in the entire image, even if there isn't a literal row of plants)
    #    ncol - number of columns to cluster (the approximate number of desired columns in the entire image, even if there isn't a literal column of plants)
    #    file -  output of filename from read_image function
    #    filenames - input txt file with list of filenames in order from top to bottom left to right
    #    debug - print debugging images

    device, clusters_i, contours = pcv.cluster_contours(
        device, img1, roi_objects, expected_number_of_rows,
        expected_number_of_columns, debug)

    # print(contours)

    #Step 13:This function takes clustered contours and splits them into multiple images,
    #also does a check to make sure that the number of inputted filenames matches the number
    #of clustered contours. If no filenames are given then the objects are just numbered

    #Inputs:
    #    img - ideally a masked RGB image.
    #    grouped_contour_indexes - output of cluster_contours, indexes of clusters of contours
    #    contours - contours to cluster, output of cluster_contours
    #    filenames - input txt file with list of filenames in order from top to bottom left to right (likely list of genotypes)
    #    debug - print debugging images

    out = args.outdir
    names = args.names
    device, output_path = pcv.cluster_contour_splitimg(device,
                                                       img1,
                                                       clusters_i,
                                                       contours,
                                                       out,
                                                       file=filename,
                                                       filenames=names,
                                                       debug=debug)
Example #13
def find_tips(skel_img, mask=None):
    """
    The endpoints algorithm was inspired by Jean-Patrick Pommier: https://gist.github.com/jeanpat/5712699
    Find tips in skeletonized image.

    Inputs:
    skel_img    = Skeletonized image
    mask        = (Optional) binary mask for debugging. If provided, debug image will be overlaid on the mask.

    Returns:
    tip_img   = Image with just tips, rest 0

    :param skel_img: numpy.ndarray
    :param mask: numpy.ndarray
    :return tip_img: numpy.ndarray
    """

    # In a kernel: 1 values line up with 255s, -1s line up with 0s, and 0s correspond to don't care
    endpoint1 = np.array([[-1, -1, -1], [-1, 1, -1], [0, 1, 0]])
    endpoint2 = np.array([[-1, -1, -1], [-1, 1, 0], [-1, 0, 1]])

    endpoint3 = np.rot90(endpoint1)
    endpoint4 = np.rot90(endpoint2)
    endpoint5 = np.rot90(endpoint3)
    endpoint6 = np.rot90(endpoint4)
    endpoint7 = np.rot90(endpoint5)
    endpoint8 = np.rot90(endpoint6)

    endpoints = [
        endpoint1, endpoint2, endpoint3, endpoint4, endpoint5, endpoint6,
        endpoint7, endpoint8
    ]
    tip_img = np.zeros(skel_img.shape[:2], dtype=int)
    for endpoint in endpoints:
        tip_img = np.logical_or(
            cv2.morphologyEx(skel_img,
                             op=cv2.MORPH_HITMISS,
                             kernel=endpoint,
                             borderType=cv2.BORDER_CONSTANT,
                             borderValue=0), tip_img)
    tip_img = tip_img.astype(np.uint8) * 255
    # Store debug
    debug = params.debug
    params.debug = None
    tip_objects, _ = find_objects(tip_img, tip_img)

    if mask is None:
        # Make debugging image
        dilated_skel = dilate(skel_img, params.line_thickness, 1)
        tip_plot = cv2.cvtColor(dilated_skel, cv2.COLOR_GRAY2RGB)

    else:
        # Make debugging image on mask
        mask_copy = mask.copy()
        tip_plot = cv2.cvtColor(mask_copy, cv2.COLOR_GRAY2RGB)
        skel_obj, skel_hier = find_objects(skel_img, skel_img)
        cv2.drawContours(tip_plot,
                         skel_obj,
                         -1, (150, 150, 150),
                         params.line_thickness,
                         lineType=8,
                         hierarchy=skel_hier)

    # Initialize list of tip data points
    tip_list = []
    tip_labels = []
    for i, tip in enumerate(tip_objects):
        x, y = tip.ravel()[:2]
        coord = (int(x), int(y))
        tip_list.append(coord)
        tip_labels.append(i)
        cv2.circle(tip_plot, (x, y), params.line_thickness, (0, 255, 0), -1)

    outputs.add_observation(
        variable='tips',
        trait='list of tip coordinates identified from a skeleton',
        method='plantcv.plantcv.morphology.find_tips',
        scale='pixels',
        datatype=list,
        value=tip_list,
        label=tip_labels)

    # Reset debug mode
    params.debug = debug
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(
            tip_plot,
            os.path.join(params.debug_outdir,
                         str(params.device) + '_skeleton_tips.png'))
    elif params.debug == 'plot':
        plot_image(tip_plot)

    return tip_img
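
# Tip coordinates are also stored in the Outputs class; a retrieval sketch
# (`skeleton` is a hypothetical skeletonized image; key names follow the
# add_observation call above):
tip_img = find_tips(skeleton)
tip_coords = outputs.observations['tips']['value']  # list of (x, y) tuples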
Example #14
def find_branch_pts(skel_img, mask=None):
    """
    The branching algorithm was inspired by Jean-Patrick Pommier: https://gist.github.com/jeanpat/5712699
    Inputs:
    skel_img    = Skeletonized image
    mask        = (Optional) binary mask for debugging. If provided, debug image will be overlaid on the mask.

    Returns:
    branch_pts_img = Image with just branch points, rest 0

    :param skel_img: numpy.ndarray
    :param mask: numpy.ndarray
    :return branch_pts_img: numpy.ndarray
    """

    # Store debug
    debug = params.debug
    params.debug = None

    # In a kernel: 1 values line up with 255s, -1s line up with 0s, and 0s correspond to don't care
    # T like branch points
    t1 = np.array([[-1,  1, -1],
                   [ 1,  1,  1],
                   [-1, -1, -1]])
    t2 = np.array([[ 1, -1,  1],
                   [-1,  1, -1],
                   [ 1, -1, -1]])
    t3 = np.rot90(t1)
    t4 = np.rot90(t2)
    t5 = np.rot90(t3)
    t6 = np.rot90(t4)
    t7 = np.rot90(t5)
    t8 = np.rot90(t6)

    # Y like branch points
    y1 = np.array([[ 1, -1,  1],
                   [ 0,  1,  0],
                   [ 0,  1,  0]])
    y2 = np.array([[-1,  1, -1],
                   [ 1,  1,  0],
                   [-1,  0,  1]])
    y3 = np.rot90(y1)
    y4 = np.rot90(y2)
    y5 = np.rot90(y3)
    y6 = np.rot90(y4)
    y7 = np.rot90(y5)
    y8 = np.rot90(y6)
    kernels = [t1, t2, t3, t4, t5, t6, t7, t8, y1, y2, y3, y4, y5, y6, y7, y8]

    branch_pts_img = np.zeros(skel_img.shape[:2], dtype=int)

    # Store branch points
    for kernel in kernels:
        branch_pts_img = np.logical_or(cv2.morphologyEx(skel_img, op=cv2.MORPH_HITMISS, kernel=kernel,
                                                        borderType=cv2.BORDER_CONSTANT, borderValue=0), branch_pts_img)

    # Switch type to uint8 rather than bool
    branch_pts_img = branch_pts_img.astype(np.uint8) * 255

    # Make debugging image
    if mask is None:
        dilated_skel = dilate(skel_img, params.line_thickness, 1)
        branch_plot = cv2.cvtColor(dilated_skel, cv2.COLOR_GRAY2RGB)
    else:
        # Make debugging image on mask
        mask_copy = mask.copy()
        branch_plot = cv2.cvtColor(mask_copy, cv2.COLOR_GRAY2RGB)
        skel_obj, skel_hier = find_objects(skel_img, skel_img)
        cv2.drawContours(branch_plot, skel_obj, -1, (150, 150, 150), params.line_thickness, lineType=8,
                         hierarchy=skel_hier)

    branch_objects, _ = find_objects(branch_pts_img, branch_pts_img)
    for i in branch_objects:
        x, y = i.ravel()[:2]
        cv2.circle(branch_plot, (int(x), int(y)), params.line_thickness, (255, 0, 255), -1)

    # Reset debug mode
    params.debug = debug
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(branch_plot, os.path.join(params.debug_outdir, str(params.device) + '_skeleton_branches.png'))
    elif params.debug == 'plot':
        plot_image(branch_plot)

    return branch_pts_img
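
A side note on the rotation chains above: because each kernel is rotated from the one two steps earlier, t1, t3, t5, t7 are the four 90-degree rotations of t1, and t2, t4, t6, t8 those of t2 (the Y kernels follow the same pattern). A small sketch of the equivalent direct construction; kernel ordering is irrelevant since the hit-or-miss responses are OR-ed together:

import numpy as np

def rotations(kernel):
    # All four 90-degree rotations of a 3x3 kernel
    return [np.rot90(kernel, k) for k in range(4)]

t1 = np.array([[-1,  1, -1],
               [ 1,  1,  1],
               [-1, -1, -1]])

# rotations(t1) reproduces t1, t3, t5, t7 from the chained calls above;
# applying the same helper to t2, y1, and y2 rebuilds the full 16-kernel set
assert all(np.array_equal(a, b) for a, b in zip(
    rotations(t1), [t1, np.rot90(t1), np.rot90(t1, 2), np.rot90(t1, 3)]))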
Example n. 15
0
def segment_insertion_angle(skel_img, segmented_img, leaf_objects,
                            leaf_hierarchies, stem_objects, size):
    """ Find leaf insertion angles in degrees of skeleton segments. Fit a linear regression line to the stem.
        Use `size` pixels on  the portion of leaf next to the stem find a linear regression line,
        and calculate angle between the two lines per leaf object.

        Inputs:
        skel_img         = Skeletonized image
        segmented_img    = Segmented image to plot slope lines and intersection angles on
        leaf_objects     = List of leaf segments
        leaf_hierarchies = Leaf contour hierarchy NumPy array
        stem_objects     = List of stem segments
        size             = Size of inner leaf used to calculate slope lines

        Returns:
        insertion_angle_header = Leaf insertion angle headers
        insertion_angle_data   = Leaf insertion angle values
        labeled_img            = Debugging image with angles labeled

        :param skel_img: numpy.ndarray
        :param segmented_img: numpy.ndarray
        :param leaf_objects: list
        :param leaf_hierarchies: numpy.ndarray
        :param stem_objects: list
        :param size: int
        :return insertion_angle_header: list
        :return insertion_angle_data: list
        :return labeled_img: numpy.ndarray
        """

    # Store debug
    debug = params.debug
    params.debug = None

    rows, cols = segmented_img.shape[:2]
    labeled_img = segmented_img.copy()
    segment_slopes = []
    insertion_segments = []
    insertion_hierarchies = []
    intersection_angles = []
    label_coord_x = []
    label_coord_y = []

    # Create a list of tip tuples to use for sorting
    tips = find_tips(skel_img)
    tip_objects, tip_hierarchies = find_objects(tips, tips)
    tip_tuples = []
    for i, cnt in enumerate(tip_objects):
        tip_tuples.append((cnt[0][0][0], cnt[0][0][1]))

    rand_color = color_palette(len(leaf_objects))

    for i, cnt in enumerate(leaf_objects):
        # Draw leaf objects
        find_segment_tangents = np.zeros(segmented_img.shape[:2], np.uint8)
        cv2.drawContours(find_segment_tangents,
                         leaf_objects,
                         i,
                         255,
                         1,
                         lineType=8,
                         hierarchy=leaf_hierarchies)
        cv2.drawContours(labeled_img,
                         leaf_objects,
                         i,
                         rand_color[i],
                         params.line_thickness,
                         lineType=8,
                         hierarchy=leaf_hierarchies)

        # Prune back ends of leaves
        pruned_segment = prune(find_segment_tangents, size)

        # Segment ends are the portions pruned off
        segment_ends = find_segment_tangents - pruned_segment
        segment_end_obj, segment_end_hierarchy = find_objects(
            segment_ends, segment_ends)
        if len(segment_end_obj) != 2:
            print("Size too large, contour with ID#", i,
                  "got pruned away completely.")
        else:
            # Determine if a segment is a leaf end or a leaf insertion segment
            for j, obj in enumerate(segment_end_obj):
                # Reset per segment end so one segment's tip matches don't
                # leak into the next segment's classification
                is_insertion_segment = []

                # Reformat the contour into a list of (x, y) tuples so tip
                # coordinates can be looked up with `in`
                cnt_as_tuples = [(point[0][0], point[0][1]) for point in obj]

                for tip_tups in tip_tuples:
                    # If a tip is inside the list of contour tuples then it is a leaf end segment
                    if tip_tups in cnt_as_tuples:
                        is_insertion_segment.append(False)
                    else:
                        is_insertion_segment.append(True)

                # If none of the tips are within a segment_end then it's an insertion segment
                if all(is_insertion_segment):
                    insertion_segments.append(segment_end_obj[j])
                    insertion_hierarchies.append(segment_end_hierarchy[0][j])

        # Store coordinates for labels
        label_coord_x.append(leaf_objects[i][0][0][0])
        label_coord_y.append(leaf_objects[i][0][0][1])

    # Plot stem segments
    stem_img = np.zeros(segmented_img.shape[:2], np.uint8)
    cv2.drawContours(stem_img, stem_objects, -1, 255, 2, lineType=8)
    branch_pts = find_branch_pts(skel_img)
    stem_img = stem_img + branch_pts
    stem_img = closing(stem_img)
    combined_stem, combined_stem_hier = find_objects(stem_img, stem_img)

    # Make sure stem objects are a single contour
    while len(combined_stem) > 1:
        stem_img = dilate(stem_img, 2, 1)
        stem_img = closing(stem_img)
        combined_stem, combined_stem_hier = find_objects(stem_img, stem_img)

    # Find slope of the stem
    [vx, vy, x, y] = cv2.fitLine(combined_stem[0], cv2.DIST_L2, 0, 0.01, 0.01)
    stem_slope = -vy / vx
    stem_slope = stem_slope[0]
    lefty = int((-x * vy / vx) + y)
    righty = int(((cols - x) * vy / vx) + y)
    cv2.line(labeled_img, (cols - 1, righty), (0, lefty), (150, 150, 150), 3)

    for t, segment in enumerate(insertion_segments):
        # Find line fit to each segment
        [vx, vy, x, y] = cv2.fitLine(segment, cv2.DIST_L2, 0, 0.01, 0.01)
        slope = -vy / vx
        left_list = int((-x * vy / vx) + y)
        right_list = int(((cols - x) * vy / vx) + y)
        segment_slopes.append(slope[0])

        # Draw slope lines if possible (near-vertical segments are skipped)
        if abs(slope[0]) > 1000000:
            print("Slope of contour with ID#", t, "is", slope[0],
                  "and cannot be plotted.")
        else:
            cv2.line(labeled_img, (cols - 1, right_list), (0, left_list),
                     rand_color[t], 1)

        # Store intersection angles between insertion segment and stem line
        intersection_angle = _slope_to_intesect_angle(slope[0], stem_slope)
        # Function measures clockwise but we want the acute angle between stem and leaf insertion
        if intersection_angle > 90:
            intersection_angle = 180 - intersection_angle
        intersection_angles.append(intersection_angle)

    insertion_angle_header = ['HEADER_INSERTION_ANGLE']
    insertion_angle_data = ['INSERTION_ANGLE_DATA']

    for i, cnt in enumerate(insertion_segments):
        # Label slope lines
        w = label_coord_x[i]
        h = label_coord_y[i]
        text = "{:.2f}".format(intersection_angles[i])
        cv2.putText(img=labeled_img,
                    text=text,
                    org=(w, h),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=.55,
                    color=(150, 150, 150),
                    thickness=2)
        segment_label = "ID" + str(i)
        insertion_angle_header.append(segment_label)
    insertion_angle_data.extend(intersection_angles)

    if 'morphology_data' not in outputs.measurements:
        outputs.measurements['morphology_data'] = {}
    outputs.measurements['morphology_data'][
        'segment_insertion_angles'] = intersection_angles

    # Reset debug mode
    params.debug = debug
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(
            labeled_img,
            os.path.join(params.debug_outdir,
                         str(params.device) + '_segment_insertion_angles.png'))
    elif params.debug == 'plot':
        plot_image(labeled_img)

    return insertion_angle_header, insertion_angle_data, labeled_img
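
The angle itself comes from `_slope_to_intesect_angle`, whose body is not shown in this snippet. A plausible sketch (a hypothetical stand-in, not the verbatim PlantCV helper) converts each slope to its inclination with atan and takes the difference modulo 180; the caller then folds values above 90 to the acute angle:

import math

def slope_to_intersect_angle(m1, m2):
    # Angle in degrees between two lines with slopes m1 and m2,
    # normalized to [0, 180)
    return (math.degrees(math.atan(m1)) - math.degrees(math.atan(m2))) % 180

# Example: a leaf of slope 1 against a steep stem of slope 10
angle = slope_to_intersect_angle(1.0, 10.0)   # ~140.7
acute = 180 - angle if angle > 90 else angle  # ~39.3
print(round(acute, 1))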
Example n. 16
0
def canny_edge_detect(img, mask=None, sigma=1.0, low_thresh=None, high_thresh=None, thickness=1,
                      mask_color=None, use_quantiles=False):
    """Edge filter an image using the Canny algorithm.

    Inputs:
    img           = RGB or grayscale image data
    mask          = Mask to limit the application of Canny to a certain area, takes a binary img. (OPTIONAL)
    sigma         = Standard deviation of the Gaussian filter
    low_thresh    = Lower bound for hysteresis thresholding (linking edges). If None (default) then low_thresh is set to
                    10% of the image's max (OPTIONAL)
    high_thresh   = Upper bound for hysteresis thresholding (linking edges). If None (default) then high_thresh is set
                    to 20% of the image's max (OPTIONAL)
    thickness     = How thick the edges should appear, default thickness=1 (OPTIONAL)
    mask_color    = Color of the mask provided; either None (default), 'white', or 'black'
    use_quantiles = Default is False, if True then treat low_thresh and high_thresh as quantiles of the edge magnitude
                    image, rather than the absolute edge magnitude values. If True then thresholds range is [0,1].
                    (OPTIONAL)

    Returns:
    bin_img      = Thresholded, binary image

    :param img: numpy.ndarray
    :param mask: numpy.ndarray
    :param sigma: float
    :param low_thresh: float
    :param high_thresh: float
    :param thickness: int
    :param mask_color: str
    :param use_quantiles: bool
    :return bin_img: numpy.ndarray

    Reference: Canny, J., A Computational Approach To Edge Detection, IEEE Trans.
    Pattern Analysis and Machine Intelligence, 8:679-714, 1986
    Originally part of CellProfiler, code licensed under both GPL and BSD licenses.
    Website: http://www.cellprofiler.org
    Copyright (c) 2003-2009 Massachusetts Institute of Technology
    Copyright (c) 2009-2011 Broad Institute
    All rights reserved.
    Original author: Lee Kamentsky
    """

    params.device += 1

    # Check if the image is grayscale; if color img then make it grayscale
    dimensions = np.shape(img)
    if len(dimensions) == 3:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # skimage needs a bool mask
    if mask is not None:
        if mask_color is None:
            # Guard against None so the error is informative rather than an AttributeError
            fatal_error('Mask was provided but mask_color is not set to "white" or "black"!')
        elif mask_color.upper() == 'WHITE':
            mask = np.array(mask, bool)
        elif mask_color.upper() == 'BLACK':
            mask = cv2.bitwise_not(mask)
            mask = np.array(mask, bool)
        else:
            fatal_error('Mask was provided but mask_color ' + str(mask_color) + ' is not "white" or "black"!')

    # Run Canny edge detection on the grayscale image
    bool_img = feature.canny(img, sigma, low_thresh, high_thresh, mask, use_quantiles)

    # skimage returns a bool image so convert it
    bin_img = np.copy(bool_img.astype(np.uint8) * 255)

    # Adjust line thickness; suppress debug output from dilate so only the
    # final image is printed/plotted
    if thickness != 1:
        debug = params.debug
        params.debug = None
        bin_img = dilate(bin_img, thickness, 1)
        params.debug = debug

    # Print or plot the binary image
    if params.debug == 'print':
        print_image(bin_img, os.path.join(params.debug_outdir, (str(params.device) + '_canny_edge_detect.png')))
    elif params.debug == 'plot':
        plot_image(bin_img, cmap='gray')

    return bin_img
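
A usage sketch for the function above (the image path and parameter values are illustrative only; `pcv.canny_edge_detect` is assumed to be exposed at the package top level, as in recent PlantCV releases):

from plantcv import plantcv as pcv

pcv.params.debug = 'plot'
img, path, filename = pcv.readimage("leaf.jpg", mode='rgb')  # hypothetical path

# Hysteresis thresholds given as quantiles (0.1 and 0.3) of the
# edge-magnitude image; dilate the result for visibility
edges = pcv.canny_edge_detect(img, sigma=2.0, low_thresh=0.1, high_thresh=0.3,
                              use_quantiles=True, thickness=2)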
Example n. 17
0
def segment_sort(skel_img, objects, mask=None, first_stem=True):
    """ Calculate segment curvature as defined by the ratio between geodesic and euclidean distance

        Inputs:
        skel_img          = Skeletonized image
        objects           = List of contours
        mask              = (Optional) binary mask for debugging. If provided, debug image will be overlaid on the mask.
        first_stem        = (Optional) if True, then the first (bottom) segment always gets classified as stem

        Returns:
        secondary_objects = List of secondary segments (leaf)
        primary_objects   = List of primary segments (stem)

        :param skel_img: numpy.ndarray
        :param objects: list
        :param mask: numpy.ndarray
        :param first_stem: bool
        :return secondary_objects: list
        :return primary_objects: list
        """
    # Store debug
    debug = params.debug
    params.debug = None

    secondary_objects = []
    primary_objects = []

    if mask is None:
        labeled_img = np.zeros(skel_img.shape[:2], np.uint8)
    else:
        labeled_img = mask.copy()

    tips_img = find_tips(skel_img)
    tips_img = dilate(tips_img, 3, 1)

    # Loop through segment contours
    for i, cnt in enumerate(objects):
        segment_plot = np.zeros(skel_img.shape[:2], np.uint8)
        cv2.drawContours(segment_plot, objects, i, 255, 1, lineType=8)
        overlap_img = logical_and(segment_plot, tips_img)

        # The first contour is the base; even though it contains a tip, it isn't a leaf
        if i == 0 and first_stem:
            primary_objects.append(cnt)

        # Sort segments
        else:

            if np.sum(overlap_img) > 0:
                secondary_objects.append(cnt)
            else:
                primary_objects.append(cnt)

    # Reset debug mode
    params.debug = debug

    # Plot segments: leaf objects in green, primary/other objects in fuchsia
    labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_GRAY2RGB)
    for i, cnt in enumerate(primary_objects):
        cv2.drawContours(labeled_img, primary_objects, i, (255, 0, 255), params.line_thickness, lineType=8)
    for i, cnt in enumerate(secondary_objects):
        cv2.drawContours(labeled_img, secondary_objects, i, (0, 255, 0), params.line_thickness, lineType=8)

    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(labeled_img, os.path.join(params.debug_outdir, str(params.device) + '_sorted_segments.png'))
    elif params.debug == 'plot':
        plot_image(labeled_img)

    return secondary_objects, primary_objects
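
A sketch of where segment_sort sits in a morphology workflow (the image path, channel, and threshold value are assumptions; the surrounding calls are the PlantCV API used throughout these snippets):

from plantcv import plantcv as pcv

img, path, filename = pcv.readimage("plant.jpg", mode='rgb')  # hypothetical path

# Build a binary plant mask (channel and threshold are illustrative)
gray = pcv.rgb2gray_lab(rgb_img=img, channel='a')
mask = pcv.threshold.binary(gray_img=gray, threshold=120, max_value=255,
                            object_type='dark')

# Skeletonize, split the skeleton at branch points, then sort the pieces
skeleton = pcv.morphology.skeletonize(mask=mask)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skeleton,
                                                             mask=mask)
leaf_objects, stem_objects = pcv.morphology.segment_sort(skel_img=skeleton,
                                                         objects=seg_objects,
                                                         mask=mask)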
Example n. 18
0
def check_cycles(skel_img):
    """ Check for cycles in a skeleton image
    Inputs:
    skel_img     = Skeletonized image

    Returns:
    cycle_img    = Image with cycles identified

    :param skel_img: numpy.ndarray
    :return cycle_img: numpy.ndarray
    """

    # Store debug
    debug = params.debug
    params.debug = None

    # Create the mask needed for cv2.floodFill, must be larger than the image
    h, w = skel_img.shape[:2]
    mask = np.zeros((h + 2, w + 2), np.uint8)

    # Copy the skeleton since cv2.floodFill will draw on it
    skel_copy = skel_img.copy()
    cv2.floodFill(skel_copy, mask=mask, seedPoint=(0, 0), newVal=255)

    # Invert so the holes are white and background black
    just_cycles = cv2.bitwise_not(skel_copy)

    # Erode slightly so that cv2.findContours doesn't think diagonal pixels are separate contours
    just_cycles = erode(just_cycles, 2, 1)

    # Use pcv.find_objects to turn the hole regions into countable contours
    cycle_objects, cycle_hierarchies = find_objects(just_cycles, just_cycles)

    # Count the number of holes
    num_cycles = len(cycle_objects)

    # Make debugging image
    cycle_img = skel_img.copy()
    cycle_img = dilate(cycle_img, params.line_thickness, 1)
    cycle_img = cv2.cvtColor(cycle_img, cv2.COLOR_GRAY2RGB)
    rand_color = color_palette(num_cycles)
    for i, cnt in enumerate(cycle_objects):
        cv2.drawContours(cycle_img, cycle_objects, i, rand_color[i], params.line_thickness, lineType=8,
                         hierarchy=cycle_hierarchies)

    # Store Cycle Data
    outputs.add_observation(variable='num_cycles', trait='number of cycles',
                            method='plantcv.plantcv.morphology.check_cycles', scale='none', datatype=int,
                            value=num_cycles, label='none')

    # Reset debug mode
    params.debug = debug
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(cycle_img, os.path.join(params.debug_outdir, str(params.device) + '_cycles.png'))
    elif params.debug == 'plot':
        plot_image(cycle_img)

    return cycle_img
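
The flood-fill trick above is easiest to see on a toy image: filling the background from (0, 0) turns everything except enclosed holes white, so inverting isolates the cycles. A standalone sketch (plain OpenCV, not the PlantCV wrappers):

import cv2
import numpy as np

# A skeleton with exactly one cycle: a thin ring
ring = np.zeros((9, 9), np.uint8)
cv2.circle(ring, (4, 4), 3, 255, 1)

# Flood-fill the background; the mask must be 2 pixels larger than the image
h, w = ring.shape[:2]
fill_mask = np.zeros((h + 2, w + 2), np.uint8)
filled = ring.copy()
cv2.floodFill(filled, mask=fill_mask, seedPoint=(0, 0), newVal=255)

# Invert so only the enclosed hole remains white, then count it
holes = cv2.bitwise_not(filled)
contours, _ = cv2.findContours(holes, cv2.RETR_EXTERNAL,
                               cv2.CHAIN_APPROX_NONE)  # OpenCV 4.x signature
print(len(contours))  # 1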