Example #1
def flip(img, direction):
    """Flip image.

    Inputs:
    img       = RGB or grayscale image data
    direction = "horizontal" or "vertical"

    Returns:
    vh_img    = flipped image

    :param img: numpy.ndarray
    :param direction: str
    :return vh_img: numpy.ndarray
    """
    params.device += 1
    if direction.upper() == "VERTICAL":
        vh_img = cv2.flip(img, 1)
    elif direction.upper() == "HORIZONTAL":
        vh_img = cv2.flip(img, 0)
    else:
        fatal_error(str(direction) + " is not a valid direction, must be horizontal or vertical")

    if params.debug == 'print':
        print_image(vh_img, os.path.join(params.debug_outdir, str(params.device) + "_flipped.png"))
    elif params.debug == 'plot':
        if len(np.shape(vh_img)) == 3:
            plot_image(vh_img)
        else:
            plot_image(vh_img, cmap='gray')

    return vh_img
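
A minimal standalone sketch (plain NumPy/OpenCV, independent of PlantCV) of the two cv2.flip flip codes used above; flipCode 0 mirrors around the x-axis and flipCode 1 around the y-axis:

import cv2
import numpy as np

# A tiny 2 x 2 test image makes the flip axes easy to see.
img = np.array([[1, 2],
                [3, 4]], dtype=np.uint8)

print(cv2.flip(img, 0))  # flipCode 0, around the x-axis: [[3 4], [1 2]]
print(cv2.flip(img, 1))  # flipCode 1, around the y-axis: [[2 1], [4 3]]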
Example #2
def image_add(gray_img1, gray_img2):

    """This is a function used to add images. The numpy addition function '+' is used. This is a modulo operation
       rather than the cv2.add fxn which is a saturation operation. ddepth = -1 specifies that the dimensions of output
       image will be the same as the input image.

    Inputs:
    gray_img1      = Grayscale image data to be added to image 2
    gray_img2      = Grayscale image data to be added to image 1

    Returns:
    added_img      = summed images

    :param gray_img1: numpy.ndarray
    :param gray_img2: numpy.ndarray
    :return added_img: numpy.ndarray
    """

    added_img = gray_img1 + gray_img2
    params.device += 1
    if params.debug == 'print':
        print_image(added_img, os.path.join(params.debug_outdir, str(params.device) + '_added' + '.png'))
    elif params.debug == 'plot':
        plot_image(added_img, cmap='gray')
    return added_img
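
A small self-contained example of the modulo vs. saturation distinction described in the docstring (standalone NumPy/OpenCV, not part of the function above):

import cv2
import numpy as np

a = np.array([[250]], dtype=np.uint8)
b = np.array([[10]], dtype=np.uint8)

print(a + b)          # NumPy '+' wraps modulo 256: (250 + 10) % 256 = 4
print(cv2.add(a, b))  # cv2.add saturates: min(250 + 10, 255) = 255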
Example #3
def median_blur(gray_img, ksize):
    """Applies a median blur filter (applies median value to central pixel within a kernel size).

    Inputs:
    gray_img  = Grayscale image data
    ksize     = kernel size => integer or tuple, ksize x ksize box if integer, (n, m) size box if tuple

    Returns:
    img_mblur = blurred image


    :param gray_img: numpy.ndarray
    :param ksize: int or tuple
    :return img_mblur: numpy.ndarray
    """

    # Make sure ksize is valid
    if not isinstance(ksize, (int, tuple)):
        fatal_error("Invalid ksize, must be integer or tuple")

    img_mblur = median_filter(gray_img, size=ksize)
    params.device += 1
    if params.debug == 'print':
        print_image(img_mblur, os.path.join(params.debug_outdir,
                                            str(params.device) + '_median_blur' + str(ksize) + '.png'))
    elif params.debug == 'plot':
        plot_image(img_mblur, cmap='gray')
    return img_mblur
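
The function above relies on scipy.ndimage.median_filter accepting either an integer or a tuple for size; a minimal standalone sketch of both forms:

import numpy as np
from scipy.ndimage import median_filter

img = np.arange(25, dtype=np.uint8).reshape(5, 5)

square = median_filter(img, size=3)      # 3 x 3 window
rect = median_filter(img, size=(3, 5))   # 3-row x 5-column window
print(square.shape, rect.shape)          # both stay (5, 5)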
Example #4
def distance_transform(bin_img, distance_type, mask_size):
    """Creates an image where for each object pixel, a number is assigned that corresponds to the distance to the
    nearest background pixel.

    Inputs:
    bin_img         = Binary image data
    distance_type   = Type of distance. It can be CV_DIST_L1, CV_DIST_L2 , or CV_DIST_C which are 1, 2 and 3,
                      respectively.
    mask_size       = Size of the distance transform mask. It can be 3, 5, or CV_DIST_MASK_PRECISE (the latter option
                      is only supported by the first function). In case of the CV_DIST_L1 or CV_DIST_C distance type,
                      the parameter is forced to 3 because a 3 by 3 mask gives the same result as 5 by 5 or any larger
                      aperture.

    Returns:
    norm_image      = grayscale distance-transformed image normalized between [0, 1]

    :param bin_img: numpy.ndarray
    :param distance_type: int
    :param mask_size: int
    :return norm_image: numpy.ndarray
    """

    params.device += 1
    dist = cv2.distanceTransform(src=bin_img, distanceType=distance_type, maskSize=mask_size)
    norm_image = cv2.normalize(src=dist, dst=dist, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)

    if params.debug == 'print':
        print_image(norm_image, os.path.join(params.debug_outdir, str(params.device) + '_distance_transform.png'))
    elif params.debug == 'plot':
        plot_image(norm_image, cmap='gray')

    return norm_image
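
A toy standalone example of the distance transform plus min-max normalization used above (a white stripe on a black background; values grow toward the stripe's center):

import cv2
import numpy as np

bin_img = np.zeros((7, 7), dtype=np.uint8)
bin_img[:, 1:6] = 255  # 5-pixel-wide white stripe

dist = cv2.distanceTransform(src=bin_img, distanceType=cv2.DIST_L2, maskSize=3)
norm = cv2.normalize(src=dist, dst=dist, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
print(norm.min(), norm.max())  # 0.0 1.0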
Example #5
def resize(img, resize_x, resize_y):
    """Resize image.

    Inputs:
    img      = RGB or grayscale image data to resize
    resize_x = scaling factor for the x dimension
    resize_y = scaling factor for the y dimension

    Returns:
    reimg    = resized image

    :param img: numpy.ndarray
    :param resize_x: int
    :param resize_y: int
    :return reimg: numpy.ndarray
    """

    params.device += 1

    if resize_x <= 0 and resize_y <= 0:
        fatal_error("Resize values both cannot be 0 or negative values!")

    reimg = cv2.resize(img, (0, 0), fx=resize_x, fy=resize_y)

    if params.debug == 'print':
        print_image(reimg, os.path.join(params.debug_outdir, str(params.device) + "_resize1.png"))
    elif params.debug == 'plot':
        plot_image(reimg)

    return reimg
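
A quick standalone check of how cv2.resize behaves when the target size is (0, 0) and only the fx/fy scale factors are given, as in the function above:

import cv2
import numpy as np

img = np.zeros((100, 200, 3), dtype=np.uint8)
half = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)
print(half.shape)  # (50, 100, 3): each dimension scaled by its factor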
Example #6
def opening(gray_img, kernel=None):
    """Wrapper for scikit-image opening functions. Opening can remove small bright spots (i.e. salt).

    Inputs:
    gray_img = input image (grayscale or binary)
    kernel   = optional neighborhood, expressed as an array of 1s and 0s. If None, use a cross-shaped structuring element.

    Returns:
    filtered_img = opened image

    :param gray_img: ndarray
    :param kernel: ndarray
    :return filtered_img: ndarray
    """

    params.device += 1

    # Make sure the image is binary/grayscale
    if len(np.shape(gray_img)) != 2:
        fatal_error("Input image must be grayscale or binary")

    # If image is binary use the faster method
    if len(np.unique(gray_img)) == 2:
        bool_img = morphology.binary_opening(gray_img, kernel)
        filtered_img = np.copy(bool_img.astype(np.uint8) * 255)
    # Otherwise use method appropriate for grayscale images
    else:
        filtered_img = morphology.opening(gray_img, kernel)

    if params.debug == 'print':
        print_image(filtered_img, os.path.join(params.debug_outdir, str(params.device) + '_opening' + '.png'))
    elif params.debug == 'plot':
        plot_image(filtered_img, cmap='gray')

    return filtered_img
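
A minimal standalone sketch of morphological opening removing "salt" while keeping a larger object (here with an explicit 3 x 3 square footprint rather than the default cross):

import numpy as np
from skimage import morphology

mask = np.zeros((7, 7), dtype=bool)
mask[1, 1] = True        # isolated bright pixel ("salt")
mask[3:6, 3:6] = True    # 3 x 3 blob

opened = morphology.binary_opening(mask, morphology.square(3))
print(opened.astype(np.uint8))  # the salt pixel is gone, the 3 x 3 blob survives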
Example #7
def laplace_filter(gray_img, ksize, scale):
    """This is a filtering method used to identify and highlight fine edges based on the 2nd derivative. A very
       sensetive method to highlight edges but will also amplify background noise. ddepth = -1 specifies that the
       dimensions of output image will be the same as the input image.

    Inputs:
    gray_img    = Grayscale image data
    ksize       = aperture size used to calculate the 2nd derivative filter, specifies the size of the kernel
                  (must be an odd integer: 1,3,5...)
    scale       = scaling factor applied (multiplied) to computed Laplacian values (scale = 1 is unscaled)

    Returns:
    lp_filtered = laplacian filtered image

    :param gray_img: numpy.ndarray
    :param ksize: int
    :param scale: int
    :return lp_filtered: numpy.ndarray
    """

    lp_filtered = cv2.Laplacian(src=gray_img, ddepth=-1, ksize=ksize, scale=scale)
    params.device += 1
    if params.debug == 'print':
        print_image(lp_filtered,
                    os.path.join(params.debug_outdir,
                                 str(params.device) + '_lp_out_k' + str(ksize) + '_scale' + str(scale) + '.png'))
    elif params.debug == 'plot':
        plot_image(lp_filtered, cmap='gray')
    return lp_filtered
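
A standalone illustration of why ddepth = -1 matters for the Laplacian: with a uint8 input, negative filter responses saturate to 0, so only one side of a step edge survives:

import cv2
import numpy as np

img = np.zeros((7, 7), dtype=np.uint8)
img[:, 3:] = 255  # vertical step edge

edges = cv2.Laplacian(src=img, ddepth=-1, ksize=3, scale=1)
print(edges[3, 2], edges[3, 3])  # positive response on the dark side; the bright side clips to 0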
Example #8
def find_objects(img, mask):
    """Find all objects and color them blue.

    Inputs:
    img       = RGB or grayscale image data for plotting
    mask      = Binary mask used for contour detection


    Returns:
    objects   = list of contours
    hierarchy = contour hierarchy list

    :param img: numpy.ndarray
    :param mask: numpy.ndarray
    :return objects: list
    :return hierarchy: numpy.ndarray
    """

    params.device += 1
    mask1 = np.copy(mask)
    ori_img = np.copy(img)
    # If the reference image is grayscale convert it to color
    if len(np.shape(ori_img)) == 2:
        ori_img = cv2.cvtColor(ori_img, cv2.COLOR_GRAY2BGR)
    objects, hierarchy = cv2.findContours(mask1, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
    for i, cnt in enumerate(objects):
        cv2.drawContours(ori_img, objects, i, (255, 102, 255), -1, lineType=8, hierarchy=hierarchy)
    if params.debug == 'print':
        print_image(ori_img, os.path.join(params.debug_outdir, str(params.device) + '_id_objects.png'))
    elif params.debug == 'plot':
        plot_image(ori_img)

    return objects, hierarchy
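
The [-2:] slice above keeps the code compatible across OpenCV versions (cv2.findContours returns three values in OpenCV 3 and two in OpenCV 2/4). A standalone toy run:

import cv2
import numpy as np

mask = np.zeros((10, 10), dtype=np.uint8)
mask[2:5, 2:5] = 255
mask[6:9, 6:9] = 255

contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
print(len(contours))  # 2: one contour per white square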
Example #9
def skeletonize(mask):
    """Reduces binary objects to 1 pixel wide representations (skeleton)

    Inputs:
    mask       = Binary image data

    Returns:
    skeleton   = skeleton image

    :param mask: numpy.ndarray
    :return skeleton: numpy.ndarray
    """
    # Store debug
    debug = params.debug
    params.debug = None

    # Convert mask to boolean image, rather than 0 and 255 for skimage to use it
    skeleton = skmorph.skeletonize(mask.astype(bool))

    skeleton = skeleton.astype(np.uint8) * 255

    # Reset debug mode
    params.debug = debug
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(skeleton, os.path.join(params.debug_outdir, str(params.device) + '_skeleton.png'))
    elif params.debug == 'plot':
        plot_image(skeleton, cmap='gray')

    return skeleton
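
A small standalone demonstration of the boolean round-trip used above: skimage expects a boolean mask and returns one, and multiplying by 255 restores an 8-bit binary image:

import numpy as np
from skimage import morphology as skmorph

mask = np.zeros((9, 9), dtype=np.uint8)
mask[3:6, :] = 255  # thick horizontal bar

skeleton = skmorph.skeletonize(mask.astype(bool)).astype(np.uint8) * 255
print(np.unique(skeleton))  # [  0 255]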
Example #10
def scharr_filter(img, dx, dy, scale):
    """This is a filtering method used to identify and highlight gradient edges/features using the 1st derivative.
       Typically used to identify gradients along the x-axis (dx = 1, dy = 0) and y-axis (dx = 0, dy = 1) independently.
       Performance is quite similar to Sobel filter. Used to detect edges / changes in pixel intensity. ddepth = -1
       specifies that the dimensions of output image will be the same as the input image.

    Inputs:
    img      = Grayscale image data
    dx       = derivative of x to analyze (1-3)
    dy       = derivative of y to analyze (1-3)
    scale    = scaling factor applied (multiplied) to computed Scharr values (scale = 1 is unscaled)

    Returns:
    sr_img   = Scharr filtered image

    :param img: numpy.ndarray
    :param dx: int
    :param dy: int
    :param scale: int
    :return sr_img: numpy.ndarray
    """

    sr_img = cv2.Scharr(src=img, ddepth=-1, dx=dx, dy=dy, scale=scale)
    params.device += 1
    if params.debug == 'print':
        name = os.path.join(params.debug_outdir, str(params.device))
        name += '_sr_img_dx' + str(dx) + '_dy' + str(dy) + '_scale' + str(scale) + '.png'
        print_image(sr_img, name)
    elif params.debug == 'plot':
        plot_image(sr_img, cmap='gray')
    return sr_img
Example #11
def erode(gray_img, ksize, i):
    """Perform morphological 'erosion' filtering. Keeps pixel in center of the kernel if conditions set in kernel are
       true, otherwise removes pixel.

    Inputs:
    gray_img = Grayscale (usually binary) image data
    ksize    = Kernel size (int). A ksize x ksize kernel will be built. Must be greater than 1 to have an effect.
    i        = iterations, i.e. number of consecutive filtering passes

    Returns:
    er_img = eroded image

    :param gray_img: numpy.ndarray
    :param ksize: int
    :param i: int
    :return er_img: numpy.ndarray
    """

    if ksize <= 1:
        raise ValueError('ksize needs to be greater than 1 for the function to have an effect')

    kernel1 = int(ksize)
    kernel2 = np.ones((kernel1, kernel1), np.uint8)
    er_img = cv2.erode(src=gray_img, kernel=kernel2, iterations=i)
    params.device += 1
    if params.debug == 'print':
        print_image(er_img, os.path.join(params.debug_outdir,
                                         str(params.device) + '_er_image' + str(ksize) + '_itr_' + str(i) + '.png'))
    elif params.debug == 'plot':
        plot_image(er_img, cmap='gray')
    return er_img
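
A minimal standalone check of the erosion behavior: a 3 x 3 white square eroded with a 3 x 3 all-ones kernel shrinks to its single center pixel:

import cv2
import numpy as np

mask = np.zeros((7, 7), dtype=np.uint8)
mask[2:5, 2:5] = 255

kernel = np.ones((3, 3), np.uint8)
eroded = cv2.erode(src=mask, kernel=kernel, iterations=1)
print(int(eroded.sum() // 255))  # 1: only the center pixel survives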
Example #12
def gaussian_blur(img, ksize, sigma_x=0, sigma_y=None):
    """Applies a Gaussian blur filter.

    Inputs:
    img      = RGB or grayscale image data
    ksize    = Tuple of kernel dimensions, e.g. (5, 5)
    sigma_x  = standard deviation in X direction; if 0, calculated from kernel size
    sigma_y  = standard deviation in Y direction; if None, sigma_y is taken to equal sigma_x

    Returns:
    img_gblur = blurred image

    :param img: numpy.ndarray
    :param ksize: tuple
    :param sigma_x: int
    :param sigma_y: int or None
    :return img_gblur: numpy.ndarray
    """

    img_gblur = cv2.GaussianBlur(img, ksize, sigma_x, sigma_y)

    params.device += 1
    if params.debug == 'print':
        print_image(img_gblur, os.path.join(params.debug_outdir, str(params.device) + '_gaussian_blur.png'))
    elif params.debug == 'plot':
        if len(np.shape(img_gblur)) == 3:
            plot_image(img_gblur)
        else:
            plot_image(img_gblur, cmap='gray')

    return img_gblur
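
A standalone impulse-response sketch of cv2.GaussianBlur with sigma_x = 0, in which case OpenCV derives the sigma from the kernel size:

import cv2
import numpy as np

img = np.zeros((9, 9), dtype=np.uint8)
img[4, 4] = 255  # single bright impulse

blurred = cv2.GaussianBlur(img, (5, 5), 0)
print(blurred[4, 4] > blurred[4, 2] > 0)  # True: energy falls off away from the center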
Example #13
def sobel_filter(gray_img, dx, dy, ksize):
    """This is a filtering method used to identify and highlight gradient edges/features using the 1st derivative.
       Typically used to identify gradients along the x-axis (dx = 1, dy = 0) and y-axis (dx = 0, dy = 1) independently.
       Performance is quite similar to Scharr filter. Used to detect edges / changes in pixel intensity. ddepth = -1
       specifies that the dimensions of output image will be the same as the input image.

    Inputs:
    gray_img = Grayscale image data
    dx       = derivative of x to analyze
    dy       = derivative of y to analyze
    ksize    = specifies the size of the kernel (must be an odd integer: 1, 3, 5, ..., 31)

    Returns:
    sb_img   = Sobel filtered image

    :param gray_img: numpy.ndarray
    :param dx: int
    :param dy: int
    :param ksize: int
    :return sb_img: numpy.ndarray
    """
    params.device += 1
    sb_img = cv2.Sobel(src=gray_img, ddepth=-1, dx=dx, dy=dy, ksize=ksize)

    if params.debug == 'print':
        name = os.path.join(params.debug_outdir,
                            str(params.device) + '_sb_img_dx' + str(dx) + '_dy' + str(dy) + '_kernel' + str(ksize) + '.png')
        print_image(sb_img, name)
    elif params.debug == 'plot':
        plot_image(sb_img, cmap='gray')
    return sb_img
Example #14
def image_subtract(gray_img1, gray_img2):
    """This is a function used to subtract values of one gray-scale image array from another gray-scale image array. The
    resulting gray-scale image array has a minimum element value of zero. That is all negative values resulting from the
    subtraction are forced to zero.

    Inputs:
    gray_img1   = Grayscale image data from which gray_img2 will be subtracted
    gray_img2   = Grayscale image data which will be subtracted from gray_img1

    Returns:
    new_img = subtracted image

    :param gray_img1: numpy.ndarray
    :param gray_img2: numpy.ndarray
    :return new_img: numpy.ndarray
    """

    params.device += 1  # increment device

    # check inputs for gray-scale
    if len(np.shape(gray_img1)) != 2 or len(np.shape(gray_img2)) != 2:
        fatal_error("Input image is not gray-scale")

    new_img = gray_img1.astype(np.float64) - gray_img2.astype(np.float64)  # subtract values
    new_img[np.where(new_img < 0)] = 0  # force negative array values to zero
    new_img = new_img.astype(np.uint8)  # typecast image to 8-bit image
    # print-plot handling
    if params.debug == 'print':
        print_image(new_img, os.path.join(params.debug_outdir, str(params.device) + "_subtraction.png"))
    elif params.debug == 'plot':
        plot_image(new_img, cmap='gray')
    return new_img  # return
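
A tiny standalone example of the uint8 wrap-around that the float64 round-trip above avoids:

import numpy as np

a = np.array([[10]], dtype=np.uint8)
b = np.array([[20]], dtype=np.uint8)

print(a - b)  # uint8 wraps: 10 - 20 -> 246

diff = a.astype(np.float64) - b.astype(np.float64)
diff[diff < 0] = 0
print(diff.astype(np.uint8))  # 0, as image_subtract returns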
Example #15
def watershed_segmentation(rgb_img, mask, distance=10):
    """Uses the watershed algorithm to detect boundary of objects. Needs a marker file which specifies area which is
       object (white), background (grey), unknown area (black).

    Inputs:
    rgb_img             = image to perform watershed on needs to be 3D (i.e. np.shape = x,y,z not np.shape = x,y)
    mask                = binary image, single channel, object in white and background black
    distance            = min_distance of local maximum

    Returns:
    analysis_image      = list of output images

    :param rgb_img: numpy.ndarray
    :param mask: numpy.ndarray
    :param distance: int
    :return analysis_image: list
    """
    params.device += 1
    dist_transform = cv2.distanceTransformWithLabels(mask, cv2.DIST_L2, maskSize=0)[0]

    localMax = peak_local_max(dist_transform, indices=False, min_distance=distance, labels=mask)

    markers = ndi.label(localMax, structure=np.ones((3, 3)))[0]
    dist_transform1 = -dist_transform
    labels = watershed(dist_transform1, markers, mask=mask)

    img1 = np.copy(rgb_img)

    rand_color = color_palette(len(np.unique(labels)))
    for x in np.unique(labels):
        img1[labels == x] = rand_color[x]

    img2 = apply_mask(img1, mask, 'black')

    joined = np.concatenate((img2, rgb_img), axis=1)

    estimated_object_count = len(np.unique(markers)) - 1

    analysis_image = [joined]

    if params.debug == 'print':
        print_image(dist_transform, os.path.join(params.debug_outdir, str(params.device) + '_watershed_dist_img.png'))
        print_image(joined, os.path.join(params.debug_outdir, str(params.device) + '_watershed_img.png'))
    elif params.debug == 'plot':
        plot_image(dist_transform, cmap='gray')
        plot_image(joined)

    outputs.add_observation(variable='estimated_object_count', trait='estimated object count',
                            method='plantcv.plantcv.watershed', scale='none', datatype=int,
                            value=estimated_object_count, label='none')

    # Store images
    outputs.images.append(analysis_image)

    return analysis_image
Example #16
def prune(skel_img, size):
    """
    The pruning algorithm was inspired by Jean-Patrick Pommier: https://gist.github.com/jeanpat/5712699
    Iteratively remove endpoints (tips) from a skeletonized image. "Prunes" barbs off a skeleton.

    Inputs:
    skel_img    = Skeletonized image
    size        = Size to get pruned off each branch

    Returns:
    pruned_img  = Pruned image

    :param skel_img: numpy.ndarray
    :param size: int
    :return pruned_img: numpy.ndarray

    """
    # Store debug
    debug = params.debug
    params.debug = None

    pruned_img = skel_img.copy()

    # Check to see if the skeleton has multiple objects
    objects, _ = find_objects(pruned_img, pruned_img)
    if not len(objects) == 1:
        print("Warning: Multiple objects detected! Pruning will further separate the difference pieces.")

    # Iteratively remove endpoints (tips) from a skeleton
    for _ in range(size):
        endpoints = find_tips(pruned_img)
        pruned_img = image_subtract(pruned_img, endpoints)

    # Make debugging image
    pruned_plot = np.zeros(skel_img.shape[:2], np.uint8)
    pruned_plot = cv2.cvtColor(pruned_plot, cv2.COLOR_GRAY2RGB)
    skel_obj, skel_hierarchy = find_objects(skel_img, skel_img)
    pruned_obj, pruned_hierarchy = find_objects(pruned_img, pruned_img)
    cv2.drawContours(pruned_plot, skel_obj, -1, (0, 0, 255), params.line_thickness,
                     lineType=8, hierarchy=skel_hierarchy)
    cv2.drawContours(pruned_plot, pruned_obj, -1, (255, 255, 255), params.line_thickness,
                     lineType=8, hierarchy=pruned_hierarchy)

    # Reset debug mode
    params.debug = debug

    params.device += 1

    if params.debug == 'print':
        print_image(pruned_img, os.path.join(params.debug_outdir, str(params.device) + '_pruned.png'))
        print_image(pruned_plot, os.path.join(params.debug_outdir, str(params.device) + '_pruned_debug.png'))

    elif params.debug == 'plot':
        plot_image(pruned_img, cmap='gray')
        plot_image(pruned_plot)

    return pruned_img
Example #17
def segment_skeleton(skel_img, mask=None):
    """Segment a skeleton image into pieces.

    Inputs:
    skel_img        = Skeletonized image
    mask            = (Optional) binary mask for debugging. If provided, the debug image will be overlaid on the mask.

    Returns:
    segmented_img   = Segmented debugging image
    segment_objects = list of contours

    :param skel_img: numpy.ndarray
    :param mask: numpy.ndarray
    :return segmented_img: numpy.ndarray
    :return segment_objects: list
    """

    # Store debug
    debug = params.debug
    params.debug = None

    # Find branch points
    bp = find_branch_pts(skel_img)
    bp = dilate(bp, 3, 1)

    # Subtract from the skeleton so that leaves are no longer connected
    segments = image_subtract(skel_img, bp)

    # Gather contours of leaves
    segment_objects, _ = find_objects(segments, segments)

    # Color each segment a different color
    rand_color = color_palette(len(segment_objects))

    if mask is None:
        segmented_img = skel_img.copy()
    else:
        segmented_img = mask.copy()

    segmented_img = cv2.cvtColor(segmented_img, cv2.COLOR_GRAY2RGB)
    for i, cnt in enumerate(segment_objects):
        cv2.drawContours(segmented_img, segment_objects, i, rand_color[i], params.line_thickness, lineType=8)

    # Reset debug mode
    params.debug = debug
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(segmented_img, os.path.join(params.debug_outdir, str(params.device) + '_segmented.png'))
    elif params.debug == 'plot':
        plot_image(segmented_img)

    return segmented_img, segment_objects
Example #18
def segment_id(skel_img, objects, mask=None):
    """ Plot segment ID's

            Inputs:
            skel_img      = Skeletonized image
            objects       = List of contours
            mask          = (Optional) binary mask for debugging. If provided, debug image will be overlaid on the mask.

            Returns:
            segmented_img = Segmented image
            labeled_img   = Labeled image

            :param skel_img: numpy.ndarray
            :param objects: list
            :param mask: numpy.ndarray
            :return segmented_img: numpy.ndarray
            :return labeled_img: numpy.ndarray
            """
    label_coord_x = []
    label_coord_y = []

    if mask is None:
        segmented_img = skel_img.copy()
    else:
        segmented_img = mask.copy()

    segmented_img = cv2.cvtColor(segmented_img, cv2.COLOR_GRAY2RGB)

    # Color each segment a different color
    rand_color = color_palette(len(objects))

    # Plot all segment contours
    for i, cnt in enumerate(objects):
        cv2.drawContours(segmented_img, objects, i, rand_color[i], params.line_thickness, lineType=8)
        # Store coordinates for labels
        label_coord_x.append(objects[i][0][0][0])
        label_coord_y.append(objects[i][0][0][1])

    labeled_img = segmented_img.copy()

    for i, cnt in enumerate(objects):
        # Label slope lines
        w = label_coord_x[i]
        h = label_coord_y[i]
        text = "ID:{}".format(i)
        cv2.putText(img=labeled_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=params.text_size, color=rand_color[i], thickness=params.text_thickness)
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(labeled_img, os.path.join(params.debug_outdir, str(params.device) + '_segmented_ids.png'))
    elif params.debug == 'plot':
        plot_image(labeled_img)

    return segmented_img, labeled_img
Example #19
def colorize_masks(masks, colors):
    """Plot masks with different colors
    Inputs:
        masks    = list of masks to colorize
        colors   = list of colors (either keys from the color_dict or a list of custom tuples)

        :param masks: list
        :param colors: list
        :return colored_img: ndarray
        """

    # Users must enter the exact same number of colors as classes they'd like to color
    num_classes = len(masks)
    num_colors = len(colors)
    if not num_classes == num_colors:
        fatal_error("The number of colors provided doesn't match the number of class masks provided.")

    # Check to make sure user provided at least one mask and color
    if len(colors) == 0 or len(masks) == 0:
        fatal_error("At least one class mask and color must be provided.")

    # Dictionary of colors and the BGR values, based on some of the colors listed here:
    # https://en.wikipedia.org/wiki/X11_color_names
    color_dict = {
        'white': (255, 255, 255), 'black': (0, 0, 0), 'aqua': (0, 255, 255), 'blue': (255, 0, 0),
        'blue violet': (228, 44, 138), 'brown': (41, 41, 168), 'chartreuse': (0, 255, 128),
        'dark blue': (140, 0, 0), 'gray': (169, 169, 169), 'yellow': (0, 255, 255),
        'turquoise': (210, 210, 64), 'red': (0, 0, 255), 'purple': (241, 33, 161),
        'orange red': (0, 69, 255), 'orange': (0, 166, 255), 'lime': (0, 255, 0),
        'lime green': (52, 205, 52), 'fuchsia': (255, 0, 255), 'crimson': (61, 20, 220),
        'beige': (197, 220, 246), 'chocolate': (31, 105, 210), 'coral': (79, 128, 255),
        'dark green': (0, 100, 0), 'dark orange': (0, 140, 255), 'green yellow': (46, 255, 174),
        'light blue': (230, 218, 174), 'tomato': (72, 100, 255), 'slate gray': (143, 128, 113),
        'gold': (0, 215, 255), 'goldenrod': (33, 166, 218), 'light green': (143, 238, 143),
        'sea green': (77, 141, 46), 'dark red': (0, 0, 141), 'pink': (204, 192, 255),
        'dark yellow': (0, 205, 255), 'green': (0, 255, 0)}

    ix, iy = np.shape(masks[0])
    colored_img = np.zeros((ix, iy, 3), dtype=np.uint8)
    # Assign pixels to the selected color

    for i in range(0, len(masks)):
        mask = np.copy(masks[i])
        mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
        if isinstance(colors[i], tuple):
            mask[masks[i] > 0] = colors[i]
        elif isinstance(colors[i], str):
            mask[masks[i] > 0] = color_dict[colors[i]]
        else:
            fatal_error("All elements of the 'colors' list must be either str or tuple")
        colored_img = colored_img + mask

    if params.debug == 'print':
        print_image(colored_img, os.path.join(params.debug_outdir, str(params.device) + '_classes_plot.png'))
    elif params.debug == 'plot':
        plot_image(colored_img)

    return colored_img
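
A hedged usage sketch of the function above with two complementary masks; the plantcv import path is an assumption and may differ between PlantCV versions:

import numpy as np
from plantcv import plantcv as pcv

plant_mask = np.zeros((4, 4), dtype=np.uint8)
plant_mask[:2, :] = 255
background_mask = 255 - plant_mask

# 'green' and 'black' are keys in the function's color_dict
colored = pcv.visualize.colorize_masks(masks=[plant_mask, background_mask], colors=['green', 'black'])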
Example #20
def object_composition(img, contours, hierarchy):
    """Groups objects into a single object, usually done after object filtering.

    Inputs:
    img       = RGB or grayscale image data for plotting
    contours  = Contour list
    hierarchy = Contour hierarchy NumPy array

    Returns:
    group    = grouped contours list
    mask     = image mask

    :param img: numpy.ndarray
    :param contours: list
    :param hierarchy: numpy.ndarray
    :return group: list
    :return mask: numpy.ndarray
    """

    params.device += 1
    ori_img = np.copy(img)
    # If the reference image is grayscale convert it to color
    if len(np.shape(ori_img)) == 2:
        ori_img = cv2.cvtColor(ori_img, cv2.COLOR_GRAY2BGR)

    stack = np.zeros((len(contours), 1))
    r, g, b = cv2.split(ori_img)
    mask = np.zeros(g.shape, dtype=np.uint8)

    for c, cnt in enumerate(contours):
        if hierarchy[0][c][2] == -1 and hierarchy[0][c][3] > -1:
            stack[c] = 0
        else:
            stack[c] = 1

    ids = np.where(stack == 1)[0]
    if len(ids) > 0:
        group = np.vstack([contours[i] for i in ids])
        cv2.drawContours(mask, contours, -1, 255, -1, hierarchy=hierarchy)

        if params.debug is not None:
            cv2.drawContours(ori_img, group, -1, (255, 0, 0), params.line_thickness)
            for cnt in contours:
                cv2.drawContours(ori_img, cnt, -1, (255, 0, 0), params.line_thickness)
            if params.debug == 'print':
                print_image(ori_img, os.path.join(params.debug_outdir, str(params.device) + '_objcomp.png'))
                print_image(mask, os.path.join(params.debug_outdir, str(params.device) + '_objcomp_mask.png'))
            elif params.debug == 'plot':
                plot_image(ori_img)
        return group, mask
    else:
        print("Warning: Invalid contour.")
        return None, None
Example #21
def _call_adaptive_threshold(gray_img, max_value, adaptive_method, threshold_method, method_name):
    # Threshold the image
    bin_img = cv2.adaptiveThreshold(gray_img, max_value, adaptive_method, threshold_method, 11, 2)

    # Print or plot the binary image if debug is on
    if params.debug == 'print':
        print_image(bin_img, os.path.join(params.debug_outdir,
                                          str(params.device) + method_name + '.png'))
    elif params.debug == 'plot':
        plot_image(bin_img, cmap='gray')

    return bin_img
Example #22
def rotate(img, rotation_deg, crop):
    """Rotate image, sometimes it is necessary to rotate image, especially when clustering for
       multiple plants is needed.

    Inputs:
    img          = RGB or grayscale image data
    rotation_deg = rotation angle in degrees, can be a negative number,
                   positive values move counter clockwise.
    crop         = either true or false, if true, dimensions of rotated image will be same as original image.

    Returns:
    rotated_img  = rotated image

    :param img: numpy.ndarray
    :param rotation_deg: double
    :param crop: bool
    :return rotated_img: numpy.ndarray
    """
    params.device += 1

    if len(np.shape(img)) == 3:
        iy, ix, iz = np.shape(img)
    else:
        iy, ix = np.shape(img)

    m = cv2.getRotationMatrix2D((ix / 2, iy / 2), rotation_deg, 1)

    cos = np.abs(m[0, 0])
    sin = np.abs(m[0, 1])

    if not crop:
        # compute the new bounding dimensions of the image
        nw = int((iy * sin) + (ix * cos))
        nh = int((iy * cos) + (ix * sin))

        # adjust the rotation matrix to take into account translation
        m[0, 2] += (nw / 2) - (ix / 2)
        m[1, 2] += (nh / 2) - (iy / 2)

        rotated_img = cv2.warpAffine(img, m, (nw, nh))
    else:
        rotated_img = cv2.warpAffine(img, m, (ix, iy))

    if params.debug == 'print':
        print_image(rotated_img, os.path.join(params.debug_outdir, str(params.device) + str(rotation_deg) + '_rotated_img.png'))

    elif params.debug == 'plot':
        if len(np.shape(img)) == 3:
            plot_image(rotated_img)
        else:
            plot_image(rotated_img, cmap='gray')

    return rotated_img
Example #23
def segment_path_length(segmented_img, objects):
    """Use segments to calculate geodesic distance per segment.

    Inputs:
    segmented_img = Segmented image to plot lengths on
    objects       = List of contours

    Returns:
    labeled_img   = Segmented debugging image with lengths labeled

    :param segmented_img: numpy.ndarray
    :param objects: list
    :return labeled_img: numpy.ndarray
    """

    label_coord_x = []
    label_coord_y = []
    segment_lengths = []
    labeled_img = segmented_img.copy()

    for i, cnt in enumerate(objects):
        # Calculate geodesic distance, divide by two since cv2 seems to be taking the perimeter of the contour
        segment_lengths.append(cv2.arcLength(objects[i], False) / 2)
        # Store coordinates for labels
        label_coord_x.append(objects[i][0][0][0])
        label_coord_y.append(objects[i][0][0][1])

    segment_ids = []

    # Put labels of length
    for c, value in enumerate(segment_lengths):
        text = "{:.2f}".format(value)
        w = label_coord_x[c]
        h = label_coord_y[c]
        cv2.putText(img=labeled_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=params.text_size, color=(150, 150, 150), thickness=params.text_thickness)
        segment_label = "ID" + str(c)
        segment_ids.append(c)

    outputs.add_observation(variable='segment_path_length', trait='segment path length',
                            method='plantcv.plantcv.morphology.segment_path_length', scale='pixels', datatype=list,
                            value=segment_lengths, label=segment_ids)

    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(labeled_img, os.path.join(params.debug_outdir, str(params.device) + '_segment_path_lengths.png'))
    elif params.debug == 'plot':
        plot_image(labeled_img)

    return labeled_img
Example #24
def _draw_roi(img, roi_contour):
    """Draw an ROI

    :param img: numpy.ndarray
    :param roi_contour: list
    """
    # Make a copy of the reference image
    ref_img = np.copy(img)
    # If the reference image is grayscale convert it to color
    if len(np.shape(ref_img)) == 2:
        ref_img = cv2.cvtColor(ref_img, cv2.COLOR_GRAY2BGR)
    # Draw the contour on the reference image
    cv2.drawContours(ref_img, roi_contour, -1, (255, 0, 0), params.line_thickness)
    if params.debug == "print":
        # If debug is print, save the image to a file
        print_image(ref_img, os.path.join(params.debug_outdir, str(params.device) + "_roi.png"))
    elif params.debug == "plot":
        # If debug is plot, print to the plotting device
        plot_image(ref_img)
Example #25
def readimage(filename, mode="native"):
    """Read image from file.

    Inputs:
    filename = name of image file
    mode     = mode of imread ("native", "rgb", "rgba", "gray")

    Returns:
    img      = image object as numpy array
    path     = path to image file
    img_name = name of image file

    :param filename: str
    :param mode: str
    :return img: numpy.ndarray
    :return path: str
    :return img_name: str
    """
    if mode.upper() == "GRAY" or mode.upper() == "GREY":
        img = cv2.imread(filename, 0)
    elif mode.upper() == "RGB":
        img = cv2.imread(filename)
    elif mode.upper() == "RGBA":
        img = cv2.imread(filename, -1)
    else:
        img = cv2.imread(filename, -1)

    # Default to drop alpha channel if user doesn't specify 'rgba'
    if len(np.shape(img)) == 3 and np.shape(img)[2] == 4 and mode.upper() == "NATIVE":
        img = cv2.imread(filename)

    if img is None:
        fatal_error("Failed to open " + filename)

    # Split path from filename
    path, img_name = os.path.split(filename)

    if params.debug == "print":
        print_image(img, os.path.join(params.debug_outdir, "input_image.png"))
    elif params.debug == "plot":
        plot_image(img)

    return img, path, img_name
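
For reference, the cv2.imread flags used above (a standalone sketch assuming a test.png exists in the working directory):

import cv2

gray = cv2.imread("test.png", 0)        # 0: force single-channel grayscale
bgr = cv2.imread("test.png")            # default: 3-channel BGR, alpha dropped
unchanged = cv2.imread("test.png", -1)  # -1: as-is, keeps an alpha channel if present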
Example #26
def rgb2gray(rgb_img):
    """Convert image from RGB colorspace to Gray.

    Inputs:
    rgb_img    = RGB image data

    Returns:
    gray   = grayscale image

    :param rgb_img: numpy.ndarray
    :return gray: numpy.ndarray
    """

    gray = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
    params.device += 1
    if params.debug == 'print':
        print_image(gray, os.path.join(params.debug_outdir, str(params.device) + '_gray.png'))
    elif params.debug == 'plot':
        plot_image(gray, cmap='gray')
    return gray
Example #27
def logical_or(bin_img1, bin_img2):
    """Join two images using the bitwise OR operator.

    Inputs:
    bin_img1   = Binary image data to be compared to bin_img2
    bin_img2   = Binary image data to be compared to bin_img1

    Returns:
    merged     = joined binary image

    :param bin_img1: numpy.ndarray
    :param bin_img2: numpy.ndarray
    :return merged: numpy.ndarray
    """

    params.device += 1
    merged = cv2.bitwise_or(bin_img1, bin_img2)
    if params.debug == 'print':
        print_image(merged, os.path.join(params.debug_outdir, str(params.device) + '_or_joined.png'))
    elif params.debug == 'plot':
        plot_image(merged, cmap='gray')
    return merged
Example #28
def logical_xor(bin_img1, bin_img2):
    """Join two images using the bitwise XOR operator.

    Inputs:
    bin_img1   = Binary image data to be compared to bin_img2
    bin_img2   = Binary image data to be compared to bin_img1

    Returns:
    merged     = joined binary image

    :param bin_img1: numpy.ndarray
    :param bin_img2: numpy.ndarray
    :return merged: numpy.ndarray
    """

    params.device += 1
    merged = cv2.bitwise_xor(bin_img1, bin_img2)
    if params.debug == 'print':
        print_image(merged, os.path.join(params.debug_outdir, str(params.device) + '_xor_joined.png'))
    elif params.debug == 'plot':
        plot_image(merged, cmap='gray')
    return merged
Example #29
def fill(bin_img, size):
    """Identifies objects and fills objects that are less than size.

    Inputs:
    bin_img      = Binary image data
    size         = minimum object area size in pixels (integer)


    Returns:
    filtered_img = image with objects filled

    :param bin_img: numpy.ndarray
    :param size: int
    :return filtered_img: numpy.ndarray
    """
    params.device += 1

    # Make sure the image is binary
    if len(np.shape(bin_img)) != 2 or len(np.unique(bin_img)) != 2:
        fatal_error("Image is not binary")

    # Cast binary image to boolean
    bool_img = bin_img.astype(bool)

    # Find and fill contours
    bool_img = remove_small_objects(bool_img, size)

    # Cast boolean image to binary and make a copy of the binary image for returning
    filtered_img = np.copy(bool_img.astype(np.uint8) * 255)

    if params.debug == 'print':
        print_image(
            filtered_img,
            os.path.join(params.debug_outdir,
                         str(params.device) + '_fill' + str(size) + '.png'))
    elif params.debug == 'plot':
        plot_image(filtered_img, cmap='gray')

    return filtered_img
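
A standalone sketch of skimage's remove_small_objects, which does the actual filtering above: connected objects below min_size pixels are dropped from the boolean mask:

import numpy as np
from skimage.morphology import remove_small_objects

mask = np.zeros((10, 10), dtype=bool)
mask[1, 1] = True        # 1-pixel speck
mask[4:8, 4:8] = True    # 16-pixel blob

cleaned = remove_small_objects(mask, min_size=10)
print(int(cleaned.sum()))  # 16: the speck is removed, the blob remains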
Example #30
def rgb2gray_lab(rgb_img, channel):
    """Convert image from RGB colorspace to LAB colorspace. Returns the specified subchannel as a gray image.

    Inputs:
    rgb_img   = RGB image data
    channel   = color subchannel (l = lightness, a = green-magenta, b = blue-yellow)

    Returns:
    l | a | b = grayscale image from one LAB color channel

    :param rgb_img: numpy.ndarray
    :param channel: str
    :return channel: numpy.ndarray
    """
    # Auto-increment the device counter
    params.device += 1
    # The allowable channel inputs are l, a or b
    names = {"l": "lightness", "a": "green-magenta", "b": "blue-yellow"}
    if channel not in names:
        fatal_error("Channel " + str(channel) + " is not l, a or b!")

    # Convert the input BGR image to LAB colorspace
    lab = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2LAB)
    # Split LAB channels
    l, a, b = cv2.split(lab)
    # Create a channel dictionary for lookups by a channel name index
    channels = {"l": l, "a": a, "b": b}

    if params.debug == "print":
        print_image(
            channels[channel],
            os.path.join(
                params.debug_outdir,
                str(params.device) + "_lab_" + names[channel] + ".png"))
    elif params.debug == "plot":
        plot_image(channels[channel], cmap="gray")

    return channels[channel]
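
A standalone check of the channel-splitting pattern above; in OpenCV's 8-bit LAB encoding the a and b channels are offset by 128, so a green input sits well below that midpoint in the a channel:

import cv2
import numpy as np

bgr = np.zeros((2, 2, 3), dtype=np.uint8)
bgr[:, :, 1] = 255  # solid green in BGR order

lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
print(int(a[0, 0]) < 128)  # True: green pulls 'a' toward its green (low) end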
Example #31
def scharr_filter(img, dX, dY, scale, device, debug=None):
    """This is a filtering method used to identify and highlight gradient edges/features using the 1st derivative.
       Typically used to identify gradients along the x-axis (dx = 1, dy = 0) and y-axis (dx = 0, dy = 1) independently.
       Performance is quite similar to Sobel filter. Used to detect edges / changes in pixel intensity. ddepth = -1
       specifies that the dimensions of output image will be the same as the input image.

    Inputs:
    img    = image
    dx     = derivative of x to analyze (1-3)
    dy     = derivative of y to analyze (1-3)
    scale  = scaling factor applied (multiplied) to computed Scharr values (scale = 1 is unscaled)
    device = device number. Used to count steps in the pipeline
    debug  = None, print, or plot. Print = save to file, Plot = print to screen.

    Returns:
    device   = device number
    sr_img   = Scharr filtered image

    :param img: numpy array
    :param dX: int
    :param dY: int
    :param scale: int
    :param device: int
    :param debug: str
    :return device: int
    :return sr_img: numpy array
    """

    sr_img = cv2.Scharr(src=img, ddepth=-1, dx=dX, dy=dY, scale=scale)
    device += 1
    if debug == 'print':
        print_image(
            sr_img,
            str(device) + '_sr_img_dx' + str(dX) + '_dy' + str(dY) + '_scale' +
            str(scale) + '.png')
    elif debug == 'plot':
        plot_image(sr_img, cmap='gray')
    return device, sr_img
Example #32
def distance_transform(img, distanceType, maskSize, device, debug=None):
    """Creates an image where for each object pixel, a number is assigned that corresponds to the distance to the
    nearest background pixel.

    Inputs:
    img             = img object, binary
    distanceType    = Type of distance. It can be CV_DIST_L1, CV_DIST_L2 , or CV_DIST_C which are 1, 2 and 3,
                      respectively.
    device          = device number. Used to count steps in the pipeline
    debug           = None, print, or plot. Print = save to file, Plot = print to screen.
    maskSize        = Size of the distance transform mask. It can be 3, 5, or CV_DIST_MASK_PRECISE (the latter option
                      is only supported by the first function). In case of the CV_DIST_L1 or CV_DIST_C distance type,
                      the parameter is forced to 3 because a 3 by 3 mask gives the same result as 5 by 5 or any larger
                      aperture.

    Returns:
    device          = device number
    norm_image      = grayscale distance-transformed image normalized between [0,1]

    :param img: numpy array
    :param distanceType: int
    :param maskSize: int
    :param device: int
    :param debug: str
    :return device: int
    :return norm_image: numpy array
    """

    device += 1
    dist = cv2.distanceTransform(src=img, distanceType=distanceType, maskSize=maskSize)
    norm_image = cv2.normalize(src=dist, dst=dist, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)

    if debug == 'print':
        print_image(norm_image, (str(device) + '_distance_transform.png'))
    elif debug == 'plot':
        plot_image(norm_image, cmap='gray')

    return device, norm_image
Example #35
def median_blur(gray_img, ksize):
    """Applies a median blur filter (applies median value to central pixel within a kernel size ksize x ksize).

    Inputs:
    gray_img  = Grayscale image data
    ksize     = kernel size => ksize x ksize box

    Returns:
    img_mblur = blurred image

    :param gray_img: numpy.ndarray
    :param ksize: int
    :return img_mblur: numpy.ndarray
    """

    img_mblur = cv2.medianBlur(gray_img, ksize)
    params.device += 1
    if params.debug == 'print':
        print_image(img_mblur, os.path.join(params.debug_outdir,
                                            str(params.device) + '_median_blur' + str(ksize) + '.png'))
    elif params.debug == 'plot':
        plot_image(img_mblur, cmap='gray')
    return img_mblur
Example #37
def readimage(filename, mode="native"):
    """Read image from file.

    Inputs:
    filename = name of image file
    mode     = mode of imread ("native", "rgb", "gray")

    Returns:
    img      = image object as numpy array
    path     = path to image file
    img_name = name of image file

    :param filename: str
    :param mode: str
    :return img: numpy.ndarray
    :return path: str
    :return img_name: str
    """
    if mode.upper() == "GRAY" or mode.upper() == "GREY":
        img = cv2.imread(filename, 0)
    elif mode.upper() == "RGB":
        img = cv2.imread(filename)
    else:
        img = cv2.imread(filename, -1)

    if img is None:
        fatal_error("Failed to open " + filename)

    # Split path from filename
    path, img_name = os.path.split(filename)

    if params.debug == "print":
        print_image(img, os.path.join(params.debug_outdir, "input_image.png"))
    elif params.debug == "plot":
        plot_image(img)

    return img, path, img_name
Example #38
def rgb2gray_hsv(rgb_img, channel):
    """Convert an RGB color image to HSV colorspace and return a gray image (one channel).

    Inputs:
    rgb_img = RGB image data
    channel = color subchannel (h = hue, s = saturation, v = value/intensity/brightness)

    Returns:
    h | s | v = image from single HSV channel

    :param rgb_img: numpy.ndarray
    :param channel: str
    :return channel: numpy.ndarray
    """
    # Auto-increment the device counter
    params.device += 1

    # The allowable channel inputs are h, s or v
    names = {"h": "hue", "s": "saturation", "v": "value"}
    channel = channel.lower()
    if channel not in names:
        fatal_error("Channel " + str(channel) + " is not h, s or v!")

    # Convert the input BGR image to HSV colorspace
    hsv = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2HSV)
    # Split HSV channels
    h, s, v = cv2.split(hsv)
    # Create a channel dictionary for lookups by a channel name index
    channels = {"h": h, "s": s, "v": v}

    if params.debug == "print":
        print_image(channels[channel], os.path.join(params.debug_outdir,
                                                    str(params.device) + "_hsv_" + names[channel] + ".png"))
    elif params.debug == "plot":
        plot_image(channels[channel], cmap="gray")

    return channels[channel]
Example #39
def gaussian_blur(device, img, ksize, sigmax=0, sigmay=None, debug=None):
    """Applies a Gaussian blur filter.

    Inputs:
    device  = device number. Used to count steps in the pipeline
    img     = img object
    ksize   = kernel size => ksize x ksize box, e.g. (5, 5)
    sigmax  = standard deviation in X direction; if 0, calculated from kernel size
    sigmay  = standard deviation in Y direction; if None, sigmay is taken to equal sigmax
    debug   = None, print, or plot. Print = save to file, Plot = print to screen.

    Returns:
    device    = device number
    img_gblur = blurred image

    :param img: numpy array
    :param ksize: tuple
    :param sigmax: int
    :param sigmay: str or int
    :param device: int
    :param debug: str
    :return device: int
    :return img_gblur: numpy array
    """

    img_gblur = cv2.GaussianBlur(img, ksize, sigmax, sigmay)

    device += 1
    if debug == 'print':
        print_image(img_gblur, (str(device) + '_gaussian_blur.png'))
    elif debug == 'plot':
        if len(np.shape(img_gblur)) == 3:
            plot_image(img_gblur)
        else:
            plot_image(img_gblur, cmap='gray')

    return device, img_gblur
Example #41
def rescale(gray_img, min_value=0, max_value=255):
    """Rescale image.

        Inputs:
        gray_img  = Grayscale image data
        min_value = (optional) new minimum value for range of interest. default = 0
        max_value = (optional) new maximum value for range of interest. default = 255

        Returns:
        rescaled_img = rescaled image

        :param gray_img: numpy.ndarray
        :param min_value: int
        :param max_value: int
        :return c: numpy.ndarray
        """
    if len(np.shape(gray_img)) != 2:
        fatal_error("Image is not grayscale")

    rescaled_img = np.interp(gray_img,
                             (np.nanmin(gray_img), np.nanmax(gray_img)),
                             (min_value, max_value))
    rescaled_img = rescaled_img.astype('uint8')

    # Autoincrement the device counter
    params.device += 1

    if params.debug == 'print':
        print_image(
            rescaled_img,
            os.path.join(params.debug_outdir,
                         str(params.device) + "_rescaled.png"))
    elif params.debug == 'plot':
        plot_image(rescaled_img, cmap='gray')

    return rescaled_img
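
A standalone sketch of the np.interp rescaling used above, mapping the observed grayscale range onto [0, 255]:

import numpy as np

gray = np.array([[50, 100],
                 [150, 200]], dtype=np.uint8)

rescaled = np.interp(gray, (np.nanmin(gray), np.nanmax(gray)), (0, 255)).astype('uint8')
print(rescaled)  # [[  0  85]
                 #  [170 255]]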
Example #42
def stdev_filter(img, ksize, borders='nearest'):
    """Computes a local standard deviation (texture) image using a moving window; the result can be thresholded
    to create a binary image. This function is quite slow.

    Inputs:
    img            = Grayscale image data
    ksize          = Kernel size for the texture measure calculation
    borders        = How the array borders are handled, either 'reflect',
                     'constant', 'nearest', 'mirror', or 'wrap'

    Returns:
    output         = Standard deviation values image

    :param img: numpy.ndarray
    :param ksize: int
    :param borders: str
    :return output: numpy.ndarray
    """

    # Make an array the same size as the original image
    output = np.zeros(img.shape, dtype=img.dtype)

    # Apply the texture function over the whole image
    generic_filter(img, np.std, size=ksize, output=output, mode=borders)

    if params.debug == "print":
        # If debug is print, save the image to a file
        print_image(
            output,
            os.path.join(params.debug_outdir,
                         str(params.device) + "_variance.png"))
    elif params.debug == "plot":
        # If debug is plot, print to the plotting device
        plot_image(output)

    return output
Example #43
def image_subtract(gray_img1, gray_img2):
    """This is a function used to subtract values of one gray-scale image array from another gray-scale image array. The
    resulting gray-scale image array has a minimum element value of zero. That is all negative values resulting from the
    subtraction are forced to zero.

    Inputs:
    gray_img1   = a gray-scale or binary image from which gray_img2 will be subtracted
    gray_img2   = a gray-scale or binary image which will be subtracted from gray_img1

    Returns:
    new_img = subtracted image

    :param gray_img1: numpy array
    :param gray_img2: numpy array
    :return new_img: numpy array
    """

    params.device += 1  # increment device

    # check inputs for gray-scale
    if len(np.shape(gray_img1)) != 2 or len(np.shape(gray_img2)) != 2:
        fatal_error("Input image is not gray-scale")

    new_img = gray_img1.astype(np.float64) - gray_img2.astype(np.float64)  # subtract values
    new_img[np.where(new_img < 0)] = 0  # force negative array values to zero
    new_img = new_img.astype(np.uint8)  # typecast image to 8-bit image
    # print-plot handling
    if params.debug == 'print':
        print_image(
            new_img,
            os.path.join(params.debug_outdir,
                         str(params.device) + "_subtraction.png"))
    elif params.debug == 'plot':
        plot_image(new_img, cmap='gray')
    return new_img  # return
Example #44
def rgb2gray_lab(rgb_img, channel):
    """Convert image from RGB colorspace to LAB colorspace. Returns the specified subchannel as a gray image.

    Inputs:
    rgb_img   = RGB image data
    channel   = color subchannel (l = lightness, a = green-magenta, b = blue-yellow)

    Returns:
    l | a | b = grayscale image from one LAB color channel

    :param rgb_img: numpy.ndarray
    :param channel: str
    :return channel: numpy.ndarray
    """
    # Auto-increment the device counter
    params.device += 1
    # The allowable channel inputs are l, a or b
    names = {"l": "lightness", "a": "green-magenta", "b": "blue-yellow"}
    channel = channel.lower()
    if channel not in names:
        fatal_error("Channel " + str(channel) + " is not l, a or b!")

    # Convert the input BGR image to LAB colorspace
    lab = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2LAB)
    # Split LAB channels
    l, a, b = cv2.split(lab)
    # Create a channel dictionary for lookups by a channel name index
    channels = {"l": l, "a": a, "b": b}

    if params.debug == "print":
        print_image(channels[channel], os.path.join(params.debug_outdir,
                                                    str(params.device) + "_lab_" + names[channel] + ".png"))
    elif params.debug == "plot":
        plot_image(channels[channel], cmap="gray")

    return channels[channel]
Example #45
def laplace_filter(img, k, scale, device, debug=None):
    """This is a filtering method used to identify and highlight fine edges based on the 2nd derivative. A very
       sensetive method to highlight edges but will also amplify background noise. ddepth = -1 specifies that the
       dimensions of output image will be the same as the input image.

    Inputs:
    img         = input image
    k           = apertures size used to calculate 2nd derivative filter, specifies the size of the kernel
                  (must be an odd integer: 1,3,5...)
    scale       = scaling factor applied (multiplied) to computed Laplacian values (scale = 1 is unscaled)
    device      = device number. Used to count steps in the pipeline
    debug       = None, print, or plot. Print = save to file, Plot = print to screen.

    Returns:
    device      = device number
    lp_filtered = laplacian filtered image

    :param img: numpy array
    :param k: int
    :param scale: int
    :param device: int
    :param debug: str
    :return device: int
    :return lp_filtered: numpy array
    """

    lp_filtered = cv2.Laplacian(src=img, ddepth=-1, ksize=k, scale=scale)
    device += 1
    if debug == 'print':
        print_image(
            lp_filtered,
            str(device) + '_lp_out_k' + str(k) + '_scale' + str(scale) +
            '.png')
    elif debug == 'plot':
        plot_image(lp_filtered, cmap='gray')
    return device, lp_filtered
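# A minimal usage sketch (hypothetical, not library source): a call using the
# older device-counter API above. k must be an odd integer; "plant.png" is a
# placeholder path and cv2 is assumed imported.
device = 0
gray = cv2.imread("plant.png", cv2.IMREAD_GRAYSCALE)
device, edges = laplace_filter(img=gray, k=3, scale=1, device=device)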
Example #46
0
def flip(img, direction):
    """Flip image.

    Inputs:
    img       = RGB or grayscale image data
    direction = "horizontal" or "vertical"

    Returns:
    vh_img    = flipped image

    :param img: numpy.ndarray
    :param direction: str
    :return vh_img: numpy.ndarray
    """
    params.device += 1
    if direction == "vertical":
        vh_img = cv2.flip(img, 1)
    elif direction == "horizontal":
        vh_img = cv2.flip(img, 0)
    else:
        fatal_error(
            str(direction) +
            " is not a valid direction, must be horizontal or vertical")

    if params.debug == 'print':
        print_image(
            vh_img,
            os.path.join(params.debug_outdir,
                         str(params.device) + "_flipped.png"))
    elif params.debug == 'plot':
        if len(np.shape(vh_img)) == 3:
            plot_image(vh_img)
        else:
            plot_image(vh_img, cmap='gray')

    return vh_img
Example #47
0
def resize(img, resize_x, resize_y, device, debug=None):
    """Resize image.

    Inputs:
    img      = image to resize
    resize_x = scaling factor
    resize_y = scaling factor
    device   = device counter
    debug    = None, print, or plot. Print = save to file, Plot = print to screen.

    Returns:
    device   = device number
    reimg    = resized image

    :param img: numpy array
    :param resize_x: int
    :param resize_y: int
    :param device: int
    :param debug: str
    :return device: int
    :return reimg: numpy array
    """

    device += 1

    if resize_x <= 0 or resize_y <= 0:
        fatal_error("Resize factors must both be positive (non-zero) values!")

    reimg = cv2.resize(img, (0, 0), fx=resize_x, fy=resize_y)

    if debug == 'print':
        print_image(reimg, (str(device) + "_resize1.png"))
    elif debug == 'plot':
        plot_image(reimg, cmap='gray')

    return device, reimg
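# A minimal usage sketch (hypothetical, not library source): halve both image
# dimensions with the device-counter API above. The path is a placeholder; the
# factors are passed as floats here, which cv2.resize accepts even though the
# docstring annotates them as int.
device = 0
img = cv2.imread("plant.png")
device, half_img = resize(img=img, resize_x=0.5, resize_y=0.5, device=device)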
Example #48
0
def fill_holes(bin_img):
    """Flood fills holes in a binary mask

    Inputs:
    bin_img      = Binary image data

    Returns:
    filtered_img = image with objects filled

    :param bin_img: numpy.ndarray
    :return filtered_img: numpy.ndarray
    """
    params.device += 1

    # Make sure the image is binary
    if len(np.shape(bin_img)) != 2 or len(np.unique(bin_img)) != 2:
        fatal_error("Image is not binary")

    # Cast binary image to boolean
    bool_img = bin_img.astype(bool)

    # Flood fill holes
    bool_img = binary_fill_holes(bool_img)

    # Cast boolean image to binary and make a copy of the binary image for returning
    filtered_img = np.copy(bool_img.astype(np.uint8) * 255)

    if params.debug == 'print':
        print_image(
            filtered_img,
            os.path.join(params.debug_outdir,
                         str(params.device) + '_fill_holes' + '.png'))
    elif params.debug == 'plot':
        plot_image(filtered_img, cmap='gray')

    return filtered_img
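# A minimal usage sketch (hypothetical, not library source): a tiny synthetic
# binary mask with a one-pixel hole, assuming numpy is available as np and the
# same ambient plantcv params/debug helpers as the function above.
ring = np.zeros((5, 5), dtype=np.uint8)
ring[1:4, 1:4] = 255
ring[2, 2] = 0  # punch a hole in the square
filled = fill_holes(ring)
# filled[2, 2] == 255; the interior hole is flood filled.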
Example #49
0
def pseudocolor(gray_img,
                obj=None,
                mask=None,
                cmap=None,
                background="image",
                min_value=0,
                max_value=255,
                axes=True,
                colorbar=True,
                obj_padding="auto"):
    """Pseudocolor any grayscale image to custom colormap

    Inputs:
    gray_img    = grayscale image data
    obj         = (optional) ROI or plant contour object. If provided, the pseudocolored image gets cropped
                  down to the region of interest. default = None
    mask        = (optional) binary mask
    cmap        = (optional) colormap. default is the matplotlib default, viridis
    background  = (optional) background color/type, options are "image" (gray_img), "white", or "black" (requires a mask). default = 'image'
    min_value   = (optional) minimum value for range of interest. default = 0
    max_value   = (optional) maximum value for range of interest. default = 255
    axes        = (optional) if False then x- and y-axis won't be displayed, nor will the title. default = True
    colorbar    = (optional) if False then colorbar won't be displayed. default = True
    obj_padding = (optional) if "auto" (default) and an obj is supplied, then the image is cropped to an extent 20%
                  larger in each dimension than the object. A single integer is also accepted to define the padding
                  in pixels

    Returns:
    pseudo_image = pseudocolored image

    :param gray_img: numpy.ndarray
    :param obj: numpy.ndarray
    :param mask: numpy.ndarray
    :param cmap: str
    :param background: str
    :param min_value: numeric
    :param max_value: numeric
    :param axes: bool
    :param colorbar: bool
    :param obj_padding: str, int
    :return pseudo_image: numpy.ndarray
    """

    # Auto-increment the device counter
    params.device += 1

    # Make copies of the gray image
    gray_img1 = np.copy(gray_img)

    # Check if the image is grayscale
    if len(np.shape(gray_img)) != 2:
        fatal_error("Image must be grayscale.")

    # Apply the mask if given
    if mask is not None:
        if obj is not None:
            # Extract the bounding box of the contour
            x, y, w, h = cv2.boundingRect(obj)

            # Crop down the image
            crop_img = gray_img[y:y + h, x:x + w]

            # Setup a buffer around the bounding box of obj
            if type(obj_padding) is int:
                offsetx = obj_padding
                offsety = obj_padding
            elif type(obj_padding) is str and obj_padding.upper() == "AUTO":
                # Calculate the buffer size based on the contour size
                offsetx = int(w / 5)
                offsety = int(h / 5)
            else:
                fatal_error("Padding must either be 'auto' or an integer.")

            if background.upper() == "IMAGE":
                gray_img1 = gray_img1[y - offsety:y + h + offsety,
                                      x - offsetx:x + w + offsetx]
            else:
                # Crop img including buffer
                gray_img1 = cv2.copyMakeBorder(crop_img,
                                               offsety,
                                               offsety,
                                               offsetx,
                                               offsetx,
                                               cv2.BORDER_CONSTANT,
                                               value=(0, 0, 0))

            # Crop the mask to the same size as the image
            crop_mask = mask[y:y + h, x:x + w]
            mask = cv2.copyMakeBorder(crop_mask,
                                      offsety,
                                      offsety,
                                      offsetx,
                                      offsetx,
                                      cv2.BORDER_CONSTANT,
                                      value=(0, 0, 0))

        # Apply the mask
        masked_img = np.ma.array(gray_img1, mask=~mask.astype(bool))

        # Set the background color or type
        if background.upper() == "BLACK":
            # Background is all zeros
            bkg_img = np.zeros(np.shape(gray_img1), dtype=np.uint8)
            # Use the gray cmap for the background
            bkg_cmap = "gray"
        elif background.upper() == "WHITE":
            # Background is all 255 (white)
            bkg_img = np.zeros(np.shape(gray_img1), dtype=np.uint8)
            bkg_img += 255
            bkg_cmap = "gray"
        elif background.upper() == "IMAGE":
            # Set the background to the input gray image
            bkg_img = gray_img1
            bkg_cmap = "gray"
        else:
            fatal_error(
                "Background type {0} is not supported. Please use 'white', 'black', or 'image'."
                .format(background))

        # Pseudocolor the image, plot the background first
        pseudo_img1 = plt.imshow(bkg_img, cmap=bkg_cmap)
        # Overlay the masked grayscale image with the user input colormap
        plt.imshow(masked_img, cmap=cmap, vmin=min_value, vmax=max_value)

        if colorbar:
            plt.colorbar(fraction=0.033, pad=0.04)

        if axes:
            # Include image title
            plt.title('Pseudocolored image')
        else:
            # Remove axes
            plt.xticks([])
            plt.yticks([])

        # Store the current figure
        pseudo_img = plt.gcf()

        # Print or plot if debug is turned on
        if params.debug == 'print':
            plt.savefig(os.path.join(params.debug_outdir,
                                     str(params.device) +
                                     '_pseudocolored.png'),
                        dpi=params.dpi)
            plt.close()
        elif params.debug == 'plot':
            plot_image(pseudo_img1)
            # Use non-blocking mode in case the function is run more than once
            plt.show(block=False)
        elif params.debug is None:
            plt.show(block=False)

    else:
        # Pseudocolor the image
        pseudo_img1 = plt.imshow(gray_img1,
                                 cmap=cmap,
                                 vmin=min_value,
                                 vmax=max_value)

        if colorbar:
            # Include the colorbar
            plt.colorbar(fraction=0.033, pad=0.04)

        if axes:
            # Include image title
            plt.title('Pseudocolored image')
        else:
            # Remove axes
            plt.xticks([])
            plt.yticks([])

        pseudo_img = plt.gcf()

        # Print or plot if debug is turned on
        if params.debug == 'print':
            plt.savefig(os.path.join(params.debug_outdir,
                                     str(params.device) +
                                     '_pseudocolored.png'),
                        dpi=params.dpi)
            pseudo_img.clear()
            plt.close()
        elif params.debug == 'plot':
            plot_image(pseudo_img1)
            # Use non-blocking mode in case the function is run more than once
            plt.show(block=False)
        elif params.debug is None:
            plt.show(block=False)

    return pseudo_img
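# A minimal usage sketch (hypothetical, not library source): render a grayscale
# image over a white background. Both paths are placeholders, the mask is
# assumed to be a binary image, and cv2/matplotlib (plt) are assumed available
# as in the function above.
gray = cv2.imread("plant_gray.png", cv2.IMREAD_GRAYSCALE)
bin_mask = cv2.imread("plant_mask.png", cv2.IMREAD_GRAYSCALE)
fig = pseudocolor(gray_img=gray, mask=bin_mask, background="white",
                  cmap="viridis", min_value=0, max_value=255)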
Example #50
0
def custom_range(rgb_img, lower_thresh, upper_thresh, channel='gray'):
    """Creates a thresholded image and mask from an RGB image and threshold values.

    Inputs:
    rgb_img      = RGB image data
    lower_thresh = List of lower threshold values (0-255)
    upper_thresh = List of upper threshold values (0-255)
    channel      = Color-space channels of interest (RGB, HSV, LAB, or gray)

    Returns:
    mask         = Mask, binary image
    masked_img   = Masked image, keeping the part of image of interest

    :param rgb_img: numpy.ndarray
    :param lower_thresh: list
    :param upper_thresh: list
    :param channel: str
    :return mask: numpy.ndarray
    :return masked_img: numpy.ndarray
    """

    # Auto-increment the device counter
    params.device += 1

    if channel.upper() == 'HSV':

        # Check threshold inputs
        if not (len(lower_thresh) == 3 and len(upper_thresh) == 3):
            fatal_error("If using the HSV colorspace, 3 thresholds are needed for both lower_thresh and " +
                        "upper_thresh. If thresholding isn't needed for a channel, set lower_thresh=0 and " +
                        "upper_thresh=255")

        # Convert the RGB image to HSV colorspace
        hsv_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2HSV)

        # Separate channels
        hue = hsv_img[:, :, 0]
        saturation = hsv_img[:, :, 1]
        value = hsv_img[:, :, 2]

        # Make a mask for each channel
        h_mask = cv2.inRange(hue, lower_thresh[0], upper_thresh[0])
        s_mask = cv2.inRange(saturation, lower_thresh[1], upper_thresh[1])
        v_mask = cv2.inRange(value, lower_thresh[2], upper_thresh[2])

        # Apply the masks to the image
        result = cv2.bitwise_and(rgb_img, rgb_img, mask=h_mask)
        result = cv2.bitwise_and(result, result, mask=s_mask)
        masked_img = cv2.bitwise_and(result, result, mask=v_mask)

        # Combine masks
        mask = cv2.bitwise_and(s_mask, h_mask)
        mask = cv2.bitwise_and(mask, v_mask)

    elif channel.upper() == 'RGB':

        # Check threshold inputs
        if not (len(lower_thresh) == 3 and len(upper_thresh) == 3):
            fatal_error("If using the RGB colorspace, 3 thresholds are needed for both lower_thresh and " +
                        "upper_thresh. If thresholding isn't needed for a channel, set lower_thresh=0 and " +
                        "upper_thresh=255")

        # Separate channels (pcv.readimage reads RGB images in as BGR)
        blue = rgb_img[:, :, 0]
        green = rgb_img[:, :, 1]
        red = rgb_img[:, :, 2]

        # Make a mask for each channel
        b_mask = cv2.inRange(blue, lower_thresh[0], upper_thresh[0])
        g_mask = cv2.inRange(green, lower_thresh[1], upper_thresh[1])
        r_mask = cv2.inRange(red, lower_thresh[2], upper_thresh[2])

        # Apply the masks to the image
        result = cv2.bitwise_and(rgb_img, rgb_img, mask=b_mask)
        result = cv2.bitwise_and(result, result, mask=g_mask)
        masked_img = cv2.bitwise_and(result, result, mask=r_mask)

        # Combine masks
        mask = cv2.bitwise_and(b_mask, g_mask)
        mask = cv2.bitwise_and(mask, r_mask)

    elif channel.upper() == 'LAB':

        # Check threshold inputs
        if not (len(lower_thresh) == 3 and len(upper_thresh) == 3):
            fatal_error("If using the LAB colorspace, 3 thresholds are needed for both lower_thresh and " +
                        "upper_thresh. If thresholding isn't needed for a channel, set lower_thresh=0 and " +
                        "upper_thresh=255")

        # Convert the RGB image to LAB colorspace
        lab_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2LAB)

        # Separate channels (pcv.readimage reads RGB images in as BGR)
        lightness = lab_img[:, :, 0]
        green_magenta = lab_img[:, :, 1]
        blue_yellow = lab_img[:, :, 2]

        # Make a mask for each channel
        l_mask = cv2.inRange(lightness, lower_thresh[0], upper_thresh[0])
        gm_mask = cv2.inRange(green_magenta, lower_thresh[1], upper_thresh[1])
        by_mask = cv2.inRange(blue_yellow, lower_thresh[2], upper_thresh[2])

        # Apply the masks to the image
        result = cv2.bitwise_and(rgb_img, rgb_img, mask=l_mask)
        result = cv2.bitwise_and(result, result, mask=gm_mask)
        masked_img = cv2.bitwise_and(result, result, mask=by_mask)

        # Combine masks
        mask = cv2.bitwise_and(l_mask, gm_mask)
        mask = cv2.bitwise_and(mask, by_mask)

    elif channel.upper() == 'GRAY' or channel.upper() == 'GREY':

        # Check threshold input
        if not (len(lower_thresh) == 1 and len(upper_thresh) == 1):
            fatal_error("If useing a grayscale colorspace, 1 threshold is needed for both the " +
                        "lower_thresh and upper_thresh.")
        if len(np.shape(rgb_img))==3:
            # Convert RGB image to grayscale colorspace
            gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
        else:
            gray_img = rgb_img

        # Make a mask
        mask = cv2.inRange(gray_img, lower_thresh[0], upper_thresh[0])

        # Apply the masks to the image
        masked_img = cv2.bitwise_and(rgb_img, rgb_img, mask=mask)

    else:
        fatal_error(str(channel) + " is not a valid colorspace. Channel must be either 'RGB', 'HSV', 'LAB', or 'gray'.")

    # Print or plot the binary image if debug is on
    if params.debug == 'print':
        print_image(masked_img, os.path.join(params.debug_outdir,
                                             str(params.device) + channel + 'custom_thresh.png'))
        print_image(mask, os.path.join(params.debug_outdir,
                                       str(params.device) + channel + 'custom_thresh_mask.png'))
    elif params.debug == 'plot':
        plot_image(masked_img)
        plot_image(mask)

    return mask, masked_img
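# A minimal usage sketch (hypothetical, not library source): an HSV threshold
# keeping green hues. The bounds are illustrative only and the image path is a
# placeholder; cv2 is assumed imported.
rgb = cv2.imread("plant.png")
mask, kept = custom_range(rgb_img=rgb, lower_thresh=[30, 60, 60],
                          upper_thresh=[90, 255, 255], channel='HSV')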
Example #51
0
def _pseudocolored_image(histogram, bins, img, mask, background, channel,
                         filename, analysis_images):
    """Pseudocolor image.

    Inputs:
    histogram       = a normalized histogram of color values from one color channel
    bins            = number of color bins the channel is divided into
    img             = input image
    mask            = binary mask image
    background      = background image to use: the channel image (img) or white
    channel         = color channel name
    filename        = input image filename
    analysis_images = list of analysis image filenames

    Returns:
    analysis_images = list of analysis image filenames

    :param histogram: list
    :param bins: int
    :param img: numpy array
    :param mask: numpy array
    :param background: str
    :param channel: str
    :param filename: str
    :param analysis_images: list
    :return analysis_images: list
    """
    mask_inv = cv2.bitwise_not(mask)

    cplant = cv2.applyColorMap(histogram, colormap=2)
    cplant1 = cv2.bitwise_and(cplant, cplant, mask=mask)

    output_imgs = {
        "pseudo_on_img": {
            "background": "img",
            "img": None
        },
        "pseudo_on_white": {
            "background": "white",
            "img": None
        }
    }

    if background == 'img' or background == 'both':
        # mask the background and color the plant with color scheme 'jet'
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        img_back = cv2.bitwise_and(img_gray, img_gray, mask=mask_inv)
        img_back3 = np.dstack((img_back, img_back, img_back))

        output_imgs["pseudo_on_img"]["img"] = cv2.add(cplant1, img_back3)

    if background == 'white' or background == 'both':
        # Get the image size
        if len(np.shape(img)) == 3:
            ix, iy, iz = np.shape(img)
        else:
            ix, iy = np.shape(img)
        size = ix, iy
        back = np.zeros(size, dtype=np.uint8)
        w_back = back + 255
        w_back3 = np.dstack((w_back, w_back, w_back))
        img_back3 = cv2.bitwise_and(w_back3, w_back3, mask=mask_inv)
        output_imgs["pseudo_on_white"]["img"] = cv2.add(cplant1, img_back3)

    if filename:
        for key in output_imgs:
            if output_imgs[key]["img"] is not None:
                fig_name_pseudo = str(filename[0:-4]) + '_' + str(channel) + '_pseudo_on_' + \
                                  output_imgs[key]["background"] + '.jpg'
                path = os.path.dirname(filename)
                print_image(output_imgs[key]["img"], fig_name_pseudo)
                analysis_images.append(['IMAGE', 'pseudo', fig_name_pseudo])
    else:
        path = "."

    if params.debug is not None:
        if params.debug == 'print':
            for key in output_imgs:
                if output_imgs[key]["img"] is not None:
                    print_image(
                        output_imgs[key]["img"],
                        os.path.join(
                            params.debug_outdir,
                            str(params.device) + "_" +
                            output_imgs[key]["background"] +
                            '_pseudocolor.jpg'))
            fig_name = 'VIS_pseudocolor_colorbar_' + str(
                channel) + '_channel.svg'
            if not os.path.isfile(os.path.join(params.debug_outdir, fig_name)):
                plot_colorbar(path, fig_name, bins)
        elif params.debug == 'plot':
            for key in output_imgs:
                if output_imgs[key]["img"] is not None:
                    plot_image(output_imgs[key]["img"])

    return analysis_images
Example #52
0
def naive_bayes_classifier(img, pdf_file, device, debug=None):
    """Use the Naive Bayes classifier to output a plant binary mask.

    Inputs:
    img      = image object (NumPy ndarray), BGR colorspace
    pdf_file = filename of file containing PDFs output from the Naive Bayes training method (see plantcv-train.py)
    device   = device number. Used to count steps in the pipeline
    debug    = None, print, or plot. Print = save to file, Plot = print to screen.

    Returns:
    device   = device number
    masks    = dictionary of binary masks (ndarray), keyed by class name

    :param img: ndarray
    :param pdf_file: str
    :param device: int
    :param debug: str
    :return device: int
    :return masks: dict
    """
    device += 1

    # Initialize PDF dictionary
    pdfs = {}
    # Read the PDF file
    pf = open(pdf_file, "r")
    # Read the first line (header)
    pf.readline()
    # Read each line of the file and parse the PDFs, store in the PDF dictionary
    for row in pf:
        # Remove newline character
        row = row.rstrip("\n")
        # Split the row into columns on tab characters
        cols = row.split("\t")
        # Make sure there are the correct number of columns (i.e. is this a valid PDF file?)
        if len(cols) != 258:
            fatal_error(
                "Naive Bayes PDF file is not formatted correctly. Error on line:\n"
                + row)
        # Store the PDFs. Column 0 is the class, Column 1 is the color channel, the rest are p at
        # intensity values 0-255. Cast text p values as float
        class_name = cols[0]
        channel = cols[1]
        if class_name not in pdfs:
            pdfs[class_name] = {}
        pdfs[class_name][channel] = [float(i) for i in cols[2:]]
    # Close the PDF file
    pf.close()

    # Split the input BGR image into component channels for BGR, HSV, and LAB colorspaces
    # b, g, r = cv2.split(img)
    h, s, v = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    # l, gm, by = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2LAB))

    # Calculate the dimensions of the input image
    width, height, depth = np.shape(img)

    # Initialize an empty ndarray for plant and background. These will be used to store the joint probabilities
    px_p = {}
    for class_name in pdfs.keys():
        px_p[class_name] = np.zeros([width, height])

    # Loop over the image coordinates (each i, j pixel)
    for i in range(0, width):
        for j in range(0, height):
            for class_name in pdfs.keys():
                # Calculate the joint probability that this is in the class
                px_p[class_name][i][j] = pdfs[class_name]["hue"][h[i][j]] * pdfs[class_name]["saturation"][s[i][j]] * \
                                         pdfs[class_name]["value"][v[i][j]]

    # Initialize empty masks
    masks = {}
    for class_name in pdfs.keys():
        masks[class_name] = np.zeros([width, height], dtype=np.uint8)
    # Set pixel intensities to 255 (white) for the mask where the class has the highest probability
    for class_name in masks:
        background_classes = []
        for name in masks:
            if class_name != name:
                background_classes.append(px_p[name])
        background_class = np.maximum.reduce(background_classes)
        masks[class_name][np.where(px_p[class_name] > background_class)] = 255
    # mask[np.where(plant > bg)] = 255

    # Print or plot the mask if debug is not None
    if debug == "print":
        for class_name, mask in masks.items():
            print_image(
                mask,
                (str(device) + "_naive_bayes_" + class_name + "_mask.jpg"))
    elif debug == "plot":
        for class_name, mask in masks.items():
            plot_image(mask, cmap="gray")

    return device, masks
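# A minimal usage sketch (hypothetical, not library source): a call with the
# older device-counter API. "naive_bayes_pdfs.txt" stands in for a PDF file
# produced by the Naive Bayes training step; the image path and the "plant"
# class name are placeholders.
device = 0
bgr = cv2.imread("plant.png")
device, masks = naive_bayes_classifier(img=bgr,
                                       pdf_file="naive_bayes_pdfs.txt",
                                       device=device)
plant_mask = masks["plant"]  # assumes a class named "plant" was trained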
Example #53
0
def analyze_index(index_array,
                  mask,
                  histplot=False,
                  bins=100,
                  min_bin=0,
                  max_bin=1):
    """This extracts the hyperspectral index statistics and writes the values  as observations out to
       the Outputs class.

    Inputs:
    index_array  = Instance of the Spectral_data class, usually the output from pcv.hyperspectral.extract_index
    mask         = Binary mask made from selected contours
    histplot     = if True plots histogram of intensity values
    bins         = optional, number of classes to divide spectrum into
    min_bin      = optional, minimum bin value ("auto" or user input minimum value)
    max_bin      = optional, maximum bin value ("auto" or user input maximum value)

    Returns:
    analysis_image = histogram figure (ggplot) if histplot is True, otherwise None

    :param index_array: __main__.Spectral_data
    :param mask: numpy array
    :param histplot: bool
    :param bins: int
    :param max_bin: float, str
    :param min_bin: float, str
    :return analysis_image: ggplot, None
    """
    params.device += 1

    debug = params.debug
    params.debug = None
    analysis_image = None

    if len(np.shape(mask)) > 2 or len(np.unique(mask)) > 2:
        fatal_error("Mask should be a binary image of 0 and nonzero values.")

    if len(np.shape(index_array.array_data)) > 2:
        fatal_error("index_array data should be a grayscale image.")

    # Mask data and collect statistics about pixels within the masked image
    masked_array = index_array.array_data[np.where(mask > 0)]
    index_mean = np.average(masked_array)
    index_median = np.median(masked_array)
    index_std = np.std(masked_array)

    # Set starting point and max bin values
    maxval = max_bin
    b = min_bin

    # Calculate observed min and max pixel values of the masked array
    observed_max = np.nanmax(masked_array)
    observed_min = np.nanmin(masked_array)

    # Auto calculate max_bin if set
    if type(max_bin) is str and (max_bin.upper() == "AUTO"):
        maxval = float(
            round(observed_max, 8)
        )  # Auto bins will detect maxval to use for calculating labels/bins
    if type(min_bin) is str and (min_bin.upper() == "AUTO"):
        b = float(round(observed_min,
                        8))  # If bin_min is auto then overwrite starting value

    # Print a warning if observed min/max outside user defined range
    if observed_max > maxval or observed_min < b:
        print(
            "WARNING!!! The observed range of pixel values in your masked index provided is ["
            + str(observed_min) + ", " + str(observed_max) +
            "] but the user defined range of bins for pixel frequencies is [" +
            str(b) + ", " + str(maxval) +
            "]. Adjust min_bin and max_bin in order to avoid cutting off data being collected."
        )

    # Calculate histogram
    hist_val = [
        float(l[0]) for l in cv2.calcHist([masked_array.astype(np.float32)],
                                          [0], None, [bins], [b, maxval])
    ]
    bin_width = (maxval - b) / float(bins)
    bin_labels = [float(b)]
    plotting_labels = [float(b)]
    for i in range(bins - 1):
        b += bin_width
        bin_labels.append(b)
        plotting_labels.append(round(b, 2))

    # Make hist percentage for plotting
    pixels = cv2.countNonZero(mask)
    hist_percent = [(p / float(pixels)) * 100 for p in hist_val]

    params.debug = debug

    if histplot is True:
        dataset = pd.DataFrame({
            'Index Reflectance': bin_labels,
            'Proportion of pixels (%)': hist_percent
        })
        fig_hist = (
            ggplot(data=dataset,
                   mapping=aes(x='Index Reflectance',
                               y='Proportion of pixels (%)')) +
            geom_line(color='red') +
            scale_x_continuous(breaks=bin_labels, labels=plotting_labels))
        analysis_image = fig_hist
        if params.debug == 'print':
            fig_hist.save(
                os.path.join(
                    params.debug_outdir,
                    str(params.device) + index_array.array_type + "hist.png"))
        elif params.debug == 'plot':
            print(fig_hist)

    outputs.add_observation(
        variable='mean_' + index_array.array_type,
        trait='Average ' + index_array.array_type + ' reflectance',
        method='plantcv.plantcv.hyperspectral.analyze_index',
        scale='reflectance',
        datatype=float,
        value=float(index_mean),
        label='none')

    outputs.add_observation(
        variable='med_' + index_array.array_type,
        trait='Median ' + index_array.array_type + ' reflectance',
        method='plantcv.plantcv.hyperspectral.analyze_index',
        scale='reflectance',
        datatype=float,
        value=float(index_median),
        label='none')

    outputs.add_observation(
        variable='std_' + index_array.array_type,
        trait='Standard deviation ' + index_array.array_type + ' reflectance',
        method='plantcv.plantcv.hyperspectral.analyze_index',
        scale='reflectance',
        datatype=float,
        value=float(index_std),
        label='none')

    outputs.add_observation(variable='index_frequencies_' +
                            index_array.array_type,
                            trait='index frequencies',
                            method='plantcv.plantcv.analyze_index',
                            scale='frequency',
                            datatype=list,
                            value=hist_percent,
                            label=bin_labels)

    if params.debug == "plot":
        plot_image(masked_array)
    elif params.debug == "print":
        print_image(img=masked_array,
                    filename=os.path.join(
                        params.debug_outdir,
                        str(params.device) + index_array.array_type + ".png"))
    # Store images
    outputs.images.append(analysis_image)

    return analysis_image
Example #54
0
def report_size_marker_area(img,
                            roi_contour,
                            roi_hierarchy,
                            marker='define',
                            objcolor='dark',
                            thresh_channel=None,
                            thresh=None):
    """Detects a size marker in a specified region and reports its size and eccentricity

    Inputs:
    img             = An RGB or grayscale image to plot the marker object on
    roi_contour     = A region of interest contour (e.g. output from pcv.roi.rectangle or other methods)
    roi_hierarchy   = A region of interest contour hierarchy (e.g. output from pcv.roi.rectangle or other methods)
    marker          = 'define' or 'detect'. If define it means you set an area, if detect it means you want to
                      detect within an area
    objcolor        = Object color is 'dark' or 'light' (is the marker darker or lighter than the background)
    thresh_channel  = 'h', 's', or 'v' for hue, saturation or value
    thresh          = Binary threshold value (integer)

    Returns:
    analysis_images = List of output images

    :param img: numpy.ndarray
    :param roi_contour: list
    :param roi_hierarchy: numpy.ndarray
    :param marker: str
    :param objcolor: str
    :param thresh_channel: str
    :param thresh: int
    :return: analysis_images: list
    """
    # Store debug
    debug = params.debug
    params.debug = None

    params.device += 1
    # Make a copy of the reference image
    ref_img = np.copy(img)
    # If the reference image is grayscale convert it to color
    if len(np.shape(ref_img)) == 2:
        ref_img = cv2.cvtColor(ref_img, cv2.COLOR_GRAY2BGR)

    # Marker components
    # If the marker type is "defined" then the marker_mask and marker_contours are equal to the input ROI
    # Initialize a binary image
    roi_mask = np.zeros(np.shape(img)[:2], dtype=np.uint8)
    # Draw the filled ROI on the mask
    cv2.drawContours(roi_mask, roi_contour, -1, (255), -1)
    marker_mask = []
    marker_contour = []

    # If the marker type is "detect" then we will use the ROI to isolate marker contours from the input image
    if marker.upper() == 'DETECT':
        # We need to convert the input image into one of the HSV colorspace channels and then threshold it
        if thresh_channel is not None and thresh is not None:
            # Mask the input image
            masked = apply_mask(rgb_img=ref_img,
                                mask=roi_mask,
                                mask_color="black")
            # Convert the masked image to hue, saturation, or value
            marker_hsv = rgb2gray_hsv(rgb_img=masked, channel=thresh_channel)
            # Threshold the HSV image
            marker_bin = binary_threshold(gray_img=marker_hsv,
                                          threshold=thresh,
                                          max_value=255,
                                          object_type=objcolor)
            # Identify contours in the masked image
            contours, hierarchy = find_objects(img=ref_img, mask=marker_bin)
            # Filter marker contours using the input ROI
            kept_contours, kept_hierarchy, kept_mask, obj_area = roi_objects(
                img=ref_img,
                object_contour=contours,
                obj_hierarchy=hierarchy,
                roi_contour=roi_contour,
                roi_hierarchy=roi_hierarchy,
                roi_type="partial")
            # If more than one contour is detected, combine them into one
            # These become the marker contour and mask
            marker_contour, marker_mask = object_composition(
                img=ref_img, contours=kept_contours, hierarchy=kept_hierarchy)
        else:
            fatal_error(
                'thresh_channel and thresh must be defined in detect mode')
    elif marker.upper() == "DEFINE":
        # Identify contours in the masked image
        contours, hierarchy = find_objects(img=ref_img, mask=roi_mask)
        # If more than one contour is detected, combine them into one
        # These become the marker contour and mask
        marker_contour, marker_mask = object_composition(img=ref_img,
                                                         contours=contours,
                                                         hierarchy=hierarchy)
    else:
        fatal_error(
            "marker must be either 'define' or 'detect' but {0} was provided.".
            format(marker))

    # Calculate the moments of the defined marker region
    m = cv2.moments(marker_mask, binaryImage=True)
    # Calculate the marker area
    marker_area = m['m00']

    # Fit a bounding ellipse to the marker
    center, axes, angle = cv2.fitEllipse(marker_contour)
    major_axis = np.argmax(axes)
    minor_axis = 1 - major_axis
    major_axis_length = axes[major_axis]
    minor_axis_length = axes[minor_axis]
    # Calculate the bounding ellipse eccentricity
    eccentricity = np.sqrt(1 - (axes[minor_axis] / axes[major_axis])**2)

    cv2.drawContours(ref_img, marker_contour, -1, (255, 0, 0), 5)
    analysis_image = ref_img

    # Reset debug mode
    params.debug = debug

    if params.debug == 'print':
        print_image(
            ref_img,
            os.path.join(params.debug_outdir,
                         str(params.device) + '_marker_shape.png'))
    elif params.debug == 'plot':
        plot_image(ref_img)

    outputs.add_observation(variable='marker_area',
                            trait='marker area',
                            method='plantcv.plantcv.report_size_marker_area',
                            scale='pixels',
                            datatype=int,
                            value=marker_area,
                            label='pixels')
    outputs.add_observation(variable='marker_ellipse_major_axis',
                            trait='marker ellipse major axis length',
                            method='plantcv.plantcv.report_size_marker_area',
                            scale='pixels',
                            datatype=int,
                            value=major_axis_length,
                            label='pixels')
    outputs.add_observation(variable='marker_ellipse_minor_axis',
                            trait='marker ellipse minor axis length',
                            method='plantcv.plantcv.report_size_marker_area',
                            scale='pixels',
                            datatype=int,
                            value=minor_axis_length,
                            label='pixels')
    outputs.add_observation(variable='marker_ellipse_eccentricity',
                            trait='marker ellipse eccentricity',
                            method='plantcv.plantcv.report_size_marker_area',
                            scale='none',
                            datatype=float,
                            value=eccentricity,
                            label='none')

    # Store images
    outputs.images.append(analysis_image)

    return analysis_image
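# A minimal usage sketch (hypothetical, not library source): detect a dark size
# marker inside a rectangular ROI. The ROI here comes from PlantCV's
# pcv.roi.rectangle helper (assumed imported as pcv); the coordinates, the
# threshold value, and the image path are all placeholders.
img = cv2.imread("plant_with_marker.png")
roi_contour, roi_hierarchy = pcv.roi.rectangle(img=img, x=1000, y=1000,
                                               h=300, w=300)
marker_img = report_size_marker_area(img=img, roi_contour=roi_contour,
                                     roi_hierarchy=roi_hierarchy,
                                     marker='detect', objcolor='dark',
                                     thresh_channel='s', thresh=120)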
Example #55
0
def custom_range(rgb_img, lower_thresh, upper_thresh, channel='gray'):
    """Creates a thresholded image and mask from an RGB image and threshold values.

    Inputs:
    rgb_img      = RGB image data
    lower_thresh = List of lower threshold values (0-255)
    upper_thresh = List of upper threshold values (0-255)
    channel      = Color-space channels of interest (RGB, HSV, LAB, or gray)

    Returns:
    mask         = Mask, binary image
    masked_img   = Masked image, keeping the part of image of interest

    :param rgb_img: numpy.ndarray
    :param lower_thresh: list
    :param upper_thresh: list
    :param channel: str
    :return mask: numpy.ndarray
    :return masked_img: numpy.ndarray
    """
    if channel.upper() == 'HSV':

        # Check threshold inputs
        if not (len(lower_thresh) == 3 and len(upper_thresh) == 3):
            fatal_error(
                "If using the HSV colorspace, 3 thresholds are needed for both lower_thresh and "
                +
                "upper_thresh. If thresholding isn't needed for a channel, set lower_thresh=0 and "
                + "upper_thresh=255")

        # Convert the RGB image to HSV colorspace
        hsv_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2HSV)

        # Separate channels
        hue = hsv_img[:, :, 0]
        saturation = hsv_img[:, :, 1]
        value = hsv_img[:, :, 2]

        # Make a mask for each channel
        h_mask = cv2.inRange(hue, lower_thresh[0], upper_thresh[0])
        s_mask = cv2.inRange(saturation, lower_thresh[1], upper_thresh[1])
        v_mask = cv2.inRange(value, lower_thresh[2], upper_thresh[2])

        # Apply the masks to the image
        result = cv2.bitwise_and(rgb_img, rgb_img, mask=h_mask)
        result = cv2.bitwise_and(result, result, mask=s_mask)
        masked_img = cv2.bitwise_and(result, result, mask=v_mask)

        # Combine masks
        mask = cv2.bitwise_and(s_mask, h_mask)
        mask = cv2.bitwise_and(mask, v_mask)

    elif channel.upper() == 'RGB':

        # Check threshold inputs
        if not (len(lower_thresh) == 3 and len(upper_thresh) == 3):
            fatal_error(
                "If using the RGB colorspace, 3 thresholds are needed for both lower_thresh and "
                +
                "upper_thresh. If thresholding isn't needed for a channel, set lower_thresh=0 and "
                + "upper_thresh=255")

        # Separate channels (pcv.readimage reads RGB images in as BGR)
        blue = rgb_img[:, :, 0]
        green = rgb_img[:, :, 1]
        red = rgb_img[:, :, 2]

        # Make a mask for each channel
        b_mask = cv2.inRange(blue, lower_thresh[0], upper_thresh[0])
        g_mask = cv2.inRange(green, lower_thresh[1], upper_thresh[1])
        r_mask = cv2.inRange(red, lower_thresh[2], upper_thresh[2])

        # Apply the masks to the image
        result = cv2.bitwise_and(rgb_img, rgb_img, mask=b_mask)
        result = cv2.bitwise_and(result, result, mask=g_mask)
        masked_img = cv2.bitwise_and(result, result, mask=r_mask)

        # Combine masks
        mask = cv2.bitwise_and(b_mask, g_mask)
        mask = cv2.bitwise_and(mask, r_mask)

    elif channel.upper() == 'LAB':

        # Check threshold inputs
        if not (len(lower_thresh) == 3 and len(upper_thresh) == 3):
            fatal_error(
                "If using the LAB colorspace, 3 thresholds are needed for both lower_thresh and "
                +
                "upper_thresh. If thresholding isn't needed for a channel, set lower_thresh=0 and "
                + "upper_thresh=255")

        # Convert the RGB image to LAB colorspace
        lab_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2LAB)

        # Separate channels (pcv.readimage reads RGB images in as BGR)
        lightness = lab_img[:, :, 0]
        green_magenta = lab_img[:, :, 1]
        blue_yellow = lab_img[:, :, 2]

        # Make a mask for each channel
        l_mask = cv2.inRange(lightness, lower_thresh[0], upper_thresh[0])
        gm_mask = cv2.inRange(green_magenta, lower_thresh[1], upper_thresh[1])
        by_mask = cv2.inRange(blue_yellow, lower_thresh[2], upper_thresh[2])

        # Apply the masks to the image
        result = cv2.bitwise_and(rgb_img, rgb_img, mask=l_mask)
        result = cv2.bitwise_and(result, result, mask=gm_mask)
        masked_img = cv2.bitwise_and(result, result, mask=by_mask)

        # Combine masks
        mask = cv2.bitwise_and(l_mask, gm_mask)
        mask = cv2.bitwise_and(mask, by_mask)

    elif channel.upper() == 'GRAY' or channel.upper() == 'GREY':

        # Check threshold input
        if not (len(lower_thresh) == 1 and len(upper_thresh) == 1):
            fatal_error(
                "If useing a grayscale colorspace, 1 threshold is needed for both the "
                + "lower_thresh and upper_thresh.")
        if len(np.shape(rgb_img)) == 3:
            # Convert RGB image to grayscale colorspace
            gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
        else:
            gray_img = rgb_img

        # Make a mask
        mask = cv2.inRange(gray_img, lower_thresh[0], upper_thresh[0])

        # Apply the masks to the image
        masked_img = cv2.bitwise_and(rgb_img, rgb_img, mask=mask)

    else:
        fatal_error(
            str(channel) +
            " is not a valid colorspace. Channel must be either 'RGB', 'HSV', 'LAB', or 'gray'."
        )

    # Auto-increment the device counter
    params.device += 1

    # Print or plot the binary image if debug is on
    if params.debug == 'print':
        print_image(
            masked_img,
            os.path.join(params.debug_outdir,
                         str(params.device) + channel + 'custom_thresh.png'))
        print_image(
            mask,
            os.path.join(
                params.debug_outdir,
                str(params.device) + channel + 'custom_thresh_mask.png'))
    elif params.debug == 'plot':
        plot_image(masked_img)
        plot_image(mask)

    return mask, masked_img
Example #56
0
def segment_euclidean_length(segmented_img, objects, label="default"):
    """ Use segmented skeleton image to gather euclidean length measurements per segment

        Inputs:
        segmented_img = Segmented image to plot lengths on
        objects       = List of contours
        label         = optional label parameter, modifies the variable name of observations recorded

        Returns:
        labeled_img      = Segmented debugging image with lengths labeled

        :param segmented_img: numpy.ndarray
        :param objects: list
        :param label: str
        :return labeled_img: numpy.ndarray

        """
    x_list = []
    y_list = []
    segment_lengths = []
    # Create a color scale, use a previously stored scale if available
    rand_color = color_palette(num=len(objects), saved=True)

    labeled_img = segmented_img.copy()
    # Store debug
    debug = params.debug
    params.debug = None

    for i, cnt in enumerate(objects):
        # Store coordinates for labels
        x_list.append(objects[i][0][0][0])
        y_list.append(objects[i][0][0][1])

        # Draw segments one by one to group segment tips together
        finding_tips_img = np.zeros(segmented_img.shape[:2], np.uint8)
        cv2.drawContours(finding_tips_img,
                         objects,
                         i, (255, 255, 255),
                         1,
                         lineType=8)
        segment_tips = find_tips(finding_tips_img)
        tip_objects, tip_hierarchies = find_objects(segment_tips, segment_tips)
        points = []
        if len(tip_objects) != 2:
            fatal_error("Each segment should have exactly two tips; try pruning again")
        for t in tip_objects:
            # Gather pairs of coordinates
            x, y = t.ravel()
            coord = (x, y)
            points.append(coord)

        # Draw euclidean distance lines
        cv2.line(labeled_img, points[0], points[1], rand_color[i], 1)

        # Calculate euclidean distance between tips of each contour
        segment_lengths.append(float(euclidean(points[0], points[1])))

    segment_ids = []
    # Reset debug mode
    params.debug = debug

    # Put labels of length
    for c, value in enumerate(segment_lengths):
        text = "{:.2f}".format(value)
        w = x_list[c]
        h = y_list[c]
        cv2.putText(img=labeled_img,
                    text=text,
                    org=(w, h),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=params.text_size,
                    color=(150, 150, 150),
                    thickness=params.text_thickness)
        # segment_label = "ID" + str(c)
        segment_ids.append(c)

    outputs.add_observation(
        sample=label,
        variable='segment_eu_length',
        trait='segment euclidean length',
        method='plantcv.plantcv.morphology.segment_euclidean_length',
        scale='pixels',
        datatype=list,
        value=segment_lengths,
        label=segment_ids)

    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(
            labeled_img,
            os.path.join(params.debug_outdir,
                         str(params.device) + '_segment_eu_lengths.png'))
    elif params.debug == 'plot':
        plot_image(labeled_img)

    return labeled_img
Example #57
0
def apply_transformation_matrix(source_img, target_img, transformation_matrix):
    """ Apply the transformation matrix to the source_image.

    Inputs:
    source_img      = an RGB image to be corrected to the target color space
    target_img      = an RGB image with the target color space
    transformation_matrix        = a 9x9 matrix of transformation coefficients

    Outputs:
    corrected_img    = an RGB image in correct color space

    :param source_img: numpy.ndarray
    :param target_img: numpy.ndarray
    :param transformation_matrix: numpy.ndarray
    :return corrected_img: numpy.ndarray
    """
    # check transformation_matrix for 9x9
    if np.shape(transformation_matrix) != (9, 9):
        fatal_error(
            "transformation_matrix must be a 9x9 matrix of transformation coefficients."
        )
    # Check for RGB input
    if len(np.shape(source_img)) != 3:
        fatal_error("Source_img is not an RGB image.")

    # Autoincrement the device counter
    params.device += 1

    # split transformation_matrix
    red, green, blue, red2, green2, blue2, red3, green3, blue3 = np.split(
        transformation_matrix, 9, 1)

    # find linear, square, and cubic values of source_img color channels
    source_b, source_g, source_r = cv2.split(source_img)
    source_b2 = np.square(source_b)
    source_b3 = np.power(source_b, 3)
    source_g2 = np.square(source_g)
    source_g3 = np.power(source_g, 3)
    source_r2 = np.square(source_r)
    source_r3 = np.power(source_r, 3)

    # apply linear model to source color channels
    b = (source_r * blue[0] + source_g * blue[1] + source_b * blue[2] +
         source_r2 * blue[3] + source_g2 * blue[4] + source_b2 * blue[5] +
         source_r3 * blue[6] + source_g3 * blue[7] + source_b3 * blue[8])
    g = (source_r * green[0] + source_g * green[1] + source_b * green[2] +
         source_r2 * green[3] + source_g2 * green[4] + source_b2 * green[5] +
         source_r3 * green[6] + source_g3 * green[7] + source_b3 * green[8])
    r = (source_r * red[0] + source_g * red[1] + source_b * red[2] +
         source_r2 * red[3] + source_g2 * red[4] + source_b2 * red[5] +
         source_r3 * red[6] + source_g3 * red[7] + source_b3 * red[8])

    # merge corrected color channels onto source_image
    bgr = [b, g, r]
    corrected_img = cv2.merge(bgr)

    # round corrected_img elements and clip them to the valid 8-bit range
    corrected_img = np.rint(corrected_img)
    corrected_img[np.where(corrected_img > 255)] = 255
    corrected_img[np.where(corrected_img < 0)] = 0
    corrected_img = corrected_img.astype(np.uint8)

    if params.debug == "print":
        # If debug is print, save the image to a file
        print_image(
            corrected_img,
            os.path.join(params.debug_outdir,
                         str(params.device) + "_corrected.png"))
    elif params.debug == "plot":
        # If debug is plot, print a horizontal view of source_img, corrected_img, and target_img to the plotting device
        # plot horizontal comparison of source_img, corrected_img (with rounded elements) and target_img
        plot_image(np.hstack([source_img, corrected_img, target_img]))

    # return corrected_img
    return corrected_img
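# A minimal synthetic check (hypothetical, not library source) of the model
# above: each output channel is a dot product between the per-pixel feature
# vector [r, g, b, r^2, g^2, b^2, r^3, g^3, b^3] and one column of
# transformation_matrix (the columns unpacked as red, green, and blue above).
# The matrix here is random and purely illustrative; assumes numpy as np.
M = np.random.rand(9, 9)
r, g, b = 0.2, 0.5, 0.7  # one hypothetical pixel
features = np.array([r, g, b, r**2, g**2, b**2, r**3, g**3, b**3])
blue_out = features @ M[:, 2]  # equals the expanded sum used for b above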
Example #58
0
def segment_sort(skel_img, objects, mask=None, first_stem=True):
    """ Calculate segment curvature as defined by the ratio between geodesic and euclidean distance

        Inputs:
        skel_img          = Skeletonized image
        objects           = List of contours
        mask              = (Optional) binary mask for debugging. If provided, debug image will be overlaid on the mask.
        first_stem        = (Optional) if True, then the first (bottom) segment always gets classified as stem

        Returns:
        secondary_objects = List of secondary segments (leaf)
        primary_objects   = List of primary objects (stem)

        :param skel_img: numpy.ndarray
        :param objects: list
        :param mask: numpy.ndarray
        :param first_stem: bool
        :return secondary_objects: list
        :return primary_objects: list
        """
    # Store debug
    debug = params.debug
    params.debug = None

    secondary_objects = []
    primary_objects = []

    if mask is None:
        labeled_img = np.zeros(skel_img.shape[:2], np.uint8)
    else:
        labeled_img = mask.copy()

    tips_img = find_tips(skel_img)
    tips_img = dilate(tips_img, 3, 1)

    # Loop through segment contours
    for i, cnt in enumerate(objects):
        segment_plot = np.zeros(skel_img.shape[:2], np.uint8)
        cv2.drawContours(segment_plot, objects, i, 255, 1, lineType=8)
        overlap_img = logical_and(segment_plot, tips_img)

        # The first contour is the base, and while it contains a tip, it isn't a leaf
        if i == 0 and first_stem:
            primary_objects.append(cnt)

        # Sort segments
        else:

            if np.sum(overlap_img) > 0:
                secondary_objects.append(cnt)
            else:
                primary_objects.append(cnt)

    # Plot segments where green segments are leaf objects and fuchsia segments are other objects
    labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_GRAY2RGB)
    for i, cnt in enumerate(primary_objects):
        cv2.drawContours(labeled_img, primary_objects, i, (255, 0, 255), params.line_thickness, lineType=8)
    for i, cnt in enumerate(secondary_objects):
        cv2.drawContours(labeled_img, secondary_objects, i, (0, 255, 0), params.line_thickness, lineType=8)

    # Reset debug mode
    params.debug = debug
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(labeled_img, os.path.join(params.debug_outdir, str(params.device) + '_sorted_segments.png'))
    elif params.debug == 'plot':
        plot_image(labeled_img)

    return secondary_objects, primary_objects
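# A minimal usage sketch (hypothetical, not library source): sort skeleton
# segments into leaf and stem objects. The skeleton and segment contours come
# from PlantCV's morphology helpers (assumed imported as pcv); the mask path
# is a placeholder.
plant_mask = cv2.imread("plant_mask.png", cv2.IMREAD_GRAYSCALE)
skeleton = pcv.morphology.skeletonize(mask=plant_mask)
segmented_img, segment_objects = pcv.morphology.segment_skeleton(
    skel_img=skeleton, mask=plant_mask)
leaf_objs, stem_objs = segment_sort(skel_img=skeleton,
                                    objects=segment_objects,
                                    mask=plant_mask)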
Example #59
0
def create_color_card_mask(rgb_img,
                           radius,
                           start_coord,
                           spacing,
                           nrows,
                           ncols,
                           exclude=None):
    """Create a labeled mask for color card chips
    Inputs:
    rgb_img        = Input RGB image data containing a color card.
    radius         = Radius of color masks.
    start_coord    = Two-element tuple of the first chip mask starting x and y coordinate.
    spacing        = Two-element tuple of the horizontal and vertical spacing between chip masks.
    nrows          = Number of chip rows.
    ncols          = Number of chip columns.
    exclude        = Optional list of chips to exclude.

    Returns:
    mask           = Labeled mask of chips

    :param rgb_img: numpy.ndarray
    :param radius: int
    :param start_coord: tuple
    :param spacing: tuple
    :param nrows: int
    :param ncols: int
    :param exclude: list
    :return mask: numpy.ndarray
    """

    # Autoincrement the device counter
    params.device += 1
    # Avoid mutating a shared default argument; use an empty exclusion list by default
    if exclude is None:
        exclude = []
    # Initialize chip list
    chips = []

    # Loop over each color card row
    for i in range(0, nrows):
        # The upper left corner is the y starting coordinate + the chip offset * the vertical spacing between chips
        y = start_coord[1] + i * spacing[1]
        # Loop over each column
        for j in range(0, ncols):
            # The upper left corner is the x starting coordinate + the chip offset * the
            # horizontal spacing between chips
            x = start_coord[0] + j * spacing[0]
            # Create a chip ROI
            chips.append(circle(img=rgb_img, x=x, y=y, r=radius))
    # Sort excluded chips from largest to smallest
    exclude.sort(reverse=True)
    # Remove any excluded chips
    for chip in exclude:
        del chips[chip]
    # Create mask
    mask = np.zeros(shape=np.shape(rgb_img)[:2], dtype=np.uint8)
    # Mask label index
    i = 1
    # Draw labeled chip boxes on the mask
    for chip in chips:
        mask = cv2.drawContours(mask, chip[0], -1, (i * 10), -1)
        i += 1
    if params.debug is not None:
        # Create a copy of the input image for plotting
        canvas = np.copy(rgb_img)
        # Draw chip ROIs on the canvas image
        for chip in chips:
            cv2.drawContours(canvas, chip[0], -1, (255, 255, 0),
                             params.line_thickness)
        if params.debug == "print":
            print_image(img=canvas,
                        filename=os.path.join(
                            params.debug_outdir,
                            str(params.device) + "_color_card_mask_rois.png"))
            print_image(img=mask,
                        filename=os.path.join(
                            params.debug_outdir,
                            str(params.device) + "_color_card_mask.png"))
        elif params.debug == "plot":
            plot_image(canvas)
    return mask
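# A minimal usage sketch (hypothetical, not library source): build a labeled
# mask for a standard 4x6-chip color card. The start coordinate, spacing, and
# radius are illustrative, and the image path is a placeholder.
card_img = cv2.imread("color_card.png")
card_mask = create_color_card_mask(rgb_img=card_img, radius=10,
                                   start_coord=(400, 160),
                                   spacing=(30, 30), nrows=4, ncols=6)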
Example #60
0
def acute_vertex(obj, win, thresh, sep, img):
    """acute_vertex: identify corners/acute angles of an object

    For each point in contour, get a point before (pre) and after (post) the point of interest,
    calculate the angle between the pre and post point.

    Inputs:
    obj    = a contour of the plant object (this should be output from the object_composition.py fxn)
    win    = win argument specifies the pre and post point distances (a value of 30 worked well for a sample image)
    thresh = a threshold to set for acuteness; keep points with an angle more acute than the threshold (a value of 15
             worked well for sample image)
    sep    = the number of contour points to search within for the most acute value
    img    = the original image

    Returns:
    acute  = ndarray of acute vertex points

    :param obj: ndarray
    :param win: int
    :param thresh: int
    :param sep: int
    :param img: ndarray
    :return acute: ndarray
    """
    params.device += 1
    chain = []
    if not np.any(obj):
        acute = ('NA', 'NA')
        return acute
    for i in range(len(obj) - win):
        x, y = obj[i].ravel()
        pre_x, pre_y = obj[i - win].ravel()
        post_x, post_y = obj[i + win].ravel()
        # print "The iterator i is currently " + str(i)
        # print "Here are the values: " + str(x) + " " + str(y)
        # print "Here are the pre values: " + str(pre_x) + " " + str(pre_y)
        # print "Here are the post values: " + str(post_x) + " " + str(post_y)
        # Angle in radians derived from Law of Cosines, converted to degrees
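        # For the triangle (pre, point, post), the interior angle at the point
        # of interest satisfies the Law of Cosines:
        #   cos(ang) = (P12^2 + P13^2 - P23^2) / (2 * P12 * P13)
        # where P12, P13, and P23 are the pairwise Euclidean distances below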
        P12 = np.sqrt((x - pre_x) * (x - pre_x) + (y - pre_y) * (y - pre_y))
        P13 = np.sqrt((x - post_x) * (x - post_x) + (y - post_y) *
                      (y - post_y))
        P23 = np.sqrt((pre_x - post_x) * (pre_x - post_x) + (pre_y - post_y) *
                      (pre_y - post_y))
        # Guard against division by (near) zero when points are nearly coincident
        if (2 * P12 * P13) > 0.001:
            dot = (P12 * P12 + P13 * P13 - P23 * P23) / (2 * P12 * P13)
        else:
            dot = (P12 * P12 + P13 * P13 - P23 * P23) / 0.001
        if dot > 1:  # If the float exceeds 1, clamp to 1 to prevent a domain error in acos
            dot = 1
        elif dot < -1:  # If the float exceeds -1, clamp to -1 to prevent a domain error in acos
            dot = -1
        ang = math.degrees(math.acos(dot))
        # print "Here is the angle: " + str(ang)
        chain.append(ang)

    # Select points in contour that have an angle more acute than thresh
    index = []
    for c in range(len(chain)):
        if float(chain[c]) <= thresh:
            index.append(c)
    # There are oftentimes several contiguous points around a tip with acute angles
    # Here we try to pick the most acute angle within each cluster of contiguous points
    # sep is the gap (in contour points) that separates one cluster of vertices from the next
    out = []
    tester = []
    for i in range(len(index) - 1):
        tester.append(index[i])
        # A gap of at least sep contour points closes the current cluster
        if index[i + 1] - index[i] >= sep:
            angles = [chain[d] for d in tester]
            # Keep the most acute (smallest) angle in the cluster
            out.append(tester[angles.index(min(angles))])
            tester = []
    # Flush the final cluster, which is never closed by a gap inside the loop
    if index:
        tester.append(index[-1])
        angles = [chain[d] for d in tester]
        out.append(tester[angles.index(min(angles))])

    # Store the acute vertex points
    acute = obj[out]
    if params.debug == 'print':
        # Let's make a plot of these values on the image
        img2 = np.copy(img)
        # Plot each of these tip points on the image
        for i in acute:
            x, y = i.ravel()
            cv2.circle(img2, (x, y), 15, (255, 204, 255), -1)
        print_image(
            img2,
            os.path.join(params.debug_outdir,
                         str(params.device) + '_acute_vertices.png'))
    elif params.debug == 'plot':
        # Let's make a plot of these values on the image
        img2 = np.copy(img)
        # Plot each of these tip points on the image
        for i in acute:
            x, y = i.ravel()
            cv2.circle(img2, (x, y), 15, (0, 0, 255), -1)
        plot_image(img2)

    # Store into global measurements
    if 'landmark_reference' not in outputs.measurements:
        outputs.measurements['landmark_reference'] = {}
    outputs.measurements['landmark_reference']['tip_points'] = acute

    return acute
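A minimal, hypothetical usage sketch for acute_vertex. The image path is an assumption, and the contour preparation below uses plain OpenCV (assuming OpenCV 4's two-value findContours return) as a stand-in for the object_composition step the docstring refers to:

# Hypothetical usage sketch; file path and contour preparation are assumptions
import cv2

img = cv2.imread("plant_photo.png")  # hypothetical sample image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, binary = cv2.threshold(gray, 120, 255, cv2.THRESH_BINARY)
# CHAIN_APPROX_NONE keeps every contour point, which the sliding window needs
contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
obj = max(contours, key=cv2.contourArea)  # stand-in for object_composition output
# win=30 and thresh=15 worked well for a sample image per the docstring above
tips = acute_vertex(obj=obj, win=30, thresh=15, sep=5, img=img)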