Example #1
def flip(img, direction):
    """Flip image.

    Inputs:
    img       = RGB or grayscale image data
    direction = "horizontal" or "vertical"

    Returns:
    vh_img    = flipped image

    :param img: numpy.ndarray
    :param direction: str
    :return vh_img: numpy.ndarray
    """
    params.device += 1
    if direction.upper() == "VERTICAL":
        vh_img = cv2.flip(img, 1)
    elif direction.upper() == "HORIZONTAL":
        vh_img = cv2.flip(img, 0)
    else:
        fatal_error(str(direction) + " is not a valid direction, must be horizontal or vertical")

    if params.debug == 'print':
        print_image(vh_img, os.path.join(params.debug_outdir, str(params.device) + "_flipped.png"))
    elif params.debug == 'plot':
        if len(np.shape(vh_img)) == 3:
            plot_image(vh_img)
        else:
            plot_image(vh_img, cmap='gray')

    return vh_img
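A minimal usage sketch for the version above (assumptions: plantcv is installed and importable as pcv, and "input.png" is a hypothetical image path). Note that cv2.flip uses flipCode=1 to mirror across the vertical axis and flipCode=0 across the horizontal axis, which is the convention the direction strings map onto here.
# Usage sketch; "input.png" is a hypothetical path
from plantcv import plantcv as pcv

img, path, img_name = pcv.readimage(filename="input.png")
flipped = pcv.flip(img=img, direction="vertical")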
Example #2
def image_subtract(gray_img1, gray_img2):
    """This is a function used to subtract values of one gray-scale image array from another gray-scale image array. The
    resulting gray-scale image array has a minimum element value of zero. That is all negative values resulting from the
    subtraction are forced to zero.

    Inputs:
    gray_img1   = Grayscale image data from which gray_img2 will be subtracted
    gray_img2   = Grayscale image data which will be subtracted from gray_img1

    Returns:
    new_img = subtracted image

    :param gray_img1: numpy.ndarray
    :param gray_img2: numpy.ndarray
    :return new_img: numpy.ndarray
    """

    params.device += 1  # increment device

    # check inputs for gray-scale
    if len(np.shape(gray_img1)) != 2 or len(np.shape(gray_img2)) != 2:
        fatal_error("Input image is not gray-scale")

    new_img = gray_img1.astype(np.float64) - gray_img2.astype(np.float64)  # subtract values
    new_img[np.where(new_img < 0)] = 0  # force negative array values to zero
    new_img = new_img.astype(np.uint8)  # typecast image to 8-bit image
    # print-plot handling
    if params.debug == 'print':
        print_image(new_img, os.path.join(params.debug_outdir, str(params.device) + "_subtraction.png"))
    elif params.debug == 'plot':
        plot_image(new_img, cmap='gray')
    return new_img  # return
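The float-subtract, clip, and recast pattern above avoids the wraparound that direct unsigned 8-bit subtraction would produce. A small self-contained NumPy illustration:
import numpy as np

a = np.array([[10, 200]], dtype=np.uint8)
b = np.array([[20, 50]], dtype=np.uint8)
diff = a.astype(np.float64) - b.astype(np.float64)  # [[-10., 150.]]
diff[diff < 0] = 0                                  # negatives forced to zero
result = diff.astype(np.uint8)                      # [[0, 150]]; naive uint8 math would give 246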
Example #3
def from_binary_image(img, bin_img):
    """Create an ROI from a binary image

    Inputs:
    img           = An RGB or grayscale image to plot the ROI on.
    bin_img       = Binary image to extract an ROI contour from.

    Outputs:
    roi_contour   = An ROI set of points (contour).
    roi_hierarchy = The hierarchy of ROI contour(s).

    :param img: numpy.ndarray
    :param bin_img: numpy.ndarray
    :return roi_contour: list
    :return roi_hierarchy: numpy.ndarray
    """
    # Autoincrement the device counter
    params.device += 1
    # Make sure the input bin_img is binary
    if len(np.unique(bin_img)) != 2:
        fatal_error("Input image is not binary!")
    # Use the binary image to create an ROI contour
    roi_contour, roi_hierarchy = cv2.findContours(np.copy(bin_img), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
    # Draw the ROI if requested
    if params.debug is not None:
        _draw_roi(img=img, roi_contour=roi_contour)

    return roi_contour, roi_hierarchy
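The [-2:] slice on cv2.findContours keeps this compatible across OpenCV versions: 3.x returns (image, contours, hierarchy) while 4.x returns (contours, hierarchy). A standalone sketch of the same pattern:
import cv2
import numpy as np

bin_img = np.zeros((100, 100), dtype=np.uint8)
cv2.circle(bin_img, (50, 50), 20, 255, -1)
# [-2:] always yields (contours, hierarchy), whether OpenCV returned 2 or 3 values
contours, hierarchy = cv2.findContours(bin_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
print(len(contours))  # 1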
Example #4
def opening(gray_img, kernel=None):
    """Wrapper for scikit-image opening functions. Opening can remove small bright spots (i.e. salt).

    Inputs:
    gray_img = input image (grayscale or binary)
    kernel   = optional neighborhood, expressed as an array of 1s and 0s. If None, use cross-shaped structuring element.

    Returns:
    filtered_img = filtered (opened) image

    :param gray_img: ndarray
    :param kernel: ndarray
    :return filtered_img: ndarray
    """

    params.device += 1

    # Make sure the image is binary/grayscale
    if len(np.shape(gray_img)) != 2:
        fatal_error("Input image must be grayscale or binary")

    # If image is binary use the faster method
    if len(np.unique(gray_img)) == 2:
        bool_img = morphology.binary_opening(gray_img, kernel)
        filtered_img = np.copy(bool_img.astype(np.uint8) * 255)
    # Otherwise use method appropriate for grayscale images
    else:
        filtered_img = morphology.opening(gray_img, kernel)

    if params.debug == 'print':
        print_image(filtered_img, os.path.join(params.debug_outdir, str(params.device) + '_opening' + '.png'))
    elif params.debug == 'plot':
        plot_image(filtered_img, cmap='gray')

    return filtered_img
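A self-contained sketch of the binary branch above: opening removes an isolated bright pixel, using scikit-image's default cross-shaped structuring element mentioned in the docstring.
import numpy as np
from skimage import morphology

bin_img = np.zeros((20, 20), dtype=np.uint8)
bin_img[10, 10] = 255                              # a single "salt" pixel
bool_img = morphology.binary_opening(bin_img > 0)  # opening removes the speck
filtered = bool_img.astype(np.uint8) * 255         # back to a 0/255 image, as above
print(filtered.max())                              # 0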
Example #5
def resize(img, resize_x, resize_y):
    """Resize image.

    Inputs:
    img      = RGB or grayscale image data to resize
    resize_x = scaling factor
    resize_y = scaling factor

    Returns:
    reimg    = resized image

    :param img: numpy.ndarray
    :param resize_x: int
    :param resize_y: int
    :return reimg: numpy.ndarray
    """

    params.device += 1

    if resize_x <= 0 and resize_y <= 0:
        fatal_error("Resize values both cannot be 0 or negative values!")

    reimg = cv2.resize(img, (0, 0), fx=resize_x, fy=resize_y)

    if params.debug == 'print':
        print_image(reimg, os.path.join(params.debug_outdir, str(params.device) + "_resize1.png"))
    elif params.debug == 'plot':
        plot_image(reimg)

    return reimg
Example #6
def mean(gray_img, max_value, object_type="light"):
    """Creates a binary image from a grayscale image based on the mean adaptive threshold method.

    Inputs:
    gray_img     = Grayscale image data
    max_value    = value to apply above threshold (usually 255 = white)
    object_type  = "light" or "dark" (default: "light")
                   - If object is lighter than the background then standard thresholding is done
                   - If object is darker than the background then inverse thresholding is done

    Returns:
    bin_img      = Thresholded, binary image

    :param gray_img: numpy.ndarray
    :param max_value: int
    :param object_type: str
    :return bin_img: numpy.ndarray
    """
    params.device += 1

    # Set the threshold method
    threshold_method = ""
    if object_type.upper() == "LIGHT":
        threshold_method = cv2.THRESH_BINARY
    elif object_type.upper() == "DARK":
        threshold_method = cv2.THRESH_BINARY_INV
    else:
        fatal_error('Object type ' + str(object_type) + ' is not "light" or "dark"!')

    bin_img = _call_adaptive_threshold(gray_img, max_value, cv2.ADAPTIVE_THRESH_MEAN_C, threshold_method,
                                       "_mean_threshold_")

    return bin_img
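The private helper _call_adaptive_threshold wraps OpenCV's adaptive thresholding. A hedged sketch of the underlying call; the block size and offset below are illustrative values, not necessarily the ones PlantCV uses internally, and "leaf.png" is a hypothetical path.
import cv2

gray = cv2.imread("leaf.png", 0)  # hypothetical grayscale input
bin_img = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                cv2.THRESH_BINARY, 11, 2)  # blockSize=11, C=2 are illustrative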
Example #7
def plot_image(img, cmap=None):
    """Plot an image to the screen.

    :param img: numpy.ndarray
    :param cmap: str
    :return:
    """

    image_type = type(img)

    dimensions = numpy.shape(img)

    if image_type == numpy.ndarray:
        matplotlib.rcParams['figure.dpi'] = params.dpi
        # If the image is color then OpenCV stores it as BGR, we plot it as RGB
        if len(dimensions) == 3:
            plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
            plt.show()

        elif cmap is None and len(dimensions) == 2:
            plt.imshow(img, cmap="gray")
            plt.show()

        elif cmap is not None and len(dimensions) == 2:
            plt.imshow(img, cmap=cmap)
            plt.show()

    elif image_type == matplotlib.figure.Figure:
        fatal_error("Error, matplotlib Figure not supported. Instead try running without plot_image.")

    # Plot if the image is a plotnine ggplot image
    elif str(image_type) == "<class 'plotnine.ggplot.ggplot'>":
        print(img)
Example #8
def print_image(img, filename):
    """Save image to file.

    Inputs:
    img      = image object
    filename = name of file to save image to

    :param img: numpy.ndarray
    :param filename: string
    :return:
    """

    # Print numpy array type images
    image_type = type(img)
    if image_type == numpy.ndarray:
        matplotlib.rcParams['figure.dpi'] = params.dpi
        cv2.imwrite(filename, img)

    # Print matplotlib type images
    elif image_type == matplotlib.figure.Figure:
        img.savefig(filename, dpi=params.dpi)

    # Print ggplot type images
    elif str(image_type) == "<class 'plotnine.ggplot.ggplot'>":
        img.save(filename)

    else:
        fatal_error("Error writing file " + filename + ": " + str(sys.exc_info()[0]))
Example #9
def median_blur(gray_img, ksize):
    """Applies a median blur filter (applies median value to central pixel within a kernel size).

    Inputs:
    gray_img  = Grayscale image data
    ksize = kernel size => integer or tuple, ksize x ksize box if integer, (n, m) size box if tuple

    Returns:
    img_mblur = blurred image


    :param gray_img: numpy.ndarray
    :param ksize: int or tuple
    :return img_mblur: numpy.ndarray
    """

    # Make sure ksize is valid
    if type(ksize) is not int and type(ksize) is not tuple:
        fatal_error("Invalid ksize, must be integer or tuple")

    img_mblur = median_filter(gray_img, size=ksize)
    params.device += 1
    if params.debug == 'print':
        print_image(img_mblur, os.path.join(params.debug_outdir,
                                            str(params.device) + '_median_blur' + str(ksize) + '.png'))
    elif params.debug == 'plot':
        plot_image(img_mblur, cmap='gray')
    return img_mblur
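scipy.ndimage.median_filter accepts either an integer (square window) or a tuple (rectangular window) for size, which is why ksize may be an int or a tuple above. A small demonstration:
import numpy as np
from scipy.ndimage import median_filter

noisy = np.random.randint(0, 256, (50, 50)).astype(np.uint8)
smooth_square = median_filter(noisy, size=5)     # 5 x 5 neighborhood
smooth_rect = median_filter(noisy, size=(3, 7))  # 3 x 7 neighborhood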
Example #10
def db_lookup(database, outdir, query, type, vis=False, nir=False, flu=False):
  # Does the database exist?
  if not os.path.exists(database):
    pcv.fatal_error("The database file " + str(database) + " does not exist");
  
  # Open a connection
  try:
    connect = sq.connect(database)
  except sq.Error as e:
    print("Error %s:" % e.args[0])
Example #11
def main():
  # Get options
  args = options()
  
  # Does the database exist?
  if not os.path.exists(args.database):
    pcv.fatal_error("The database file " + str(args.database) + " does not exist");
  
  # Open a connection
  try:
    connect = sq.connect(args.database)
  except sq.Error as e:
    print("Error %s:" % e.args[0])
Example #12
def mask_bad(float_img, bad_type='native'):
    """ Create a mask with desired "bad" pixels of the input floaat image marked.
    Inputs:
    float_img = image represented by an nd-array (data type: float). Most probably, it is the result of some
                calculation based on the original image. So the datatype is float, and it is possible to have some
                "bad" values, i.e. nan and/or inf
    bad_type = definition of "bad" type, can be 'nan', 'inf' or 'native'
    Returns:
    mask = A mask indicating the locations of "bad" pixels

    :param float_img: numpy.ndarray
    :param bad_type: str
    :return mask: numpy.ndarray
    """
    size_img = np.shape(float_img)
    if len(size_img) != 2:
        fatal_error('Input image is not a single channel image!')

    mask = np.zeros(size_img, dtype='uint8')
    idx_nan, idy_nan = np.where(np.isnan(float_img) == 1)
    idx_inf, idy_inf = np.where(np.isinf(float_img) == 1)

    # If neither nan nor inf exists in the image, print a message; the mask stays all zeros
    if len(idx_nan) == 0 and len(idx_inf) == 0:
        print('Neither nan nor inf appears in the current image.')
    # at least one of the "bad" exists
    # desired bad to mark is "native"
    elif bad_type.lower() == 'native':
        # mask[np.isnan(gray_img)] = 255
        # mask[np.isinf(gray_img)] = 255
        mask[idx_nan, idy_nan] = 255
        mask[idx_inf, idy_inf] = 255
    elif bad_type.lower() == 'nan' and len(idx_nan) >= 1:
        mask[idx_nan, idy_nan] = 255
    elif bad_type.lower() == 'inf' and len(idx_inf) >= 1:
        mask[idx_inf, idy_inf] = 255
    # "bad" exists but not the user desired bad type, return the all-zero mask
    else:
        mask = mask
        print('{} does not appear in the current image.'.format(
            bad_type.lower()))

    _debug(visual=mask,
           filename=os.path.join(params.debug_outdir,
                                 str(params.device) + "_bad_mask.png"))

    return mask
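The commented-out lines inside mask_bad hint at a simpler boolean-indexing form of the same marking logic. A self-contained sketch:
import numpy as np

float_img = np.array([[1.0, np.nan],
                      [np.inf, 2.0]])
mask = np.zeros(float_img.shape, dtype='uint8')
mask[np.isnan(float_img)] = 255  # mark nan pixels
mask[np.isinf(float_img)] = 255  # mark inf pixels
# mask is now 255 at positions (0, 1) and (1, 0), zero elsewhere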
Example #13
def get_color_matrix(rgb_img, mask):
    """ Calculate the average value of pixels in each color chip for each color channel.

    Inputs:
    rgb_img         = RGB image with color chips visualized
    mask        = a gray-scale img with unique values for each segmented space, representing unique, discrete
                    color chips.

    Outputs:
    color_matrix        = a 22x4 matrix containing the average red value, average green value, and average blue value
                            for each color chip.
    headers             = a list of 4 headers corresponding to the 4 columns of color_matrix respectively

    :param rgb_img: numpy.ndarray
    :param mask: numpy.ndarray
    :return headers: string array
    :return color_matrix: numpy.ndarray
    """
    # Autoincrement the device counter
    params.device += 1

    # Check for RGB input
    if len(np.shape(rgb_img)) != 3:
        fatal_error("Input rgb_img is not an RGB image.")
    # Check mask for gray-scale
    if len(np.shape(mask)) != 2:
        fatal_error("Input mask is not an gray-scale image.")

    # create empty color_matrix
    color_matrix = np.zeros((len(np.unique(mask))-1, 4))

    # create headers
    headers = ["chip_number", "r_avg", "g_avg", "b_avg"]

    # declare row_counter variable and initialize to 0
    row_counter = 0

    # for each unique color chip calculate each average RGB value
    for i in np.unique(mask):
        if i != 0:
            chip = rgb_img[np.where(mask == i)]
            color_matrix[row_counter][0] = i
            color_matrix[row_counter][1] = np.mean(chip[:, 2])
            color_matrix[row_counter][2] = np.mean(chip[:, 1])
            color_matrix[row_counter][3] = np.mean(chip[:, 0])
            row_counter += 1

    return headers, color_matrix
Example #14
def apply_mask(img, mask, mask_color, device, debug=None):
    """Apply white image mask to image, with bitwise AND operator bitwise NOT operator and ADD operator.

    Inputs:
    img        = image object, color(RGB)
    mask       = image object, binary (black background with white object)
    mask_color = white or black
    device     = device number. Used to count steps in the pipeline
    debug      = None, print, or plot. Print = save to file, Plot = print to screen.

    Returns:
    device     = device number
    masked_img = masked image

    :param img: numpy array
    :param mask: numpy array
    :param mask_color: str
    :param device: int
    :param debug: str
    :return device: int
    :return masked_img: numpy array
    """

    device += 1
    if mask_color == 'white':
        # Mask image
        masked_img = cv2.bitwise_and(img, img, mask=mask)
        # Create inverted mask for background
        mask_inv = cv2.bitwise_not(mask)
        # Invert the background so that it is white, but apply mask_inv so you don't white out the plant
        white_mask = cv2.bitwise_not(masked_img, mask=mask_inv)
        # Add masked image to white background (can't just use mask_inv because that is a binary)
        white_masked = cv2.add(masked_img, white_mask)
        if debug == 'print':
            print_image(white_masked, (str(device) + '_wmasked.png'))
        elif debug == 'plot':
            plot_image(white_masked)
        return device, white_masked
    elif mask_color == 'black':
        masked_img = cv2.bitwise_and(img, img, mask=mask)
        if debug == 'print':
            print_image(masked_img, (str(device) + '_bmasked.png'))
        elif debug == 'plot':
            plot_image(masked_img)
        return device, masked_img
    else:
        fatal_error('Mask color ' + str(mask_color) +
                    ' is not "white" or "black"!')
Example #15
def ellipse(img, x, y, r1, r2, angle):
    """Create an elliptical ROI.

    Inputs:
    img           = An RGB or grayscale image to plot the ROI on in debug mode.
    x             = The x-coordinate of the center of the ellipse.
    y             = The y-coordinate of the center of the ellipse.
    r1            = The radius of the minor axis.
    r2            = The radius of the major axis.
    angle         = The angle of rotation in degrees of the major axis.

    Outputs:
    roi_contour   = An ROI set of points (contour).
    roi_hierarchy = The hierarchy of ROI contour(s).

    :param img: numpy.ndarray
    :param x: int
    :param y: int
    :param r1: int
    :param r2: int
    :param angle: double
    :return roi_contour: list
    :return roi_hierarchy: numpy.ndarray
    """
    # Autoincrement the device counter
    params.device += 1

    # Get the height and width of the reference image
    height, width = np.shape(img)[:2]

    # Initialize a binary image of the ellipse
    bin_img = np.zeros((height, width), dtype=np.uint8)
    # Draw the ellipse on the binary image
    cv2.ellipse(bin_img, (x, y), (r1, r2), angle, 0, 360, 255, -1)

    # Use the binary image to create an ROI contour
    roi_contour, roi_hierarchy = cv2.findContours(bin_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]

    # Draw the ROI if requested
    if params.debug is not None:
        _draw_roi(img=img, roi_contour=roi_contour)

    # Check whether the ellipse extends outside the image using the row and column sums of the edges
    if (np.sum(bin_img[0, :]) + np.sum(bin_img[-1, :]) + np.sum(bin_img[:, 0]) + np.sum(bin_img[:, -1]) > 0) or \
            len(roi_contour) == 0:
        fatal_error("The ROI extends outside of the image, or ROI is not on the image!")

    return roi_contour, roi_hierarchy
Example #16
def apply_mask(rgb_img, mask, mask_color):
    """Apply white image mask to image, with bitwise AND operator bitwise NOT operator and ADD operator.

    Inputs:
    rgb_img    = RGB image data
    mask       = Binary mask image data
    mask_color = 'white' or 'black'

    Returns:
    masked_img = masked image data

    :param rgb_img: numpy.ndarray
    :param mask: numpy.ndarray
    :param mask_color: str
    :return masked_img: numpy.ndarray
    """

    params.device += 1
    if mask_color == 'white':
        # Mask image
        masked_img = cv2.bitwise_and(rgb_img, rgb_img, mask=mask)
        # Create inverted mask for background
        mask_inv = cv2.bitwise_not(mask)
        # Invert the background so that it is white, but apply mask_inv so you don't white out the plant
        white_mask = cv2.bitwise_not(masked_img, mask=mask_inv)
        # Add masked image to white background (can't just use mask_inv because that is a binary)
        white_masked = cv2.add(masked_img, white_mask)
        if params.debug == 'print':
            print_image(
                white_masked,
                os.path.join(params.debug_outdir,
                             str(params.device) + '_wmasked.png'))
        elif params.debug == 'plot':
            plot_image(white_masked)
        return white_masked
    elif mask_color == 'black':
        masked_img = cv2.bitwise_and(rgb_img, rgb_img, mask=mask)
        if params.debug == 'print':
            print_image(
                masked_img,
                os.path.join(params.debug_outdir,
                             str(params.device) + '_bmasked.png'))
        elif params.debug == 'plot':
            plot_image(masked_img)
        return masked_img
    else:
        fatal_error('Mask color ' + str(mask_color) +
                    ' is not "white" or "black"!')
Example #17
def get_color_matrix(rgb_img, mask):
    """ Calculate the average value of pixels in each color chip for each color channel.

    Inputs:
    rgb_img         = RGB image with color chips visualized
    mask        = a gray-scale img with unique values for each segmented space, representing unique, discrete
                    color chips.

    Outputs:
    color_matrix        = a 22x4 matrix containing the average red value, average green value, and average blue value
                            for each color chip.
    headers             = a list of 4 headers corresponding to the 4 columns of color_matrix respectively

    :param rgb_img: numpy.ndarray
    :param mask: numpy.ndarray
    :return headers: string array
    :return color_matrix: numpy.ndarray
    """
    # Autoincrement the device counter
    params.device += 1

    # Check for RGB input
    if len(np.shape(rgb_img)) != 3:
        fatal_error("Input rgb_img is not an RGB image.")
    # Check mask for gray-scale
    if len(np.shape(mask)) != 2:
        fatal_error("Input mask is not an gray-scale image.")

    # create empty color_matrix
    color_matrix = np.zeros((len(np.unique(mask))-1, 4))

    # create headers
    headers = ["chip_number", "r_avg", "g_avg", "b_avg"]

    # declare row_counter variable and initialize to 0
    row_counter = 0

    # for each unique color chip calculate each average RGB value
    for i in np.unique(mask):
        if i != 0:
            chip = rgb_img[np.where(mask == i)]
            color_matrix[row_counter][0] = i
            color_matrix[row_counter][1] = np.mean(chip[:, 2])
            color_matrix[row_counter][2] = np.mean(chip[:, 1])
            color_matrix[row_counter][3] = np.mean(chip[:, 0])
            row_counter += 1

    return headers, color_matrix
Example #18
def save_matrix(matrix, filename):
    """Serialize a matrix as a numpy.ndarray object and save it to a .npz file.
    Inputs:
    matrix      = a numpy.matrix
    filename    = name of file to which matrix will be saved. Must end in .npz

    :param matrix: numpy.ndarray
    :param filename: string ending in ".npz"
    """
    if ".npz" not in filename:
        fatal_error("File must be an .npz file.")

    # Autoincrement the device counter
    params.device += 1

    np.savez(filename, matrix)
Example #19
def save_matrix(matrix, filename):
    """ Serializes a matrix as an numpy.ndarray object and save to a .npz file.
    Inputs:
    matrix      = a numpy.matrix
    filename    = name of file to which matrix will be saved. Must end in .npz

    :param matrix: numpy.ndarray
    :param filename: string ending in ".npz"
    """
    if ".npz" not in filename:
        fatal_error("File must be an .npz file.")

    # Autoincrement the device counter
    params.device += 1

    np.savez(filename, matrix)
Example #20
def rectangle(x, y, h, w, img):
    """Create a rectangular ROI.

    Inputs:
    x             = The x-coordinate of the upper left corner of the rectangle.
    y             = The y-coordinate of the upper left corner of the rectangle.
    h             = The height of the rectangle.
    w             = The width of the rectangle.
    img           = An RGB or grayscale image to plot the ROI on in debug mode.

    Outputs:
    roi_contour   = An ROI set of points (contour).
    roi_hierarchy = The hierarchy of ROI contour(s).

    :param x: int
    :param y: int
    :param h: int
    :param w: int
    :param img: numpy.ndarray
    :return roi_contour: list
    :return roi_hierarchy: numpy.ndarray
    """
    # Autoincrement the device counter
    params.device += 1

    # Get the height and width of the reference image
    height, width = np.shape(img)[:2]

    # Check whether the ROI is correctly bounded inside the image
    if x < 0 or y < 0 or x + w > width or y + h > height:
        fatal_error("The ROI extends outside of the image!")

    # Create the rectangle contour vertices
    pt1 = [x, y]
    pt2 = [x, y + h - 1]
    pt3 = [x + w - 1, y + h - 1]
    pt4 = [x + w - 1, y]

    # Create the ROI contour
    roi_contour = [np.array([[pt1], [pt2], [pt3], [pt4]], dtype=np.int32)]
    roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)

    # Draw the ROI if requested
    if params.debug is not None:
        _draw_roi(img=img, roi_contour=roi_contour)

    return roi_contour, roi_hierarchy
Example #21
def readimage(filename, mode="native"):
    """Read image from file.

    Inputs:
    filename = name of image file
    mode     = mode of imread ("native", "rgb", "rgba", "gray", "csv")

    Returns:
    img      = image object as numpy array
    path     = path to image file
    img_name = name of image file

    :param filename: str
    :param mode: str
    :return img: numpy.ndarray
    :return path: str
    :return img_name: str
    """
    if mode.upper() == "GRAY" or mode.upper() == "GREY":
        img = cv2.imread(filename, 0)
    elif mode.upper() == "RGB":
        img = cv2.imread(filename)
    elif mode.upper() == "RGBA":
        img = cv2.imread(filename, -1)
    elif mode.upper() == "CSV":
        inputarray = pd.read_csv(filename, sep=',', header=None)
        img = inputarray.values
    else:
        img = cv2.imread(filename, -1)

    # Default to drop alpha channel if user doesn't specify 'rgba'
    if len(np.shape(img)) == 3 and np.shape(
            img)[2] == 4 and mode.upper() == "NATIVE":
        img = cv2.imread(filename)

    if img is None:
        fatal_error("Failed to open " + filename)

    # Split path from filename
    path, img_name = os.path.split(filename)

    if params.debug == "print":
        print_image(img, os.path.join(params.debug_outdir, "input_image.png"))
    elif params.debug == "plot":
        plot_image(img)

    return img, path, img_name
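A minimal usage sketch for readimage (assumptions: plantcv is installed and "input.png" is a hypothetical file):
from plantcv import plantcv as pcv

img, path, img_name = pcv.readimage(filename="input.png", mode="native")
print(img.shape, path, img_name)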
Example #22
def overlay_two_imgs(img1, img2, alpha=0.5):
    """Overlay two images with a given alpha value.

    Inputs:
    img1     - RGB or grayscale image data
    img2     - RGB or grayscale image data
    alpha    - Desired opacity of the first image, range: [0, 1], default alpha=0.5

    Returns:
    out_img  - Blended RGB image

    :param img1: numpy.ndarray
    :param img2: numpy.ndarray
    :param alpha: float
    :return: out_img: numpy.ndarray
    """
    # Validate alpha
    if alpha > 1 or alpha < 0:
        fatal_error("The value of alpha should be in the range [0, 1]!")

    # Validate image sizes are the same
    size_img1 = img1.shape[0:2]
    size_img2 = img2.shape[0:2]
    if size_img1 != size_img2:
        fatal_error(f"The height/width of img1 ({size_img1}) needs to match img2 ({size_img2}).")

    # Copy the input images
    img1_ = np.copy(img1)
    img2_ = np.copy(img2)
    # If the images are grayscale convert to BGR
    if len(img1_.shape) == 2:
        img1_ = cv2.cvtColor(img1_, cv2.COLOR_GRAY2BGR)
    if len(img2_.shape) == 2:
        img2_ = cv2.cvtColor(img2_, cv2.COLOR_GRAY2BGR)

    # initialize the output image
    out_img = np.zeros(size_img1 + (3,), dtype=np.uint8)

    # blending
    out_img[:, :, :] = (alpha * img1_[:, :, :]) + ((1 - alpha) * img2_[:, :, :])

    params.device += 1
    if params.debug == 'print':
        print_image(out_img, os.path.join(params.debug_outdir, str(params.device) + '_overlay.png'))
    elif params.debug == 'plot':
        plot_image(out_img)
    return out_img
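The per-channel blend above computes alpha * img1 + (1 - alpha) * img2; OpenCV's built-in weighted sum does the same arithmetic. A sketch of the equivalent call:
import cv2
import numpy as np

img1 = np.full((10, 10, 3), 200, dtype=np.uint8)
img2 = np.full((10, 10, 3), 50, dtype=np.uint8)
alpha = 0.5
blended = cv2.addWeighted(img1, alpha, img2, 1 - alpha, 0)  # same result as the manual blend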
Example #23
def rectangle(img, x, y, h, w):
    """Create a rectangular ROI.

    Inputs:
    img           = An RGB or grayscale image to plot the ROI on in debug mode.
    x             = The x-coordinate of the upper left corner of the rectangle.
    y             = The y-coordinate of the upper left corner of the rectangle.
    h             = The height of the rectangle.
    w             = The width of the rectangle.

    Outputs:
    roi_contour   = An ROI set of points (contour).
    roi_hierarchy = The hierarchy of ROI contour(s).

    :param img: numpy.ndarray
    :param x: int
    :param y: int
    :param h: int
    :param w: int
    :return roi_contour: list
    :return roi_hierarchy: numpy.ndarray
    """
    # Autoincrement the device counter
    params.device += 1

    # Get the height and width of the reference image
    height, width = np.shape(img)[:2]

    # Check whether the ROI is correctly bounded inside the image
    if x < 0 or y < 0 or x + w > width or y + h > height:
        fatal_error("The ROI extends outside of the image!")

    # Create the rectangle contour vertices
    pt1 = [x, y]
    pt2 = [x, y + h - 1]
    pt3 = [x + w - 1, y + h - 1]
    pt4 = [x + w - 1, y]

    # Create the ROI contour
    roi_contour = [np.array([[pt1], [pt2], [pt3], [pt4]], dtype=np.int32)]
    roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)

    # Draw the ROI if requested
    if params.debug is not None:
        _draw_roi(img=img, roi_contour=roi_contour)

    return roi_contour, roi_hierarchy
Example #24
def resize_factor(img, factors, interpolation="auto"):
    """Resize input image to a new size using resize factors along x and y axes.

    Inputs:
    img           = RGB or grayscale image data
    factors       = Resizing factors (width, height). E.g. (0.5, 0.5)
    interpolation = Interpolation method (if requested):
                      "auto" = select method automatically (default)
                      "area" = resampling using pixel area (OpenCV INTER_AREA)
                      "bicubic" = bicubic interpolation (OpenCV INTER_CUBIC)
                      "bilinear" = bilinear interpolation (OpenCV INTER_LINEAR)
                      "lanczos" = Lanczos interpolation (OpenCV INTER_LANCZOS4)
                      "nearest" = nearest-neighbor interpolation (OpenCV INTER_NEAREST)

    Returns:
    resized_img   = Resized image

    :param img: numpy.ndarray
    :param factors: tuple
    :param interpolation: str
    :return resized_img: numpy.ndarray
    """
    params.device += 1
    if not isinstance(factors, tuple) or len(factors) != 2 or not all(
        [n > 0 for n in factors]):
        fatal_error(
            f"The input factors={factors} should be a tuple of length 2 with values greater than 0."
        )

    interp_mtd = _set_interpolation(input_size=(1, 1),
                                    output_size=factors,
                                    method=interpolation)
    resized_img = cv2.resize(img, (0, 0),
                             fx=factors[0],
                             fy=factors[1],
                             interpolation=interp_mtd)

    if params.debug == 'print':
        print_image(
            resized_img,
            os.path.join(params.debug_outdir,
                         str(params.device) + "_resize.png"))
    elif params.debug == 'plot':
        plot_image(resized_img)

    return resized_img
Example #25
    def get_file_list(self):
        # Get the list of all files
        if self.list_files is not None:
            self.list_f = self.list_files
        else:
            self.list_f = [f for f in os.listdir(self.imagedir) if f.endswith(self.suffix)]
        self.list_f.sort()
        if len(self.list_f) == 1:
            print('There is only 1 image.')
        elif len(self.list_f) > 1:
            print('There are {} images.'.format(len(self.list_f)))
        else:
            fatal_error('No images found for the given suffix and file list conditions, please double check!')
Example #26
def print_image(img, filename):
    """Save image to file.

    Inputs:
    img      = image object
    filename = name of file to save image to

    :param img: numpy.ndarray
    :param filename: string
    :return:
    """

    try:
        cv2.imwrite(filename, img)
    except Exception:
        fatal_error("Error writing file " + filename + ": " +
                    str(sys.exc_info()[0]))
Example #27
def ellipse(img, x, y, r1, r2, angle):
    """Create an elliptical ROI.

    Inputs:
    img           = An RGB or grayscale image to plot the ROI on in debug mode.
    x             = The x-coordinate of the center of the ellipse.
    y             = The y-coordinate of the center of the ellipse.
    r1            = The radius of the major axis.
    r2            = The radius of the minor axis.
    angle         = The angle of rotation in degrees of the major axis.

    Outputs:
    roi_contour   = An ROI set of points (contour).
    roi_hierarchy = The hierarchy of ROI contour(s).

    :param img: numpy.ndarray
    :param x: int
    :param y: int
    :param r1: int
    :param r2: int
    :param angle: double
    :return roi_contour: list
    :return roi_hierarchy: numpy.ndarray
    """
    # Autoincrement the device counter
    params.device += 1

    # Get the height and width of the reference image
    height, width = np.shape(img)[:2]

    # Initialize a binary image of the ellipse
    bin_img = np.zeros((height, width), dtype=np.uint8)
    # Draw the ellipse on the binary image
    cv2.ellipse(bin_img, (x, y), (r1, r2), angle, 0, 360, 255, -1)

    if np.sum(bin_img[0, :]) + np.sum(bin_img[-1, :]) + np.sum(bin_img[:, 0]) + np.sum(bin_img[:, -1]) > 0:
        fatal_error("The ROI extends outside of the image!")

    # Use the binary image to create an ROI contour
    roi_contour, roi_hierarchy = cv2.findContours(np.copy(bin_img), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]

    # Draw the ROI if requested
    if params.debug is not None:
        _draw_roi(img=img, roi_contour=roi_contour)

    return roi_contour, roi_hierarchy
Example #28
def circle(x, y, r, img):
    """Create a circular ROI.

    Inputs:
    x             = The x-coordinate of the center of the circle.
    y             = The y-coordinate of the center of the circle.
    r             = The radius of the circle.
    img           = An RGB or grayscale image to plot the ROI on in debug mode.

    Outputs:
    roi_contour   = An ROI set of points (contour).
    roi_hierarchy = The hierarchy of ROI contour(s).

    :param x: int
    :param y: int
    :param r: int
    :param img: numpy.ndarray
    :return roi_contour: list
    :return roi_hierarchy: numpy.ndarray
    """
    # Autoincrement the device counter
    params.device += 1

    # Get the height and width of the reference image
    height, width = np.shape(img)[:2]

    # Check whether the ROI is correctly bounded inside the image
    if x - r < 0 or x + r > width or y - r < 0 or y + r > height:
        fatal_error("The ROI extends outside of the image!")

    # Initialize a binary image of the circle
    bin_img = np.zeros((height, width), dtype=np.uint8)
    # Draw the circle on the binary image
    cv2.circle(bin_img, (x, y), r, 255, -1)

    # Use the binary image to create an ROI contour
    roi_contour, roi_hierarchy = cv2.findContours(np.copy(bin_img),
                                                  cv2.RETR_TREE,
                                                  cv2.CHAIN_APPROX_NONE)[-2:]

    # Draw the ROI if requested
    if params.debug is not None:
        _draw_roi(img=img, roi_contour=roi_contour)

    return roi_contour, roi_hierarchy
Example #29
def saturation(rgb_img, threshold=255, channel="any"):
    """Return a mask filtering out saturated pixels.

    Inputs:
    rgb_img    = RGB image
    threshold  = value for threshold, above which is considered saturated
    channel    = how many channels must be saturated for the pixel to be masked out ("any", "all")

    Returns:
    bin_img    = A binary image with the saturated regions blacked out.

    :param rgb_img: numpy.ndarray
    :param threshold: int
    :param channel: str
    :return bin_img: numpy.ndarray
    """

    params.device += 1

    # Mask red, green, and blue saturation separately
    b, g, r = cv2.split(rgb_img)
    b_saturated = cv2.inRange(b, threshold, 255)
    g_saturated = cv2.inRange(g, threshold, 255)
    r_saturated = cv2.inRange(r, threshold, 255)

    # Combine channel masks
    if channel.lower() == "any":
        # Consider a pixel saturated if any channel is saturated
        saturated = cv2.bitwise_or(b_saturated, g_saturated)
        saturated = cv2.bitwise_or(saturated, r_saturated)
    elif channel.lower() == "all":
        # Consider a pixel saturated only if all channels are saturated
        saturated = cv2.bitwise_and(b_saturated, g_saturated)
        saturated = cv2.bitwise_and(saturated, r_saturated)
    else:
        fatal_error(str(channel) + " is not a valid option. Channel must be either 'any', or 'all'.")

    # Invert "saturated" before returning, so saturated = black
    bin_img = cv2.bitwise_not(saturated)

    if params.debug == 'print':
        print_image(bin_img, os.path.join(params.debug_outdir, str(params.device) + '_saturation_threshold.png'))
    elif params.debug == 'plot':
        plot_image(bin_img, cmap='gray')
    return bin_img
Example #30
def process_results(valid_meta, job_dir, json_file):
    """Get results from individual files. Parse the results and recompile for SQLite.

    Args:
        valid_meta:           Dictionary of valid metadata keys.
        job_dir:              Intermediate file output directory.
        json_file:            Json data table filehandle object.

    :param valid_meta: dict
    :param job_dir: str
    :param json_file: obj
    """

    if os.path.exists(json_file):
        with open(json_file, 'r') as datafile:
            try:
                data = json.load(datafile)
                if "variables" not in data or "entities" not in data:
                    fatal_error("Invalid JSON file")
            except ValueError:
                fatal_error("Invalid JSON file")
    else:
        # Data dictionary
        data = {"variables": {}, "entities": []}


    # Walk through the image processing job directory and process data from each file
    for (dirpath, dirnames, filenames) in os.walk(job_dir):
        for filename in filenames:
            # Make sure file is a text file
            if 'text/plain' in mimetypes.guess_type(filename):
                # Open results file
                with open(os.path.join(dirpath, filename)) as results:
                    obs = json.load(results)
                    data["entities"].append(obs)
                    # Keep track of all metadata variables stored
                    for vars in obs["metadata"].keys():
                        data["variables"][vars] = 1
                    # Keep track of all observations variables stored
                    for othervars in obs["observations"].keys():
                        data["variables"][othervars] = 1

    # Write out json file with info from all images
    with open(json_file, 'w') as datafile:
        json.dump(data, datafile)
Example #31
    def visualize_separate(self,
                           updated=False,
                           colors=None,
                           print_img=False,
                           savepath=None):
        """
        Inputs:
        savepath: path (subfolder) to save the separate visualization
        savepath = os.path.join(savepath_all, 'masks', imgname)
        if not os.path.isdir(savepath):
            os.makedirs(savepath)
        """
        if print_img is True and savepath is None:
            fatal_error("You must provide a savepath to save the result!")
        if colors is None:
            colors = _random_colors(50)
        image = self.image
        if updated is False:
            r = self.segment
            title_ = ''
        elif updated is True:
            r = self.segment_
            title_ = 'Updated'
        num_instances = r['masks'].shape[2]

        for idx in range(0, num_instances):
            mask_i = np.expand_dims(r['masks'][:, :, idx], 2)
            roi_i = np.expand_dims(r['rois'][idx], 0)
            class_id_i = np.expand_dims(r['class_ids'][idx], 0)
            score_i = np.expand_dims(r['scores'][idx], 0)
            visualize.display_instances(image,
                                        roi_i,
                                        mask_i,
                                        class_id_i,
                                        self.class_names,
                                        score_i,
                                        ax=_get_ax(rows=1, cols=1, size=16),
                                        show_bbox=True,
                                        show_mask=True,
                                        title="{} Leaf {}".format(title_, idx),
                                        colors=colors[idx:idx + 1])
            if print_img is True:
                plt.savefig(os.path.join(savepath, 'leaf_{}.png'.format(idx)))
                plt.close('all')
Example #32
def within_frame(mask):
    """
    This function tests whether the plant touches the edge of the image, i.e. it is completely in the field of view.
    Input:
    mask = a binary image of 0 and nonzero values

    Returns:
    in_bounds = a boolean (True or False) confirming that the object does not touch the edge of the image

    :param mask: numpy.ndarray
    :return in_bounds: bool

    """

    # Check if object is touching image boundaries (QC)
    if len(np.shape(mask)) > 2 or len(np.unique(mask)) > 2:
        fatal_error("Mask should be a binary image of 0 and nonzero values.")

    # First column
    first_col = mask[:, 0]

    # Last column
    last_col = mask[:, -1]

    # First row
    first_row = mask[0, :]

    # Last row
    last_row = mask[-1, :]

    edges = np.concatenate([first_col, last_col, first_row, last_row])

    out_of_bounds = bool(np.count_nonzero(edges))
    in_bounds = not out_of_bounds

    outputs.add_observation(variable='in_bounds',
                            trait='whether the plant goes out of bounds',
                            method='plantcv.plantcv.within_frame',
                            scale='none',
                            datatype=bool,
                            value=in_bounds,
                            label='none')

    return in_bounds
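A quick demonstration of the edge-concatenation check, using a mask whose object touches the top row:
import numpy as np

mask = np.zeros((10, 10), dtype=np.uint8)
mask[0, 3] = 255  # object pixel on the top edge
edges = np.concatenate([mask[:, 0], mask[:, -1], mask[0, :], mask[-1, :]])
in_bounds = not bool(np.count_nonzero(edges))
print(in_bounds)  # False: the object touches the frame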
Example #33
def process_results(job_dir, json_file):
    """Get results from individual files. Parse the results and recompile for SQLite.

    Args:
        job_dir:              Intermediate file output directory.
        json_file:            Json data table filehandle object.

    :param job_dir: str
    :param json_file: obj
    """

    if os.path.exists(json_file):
        with open(json_file, 'r') as datafile:
            try:
                data = json.load(datafile)
                if "variables" not in data or "entities" not in data:
                    fatal_error("Invalid JSON file")
            except ValueError:
                fatal_error("Invalid JSON file")
    else:
        # Data dictionary
        data = {"variables": {}, "entities": []}


    # Walk through the image processing job directory and process data from each file
    for (dirpath, dirnames, filenames) in os.walk(job_dir):
        for filename in filenames:
            # Make sure file is a text or json file
            if 'text/plain' in mimetypes.guess_type(filename) or 'application/json' in mimetypes.guess_type(filename):
                # Open results file
                with open(os.path.join(dirpath, filename)) as results:
                    obs = json.load(results)
                    data["entities"].append(obs)
                    # Keep track of all metadata variables stored
                    for vars in obs["metadata"].keys():
                        data["variables"][vars] = {"category": "metadata", "datatype": "<class 'str'>"}
                    # Keep track of all observations variables stored
                    for othervars in obs["observations"].keys():
                        data["variables"][othervars] = {"category": "observations",
                                                        "datatype": obs["observations"][othervars]["datatype"]}

    # Write out json file with info from all images
    with open(json_file, 'w') as datafile:
        json.dump(data, datafile)
Example #34
def readimage(filename, mode="native"):
    """Read image from file.

    Inputs:
    filename = name of image file
    mode     = mode of imread ("native", "rgb", "rgba", "gray")

    Returns:
    img      = image object as numpy array
    path     = path to image file
    img_name = name of image file

    :param filename: str
    :param mode: str
    :return img: numpy.ndarray
    :return path: str
    :return img_name: str
    """
    if mode.upper() == "GRAY" or mode.upper() == "GREY":
        img = cv2.imread(filename, 0)
    elif mode.upper() == "RGB":
        img = cv2.imread(filename)
    elif mode.upper() == "RGBA":
        img = cv2.imread(filename, -1)
    else:
        img = cv2.imread(filename, -1)

    # Default to drop alpha channel if user doesn't specify 'rgba'
    if len(np.shape(img)) == 3 and np.shape(img)[2] == 4 and mode.upper() == "NATIVE":
        img = cv2.imread(filename)

    if img is None:
        fatal_error("Failed to open " + filename)

    # Split path from filename
    path, img_name = os.path.split(filename)

    if params.debug == "print":
        print_image(img, os.path.join(params.debug_outdir, "input_image.png"))
    elif params.debug == "plot":
        plot_image(img)

    return img, path, img_name
Example #35
def read_hyperspectral(path):
    """this function allows you read in hyperspectral images in raw format (needs associated .hdr file)

    Inputs:
    path     = path to .hdr file, there is the assumption that .hdr file name matches raw image name

    Returns:
    hyperimge = image mask
    bands = band centers
    path = path to hyperspectral image
    filename = name of hyperspectral image

    :param hyperimg: spectral object
    :param bands: list of band centers
    :param path: string
    :return filname: string
    """

    params.device += 1

    if path.endswith(".hdr") == False:
        fatal_error("Input is not an .hdr file")
    if os.path.isfile(path) == False:
        fatal_error(str(path) + " does not exist")

    path1, filename = os.path.split(path)
    hyperimg = spectral.open_image(path)
    bands = hyperimg.bands.centers

    if params.debug == "print":
        message = str(
            filename
        ) + "_input_image.png" + " succesfully opened. With a total of " + str(
            len(bands)) + " bands."
        print(message)
    elif params.debug == "plot":
        message = str(
            filename
        ) + "_input_image.png" + " succesfully opened. With a total of " + str(
            len(bands)) + " bands."
        print(message)

    return hyperimg, bands, path, filename
Example #36
def circle(img, x, y, r):
    """Create a circular ROI.

    Inputs:
    img           = An RGB or grayscale image to plot the ROI on in debug mode.
    x             = The x-coordinate of the center of the circle.
    y             = The y-coordinate of the center of the circle.
    r             = The radius of the circle.

    Outputs:
    roi_contour   = An ROI set of points (contour).
    roi_hierarchy = The hierarchy of ROI contour(s).

    :param img: numpy.ndarray
    :param x: int
    :param y: int
    :param r: int
    :return roi_contour: list
    :return roi_hierarchy: numpy.ndarray
    """
    # Autoincrement the device counter
    params.device += 1

    # Get the height and width of the reference image
    height, width = np.shape(img)[:2]

    # Check whether the ROI is correctly bounded inside the image
    if x - r < 0 or x + r > width or y - r < 0 or y + r > height:
        fatal_error("The ROI extends outside of the image!")

    # Initialize a binary image of the circle
    bin_img = np.zeros((height, width), dtype=np.uint8)
    # Draw the circle on the binary image
    cv2.circle(bin_img, (x, y), r, 255, -1)

    # Use the binary image to create an ROI contour
    roi_contour, roi_hierarchy = cv2.findContours(np.copy(bin_img), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]

    # Draw the ROI if requested
    if params.debug is not None:
        _draw_roi(img=img, roi_contour=roi_contour)

    return roi_contour, roi_hierarchy
Example #37
def read_hyperspectral_gdal(path):
    """Read in a hyperspectral image in raw format using GDAL.

    Inputs:
    path      = path to the hyperspectral .bil file

    Returns:
    gdalhyper = GDAL dataset object for the hyperspectral image
    bands     = first raster band of the dataset
    path      = path to hyperspectral image
    filename  = name of hyperspectral image

    :param path: string
    :return gdalhyper: osgeo.gdal.Dataset
    :return bands: osgeo.gdal.Band
    :return path: string
    :return filename: string
    """

    params.device += 1

    if not os.path.isfile(path):
        fatal_error(str(path) + " does not exist")

    path1, filename = os.path.split(path)
    gdalhyper = gdal.Open(path)
    bands = gdalhyper.GetRasterBand(1)
    bandNo = gdalhyper.RasterCount
    print("Band Type={}".format(gdal.GetDataTypeName(bands.DataType)))

    if params.debug == "print":
        message = str(
            filename
        ) + "_input_image.png" + " succesfully opened. With a total of " + str(
            bandNo) + " bands."
        print(message)
    elif params.debug == "plot":
        message = str(
            filename
        ) + "_input_image.png" + " succesfully opened. With a total of " + str(
            bandNo) + " bands."
        print(message)

    return hyperimg, bands, path, filename
Example #38
def rgb2gray_hsv(img, channel, device, debug=None):
    """Convert an RGB color image to HSV colorspace and return a gray image (one channel).

    Inputs:
    img     = image object, RGB colorspace
    channel = color subchannel (h = hue, s = saturation, v = value/intensity/brightness)
    device  = device number. Used to count steps in the pipeline
    debug   = None, print, or plot. Print = save to file, Plot = print to screen.

    Returns:
    device    = device number
    h | s | v = image from single HSV channel

    :param img: numpy array
    :param channel: str
    :param device: int
    :param debug: str
    :return device: int
    :return channel: numpy array
    """
    # Auto-increment the device counter
    device += 1

    # The allowable channel inputs are h, s or v
    names = {"h": "hue", "s": "saturation", "v": "value"}
    if channel not in names:
        fatal_error("Channel " + str(channel) + " is not h, s or v!")

    # Convert the input BGR image to HSV colorspace
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # Split HSV channels
    h, s, v = cv2.split(hsv)
    # Create a channel dictionaries for lookups by a channel name index
    channels = {"h": h, "s": s, "v": v}

    if debug == "print":
        print_image(channels[channel],
                    str(device) + "_hsv_" + names[channel] + ".png")
    elif debug == "plot":
        plot_image(channels[channel], cmap="gray")

    return device, channels[channel]
Example #39
def rgb2gray_lab(img, channel, device, debug=None):
    """Convert image from RGB colorspace to LAB colorspace. Returns the specified subchannel as a gray image.

    Inputs:
    img       = image object, RGB colorspace
    channel   = color subchannel (l = lightness, a = green-magenta, b = blue-yellow)
    device    = device number. Used to count steps in the pipeline
    debug     = None, print, or plot. Print = save to file, Plot = print to screen.

    Returns:
    device    = device number
    l | a | b = grayscale image from one LAB color channel

    :param img: numpy array
    :param channel: str
    :param device: int
    :param debug: str
    :return device: int
    :return channel: numpy array
    """
    # Auto-increment the device counter
    device += 1
    # The allowable channel inputs are l, a or b
    names = {"l": "lightness", "a": "green-magenta", "b": "blue-yellow"}
    if channel not in names:
        fatal_error("Channel " + str(channel) + " is not l, a or b!")

    # Convert the input BGR image to LAB colorspace
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    # Split LAB channels
    l, a, b = cv2.split(lab)
    # Create a channel dictionaries for lookups by a channel name index
    channels = {"l": l, "a": a, "b": b}

    if debug == "print":
        print_image(channels[channel],
                    str(device) + "_lab_" + names[channel] + ".png")
    elif debug == "plot":
        plot_image(channels[channel], cmap="gray")

    return device, channels[channel]
Example #40
def grab_random(database, random, imgtype, camera, outdir, verbose):
    # Does the database exist?
    if not os.path.exists(database):
        pcv.fatal_error("The database file " + str(database) + " does not exist")

    # Open a connection
    try:
        connect = sq.connect(database)
    except sq.Error as e:
        print("Error %s:" % e.args[0])
    else:
        # Replace the row_factory result constructor with a dictionary constructor
        connect.row_factory = dict_factory
        # Change the text output format from unicode to UTF-8
        connect.text_factory = str

        # Database handler
        db = connect.cursor()
        imageid_list = []
        num = random
        if verbose:
            print(num)
        list_random = db.execute('select * from metadata where imgtype=? and camera=? order by random() limit ?',
                                 (imgtype, camera, num,))
        for i, x in enumerate(list_random):
            imgid = x['image_id']
            imageid_list.append(imgid)

        if verbose:
            print(imageid_list)

        for imgid in imageid_list:
            get_image = db.execute('select * from metadata where image_id=?', (imgid,))
            for row in get_image:
                dt = datetime.datetime.strptime(row['timestamp'], "%Y-%m-%d %H:%M:%S.%f").strftime('%Y-%m-%d-%H-%M-%S')
                img_name = os.path.join(outdir,
                                        row['plantbarcode'] + "_" + dt + "_" + os.path.basename(row['image']))
                copy(row['image'], img_name)
                if verbose:
                    print("copying")
                    print(img_name)
Example #41
def calc_transformation_matrix(matrix_m, matrix_b):
    """ Calculates transformation matrix (transformation_matrix).

    Inputs:
    matrix_m    = a 9x22 Moore-Penrose inverse matrix
    matrix_b    = a 22x9 matrix of linear, square, and cubic rgb values from target_img

    Outputs:
    1-t_det     = "deviance" the measure of how greatly the source image deviates from the target image's color space.
                    Two images of the same color space should have a deviance of ~0.
    transformation_matrix    = a 9x9 matrix of linear, square, and cubic transformation coefficients


    :param matrix_m: numpy.ndarray
    :param matrix_b: numpy.ndarray
    :return 1-t_det: float
    :return transformation_matrix: numpy.ndarray
    """
    # check matrix_m and matrix_b are matrices
    if len(np.shape(matrix_b)) != 2 or len(np.shape(matrix_m)) != 2:
        fatal_error(
            "matrix_m and matrix_b must be n x m matrices such that m,n != 1.")
    # check matrix_b has 9 columns
    if np.shape(matrix_b)[1] != 9:
        fatal_error("matrix_b must have 9 columns.")
    # check matrix_m and matrix_b for multiplication
    if np.shape(matrix_m)[0] != np.shape(matrix_b)[1] or np.shape(
            matrix_m)[1] != np.shape(matrix_b)[0]:
        fatal_error("Cannot multiply matrices.")

    # Autoincrement the device counter
    params.device += 1

    t_r, t_r2, t_r3, t_g, t_g2, t_g3, t_b, t_b2, t_b3 = np.split(
        matrix_b, 9, 1)

    # multiply each 22x1 matrix from target color space by matrix_m
    red = np.matmul(matrix_m, t_r)
    green = np.matmul(matrix_m, t_g)
    blue = np.matmul(matrix_m, t_b)

    red2 = np.matmul(matrix_m, t_r2)
    green2 = np.matmul(matrix_m, t_g2)
    blue2 = np.matmul(matrix_m, t_b2)

    red3 = np.matmul(matrix_m, t_r3)
    green3 = np.matmul(matrix_m, t_g3)
    blue3 = np.matmul(matrix_m, t_b3)

    # concatenate each product column into 9X9 transformation matrix
    transformation_matrix = np.concatenate(
        (red, green, blue, red2, green2, blue2, red3, green3, blue3), 1)

    # find determinant of transformation matrix
    t_det = np.linalg.det(transformation_matrix)

    return 1 - t_det, transformation_matrix
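The nine column multiplications amount to a single matrix product; the function above additionally reorders the columns to (r, g, b, r2, g2, b2, r3, g3, b3) before taking the determinant. A sketch with illustrative random matrices of the documented shapes:
import numpy as np

matrix_m = np.random.rand(9, 22)   # Moore-Penrose inverse of source features
matrix_b = np.random.rand(22, 9)   # target-image color features
product = np.matmul(matrix_m, matrix_b)  # all nine column products at once (9x9)
deviance = 1 - np.linalg.det(product)    # ~0 when source and target color spaces match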
Example #42
def fill(bin_img, size):
    """Identifies objects and fills objects that are less than size.

    Inputs:
    bin_img      = Binary image data
    size         = minimum object area size in pixels (integer)


    Returns:
    filtered_img = image with objects filled

    :param bin_img: numpy.ndarray
    :param size: int
    :return filtered_img: numpy.ndarray
    """
    params.device += 1

    # Make sure the image is binary
    if len(np.shape(bin_img)) != 2 or len(np.unique(bin_img)) != 2:
        fatal_error("Image is not binary")

    # Cast binary image to boolean
    bool_img = bin_img.astype(bool)

    # Find and fill contours
    bool_img = remove_small_objects(bool_img, size)

    # Cast boolean image to binary and make a copy of the binary image for returning
    filtered_img = np.copy(bool_img.astype(np.uint8) * 255)

    if params.debug == 'print':
        print_image(
            filtered_img,
            os.path.join(params.debug_outdir,
                         str(params.device) + '_fill' + str(size) + '.png'))
    elif params.debug == 'plot':
        plot_image(filtered_img, cmap='gray')

    return filtered_img
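A self-contained sketch of the boolean round-trip above, with remove_small_objects dropping a speck below the size threshold:
import numpy as np
from skimage.morphology import remove_small_objects

bool_img = np.zeros((20, 20), dtype=bool)
bool_img[0:2, 0:2] = True                           # 4-pixel speck
bool_img[5:15, 5:15] = True                         # 100-pixel object
cleaned = remove_small_objects(bool_img, 10)        # objects under 10 px are removed
filtered_img = np.copy(cleaned.astype(np.uint8) * 255)  # back to a 0/255 image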
Example #43
def rgb2gray_lab(rgb_img, channel):
    """Convert image from RGB colorspace to LAB colorspace. Returns the specified subchannel as a gray image.

    Inputs:
    rgb_img   = RGB image data
    channel   = color subchannel (l = lightness, a = green-magenta, b = blue-yellow)

    Returns:
    l | a | b = grayscale image from one LAB color channel

    :param rgb_img: numpy.ndarray
    :param channel: str
    :return channel: numpy.ndarray
    """
    # Auto-increment the device counter
    params.device += 1
    # The allowable channel inputs are l, a or b
    names = {"l": "lightness", "a": "green-magenta", "b": "blue-yellow"}
    channel = channel.lower()
    if channel not in names:
        fatal_error("Channel " + str(channel) + " is not l, a or b!")

    # Convert the input BGR image to LAB colorspace
    lab = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2LAB)
    # Split LAB channels
    l, a, b = cv2.split(lab)
    # Create a channel dictionary for lookups by channel name
    channels = {"l": l, "a": a, "b": b}

    if params.debug == "print":
        print_image(
            channels[channel],
            os.path.join(
                params.debug_outdir,
                str(params.device) + "_lab_" + names[channel] + ".png"))
    elif params.debug == "plot":
        plot_image(channels[channel], cmap="gray")

    return channels[channel]
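
# A minimal usage sketch, assuming cv2 is in scope; "plant.png" is a hypothetical
# image path. The a* ("green-magenta") channel is a common starting point for
# segmenting green plant tissue, which tends toward low a* values.
import cv2

rgb_img = cv2.imread("plant.png")   # hypothetical RGB (BGR-ordered) input image
a_channel = rgb2gray_lab(rgb_img=rgb_img, channel="a")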
Example #44
0
def rgb2gray_hsv(rgb_img, channel):
    """Convert an RGB color image to HSV colorspace and return a gray image (one channel).

    Inputs:
    rgb_img = RGB image data
    channel = color subchannel (h = hue, s = saturation, v = value/intensity/brightness)

    Returns:
    h | s | v = image from single HSV channel

    :param rgb_img: numpy.ndarray
    :param channel: str
    :return channel: numpy.ndarray
    """
    # Auto-increment the device counter
    params.device += 1

    # The allowable channel inputs are h, s or v
    names = {"h": "hue", "s": "saturation", "v": "value"}
    channel = channel.lower()
    if channel not in names:
        fatal_error("Channel " + str(channel) + " is not h, s or v!")

    # Convert the input BGR image to HSV colorspace
    hsv = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2HSV)
    # Split HSV channels
    h, s, v = cv2.split(hsv)
    # Create a channel dictionary for lookups by channel name
    channels = {"h": h, "s": s, "v": v}

    if params.debug == "print":
        print_image(
            channels[channel],
            os.path.join(
                params.debug_outdir,
                str(params.device) + "_hsv_" + names[channel] + ".png"))
    elif params.debug == "plot":
        plot_image(channels[channel], cmap="gray")

    return channels[channel]
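
# A minimal usage sketch, assuming cv2 is in scope; "plant.png" is a hypothetical
# image path. The saturation channel often separates colorful plant material from
# a gray pot or background.
import cv2

rgb_img = cv2.imread("plant.png")   # hypothetical RGB (BGR-ordered) input image
s_channel = rgb2gray_hsv(rgb_img=rgb_img, channel="s")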
Example #45
0
def flip(img, direction, device, debug=None):
    """Flip image.

    Inputs:
    img       = image to be flipped
    direction = "horizontal" or "vertical"
    device    = device counter
    debug     = None, print, or plot. Print = save to file, Plot = print to screen.

    Returns:
    device    = device number
    vh_img    = flipped image

    :param img: numpy array
    :param direction: str
    :param device: int
    :param debug: str
    :return device: int
    :return vh_img: numpy array
    """
    device += 1
    if direction == "vertical":
        vh_img = cv2.flip(img, 1)
    elif direction == "horizontal":
        vh_img = cv2.flip(img, 0)
    else:
        fatal_error(
            str(direction) +
            " is not a valid direction, must be horizontal or vertical")

    if debug == 'print':
        print_image(vh_img, (str(device) + "_flipped.png"))
    elif debug == 'plot':
        if len(np.shape(vh_img)) == 3:
            plot_image(vh_img)
        else:
            plot_image(vh_img, cmap='gray')

    return device, vh_img
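
# A minimal usage sketch, assuming numpy is in scope as np. This older signature
# threads the device counter and debug flag through explicitly rather than using
# the global params object of the newer API.
import numpy as np

img = np.zeros((10, 20, 3), dtype=np.uint8)
device = 0
device, flipped = flip(img, "horizontal", device, debug=None)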
Example #46
0
def plot_image(img, cmap=None):
    """Plot an image to the screen.

    :param img: numpy.ndarray
    :param cmap: str
    :return:
    """

    image_type = type(img)

    dimensions = numpy.shape(img)

    if image_type == numpy.ndarray:
        matplotlib.rcParams['figure.dpi'] = params.dpi
        # If the image is color, OpenCV stores it as BGR, so we plot it as RGB
        if len(dimensions) == 3:
            plt.figure()
            plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
            plt.show()

        elif cmap is None and len(dimensions) == 2:
            plt.figure()
            plt.imshow(img, cmap="gray")
            plt.show()

        elif cmap is not None and len(dimensions) == 2:
            plt.figure()
            plt.imshow(img, cmap=cmap)
            plt.show()

    elif image_type == matplotlib.figure.Figure:
        fatal_error(
            "Error, matplotlib Figure not supported. Instead try running without plot_image."
        )

    # Plot if the image is a plotnine ggplot image
    elif str(image_type) == "<class 'plotnine.ggplot.ggplot'>":
        print(img)
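
# A minimal usage sketch, assuming numpy is in scope as np: 2-D arrays default to
# the gray colormap, and any matplotlib colormap name can be passed instead.
import numpy as np

gradient = np.tile(np.arange(256, dtype=np.uint8), (64, 1))
plot_image(gradient)                   # plotted with cmap="gray" by default
plot_image(gradient, cmap="viridis")   # plotted with an explicit colormap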
Example #47
0
def colorize_masks(masks, colors):
    """Plot masks with different colors
    Inputs:
        masks    = list of masks to colorize
        colors   = list of colors (either keys from the color_dict or a list of custom tuples)

        :param masks: list
        :param colors: list
        :return colored_img: ndarray
        """

    # Users must enter the exact same number of colors as classes they'd like to color
    num_classes = len(masks)
    num_colors = len(colors)
    if not num_classes == num_colors:
        fatal_error("The number of colors provided doesn't match the number of class masks provided.")

    # Check to make sure user provided at least one mask and color
    if len(colors) == 0 or len(masks) == 0:
        fatal_error("At least one class mask and color must be provided.")

    # Dictionary of colors and the BGR values, based on some of the colors listed here:
    # https://en.wikipedia.org/wiki/X11_color_names
    color_dict = {'white': (255, 255, 255), 'black': (0, 0, 0), 'aqua': (0, 255, 255), 'blue': (255, 0, 0),
                  'blue violet': (228, 44, 138), 'brown': (41, 41, 168), 'chartreuse': (0, 255, 128),
                  'dark blue': (140, 0, 0), 'gray': (169, 169, 169), 'yellow': (0, 255, 255),
                  'turquoise': (210, 210, 64), 'red': (0, 0, 255), 'purple': (241, 33, 161), 'orange red': (0, 69, 255),
                  'orange': (0, 166, 255), 'lime': (0, 255, 0), 'lime green': (52, 205, 52), 'fuchsia': (255, 0, 255),
                  'crimson': (61, 20, 220), 'beige': (197, 220, 246), 'chocolate': (31, 105, 210),
                  'coral': (79, 128, 255), 'dark green': (0, 100, 0), 'dark orange': (0, 140, 255),
                  'green yellow': (46, 255, 174), 'light blue': (230, 218, 174), 'tomato': (72, 100, 255),
                  'slate gray': (143, 128, 113), 'gold': (0, 215, 255), 'goldenrod': (33, 166, 218),
                  'light green': (143, 238, 143), 'sea green': (77, 141, 46), 'dark red': (0, 0, 141),
                  'pink': (204, 192, 255), 'dark yellow': (0, 205, 255), 'green': (0, 255, 0)}

    ix, iy = np.shape(masks[0])
    colored_img = np.zeros((ix, iy, 3), dtype=np.uint8)
    # Assign pixels to the selected color

    for i in range(0, len(masks)):
        mask = np.copy(masks[i])
        mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
        if isinstance(colors[i], tuple):
            mask[masks[i] > 0] = colors[i]
        elif isinstance(colors[i], str):
            mask[masks[i] > 0] = color_dict[colors[i]]
        else:
            fatal_error("All elements of the 'colors' list must be either str or tuple")
        colored_img = colored_img + mask

    params.device += 1

    if params.debug == 'print':
        print_image(colored_img, os.path.join(params.debug_outdir, str(params.device) + '_classes_plot.png'))
    elif params.debug == 'plot':
        plot_image(colored_img)

    return colored_img
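
# A minimal usage sketch, assuming numpy is in scope as np: paint two binary class
# masks with a named color from color_dict and a custom BGR tuple.
import numpy as np

leaf_mask = np.zeros((50, 50), dtype=np.uint8)
stem_mask = np.zeros((50, 50), dtype=np.uint8)
leaf_mask[10:30, 10:30] = 255
stem_mask[30:45, 20:25] = 255
classes = colorize_masks(masks=[leaf_mask, stem_mask], colors=['green', (0, 0, 255)])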
Example #48
0
def binary(gray_img, threshold, max_value, object_type="light"):
    """Creates a binary image from a grayscale image based on the threshold value.

    Inputs:
    gray_img     = Grayscale image data
    threshold    = Threshold value (0-255)
    max_value    = value to apply above threshold (usually 255 = white)
    object_type  = "light" or "dark" (default: "light")
                   - If object is lighter than the background then standard thresholding is done
                   - If object is darker than the background then inverse thresholding is done

    Returns:
    bin_img      = Thresholded, binary image

    :param gray_img: numpy.ndarray
    :param threshold: int
    :param max_value: int
    :param object_type: str
    :return bin_img: numpy.ndarray
    """
    params.device += 1

    # Set the threshold method
    threshold_method = ""
    if object_type.upper() == "LIGHT":
        threshold_method = cv2.THRESH_BINARY
    elif object_type.upper() == "DARK":
        threshold_method = cv2.THRESH_BINARY_INV
    else:
        fatal_error('Object type ' + str(object_type) +
                    ' is not "light" or "dark"!')

    # Threshold the image
    bin_img = _call_threshold(gray_img, threshold, max_value, threshold_method,
                              "_binary_threshold_")

    return bin_img
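
# A minimal usage sketch, assuming numpy is in scope as np and the module's
# _call_threshold helper is available: threshold a bright object on a dark
# background with standard (non-inverse) thresholding.
import numpy as np

gray = np.zeros((50, 50), dtype=np.uint8)
gray[20:40, 20:40] = 200   # bright object on a dark background
bin_img = binary(gray_img=gray, threshold=128, max_value=255, object_type="light")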
Example #52
0
def within_frame(mask):
    """
    This function tests whether the plant touches the edge of the image, i.e. it is completely in the field of view.
    Input:
    mask = a binary image of 0 and nonzero values

    Returns:
    in_bounds = a boolean (True or False) confirming that the object does not touch the edge of the image

    :param mask: numpy.ndarray
    :return in_bounds: bool

    """

    # Check if object is touching image boundaries (QC)
    if len(np.shape(mask)) > 2 or len(np.unique(mask)) > 2:
        fatal_error("Mask should be a binary image of 0 and nonzero values.")

    # First column
    first_col = mask[:, 0]

    # Last column
    last_col = mask[:, -1]

    # First row
    first_row = mask[0, :]

    # Last row
    last_row = mask[-1, :]

    edges = np.concatenate([first_col, last_col, first_row, last_row])

    out_of_bounds = bool(np.count_nonzero(edges))
    in_bounds = not out_of_bounds

    return in_bounds
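
# A minimal usage sketch, assuming numpy is in scope as np: the check passes while
# the object is clear of every border and fails once it touches one.
import numpy as np

mask = np.zeros((50, 50), dtype=np.uint8)
mask[10:40, 10:40] = 255
print(within_frame(mask))   # True: object clear of every edge

mask[0, 10:40] = 255
print(within_frame(mask))   # False: object now touches the top row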
Example #54
0
def white_balance(img, mode='hist', roi=None):
    """Corrects the exposure of an image based on its histogram.

    Inputs:
    img     = An RGB image on which to perform the correction; correction is done on each channel and then reassembled.
              Alternatively a single channel can be input, but this is not recommended.
    mode    = 'hist' or 'max'
    roi     = A list of 4 points (x, y, width, height) that form the rectangular ROI of the white color standard.
              If a list of 4 points is not given, the whole image will be used.

    Returns:
    img     = Image after exposure correction

    :param img: numpy.ndarray
    :param mode: str
    :param roi: list
    :return finalcorrected: numpy.ndarray
    """
    params.device += 1

    ori_img = np.copy(img)

    if roi is not None:
        # The ROI must be a list of exactly 4 integers (x, y, width, height)
        roiint = all(isinstance(item, int) for item in roi)

        if len(roi) != 4 or not roiint:
            fatal_error('If ROI is used, ROI must be a list of 4 elements and all must be integers')

    # Use data_type (rather than shadowing the built-in type) for the histogram dtype
    if len(np.shape(img)) == 3:
        iy, ix, iz = np.shape(img)
        hmax = 255
        data_type = np.uint8
    else:
        iy, ix = np.shape(img)
        if img.dtype == 'uint8':
            hmax = 255
            data_type = np.uint8
        elif img.dtype == 'uint16':
            hmax = 65536
            data_type = np.uint16

    mask = np.zeros((iy, ix, 3), dtype=np.uint8)

    if roi is None:
        x = 0
        y = 0
        w = ix
        h = iy

    else:
        x = roi[0]
        y = roi[1]
        w = roi[2]
        h = roi[3]

    if len(np.shape(img)) == 3:
        cv2.rectangle(ori_img, (x, y), (x + w, y + h), (0, 255, 0), 3)
        c1 = img[:, :, 0]
        c2 = img[:, :, 1]
        c3 = img[:, :, 2]
        if mode.upper() == 'HIST':
            channel1 = _hist(c1, hmax, x, y, h, w, data_type)
            channel2 = _hist(c2, hmax, x, y, h, w, data_type)
            channel3 = _hist(c3, hmax, x, y, h, w, data_type)
        elif mode.upper() == 'MAX':
            channel1 = _max(c1, hmax, mask, x, y, h, w, data_type)
            channel2 = _max(c2, hmax, mask, x, y, h, w, data_type)
            channel3 = _max(c3, hmax, mask, x, y, h, w, data_type)
        else:
            fatal_error('Mode must be either "hist" or "max" but ' + mode + ' was input.')

        finalcorrected = np.dstack((channel1, channel2, channel3))

    else:
        cv2.rectangle(ori_img, (x, y), (x + w, y + h), (255, 255, 255), 3)
        if mode.upper() == 'HIST':
            finalcorrected = _hist(img, hmax, x, y, h, w, data_type)
        elif mode.upper() == 'MAX':
            finalcorrected = _max(img, hmax, mask, x, y, h, w, data_type)
        else:
            fatal_error('Mode must be either "hist" or "max" but ' + mode + ' was input.')

    if params.debug == 'print':
        print_image(ori_img, os.path.join(params.debug_outdir, str(params.device) + '_whitebalance_roi.png'))
        print_image(finalcorrected, os.path.join(params.debug_outdir, str(params.device) + '_whitebalance.png'))

    elif params.debug == 'plot':
        plot_image(ori_img, cmap='gray')
        plot_image(finalcorrected, cmap='gray')

    return finalcorrected
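
# A minimal usage sketch, assuming cv2 is in scope and the module's _hist/_max
# helpers are available; "plant.png" and the ROI coordinates of a white reference
# card are hypothetical.
import cv2

img = cv2.imread("plant.png")   # hypothetical RGB input image
corrected = white_balance(img, mode='hist', roi=[5, 5, 10, 10])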
Example #55
0
def custom_range(rgb_img, lower_thresh, upper_thresh, channel='gray'):
    """Creates a thresholded image and mask from an RGB image and threshold values.

    Inputs:
    rgb_img      = RGB image data
    lower_thresh = List of lower threshold values (0-255)
    upper_thresh = List of upper threshold values (0-255)
    channel      = Color-space channels of interest (RGB, HSV, LAB, or gray)

    Returns:
    mask         = Mask, binary image
    masked_img   = Masked image, keeping the part of image of interest

    :param rgb_img: numpy.ndarray
    :param lower_thresh: list
    :param upper_thresh: list
    :param channel: str
    :return mask: numpy.ndarray
    :return masked_img: numpy.ndarray
    """

    # Auto-increment the device counter
    params.device += 1

    if channel.upper() == 'HSV':

        # Check threshold inputs
        if not (len(lower_thresh) == 3 and len(upper_thresh) == 3):
            fatal_error("If using the HSV colorspace, 3 thresholds are needed for both lower_thresh and " +
                        "upper_thresh. If thresholding isn't needed for a channel, set lower_thresh=0 and " +
                        "upper_thresh=255")

        # Convert the RGB image to HSV colorspace
        hsv_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2HSV)

        # Separate channels
        hue = hsv_img[:, :, 0]
        saturation = hsv_img[:, :, 1]
        value = hsv_img[:, :, 2]

        # Make a mask for each channel
        h_mask = cv2.inRange(hue, lower_thresh[0], upper_thresh[0])
        s_mask = cv2.inRange(saturation, lower_thresh[1], upper_thresh[1])
        v_mask = cv2.inRange(value, lower_thresh[2], upper_thresh[2])

        # Apply the masks to the image
        result = cv2.bitwise_and(rgb_img, rgb_img, mask=h_mask)
        result = cv2.bitwise_and(result, result, mask=s_mask)
        masked_img = cv2.bitwise_and(result, result, mask=v_mask)

        # Combine masks
        mask = cv2.bitwise_and(s_mask, h_mask)
        mask = cv2.bitwise_and(mask, v_mask)

    elif channel.upper() == 'RGB':

        # Check threshold inputs
        if not (len(lower_thresh) == 3 and len(upper_thresh) == 3):
            fatal_error("If using the RGB colorspace, 3 thresholds are needed for both lower_thresh and " +
                        "upper_thresh. If thresholding isn't needed for a channel, set lower_thresh=0 and " +
                        "upper_thresh=255")

        # Separate channels (pcv.readimage reads RGB images in as BGR)
        blue = rgb_img[:, :, 0]
        green = rgb_img[:, :, 1]
        red = rgb_img[:, :, 2]

        # Make a mask for each channel
        b_mask = cv2.inRange(blue, lower_thresh[0], upper_thresh[0])
        g_mask = cv2.inRange(green, lower_thresh[1], upper_thresh[1])
        r_mask = cv2.inRange(red, lower_thresh[2], upper_thresh[2])

        # Apply the masks to the image
        result = cv2.bitwise_and(rgb_img, rgb_img, mask=b_mask)
        result = cv2.bitwise_and(result, result, mask=g_mask)
        masked_img = cv2.bitwise_and(result, result, mask=r_mask)

        # Combine masks
        mask = cv2.bitwise_and(b_mask, g_mask)
        mask = cv2.bitwise_and(mask, r_mask)

    elif channel.upper() == 'LAB':

        # Check threshold inputs
        if not (len(lower_thresh) == 3 and len(upper_thresh) == 3):
            fatal_error("If using the LAB colorspace, 3 thresholds are needed for both lower_thresh and " +
                        "upper_thresh. If thresholding isn't needed for a channel, set lower_thresh=0 and " +
                        "upper_thresh=255")

        # Convert the RGB image to LAB colorspace
        lab_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2LAB)

        # Separate channels (pcv.readimage reads RGB images in as BGR)
        lightness = lab_img[:, :, 0]
        green_magenta = lab_img[:, :, 1]
        blue_yellow = lab_img[:, :, 2]

        # Make a mask for each channel
        l_mask = cv2.inRange(lightness, lower_thresh[0], upper_thresh[0])
        gm_mask = cv2.inRange(green_magenta, lower_thresh[1], upper_thresh[1])
        by_mask = cv2.inRange(blue_yellow, lower_thresh[2], upper_thresh[2])

        # Apply the masks to the image
        result = cv2.bitwise_and(rgb_img, rgb_img, mask=l_mask)
        result = cv2.bitwise_and(result, result, mask=gm_mask)
        masked_img = cv2.bitwise_and(result, result, mask=by_mask)

        # Combine masks
        mask = cv2.bitwise_and(l_mask, gm_mask)
        mask = cv2.bitwise_and(mask, by_mask)

    elif channel.upper() == 'GRAY' or channel.upper() == 'GREY':

        # Check threshold input
        if not (len(lower_thresh) == 1 and len(upper_thresh) == 1):
            fatal_error("If using a grayscale colorspace, 1 threshold is needed for both the " +
                        "lower_thresh and upper_thresh.")
        if len(np.shape(rgb_img)) == 3:
            # Convert RGB image to grayscale colorspace
            gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
        else:
            gray_img = rgb_img

        # Make a mask
        mask = cv2.inRange(gray_img, lower_thresh[0], upper_thresh[0])

        # Apply the masks to the image
        masked_img = cv2.bitwise_and(rgb_img, rgb_img, mask=mask)

    else:
        fatal_error(str(channel) + " is not a valid colorspace. Channel must be either 'RGB', 'HSV', 'LAB', or 'gray'.")

    # Print or plot the binary image if debug is on
    if params.debug == 'print':
        print_image(masked_img, os.path.join(params.debug_outdir,
                                             str(params.device) + '_' + channel + '_custom_thresh.png'))
        print_image(mask, os.path.join(params.debug_outdir,
                                       str(params.device) + '_' + channel + '_custom_thresh_mask.png'))
    elif params.debug == 'plot':
        plot_image(masked_img)
        plot_image(mask)

    return mask, masked_img
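
# A minimal usage sketch, assuming cv2 is in scope; "plant.png" is a hypothetical
# image path. Keep roughly green pixels in HSV space (hue ~35-85 on OpenCV's
# 0-179 scale, with moderate-to-high saturation and value).
import cv2

rgb_img = cv2.imread("plant.png")   # hypothetical RGB (BGR-ordered) input image
mask, masked = custom_range(rgb_img=rgb_img, lower_thresh=[35, 60, 60],
                            upper_thresh=[85, 255, 255], channel='HSV')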
Example #56
0
def triangle(gray_img, max_value, object_type="light", xstep=1):
    """Creates a binary image from a grayscale image using Zack et al.'s (1977) thresholding.

    Inputs:
    gray_img     = Grayscale image data
    max_value    = value to apply above threshold (usually 255 = white)
    object_type  = "light" or "dark" (default: "light")
                   - If object is lighter than the background then standard thresholding is done
                   - If object is darker than the background then inverse thresholding is done
    xstep        = value to move along x-axis to determine the points from which to calculate distance (recommended
                   to start at 1 and change if needed)

    Returns:
    bin_img      = Thresholded, binary image

    :param gray_img: numpy.ndarray
    :param max_value: int
    :param object_type: str
    :param xstep: int
    :return bin_img: numpy.ndarray
    """
    params.device += 1

    # Calculate automatic threshold value based on triangle algorithm
    hist = cv2.calcHist([gray_img], [0], None, [256], [0, 256])

    # Make histogram one array
    newhist = []
    for item in hist:
        newhist.extend(item)

    # Detect peaks
    show = False
    if params.debug == "plot":
        show = True
    ind = _detect_peaks(newhist, mph=None, mpd=1, show=show)

    # Find point corresponding to highest peak
    # Find intensity value (y) of highest peak
    max_peak_int = max(list(newhist[i] for i in ind))
    # Find value (x) of highest peak
    max_peak = [i for i, x in enumerate(newhist) if x == max(newhist)]
    # Combine x,y
    max_peak_xy = [max_peak[0], max_peak_int]

    # Find final point at end of long tail
    end_x = len(newhist) - 1
    end_y = newhist[end_x]
    end_xy = [end_x, end_y]

    # Define the known points
    points = [max_peak_xy, end_xy]
    x_coords, y_coords = zip(*points)

    # Get threshold value
    peaks = []
    dists = []

    for i in range(x_coords[0], x_coords[1], xstep):
        distance = (((x_coords[1] - x_coords[0]) * (y_coords[0] - newhist[i])) -
                    ((x_coords[0] - i) * (y_coords[1] - y_coords[0]))) / math.sqrt(
            (float(x_coords[1]) - float(x_coords[0])) *
            (float(x_coords[1]) - float(x_coords[0])) +
            ((float(y_coords[1]) - float(y_coords[0])) *
             (float(y_coords[1]) - float(y_coords[0]))))
        peaks.append(i)
        dists.append(distance)
    autothresh = [peaks[x] for x in [i for i, x in enumerate(list(dists)) if x == max(list(dists))]]
    autothreshval = autothresh[0]

    # Set the threshold method. THRESH_OTSU is deliberately not added here, since it
    # would make OpenCV ignore the triangle threshold value computed above.
    threshold_method = ""
    if object_type.upper() == "LIGHT":
        threshold_method = cv2.THRESH_BINARY
    elif object_type.upper() == "DARK":
        threshold_method = cv2.THRESH_BINARY_INV
    else:
        fatal_error('Object type ' + str(object_type) + ' is not "light" or "dark"!')

    # Threshold the image
    bin_img = _call_threshold(gray_img, autothreshval, max_value, threshold_method, "_triangle_threshold_")

    # Additional figures created by this method, if debug is on
    if params.debug is not None:
        if params.debug == 'print':
            plt.plot(hist)
            plt.title('Threshold value = {t}'.format(t=autothreshval))
            plt.axis([0, 256, 0, max(newhist)])
            plt.grid(True)
            fig_name_hist = os.path.join(params.debug_outdir,
                                         str(params.device) + '_triangle_thresh_hist_' + str(autothreshval) + ".png")
            # write the figure to current directory
            plt.savefig(fig_name_hist, dpi=params.dpi)
            # close pyplot plotting window
            plt.clf()
        elif params.debug == 'plot':
            print('Threshold value = {t}'.format(t=autothreshval))
            plt.plot(hist)
            plt.axis([0, 256, 0, max(newhist)])
            plt.grid(True)
            plt.show()

    return bin_img
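
# A minimal usage sketch, assuming numpy is in scope as np and the module-level
# helpers (_detect_peaks, _call_threshold) behave as used above: auto-threshold a
# synthetic bimodal image with a dark background and a bright object.
import numpy as np

rng = np.random.default_rng(1)
gray = rng.normal(60, 10, (100, 100)).clip(0, 255).astype(np.uint8)
gray[40:60, 40:60] = 200   # bright object
bin_img = triangle(gray_img=gray, max_value=255, object_type="light", xstep=1)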
Example #57
0
def report_size_marker_area(img, roi_contour, roi_hierarchy, marker='define', objcolor='dark', thresh_channel=None,
                            thresh=None):
    """Detects a size marker in a specified region and reports its size and eccentricity

    Inputs:
    img             = An RGB or grayscale image to plot the marker object on
    roi_contour     = A region of interest contour (e.g. output from pcv.roi.rectangle or other methods)
    roi_hierarchy   = A region of interest contour hierarchy (e.g. output from pcv.roi.rectangle or other methods)
    marker          = 'define' or 'detect'. If 'define', the ROI itself is used as the marker; if 'detect', the
                      marker is detected within the ROI
    objcolor        = Object color is 'dark' or 'light' (is the marker darker or lighter than the background)
    thresh_channel  = 'h', 's', or 'v' for hue, saturation or value
    thresh          = Binary threshold value (integer)

    Returns:
    analysis_image  = List of output images

    :param img: numpy.ndarray
    :param roi_contour: list
    :param roi_hierarchy: numpy.ndarray
    :param marker: str
    :param objcolor: str
    :param thresh_channel: str
    :param thresh: int
    :return analysis_image: list
    """

    params.device += 1
    # Make a copy of the reference image
    ref_img = np.copy(img)
    # If the reference image is grayscale convert it to color
    if len(np.shape(ref_img)) == 2:
        ref_img = cv2.cvtColor(ref_img, cv2.COLOR_GRAY2BGR)

    # Marker components
    # If the marker type is "defined" then the marker_mask and marker_contours are equal to the input ROI
    # Initialize a binary image
    roi_mask = np.zeros(np.shape(img)[:2], dtype=np.uint8)
    # Draw the filled ROI on the mask
    cv2.drawContours(roi_mask, roi_contour, -1, (255), -1)
    marker_mask = []
    marker_contour = []

    # If the marker type is "detect" then we will use the ROI to isolate marker contours from the input image
    if marker.upper() == 'DETECT':
        # We need to convert the input image into one of the HSV channels and then threshold it
        if thresh_channel is not None and thresh is not None:
            # Mask the input image
            masked = apply_mask(rgb_img=ref_img, mask=roi_mask, mask_color="black")
            # Convert the masked image to hue, saturation, or value
            marker_hsv = rgb2gray_hsv(rgb_img=masked, channel=thresh_channel)
            # Threshold the HSV image
            marker_bin = binary_threshold(gray_img=marker_hsv, threshold=thresh, max_value=255, object_type=objcolor)
            # Identify contours in the masked image
            contours, hierarchy = find_objects(img=ref_img, mask=marker_bin)
            # Filter marker contours using the input ROI
            kept_contours, kept_hierarchy, kept_mask, obj_area = roi_objects(img=ref_img, object_contour=contours,
                                                                             obj_hierarchy=hierarchy,
                                                                             roi_contour=roi_contour,
                                                                             roi_hierarchy=roi_hierarchy,
                                                                             roi_type="partial")
            # If more than one contour is detected, combine them into one
            # These become the marker contour and mask
            marker_contour, marker_mask = object_composition(img=ref_img, contours=kept_contours,
                                                             hierarchy=kept_hierarchy)
        else:
            fatal_error('thresh_channel and thresh must be defined in detect mode')
    elif marker.upper() == "DEFINE":
        # Identify contours in the masked image
        contours, hierarchy = find_objects(img=ref_img, mask=roi_mask)
        # If more than one contour is detected, combine them into one
        # These become the marker contour and mask
        marker_contour, marker_mask = object_composition(img=ref_img, contours=contours, hierarchy=hierarchy)
    else:
        fatal_error("marker must be either 'define' or 'detect' but {0} was provided.".format(marker))

    # Calculate the moments of the defined marker region
    m = cv2.moments(marker_mask, binaryImage=True)
    # Calculate the marker area
    marker_area = m['m00']

    # Fit a bounding ellipse to the marker
    center, axes, angle = cv2.fitEllipse(marker_contour)
    major_axis = np.argmax(axes)
    minor_axis = 1 - major_axis
    major_axis_length = axes[major_axis]
    minor_axis_length = axes[minor_axis]
    # Calculate the bounding ellipse eccentricity
    eccentricity = np.sqrt(1 - (axes[minor_axis] / axes[major_axis]) ** 2)

    # Make a list to store output images
    analysis_image = []
    cv2.drawContours(ref_img, marker_contour, -1, (255, 0, 0), 5)
    analysis_image.append(ref_img)
    if params.debug == 'print':
        print_image(ref_img, os.path.join(params.debug_outdir, str(params.device) + '_marker_shape.png'))
    elif params.debug == 'plot':
        plot_image(ref_img)

    outputs.add_observation(variable='marker_area', trait='marker area',
                            method='plantcv.plantcv.report_size_marker_area', scale='pixels', datatype=int,
                            value=marker_area, label='pixels')
    outputs.add_observation(variable='marker_ellipse_major_axis', trait='marker ellipse major axis length',
                            method='plantcv.plantcv.report_size_marker_area', scale='pixels', datatype=int,
                            value=major_axis_length, label='pixels')
    outputs.add_observation(variable='marker_ellipse_minor_axis', trait='marker ellipse minor axis length',
                            method='plantcv.plantcv.report_size_marker_area', scale='pixels', datatype=int,
                            value=minor_axis_length, label='pixels')
    outputs.add_observation(variable='marker_ellipse_eccentricity', trait='marker ellipse eccentricity',
                            method='plantcv.plantcv.report_size_marker_area', scale='none', datatype=float,
                            value=eccentricity, label='none')

    # Store images
    outputs.images.append(analysis_image)

    return analysis_image
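
# A minimal usage sketch, assuming cv2 is in scope and PlantCV's pcv.roi.rectangle
# helper is available; the image path and ROI coordinates are hypothetical. In
# 'define' mode the ROI itself is measured as the marker.
import cv2
from plantcv import plantcv as pcv

img = cv2.imread("image.png")   # hypothetical input image
roi_contour, roi_hierarchy = pcv.roi.rectangle(img=img, x=500, y=500, h=100, w=100)
images = report_size_marker_area(img=img, roi_contour=roi_contour,
                                 roi_hierarchy=roi_hierarchy, marker='define')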
Example #58
0
def apply_transformation_matrix(source_img, target_img, transformation_matrix):
    """ Apply the transformation matrix to the source_image.

    Inputs:
    source_img      = an RGB image to be corrected to the target color space
    target_img      = an RGB image with the target color space
    transformation_matrix        = a 9x9 matrix of tranformation coefficients

    Outputs:
    corrected_img    = an RGB image in correct color space

    :param source_img: numpy.ndarray
    :param target_img: numpy.ndarray
    :param transformation_matrix: numpy.ndarray
    :return corrected_img: numpy.ndarray
    """
    # check transformation_matrix for 9x9
    if np.shape(transformation_matrix) != (9, 9):
        fatal_error("transformation_matrix must be a 9x9 matrix of transformation coefficients.")
    # Check for RGB input
    if len(np.shape(source_img)) != 3:
        fatal_error("Source_img is not an RGB image.")

    # Autoincrement the device counter
    params.device += 1

    # split transformation_matrix
    red, green, blue, red2, green2, blue2, red3, green3, blue3 = np.split(transformation_matrix, 9, 1)

    # find linear, square, and cubic values of source_img color channels
    source_b, source_g, source_r = cv2.split(source_img)
    source_b2 = np.square(source_b)
    source_b3 = np.power(source_b, 3)
    source_g2 = np.square(source_g)
    source_g3 = np.power(source_g, 3)
    source_r2 = np.square(source_r)
    source_r3 = np.power(source_r, 3)

    # apply linear model to source color channels
    b = 0 + source_r * blue[0] + source_g * blue[1] + source_b * blue[2] + source_r2 * blue[3] + source_g2 * blue[
        4] + source_b2 * blue[5] + source_r3 * blue[6] + source_g3 * blue[7] + source_b3 * blue[8]
    g = 0 + source_r * green[0] + source_g * green[1] + source_b * green[2] + source_r2 * green[3] + source_g2 * green[
        4] + source_b2 * green[5] + source_r3 * green[6] + source_g3 * green[7] + source_b3 * green[8]
    r = 0 + source_r * red[0] + source_g * red[1] + source_b * red[2] + source_r2 * red[3] + source_g2 * red[
        4] + source_b2 * red[5] + source_r3 * red[6] + source_g3 * red[7] + source_b3 * red[8]

    # merge corrected color channels onto source_image
    bgr = [b, g, r]
    corrected_img = cv2.merge(bgr)

    # round corrected_img elements to be within range and of the correct data type
    corrected_img = np.rint(corrected_img)
    corrected_img[np.where(corrected_img > 255)] = 255
    corrected_img = corrected_img.astype(np.uint8)

    if params.debug == "print":
        # If debug is print, save the image to a file
        print_image(corrected_img, os.path.join(params.debug_outdir, str(params.device) + "_corrected.png"))
    elif params.debug == "plot":
        # If debug is plot, print a horizontal view of source_img, corrected_img, and target_img to the plotting device
        # plot horizontal comparison of source_img, corrected_img (with rounded elements) and target_img
        plot_image(np.hstack([source_img, corrected_img, target_img]))

    # return corrected_img
    return corrected_img
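
# A minimal usage sketch, assuming cv2 and numpy are in scope; the image paths are
# hypothetical and an identity matrix stands in for a transformation_matrix
# produced by calc_transformation_matrix (identity leaves the source unchanged).
import cv2
import numpy as np

source_img = cv2.imread("source.png")   # hypothetical source image
target_img = cv2.imread("target.png")   # hypothetical target image
transformation_matrix = np.eye(9)       # stand-in for calc_transformation_matrix output
corrected = apply_transformation_matrix(source_img, target_img, transformation_matrix)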
Example #59
0
def find_color_card(rgb_img, threshold_type='adaptgauss', threshvalue=125, blurry=False, background='dark'):
    """Automatically detects a color card and output info to use in create_color_card_mask function

    Inputs:
    rgb_img        = Input RGB image data containing a color card.
    threshold_type = Threshold method, either 'normal', 'otsu', or 'adaptgauss', optional (default 'adaptgauss')
    threshvalue    = Thresholding value, optional (default 125)
    blurry         = Bool (default False) if True then image sharpening applied
    background     = Type of image background either 'dark' or 'light' (default 'dark'); if 'light' then histogram
                        expansion applied to better detect edges, but histogram expansion will be hindered if there
                        is a dark background

    Returns:
    df             = Dataframe containing information about the filtered contours
    start_coord    = Two element tuple of starting coordinates, location of the top left pixel detected
    spacing        = Two element tuple of spacing between centers of chips

    :param rgb_img: numpy.ndarray
    :param threshold_type: str
    :param threshvalue: int
    :param blurry: bool
    :param background: str
    :return df: pandas.core.frame.DataFrame
    :return start_coord: tuple
    :return spacing: tuple
    """
    # Imports
    import skimage
    import pandas as pd
    from scipy.spatial.distance import squareform, pdist

    # Get image attributes
    height, width, channels = rgb_img.shape
    totalpx = float(height * width)

    # Minimum and maximum square size based upon 12 MP image
    minarea = 1000. / 12000000. * totalpx
    maxarea = 8000000. / 12000000. * totalpx

    # Create gray image for further processing
    gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)

    # Laplacian Fourier Transform detection of blurriness
    blurfactor = cv2.Laplacian(gray_img, cv2.CV_64F).var()

    # If image is blurry then try to deblur using kernel
    if blurry:
        # from https://www.packtpub.com/mapt/book/Application+Development/9781785283932/2/ch02lvl1sec22/Sharpening
        kernel = np.array([[-1, -1, -1, -1, -1],
                           [-1, 2, 2, 2, -1],
                           [-1, 2, 8, 2, -1],
                           [-1, 2, 2, 2, -1],
                           [-1, -1, -1, -1, -1]]) / 8.0
        # Store result back out for further processing
        gray_img = cv2.filter2D(gray_img, -1, kernel)

    # In darker samples, expanding the histogram hinders finding the squares due to problems with the otsu
    # thresholding. If the image has a bright background, apply CLAHE histogram expansion
    if background == 'light':
        clahe = cv2.createCLAHE(clipLimit=3.25, tileGridSize=(4, 4))
        # apply CLAHE histogram expansion to find squares better with canny edge detection
        gray_img = clahe.apply(gray_img)
    elif background != 'dark':
        fatal_error('Background parameter ' + str(background) + ' is not "light" or "dark"!')

    # Thresholding
    if threshold_type == "otsu":
        # Blur slightly so defects on card squares and background patterns are less likely to be picked up
        gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
        ret, threshold = cv2.threshold(gaussian, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    elif threshold_type == "normal":
        # Blur slightly so defects on card squares and background patterns are less likely to be picked up
        gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
        ret, threshold = cv2.threshold(gaussian, threshvalue, 255, cv2.THRESH_BINARY)
    elif threshold_type == "adaptgauss":
        # Blur slightly so defects on card squares and background patterns are less likely to be picked up
        gaussian = cv2.GaussianBlur(gray_img, (11, 11), 0)
        threshold = cv2.adaptiveThreshold(gaussian, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                          cv2.THRESH_BINARY_INV, 51, 2)
    else:
        fatal_error('Threshold ' + str(threshold_type) + ' is not "otsu", "normal", or "adaptgauss"!')

    # Apply Canny edge detection to the thresholded image
    edges = skimage.feature.canny(threshold)
    edges = edges.astype('uint8')

    # Compute contours to find the squares of the card
    contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    # Variable of which contour is which
    mindex = []
    # Variable to store moments
    mu = []
    # Variable to x,y coordinates in tuples
    mc = []
    # Variable to x coordinate as integer
    mx = []
    # Variable to y coordinate as integer
    my = []
    # Variable to store area
    marea = []
    # Variable to store whether something is a square (1) or not (0)
    msquare = []
    # Variable to store square approximation coordinates
    msquarecoords = []
    # Variable to store child hierarchy element
    mchild = []
    # Fitted rectangle height
    mheight = []
    # Fitted rectangle width
    mwidth = []
    # Ratio of height/width
    mwhratio = []

    # Extract moments from contour image
    for x in range(0, len(contours)):
        mu.append(cv2.moments(contours[x]))
        marea.append(cv2.contourArea(contours[x]))
        mchild.append(int(hierarchy[0][x][2]))
        mindex.append(x)

    # Cycle through moment data and compute location for each moment
    for m in mu:
        if m['m00'] != 0:  # This is the area term for a moment
            mc.append((int(m['m10'] / m['m00']), int(m['m01'] / m['m00'])))
            mx.append(int(m['m10'] / m['m00']))
            my.append(int(m['m01'] / m['m00']))
        else:
            mc.append((0, 0))
            mx.append(0)
            my.append(0)

    # Loop over our contours and extract data about them
    for index, c in enumerate(contours):
        # Area isn't 0, but greater than min-area and less than max-area
        if marea[index] != 0 and minarea < marea[index] < maxarea:
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.1 * peri, True)
            center, wh, angle = cv2.minAreaRect(c)  # Rotated rectangle
            mwidth.append(wh[0])
            mheight.append(wh[1])
            mwhratio.append(wh[0] / wh[1])
            msquare.append(len(approx))
            # If the approx contour has 4 or 5 points then we can assume we have a 4-sided object
            if len(approx) == 4 or len(approx) == 5:
                msquarecoords.append(approx)
            else:  # It's not square
                msquarecoords.append(0)
        else:  # Contour has area of 0, not interesting
            msquare.append(0)
            msquarecoords.append(0)
            mwidth.append(0)
            mheight.append(0)
            mwhratio.append(0)

    # Make a pandas df from data for filtering out junk
    locarea = {'index': mindex, 'X': mx, 'Y': my, 'width': mwidth, 'height': mheight, 'WHratio': mwhratio,
               'Area': marea, 'square': msquare, 'child': mchild}
    df = pd.DataFrame(locarea)

    # Add calculated blur factor to output
    df['blurriness'] = blurfactor

    # Filter df for attributes that would isolate squares of reasonable size
    df = df[(df['Area'] > minarea) & (df['Area'] < maxarea) & (df['child'] != -1) &
            (df['square'].isin([4, 5])) & (df['WHratio'] < 1.2) & (df['WHratio'] > 0.85)]

    # Filter nested squares from the dataframe; otherwise the median skews toward the smaller nested squares
    df = df[~(df['index'].isin(df['index'] + 1))]

    # Count up squares that are within a given radius; more squares = more likelihood of them being the card
    # Median square width times 6 gives the proximity radius for searching for similar squares
    median_sq_width_px = df["width"].median()

    # Squares that are within 6 widths of the current square
    pixeldist = median_sq_width_px * 6
    # Computes euclidean distance matrix for the x and y contour centroids
    distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']])))
    # For each square, count how many other squares are within pixeldist pixels
    distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)

    # Append distprox summary to dataframe
    df = df.assign(distprox=distmatrixflat.values)

    # Compute how similar in area the squares are; lots of similar values indicates a card
    # isolate area measurements
    filtered_area = df['Area']
    # Create empty matrix for storing comparisons
    sizecomp = np.zeros((len(filtered_area), len(filtered_area)))
    # Double loop through all areas to compare to each other
    for p in range(0, len(filtered_area)):
        for o in range(0, len(filtered_area)):
            big = max(filtered_area.iloc[p], filtered_area.iloc[o])
            small = min(filtered_area.iloc[p], filtered_area.iloc[o])
            pct = 100. * (small / big)
            sizecomp[p][o] = pct

    # How many comparisons given 90% square similarity
    sizematrix = pd.DataFrame(sizecomp).apply(lambda sim: sim[sim >= 90].count() - 1, axis=1)

    # Append sizeprox summary to dataframe
    df = df.assign(sizeprox=sizematrix.values)

    # Reorder dataframe for better printing
    df = df[['index', 'X', 'Y', 'width', 'height', 'WHratio', 'Area', 'square', 'child',
             'blurriness', 'distprox', 'sizeprox']]

    # Loosely filter for size and distance (relative size to median)
    minsqwidth = median_sq_width_px * 0.80
    maxsqwidth = median_sq_width_px * 1.2
    df = df[(df['distprox'] >= 5) & (df['sizeprox'] >= 5) & (df['width'] > minsqwidth) &
            (df['width'] < maxsqwidth)]

    # Filter for proximity again to root out stragglers
    # Find and count up squares that are within a given radius;
    # more squares = more likelihood of them being the card
    # Median square width times 5 gives the proximity radius for searching for similar squares
    median_sq_width_px = df["width"].median()

    # Squares that are within 5 widths of the current square
    pixeldist = median_sq_width_px * 5
    # Computes euclidean distance matrix for the x and y contour centroids
    distmatrix = pd.DataFrame(squareform(pdist(df[['X', 'Y']])))
    # For each square, count how many other squares are within pixeldist pixels
    distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)

    # Append distprox summary to dataframe
    df = df.assign(distprox=distmatrixflat.values)

    # Filter results for distance proximity to other squares
    df = df[(df['distprox'] >= 4)]
    # Coerce X and Y to numeric; to_numeric with errors='coerce' replaces non-numeric values with NaN
    df['X'] = pd.to_numeric(df['X'], errors='coerce')
    df['Y'] = pd.to_numeric(df['Y'], errors='coerce')

    # Remove NaN
    df = df.dropna()

    # After dropna, an empty dataframe means no card squares survived the filters
    if df.empty:
        fatal_error('No color card found under current parameters')
    else:
        # Extract the starting coordinate
        start_coord = (df['X'].min(), df['Y'].min())

        # Calculate the range
        spacingx_short = (df['X'].max() - df['X'].min()) / 3
        spacingy_short = (df['Y'].max() - df['Y'].min()) / 3
        spacingx_long = (df['X'].max() - df['X'].min()) / 5
        spacingy_long = (df['Y'].max() - df['Y'].min()) / 5
        # Chip spacing since 4x6 card assumed
        spacing_short = min(spacingx_short, spacingy_short)
        spacing_long = max(spacingx_long, spacingy_long)
        # Smaller spacing measurement might have a chip missing
        spacing = int(max(spacing_short, spacing_long))
        spacing = (spacing, spacing)

    return df, start_coord, spacing
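
# A minimal usage sketch, assuming cv2 is in scope and PlantCV's
# pcv.transform.create_color_card_mask helper is available; "card.png" is a
# hypothetical image path, and the nrows/ncols values assume a standard 4x6 card
# (swap them if the card is rotated).
import cv2
from plantcv import plantcv as pcv

rgb_img = cv2.imread("card.png")   # hypothetical image containing a color card
df, start_coord, spacing = find_color_card(rgb_img=rgb_img, background='dark')
mask = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=10,
                                            start_coord=start_coord,
                                            spacing=spacing, nrows=6, ncols=4)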