def main(path, imagename):
    args = {'names': 'names.txt', 'outdir': './output-images'}
    #Read image
    img1, path, filename = pcv.readimage(path + imagename, "native")
    #pcv.params.debug=args['debug']
    #img1 = pcv.white_balance(img,roi=(400,800,200,200))
    #img1 = cv2.resize(img1,(4000,2000))
    shift1 = pcv.shift_img(img1, 10, 'top')
    img1 = shift1
    a = pcv.rgb2gray_lab(img1, 'a')
    img_binary = pcv.threshold.binary(a, 120, 255, 'dark')
    fill_image = pcv.fill(img_binary, 10)
    dilated = pcv.dilate(fill_image, 1, 1)
    id_objects, obj_hierarchy = pcv.find_objects(img1, dilated)
    roi_contour, roi_hierarchy = pcv.roi.rectangle(4000, 2000, -2000, -4000,
                                                   img1)
    #print(roi_contour)
    roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img1, 'partial', roi_contour, roi_hierarchy, id_objects, obj_hierarchy)
    clusters_i, contours, hierarchies = pcv.cluster_contours(
        img1, roi_objects, roi_obj_hierarchy, 1, 4)
    # pcv.params.debug = "print"
    out = args['outdir']
    names = args['names']
    output_path = pcv.cluster_contour_splitimg(img1,
                                               clusters_i,
                                               contours,
                                               hierarchies,
                                               out,
                                               file=filename,
                                               filenames=names)
    def read_dot(self, imageread):

        img1 = imageread
        device, img1gray = pcv.rgb2gray(img1, 0)
        img1hsv = cv2.cvtColor(img1, cv2.COLOR_BGR2HSV)
        dev, img_binary = pcv.binary_threshold(img1gray, 1, 255, 'light', 0)
        device, id_objects, obj_hierarchy = pcv.find_objects(
            img1, img_binary, 0)

        device, roi, roi_hierarchy = pcv.define_roi(img1,
                                                    shape="rectangle",
                                                    device=device,
                                                    roi_input="default",
                                                    adjust=False,
                                                    x_adj=600,
                                                    y_adj=600,
                                                    w_adj=1200,
                                                    h_adj=1200)
        device, roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
            img1, "partial", roi, roi_hierarchy, id_objects, obj_hierarchy,
            device)

        device, clusters_i, contours, obj_hierarchy = pcv.cluster_contours(
            device=device,
            img=img1,
            roi_objects=roi_objects,
            roi_obj_hierarchy=roi_obj_hierarchy,
            nrow=1,
            ncol=int(3))
        dotQ = list()
        obj_hierarchy = obj_hierarchy[0]
        for clusters1 in clusters_i:
            for contourtocheck in clusters1:
                if obj_hierarchy[contourtocheck][2] != -1:
                    if cv2.contourArea(
                            contours[obj_hierarchy[contourtocheck][2]]) < 20:
                        obj_hierarchy[contourtocheck][2] = -1
        for foundcontour in clusters_i:
            hierarchycontours = [obj_hierarchy[j][2] for j in foundcontour]
            # True if any contour in the cluster still has a child contour (a dot)
            dotQ.append(not all(j == -1 for j in hierarchycontours))
        return dotQ
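# A minimal illustration (not part of the original script) of the hierarchy test
# used in read_dot above. In OpenCV's contour hierarchy each entry is
# [next, previous, first_child, parent]; index 2 is the first child and -1 means
# the contour has no child. A cluster is flagged True when at least one of its
# contours still has a child contour (the printed "dot").
import numpy as np

toy_hierarchy = np.array([[1, -1, 2, -1],    # contour 0: outline with child 2
                          [-1, 0, -1, -1],   # contour 1: no child
                          [-1, -1, -1, 0]])  # contour 2: child of contour 0
toy_clusters = [[0], [1]]

dot_flags = [any(toy_hierarchy[c][2] != -1 for c in cluster)
             for cluster in toy_clusters]
print(dot_flags)  # [True, False]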
Example #3
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)

    pcv.params.debug = args.debug  # set debug mode

    # STEP 1: Check if this is a night image. Some images in these datasets were
    # captured at night, even though nothing is visible. To make sure the image
    # was not taken at night, we check that it isn't mostly dark (0=black, 255=white).
    # If it is a night image, a fatal error is thrown and the workflow stops.

    if np.average(img) < 50:
        pcv.fatal_error("Night Image")
    else:
        pass

    # STEP 2: Normalize the white color so you can later
    # compare color between images.
    # Inputs:
    #   img = image object, RGB colorspace
    #   roi = region for white reference, if none uses the whole image,
    #         otherwise (x position, y position, box width, box height)

    # white balance image based on white toughspot

    #img1 = pcv.white_balance(img=img,roi=(400,800,200,200))
    img1 = pcv.white_balance(img=img, mode='hist', roi=None)

    # STEP 3: Rotate the image
    # Inputs:
    #   img = image object, RGB color space
    #   rotation_deg = Rotation angle in degrees, can be negative, positive values 
    #                  will move counter-clockwise 
    #   crop = If True then image will be cropped to original image dimensions, if False
    #          the image size will be adjusted to accommodate new image dimensions 


    rotate_img = pcv.rotate(img=img1, rotation_deg=-1, crop=False)

    # STEP 4: Shift image. This step is important for clustering later on.
    # For this image it also allows you to push the green raspberry pi camera
    # out of the image. This step might not be necessary for all images.
    # The resulting image is the same size as the original.
    # Inputs:
    #   img    = image object
    #   number = integer, number of pixels to move image
    #   side   = direction to move from "top", "bottom", "right","left"

    shift1 = pcv.shift_img(img=img1, number=300, side='top')
    img1 = shift1

    # STEP 5: Convert image from RGB colorspace to LAB colorspace
    # Keep only the green-magenta channel (grayscale)
    # Inputs:
    #    img     = image object, RGB colorspace
    #    channel = color subchannel ('l' = lightness, 'a' = green-magenta , 'b' = blue-yellow)

    #a = pcv.rgb2gray_lab(img=img1, channel='a')
    a = pcv.rgb2gray_lab(rgb_img=img1, channel='a')

    # STEP 6: Set a binary threshold on the green-magenta ('a') channel image
    # Inputs:
    #    img         = img object, grayscale
    #    threshold   = threshold value (0-255)
    #    max_value   = value to apply above threshold (usually 255 = white)
    #    object_type = light or dark
    #       - If object is light then standard thresholding is done
    #       - If object is dark then inverse thresholding is done

    img_binary = pcv.threshold.binary(gray_img=a, threshold=120, max_value=255, object_type='dark')
    #img_binary = pcv.threshold.binary(gray_img=a, threshold=120, max_value=255, object_type='dark')
    #                                                   ^
    #                                                   |
    #                                     adjust this value

    # STEP 7: Fill in small objects (speckles)
    # Inputs:
    #    bin_img  = image object, binary. img will be returned after filling
    #    size = minimum object area size in pixels (integer)

    fill_image = pcv.fill(bin_img=img_binary, size=10)
    #                                          ^
    #                                          |
    #                           adjust this value

    # STEP 8: Dilate so that you don't lose leaves (just in case)
    # Inputs:
    #    img    = input image
    #    ksize  = kernel size
    #    i      = iterations, i.e. number of consecutive filtering passes

    #dilated = pcv.dilate(img=fill_image, ksize=1, i=1)
    dilated = pcv.dilate(gray_img=fill_image, ksize=2, i=1)

    # STEP 9: Find objects (contours: black-white boundaries)
    # Inputs:
    #    img  = image that the objects will be overlayed
    #    mask = what is used for object detection

    id_objects, obj_hierarchy = pcv.find_objects(img=img1, mask=dilated)
    #id_objects, obj_hierarchy = pcv.find_objects(gray_img, mask)

    # STEP 10: Define region of interest (ROI)
    # Inputs:
    #    img       = img to overlay roi
    #    x_adj     = adjust center along x axis
    #    y_adj     = adjust center along y axis
    #    h_adj     = adjust height
    #    w_adj     = adjust width
    # roi_contour, roi_hierarchy = pcv.roi.rectangle(img1, 10, 500, -10, -100)
    #                                                      ^                ^
    #                                                      |________________|
    #                                            adjust these four values

    roi_contour, roi_hierarchy = pcv.roi.rectangle(img=img1, x=200, y=190, h=2000, w=3000)

    # STEP 11: Keep objects that overlap with the ROI
    # Inputs:
    #    img            = img to display kept objects
    #    roi_contour    = contour of roi, output from any ROI function
    #    roi_hierarchy  = hierarchy of roi, output from any ROI function
    #    object_contour = contours of objects, output from "Identifying Objects" function
    #    obj_hierarchy  = hierarchy of objects, output from "Identifying Objects" function
    #    roi_type       = 'partial' (default, for partially inside), 'cutto', or 'largest' (keep only largest contour)

    roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(img=img1, roi_contour=roi_contour, 
                                                                          roi_hierarchy=roi_hierarchy,
                                                                          object_contour=id_objects,
                                                                          obj_hierarchy=obj_hierarchy, 
                                                                          roi_type='partial')

    # STEP 12: This function takes an image with multiple contours and
    # clusters them based on user input of rows and columns

    # Inputs:
    #    img               = An RGB image
    #    roi_objects       = object contours in an image that need to be clustered
    #    roi_obj_hierarchy = object hierarchy
    #    nrow              = number of rows to cluster (this should be the approximate number of desired rows in the entire image even if there isn't a literal row of plants)
    #    ncol              = number of columns to cluster (this should be the approximate number of desired columns in the entire image even if there isn't a literal column of plants)
    #    show_grid         = if True then a grid gets displayed in debug mode (default show_grid=False)

    clusters_i, contours, hierarchies = pcv.cluster_contours(img=img1, roi_objects=roi_objects, 
                                                             roi_obj_hierarchy=roi_obj_hierarchy, 
                                                             nrow=2, ncol=3)

    # STEP 13: This function takes clustered contours and splits them into multiple images,
    # also does a check to make sure that the number of inputted filenames matches the number
    # of clustered contours. If no filenames are given then the objects are just numbered
    # Inputs:
    #    img                     = ideally a masked RGB image.
    #    grouped_contour_indexes = output of cluster_contours, indexes of clusters of contours
    #    contours                = contours to cluster, output of cluster_contours
    #    hierarchy               = object hierarchy
    #    outdir                  = directory for output images
    #    file                    = the name of the input image to use as a base name, output of filename from readimage
    #    filenames               = input txt file with list of filenames in order from top to bottom left to right (likely list of genotypes)

    # Set global debug behavior to None (default), "print" (to file), or "plot" (Jupyter Notebooks or X11)
    pcv.params.debug = "print"

    out = args.outdir
    names = args.names

    output_path, imgs, masks = pcv.cluster_contour_splitimg(rgb_img=img1, grouped_contour_indexes=clusters_i, 
                                                            contours=contours, hierarchy=hierarchies, 
                                                            outdir=out, file=filename, filenames=names)
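# options() is referenced throughout these examples but not shown. A minimal
# argparse sketch (an assumption, not the original helper) that provides the
# attributes the workflow above reads: args.image, args.debug, args.outdir
# and args.names.
import argparse

def options():
    parser = argparse.ArgumentParser(description="PlantCV multi-plant workflow (sketch)")
    parser.add_argument("-i", "--image", required=True, help="Input image file")
    parser.add_argument("-o", "--outdir", default=".", help="Directory for output images")
    parser.add_argument("-n", "--names", default=None,
                        help="Text file of plant names, top-to-bottom, left-to-right")
    parser.add_argument("-D", "--debug", default=None,
                        help="Debug mode: None, 'print', or 'plot'")
    return parser.parse_args()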
def draw_plot(x_start, y_start, x_end, y_end, reference_file, save_file):
    """
		Utilizes plantcv (citation below) to count the green pixels (Chlorophyll) of wells containing plants in a 4x6 grid format of the selected tray.
		
		Outputs
		-------
		A csv file containing the green pixel count for each well containing plants within the grid 
		
		Parameters
		----------
		x_start : int
			Contains the x coordinate of the top left of the user selection
		y_start : int
			Contains the y coordinate of the top left of the user selection
		x_end : int
			Contains the x coordinate of the bottom right of the user selection
		y_end : int
			Contains the y coordinate of the bottom right of the user selection
		reference_file : str
			A txt file containing the names of each well of the tray
		save_file : str
			A csv file to output the green pixel count for each well of the tray
		
		Citation
		--------
		Fahlgren N, Feldman M, Gehan MA, Wilson MS, Shyu C, Bryant DW, Hill ST, McEntee CJ, Warnasooriya SN, Kumar I, Ficor T, Turnipseed S, Gilbert KB, Brutnell TP, Carrington JC, Mockler TC, Baxter I. (2015) A versatile phenotyping system and analytics platform reveals diverse temporal responses to water availability in Setaria. Molecular Plant 8: 1520-1535. http://doi.org/10.1016/j.molp.2015.06.005
		
		Website Link
		------------
		https://plantcv.readthedocs.io/en/stable/
	"""

    # Rescale x,y values from the resized display image back to the raw image coordinates for an accurate pixel count
    x_start = x_start * img_width / dim[0]
    y_start = y_start * img_height / dim[1]
    x_end = x_end * img_width / dim[0]
    y_end = y_end * img_height / dim[1]

    # Crop raw image to selection window
    cropped = pcv.crop(img,
                       x=int(x_start),
                       y=int(y_start),
                       h=int(y_end - y_start),
                       w=int(x_end - x_start))

    # Debug code to display cropped image. Uncomment to see cropped window
    #cropbytes = cv.imencode('.png', cropped)[1].tobytes()
    #graph.DrawImage(data=cropbytes, location=(0, 0))

    # Utilize plantcv code to count green pixels within selection window
    # For further information see : https://plantcv.readthedocs.io/en/latest/multi-plant_tutorial/
    img1 = pcv.white_balance(img=cropped, roi=(0, 0, 50, 50))
    a = pcv.rgb2gray_lab(rgb_img=img1, channel='a')
    img_binary = pcv.threshold.binary(gray_img=a,
                                      threshold=115,
                                      max_value=255,
                                      object_type='dark')
    fill_image = pcv.fill(bin_img=img_binary, size=80)
    dilated = pcv.dilate(gray_img=fill_image, ksize=2, i=1)
    id_objects, obj_hierarchy = pcv.find_objects(img=img1, mask=dilated)
    roi_contour, roi_hierarchy = pcv.roi.rectangle(img=img1,
                                                   x=0,
                                                   y=0,
                                                   h=int(y_end - y_start),
                                                   w=int(x_end - x_start))
    roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img=img1,
        roi_contour=roi_contour,
        roi_hierarchy=roi_hierarchy,
        object_contour=id_objects,
        obj_hierarchy=obj_hierarchy,
        roi_type='partial')
    clusters_i, contours, hierarchies = pcv.cluster_contours(
        img=img1,
        roi_objects=roi_objects,
        roi_obj_hierarchy=roi_obj_hierarchy,
        nrow=4,
        ncol=6,
        show_grid=True)
    output_path, imgs, masks = pcv.cluster_contour_splitimg(
        img1,
        grouped_contour_indexes=clusters_i,
        contours=contours,
        hierarchy=hierarchies,
        file=filename,
        filenames=reference_file)

    # Save green pixel count for each well of the tray to a csv file using the reference file to name each well
    results = []
    for f in range(len(imgs)):
        color_histogram = pcv.analyze_color(rgb_img=imgs[f],
                                            mask=kept_mask,
                                            hist_plot_type='rgb')

        # Access the green frequency data stored by analyze_color
        green_frequencies = pcv.outputs.observations['green_frequencies'][
            'value']

        result = [output_path[f].split('_')[1], np.trapz(green_frequencies)]
        results.append(result)

    with open(save_file, "w", newline="") as fil:
        writer = csv.writer(fil)
        writer.writerows(results)
        sg.Popup('Finished Analysis! Please see the .csv file for results!')
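# Hypothetical usage of draw_plot (not in the original source). The function
# relies on module-level state set up elsewhere in the GUI script: the raw
# image `img`, its dimensions `img_width`/`img_height`, the resized display
# dimensions `dim`, the input `filename`, and the PySimpleGUI objects
# `graph`/`sg`. With those defined, a selection drawn on the resized image is
# passed straight through, e.g.:
#
#     draw_plot(x_start=100, y_start=80, x_end=900, y_end=620,
#               reference_file="well_names.txt", save_file="green_pixels.csv")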
# This function takes an image with multiple contours and
# clusters them based on user input of rows and columns

# Inputs:
#    img               = An RGB or grayscale image
#    roi_objects       = object contours in an image that need to be clustered
#    roi_obj_hierarchy = object hierarchy
#    nrow              = number of rows to cluster (this should be the approximate number of
#                        desired rows in the entire image even if there isn't a literal row of plants)
#    ncol              = number of columns to cluster (this should be the approximate number of
#                        desired columns in the entire image even if there isn't a literal column of plants)
#    show_grid         = if True then the grid is drawn on the image, default show_grid=False
clusters_i, contours, hierarchies = pcv.cluster_contours(
    img=img1,
    roi_objects=roi_objects,
    roi_obj_hierarchy=roi_obj_hierarchy,
    nrow=16,
    ncol=29,
    show_grid=True)

# OPTIONAL: For debugging or for making figures

# The image is relatively small, decrease the global line thickness parameter
pcv.params.line_thickness = 2

# Plot to visualize what pieces of plant got grouped together.

# Inputs:
#   img - RGB or grayscale image data for plotting
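# The excerpt above breaks off before the plotting call itself. A hedged
# completion, modeled on the pcv.visualize.clustered_contours call used in
# Example #9 below (argument names taken from that call; this is a sketch,
# not the original line):
cluster_img = pcv.visualize.clustered_contours(img=img1,
                                               grouped_contour_indices=clusters_i,
                                               roi_objects=roi_objects,
                                               roi_obj_hierarchy=roi_obj_hierarchy)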
    def read_image_obsolete(self, imagearray):
        """***Do Not Use*** Translate the picture of the dots into a tray number assignment."""
        ###Preprocessing & Object Finding Steps

        device, img1gray = pcv.rgb2gray(imagearray, 0)
        device, img_binary = pcv.binary_threshold(img1gray, 1, 255, "light", 0)
        device, id_objects, obj_hierarchy = pcv.find_objects(
            imagearray, img_binary, 0)
        device, roi, roi_hierarchy = pcv.define_roi(imagearray,
                                                    shape="rectangle",
                                                    device=device,
                                                    roi_input="default",
                                                    adjust=False,
                                                    x_adj=600,
                                                    y_adj=600,
                                                    w_adj=1200,
                                                    h_adj=1200)
        device, roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
            imagearray, "partial", roi, roi_hierarchy, id_objects,
            obj_hierarchy, device)
        device, clusters_i, contours, obj_hierarchy = pcv.cluster_contours(
            device=device,
            img=imagearray,
            roi_objects=roi_objects,
            roi_obj_hierarchy=roi_obj_hierarchy,
            nrow=1,
            ncol=int(3))
        out = "/Users/gghosal/Desktop/gaurav/res"
        device, output_path = pcv.cluster_contour_splitimg(
            device=device,
            img=imagearray,
            grouped_contour_indexes=clusters_i,
            contours=contours,
            hierarchy=obj_hierarchy,
            outdir=out)
        obj_hierarchy = obj_hierarchy[0]
        centroids = list()
        totalcontours = list()
        #print(len(contours))
        for clusters1 in clusters_i:
            totalcontour = contours[clusters1[0]]
            for j in clusters1[1:]:
                totalcontour = np.concatenate((contours[j], totalcontour),
                                              axis=0)
            totalcontours.append(totalcontour)
            mmt = cv2.moments(totalcontour)
            ycoords = int(mmt['m10'] / mmt['m00'])
            xcoords = int(mmt['m01'] / mmt['m00'])
            #cv2.circle(img1, (ycoords, xcoords), 3, (255, 0, 0), 3)

            centroids.append(tuple([ycoords, xcoords]))
        count11 = 0
        for clusters1 in clusters_i:
            for contourtocheck in clusters1:
                if not obj_hierarchy[contourtocheck][2] == -1:
                    if not cv2.contourArea(
                            contours[obj_hierarchy[contourtocheck][2]]) >= 5:
                        obj_hierarchy[contourtocheck][2] = -1
        dotlist = list()
        for foundcontour in clusters_i:
            hierarchycontours = [obj_hierarchy[j][2] for j in foundcontour]
            if not all([bool(j == -1) for j in hierarchycontours]):
                dotlist.append(True)
            else:
                dotlist.append(False)
        colors = self.read_colors(out)
        dot_characteristics = list()
        for pnr in range(3):
            dot_characteristics.append(tuple((colors[pnr], dotlist[pnr])))
        try:
            return self.translator[self.translate(dot_characteristics)]
        except Exception:
            return "Error"
    def read_image2(self, imageread):
        """This is the current implementation for finding and reading the dot codes. """
        os.chdir('/Users/gghosal/Desktop/gaurav/res/')
        self.centers = list()
        for i in self.listdir_nohidden('/Users/gghosal/Desktop/gaurav/res/'):
            os.remove(i)
        #color_recognition_dict={'red':0, "lightblue":98, "darkblue":120, "pink":175, "purple":140}
        img1 = imageread
        device, img1gray = pcv.rgb2gray(img1, 0)
        img1hsv = cv2.cvtColor(img1, cv2.COLOR_BGR2HSV)
        dev, img_binary = pcv.binary_threshold(img1gray, 1, 255, 'light', 0)
        device, id_objects, obj_hierarchy = pcv.find_objects(
            img1, img_binary, 0)

        device, roi, roi_hierarchy = pcv.define_roi(img1,
                                                    shape="rectangle",
                                                    device=device,
                                                    roi_input="default",
                                                    adjust=False,
                                                    x_adj=600,
                                                    y_adj=600,
                                                    w_adj=1200,
                                                    h_adj=1200)
        device, roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
            img1, "partial", roi, roi_hierarchy, id_objects, obj_hierarchy,
            device)

        device, clusters_i, contours, obj_hierarchy = pcv.cluster_contours(
            device=device,
            img=img1,
            roi_objects=roi_objects,
            roi_obj_hierarchy=roi_obj_hierarchy,
            nrow=1,
            ncol=int(3))

        out = "/Users/gghosal/Desktop/gaurav/res"
        device, output_path = pcv.cluster_contour_splitimg(
            device=device,
            img=img1,
            grouped_contour_indexes=clusters_i,
            contours=contours,
            hierarchy=obj_hierarchy,
            outdir=out)

        obj_hierarchy = obj_hierarchy[0]
        dotQ = list()
        centroids = list()
        totalcontours = list()
        for clusters1 in clusters_i:
            totalcontour = contours[clusters1[0]]
            for j in clusters1[1:]:
                totalcontour = np.concatenate((contours[j], totalcontour),
                                              axis=0)
            totalcontours.append(totalcontour)
            mmt = cv2.moments(totalcontour)
            # cv2.moments: m10/m00 is the centroid x-coordinate and m01/m00 the
            # y-coordinate, so the names below are swapped relative to the usual
            # convention; the stored tuple order is kept unchanged.
            ycoords = int(mmt['m10'] / mmt['m00'])
            xcoords = int(mmt['m01'] / mmt['m00'])

            self.centers.append(tuple([ycoords, xcoords]))
            centroids.append(tuple([xcoords, ycoords]))
        count11 = 0

        dot_cleaned = apply_brightness_contrast(imageread,
                                                brightness=0,
                                                contrast=49)

        dot_cleaned_hsv = cv2.cvtColor(dot_cleaned, cv2.COLOR_RGB2HSV)
        dot_cleaned_thresh = cv2.inRange(dot_cleaned_hsv,
                                         np.array([0, 0, 150]),
                                         np.array([255, 255, 255]))
        dot_cleaned = cv2.bitwise_and(dot_cleaned,
                                      dot_cleaned,
                                      mask=dot_cleaned_thresh)
        dot_kernel = np.ones((7, 7), np.uint8)

        dotQ = self.read_dot3(dot_cleaned)

        os.chdir(out)
        colors = list()
        dot_characteristics = list()
        for i in self.listdir_nohidden(out):
            #print(i)
            if self.masked(i):
                mask = cv2.imread(i, 0)
                unmasked = cv2.imread(i[0:-5])
                width = mask.shape[0]
                length = mask.shape[1]
                color_averagelist = list()
                masked_img1hsv = cv2.bitwise_and(img1hsv, img1hsv, mask=mask)
                color_averagelist = cv2.split(masked_img1hsv)[0]
                color_averagelist = np.reshape(color_averagelist,
                                               (color_averagelist.shape[0] *
                                                color_averagelist.shape[1], ))
                color_averagelist = color_averagelist[np.flatnonzero(
                    color_averagelist)]

                color_avg = statistics.mode(color_averagelist)
                resultsdict = dict()
                for color in self.colordefinitions:
                    resultsdict[color] = abs(self.colordefinitions[color] -
                                             color_avg)
                color = min(resultsdict, key=lambda x: resultsdict[x])
                colors.append(color)

        for pnr in range(3):
            dot_characteristics.append(tuple((colors[pnr], dotQ[pnr])))

        try:
            return self.translator.get(self.translate(dot_characteristics),
                                       self.translate(dot_characteristics))
        except Exception:
            return None
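# apply_brightness_contrast() is called in read_image2 above but not defined
# in this excerpt. A common stand-in implementation (an assumption, not the
# original helper), using linear scaling via cv2.addWeighted:
import cv2

def apply_brightness_contrast(img, brightness=0, contrast=0):
    """Return a copy of img with a linear brightness/contrast adjustment."""
    out = img.copy()
    if brightness != 0:
        shadow = max(brightness, 0)
        highlight = 255 + min(brightness, 0)
        alpha = (highlight - shadow) / 255.0
        out = cv2.addWeighted(out, alpha, out, 0, shadow)
    if contrast != 0:
        f = 131.0 * (contrast + 127) / (127 * (131 - contrast))
        out = cv2.addWeighted(out, f, out, 0, 127 * (1 - f))
    return out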
Example #8
def main():
    # Get options
    args = options()

    # Set variables
    pcv.params.debug = args.debug  # Replace the hard-coded debug with the debug flag
    pcv.params.debug_outdir = args.outdir  # set output directory

    ### Main pipeline ###

    # Read image (readimage mode defaults to native but if image is RGBA then specify mode='rgb')
    img, path, filename = pcv.readimage(args.image, mode='rgb')

    # Read reference image for colour correction (currently unused)
    #ref_img, ref_path, ref_filename = pcv.readimage(
    #    "/home/leonard/Dropbox/2020-01_LAC_phenotyping/images/top/renamed/20200128_2.jpg",
    #    mode="rgb")

    # Find colour cards
    #df, start, space = pcv.transform.find_color_card(rgb_img=ref_img)
    #ref_mask = pcv.transform.create_color_card_mask(rgb_img=ref_img, radius=10, start_coord=start, spacing=space, ncols=4, nrows=6)

    df, start, space = pcv.transform.find_color_card(rgb_img=img)
    img_mask = pcv.transform.create_color_card_mask(rgb_img=img,
                                                    radius=10,
                                                    start_coord=start,
                                                    spacing=space,
                                                    ncols=4,
                                                    nrows=6)

    output_directory = "."

    # Correct colour (currently unused)
    #target_matrix, source_matrix, transformation_matrix, corrected_img = pcv.transform.correct_color(ref_img, ref_mask, img, img_mask, output_directory)

    # Check that the colour correction worked (source~target should be strictly linear)
    #pcv.transform.quick_color_check(source_matrix = source_matrix, target_matrix = target_matrix, num_chips = 24)

    # Write the spacing of the colour card to file as size marker
    with open(os.path.join(path, 'output/size_marker_trays.csv'), 'a') as f:
        writer = csv.writer(f)
        writer.writerow([filename, space[0]])

    ### Crop tray ###

    # Define a bounding rectangle around the colour card
    x_cc, y_cc, w_cc, h_cc = cv2.boundingRect(img_mask)
    x_cc = int(round(x_cc - 0.3 * w_cc))
    y_cc = int(round(y_cc - 0.3 * h_cc))
    h_cc = int(round(h_cc * 1.6))
    w_cc = int(round(w_cc * 1.6))

    # Crop out colour card
    start_point = (x_cc, y_cc)
    end_point = (x_cc + w_cc, y_cc + h_cc)
    colour = (0, 0, 0)
    thickness = -1
    card_crop_img = cv2.rectangle(img, start_point, end_point, colour,
                                  thickness)

    # Convert RGB to HSV and extract the value channel
    v = pcv.rgb2gray_hsv(card_crop_img, "v")

    # Threshold the value image
    v_thresh = pcv.threshold.binary(
        v, 100, 255, "light"
    )  # start threshold at 150 with bright corner-markers, 100 without

    # Fill out bright imperfections (siliques and other dirt on the background)
    v_thresh = pcv.fill(
        v_thresh, 100)  # fill at 500 with bright corner-markers, 100 without

    # Create bounding rectangle around the tray
    x, y, w, h = cv2.boundingRect(v_thresh)

    # Crop image to tray
    #crop_img = card_crop_img[y:y+h, x:x+int(w - (w * 0.03))] # crop extra 3% from right because of tray labels
    crop_img = card_crop_img[y:y + h, x:x + w]  # crop symmetrically

    # Save cropped image for quality control
    pcv.print_image(crop_img,
                    filename=path + "/output/" + "cropped" + filename + ".png")

    ### Threshold plants ###

    # Threshold the green-magenta, blue, and hue channels
    a_thresh, _ = pcv.threshold.custom_range(img=crop_img,
                                             lower_thresh=[0, 0, 0],
                                             upper_thresh=[255, 108, 255],
                                             channel='LAB')
    b_thresh, _ = pcv.threshold.custom_range(img=crop_img,
                                             lower_thresh=[0, 0, 135],
                                             upper_thresh=[255, 255, 255],
                                             channel='LAB')
    h_thresh, _ = pcv.threshold.custom_range(img=crop_img,
                                             lower_thresh=[35, 0, 0],
                                             upper_thresh=[70, 255, 255],
                                             channel='HSV')

    # Join the thresholds (AND)
    ab = pcv.logical_and(b_thresh, a_thresh)
    abh = pcv.logical_and(ab, h_thresh)

    # Fill small objects depending on expected plant size based on DPG (make sure to take the correct file suffix jpg/JPG/jpeg...)
    match = re.search(r"(\d+).(\d)\.jpg$", filename)

    if int(match.group(1)) < 10:
        abh_clean = pcv.fill(abh, 50)
        print("50")
    elif int(match.group(1)) < 15:
        abh_clean = pcv.fill(abh, 200)
        print("200")
    else:
        abh_clean = pcv.fill(abh, 500)
        print("500")

    # Dilate to close broken borders
    abh_dilated = pcv.dilate(abh_clean, 3, 1)

    # Close holes
    # abh_fill = pcv.fill_holes(abh_dilated) # silly -- removed
    abh_fill = abh_dilated

    # Apply mask (for VIS images, mask_color=white)
    masked = pcv.apply_mask(crop_img, abh_fill, "white")

    # Save masked image for quality control
    pcv.print_image(masked,
                    filename=path + "/output/" + "masked" + filename + ".png")

    ### Filter and group contours ###

    # Identify objects
    id_objects, obj_hierarchy = pcv.find_objects(crop_img, abh_fill)

    # Create bounding box with margins to avoid border artifacts
    roi_y = 0 + crop_img.shape[0] * 0.05
    roi_x = 0 + crop_img.shape[0] * 0.05
    roi_h = crop_img.shape[0] - (crop_img.shape[0] * 0.1)
    roi_w = crop_img.shape[1] - (crop_img.shape[0] * 0.1)
    roi_contour, roi_hierarchy = pcv.roi.rectangle(crop_img, roi_y, roi_x,
                                                   roi_h, roi_w)

    # Keep all objects in the bounding box
    roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img=crop_img,
        roi_type='partial',
        roi_contour=roi_contour,
        roi_hierarchy=roi_hierarchy,
        object_contour=id_objects,
        obj_hierarchy=obj_hierarchy)

    # Cluster the objects by plant
    clusters, contours, hierarchies = pcv.cluster_contours(
        crop_img, roi_objects, roi_obj_hierarchy, 3, 5)

    # Split image into single plants
    out = args.outdir
    #output_path, imgs, masks = pcv.cluster_contour_splitimg(crop_img,
    #                                                        clusters,
    #                                                        contours,
    #                                                        hierarchies,
    #                                                        out,
    #                                                        file = filename)

    ### Analysis ###

    # Approximate the position of the top left plant as grid start
    coord_y = int(
        round(((crop_img.shape[0] / 3) * 0.5) + (crop_img.shape[0] * 0.025)))
    coord_x = int(
        round(((crop_img.shape[1] / 5) * 0.5) + (crop_img.shape[1] * 0.025)))

    # Set the ROI spacing relative to image dimensions
    spc_y = int((round(crop_img.shape[0] - (crop_img.shape[0] * 0.05)) / 3))
    spc_x = int((round(crop_img.shape[1] - (crop_img.shape[1] * 0.05)) / 5))

    # Set the ROI radius relative to image width
    if int(match.group(1)) < 16:
        r = int(round(crop_img.shape[1] / 12.5))
    else:
        r = int(round(crop_img.shape[1] / 20))

    # Make a grid of ROIs at the expected positions of plants
    # This allows for gaps due to dead/not germinated plants, without messing up the plant numbering
    imgs, masks = pcv.roi.multi(img=crop_img,
                                nrows=3,
                                ncols=5,
                                coord=(coord_x, coord_y),
                                radius=r,
                                spacing=(spc_x, spc_y))

    # Loop through the ROIs in the grid
    for i in range(0, len(imgs)):
        # Find objects within the ROI
        filtered_contours, filtered_hierarchy, filtered_mask, filtered_area = pcv.roi_objects(
            img=crop_img,
            roi_type="partial",
            roi_contour=imgs[i],
            roi_hierarchy=masks[i],
            object_contour=id_objects,
            obj_hierarchy=obj_hierarchy)
        # Continue only if not empty
        if len(filtered_contours) > 0:
            # Combine objects within each ROI
            plant_contour, plant_mask = pcv.object_composition(
                img=crop_img,
                contours=filtered_contours,
                hierarchy=filtered_hierarchy)

            # Analyse the shape of each plant
            analysis_images = pcv.analyze_object(img=crop_img,
                                                 obj=plant_contour,
                                                 mask=plant_mask)

            pcv.print_image(analysis_images,
                            filename=path + "/output/" + filename + "_" +
                            str(i) + "_analysed.png")

            # Determine color properties
            color_images = pcv.analyze_color(crop_img, plant_mask, "hsv")

            # Watershed plant area to count leaves (computationally intensive, use when needed)
            #watershed_images = pcv.watershed_segmentation(crop_img, plant_mask, 15)

            # Print out a .json file with the analysis data for the plant
            pcv.outputs.save_results(filename=path + "/" + filename + "_" +
                                     str(i) + '.json')

            # Clear the measurements stored globally into the Outputs class
            pcv.outputs.clear()
Example #9
def main():
    # Import the image file
    path = r'Image test\capture (2).jpg'
    imgraw, path, img_filename = pcv.readimage(path, mode='native')

    nilaiTerang = np.average(imgraw)

    if nilaiTerang < 50:
        pcv.fatal_error("Night Image")
    else:
        pass

    rotateimg = pcv.rotate(imgraw, -3, True)
    imgraw = rotateimg

    bersih1 = pcv.white_balance(imgraw)

    hitamputih = pcv.rgb2gray_lab(bersih1, channel='a')

    img_binary = pcv.threshold.binary(hitamputih,
                                      threshold=110,
                                      max_value=255,
                                      object_type='dark')

    fill_image = pcv.fill(bin_img=img_binary, size=10)
    dilated = pcv.dilate(gray_img=fill_image, ksize=6, i=1)
    id_objects, obj_hierarchy = pcv.find_objects(img=imgraw, mask=dilated)
    roi_contour, roi_hierarchy = pcv.roi.rectangle(img=imgraw,
                                                   x=280,
                                                   y=96,
                                                   h=1104,
                                                   w=1246)
    print(type(roi_contour))
    print(type(roi_hierarchy))
    print(roi_hierarchy)
    print(roi_contour)
    roicontour = cv2.drawContours(imgraw, roi_contour, -1, (0, 0, 255), 3)
    #cv2.rectangle(imgraw, roi_contour[0], roi_contour[3])

    roi_obj, hier, kept_mask, obj_area = pcv.roi_objects(
        img=imgraw,
        roi_contour=roi_contour,
        roi_hierarchy=roi_hierarchy,
        object_contour=id_objects,
        obj_hierarchy=obj_hierarchy,
        roi_type='partial')
    cnt_i, contours, hierarchies = pcv.cluster_contours(img=imgraw,
                                                        roi_objects=roi_obj,
                                                        roi_obj_hierarchy=hier,
                                                        nrow=4,
                                                        ncol=3)
    clustered_image = pcv.visualize.clustered_contours(
        img=imgraw,
        grouped_contour_indices=cnt_i,
        roi_objects=roi_obj,
        roi_obj_hierarchy=hier)
    obj, mask = pcv.object_composition(imgraw, roi_obj, hier)
    hasil = pcv.analyze_object(imgraw, obj, mask)
    pcv.print_image(imgraw, r'Image test\Result\wel.jpg')
    pcv.print_image(clustered_image, r'Image test\Result\clustred.jpg')
    pcv.print_image(hitamputih, r'Image test\Result\Bersihe.jpg')
    pcv.print_image(dilated, r'Image test\Result\dilated.jpg')
    pcv.print_image(hasil, r'Image test\Result\hasil.jpg')
    plantHasil = pcv.outputs.observations['area']
    data1 = pcv.outputs.observations['area']['value']
    print(data1)
    print(plantHasil)
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)

    pcv.params.debug = args.debug  #set debug mode

    # STEP 1: white balance (for comparison across images)
    # inputs:
    #   img = image object, RGB colorspace
    #   roi = region for white reference (position of ColorChecker Passport)
    img1 = pcv.white_balance(img, roi=(910, 3555, 30, 30))

    # STEP 2: Mask out color card and stake
    # inputs:
    #   img = image to mask (here the white-balanced RGB image)
    #   p1 = (x,y) coordinates for top left corner of rectangle
    #   p2 = (x,y) coordinates for bottom right corner of rectangle
    #   color = color to make the mask (white here to match background)
    masked, binary, contours, hierarchy = pcv.rectangle_mask(img1, (0, 2000),
                                                             (1300, 4000),
                                                             color="white")
    masked2, binary, contours, hierarchy = pcv.rectangle_mask(masked,
                                                              (0, 3600),
                                                              (4000, 4000),
                                                              color="white")

    # STEP 3: Convert from RGB colorspace to LAB colorspace
    # Keep the lightness channel (l) (note: the variable below is still named 'a')
    # inputs:
    #   img = image object, RGB colorspace
    #   channel = color subchannel ('l' = lightness, 'a' = green-magenta, 'b' = blue-yellow)
    a = pcv.rgb2gray_lab(masked2, 'l')

    # STEP 4: Set a binary threshold on the grayscale image
    # inputs:
    #   img = img object, grayscale
    #   threshold = threshold value (0-255) - need to adjust this
    #   max_value = value to apply above threshold (255 = white)
    #   object_type = light or dark
    img_binary = pcv.threshold.binary(a, 118, 255, object_type="dark")

    # STEP 5: Apply a median blur to the binary image (reduces noise)
    # inputs:
    #   img = img object, binary
    #   ksize = kernel size (integer, or tuple of kernel dimensions, e.g. (5,5))
    blur_image = pcv.median_blur(img_binary, 10)

    # STEP 6: Fill small objects (speckles)
    # inputs:
    #   img = img object, binary
    #   size = minimum object area size in pixels
    fill_image1 = pcv.fill(blur_image, 150000)

    # STEP 7: Invert image to fill gaps
    # inputs:
    #   img = img object, binary
    inv_image = pcv.invert(fill_image1)
    # rerun fill on inverted image
    inv_fill = pcv.fill(inv_image, 25000)
    # invert image again
    fill_image = pcv.invert(inv_fill)

    # STEP 8: Dilate to avoid losing detail
    # inputs:
    #   img = img object, binary
    #   ksize = kernel size
    #   i = iterations (number of consecutive filtering passes)
    dilated = pcv.dilate(fill_image, 2, 1)

    # STEP 9: Find objects (contours: black-white boundaries)
    # inputs:
    #   img = img object, RGB colorspace
    #   mask = binary image used for object detection
    id_objects, obj_hierarchy = pcv.find_objects(img1, dilated)

    # STEP 10: Define region of interest (ROI)
    # inputs:
    #   img = img object to overlay ROI
    #   x = x-coordinate of upper left corner for rectangle
    #   y = y-coordinate of upper left corner for rectangle
    #   h = height of rectangle
    #   w = width of rectangle
    roi_contour, roi_hierarchy = pcv.roi.rectangle(img=img1,
                                                   x=20,
                                                   y=10,
                                                   h=3000,
                                                   w=3000)

    # STEP 11: Keep objects that overlap with the ROI
    # inputs:
    #   img = img where selected objects will be displayed
    #   roi_type = options are 'cutto', 'partial' (objects are partially inside roi), or 'largest' (keep only the largest contour)
    #   roi_contour = contour of roi, output from 'view and adjust roi' function (STEP 10)
    #   roi_hierarchy = hierarchy of roi, output from 'view and adjust roi' function (STEP 10)
    #   object_contour = contours of objects, output from 'identifying objects' function (STEP 9)
    #   obj_hierarchy = hierarchy of objects, output from 'identifying objects' function (STEP 9)
    roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img1, 'partial', roi_contour, roi_hierarchy, id_objects, obj_hierarchy)

    # STEP 12: Cluster multiple contours in an image based on user input of rows/columns
    # inputs:
    #   img = img object (RGB colorspace)
    #   roi_objects = object contours in an image that will be clustered (output from STEP 11)
    #   roi_obj_hierarchy = object hierarchy (also from STEP 11)
    #   nrow = number of rows for clustering (desired rows in image even if no leaf present in all)
    #   ncol = number of columns to cluster (desired columns in image even if no leaf present in all)
    clusters_i, contours, hierarchies = pcv.cluster_contours(
        img1, roi_objects, roi_obj_hierarchy, 3, 3)

    # STEP 13: select and split clustered contours to export into multiple images
    # also checks if number of inputted filenames matches number of clustered contours
    # if no filenames, objects are numbered in order
    # inputs:
    #   img = masked RGB image
    #   grouped_contour_indexes = indexes of clustered contours, output of 'cluster_contours' (STEP 12)
    #   contours = contours of cluster, output of 'cluster_contours' (STEP 12)
    #   hierarchy = object hierarchy (from STEP 12)
    #   outdir = directory to export output images
    #   file = name of input image to use as basename (uses filename from 'readimage')
    #   filenames = (optional) txt file with list of filenames ordered from top to bottom/left to right

    # Set global debug behavior to None (default), "print" (to file), or "plot" (Jupyter Notebooks or X11)
    pcv.params.debug = None
    out = args.outdir
    # names = args.names
    # out = "./"

    output_path, imgs, masks = pcv.cluster_contour_splitimg(img1,
                                                            clusters_i,
                                                            contours,
                                                            hierarchies,
                                                            out,
                                                            file=filename,
                                                            filenames=None)
Example #11
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)

    debug = args.debug

    # Pipeline step
    device = 0

    # Step 1: Check if this is a night image. Some images in these datasets were
    # captured at night, even though nothing is visible. To make sure that images
    # are not taken at night, we check that the image isn't mostly dark
    # (0=black, 255=white). If it is a night image, a fatal error is thrown and the pipeline stops.

    if np.average(img) < 50:
        pcv.fatal_error("Night Image")
    else:
        pass

    # Step 2: Normalize the white color so you can later
    # compare color between images.
    # Inputs:
    # device = device number. Used to count steps in the workflow
    # img = image object, RGB colorspace
    # debug = None, print, or plot. Print = save to file, Plot = print to screen.
    # roi = region for white reference, if none uses the whole image,
    # otherwise (x position, y position, box width, box height)

    #white balance image based on white toughspot
    device, img1 = pcv.white_balance(device, img, debug, roi=white_balance_roi)
    # img1 = img

    # Step 3: Rotate the image

    # device, rotate_img = pcv.rotate(img1, -1, device, debug)

    #Step 4: Shift image. This step is important for clustering later on.
    # For this image it also allows you to push the green raspberry pi camera
    # out of the image. This step might not be necessary for all images.
    # The resulting image is the same size as the original.
    # Input:
    # img = image object
    # device = device number. Used to count steps in the workflow
    # number = integer, number of pixels to move image
    # side = direction to move from "top", "bottom", "right","left"
    # debug = None, print, or plot. Print = save to file, Plot = print to screen.

    # device, shift1 = pcv.shift_img(img1, device, 300, 'top', debug)
    # img1 = shift1

    # STEP 5: Convert image from RGB colorspace to LAB colorspace
    # Keep only the green-magenta channel (grayscale)
    # Inputs:
    #    img     = image object, RGB colorspace
    #    channel = color subchannel (l = lightness, a = green-magenta , b = blue-yellow)
    #    device  = device number. Used to count steps in the workflow
    #    debug   = None, print, or plot. Print = save to file, Plot = print to screen.
    device, a = pcv.rgb2gray_lab(img1, 'a', device, debug)

    # STEP 6: Set a binary threshold on the green-magenta ('a') channel image
    # Inputs:
    #    img         = img object, grayscale
    #    threshold   = threshold value (0-255)
    #    maxValue    = value to apply above threshold (usually 255 = white)
    #    object_type = light or dark
    #                  - If object is light then standard thresholding is done
    #                  - If object is dark then inverse thresholding is done
    #    device      = device number. Used to count steps in the pipeline
    #    debug       = None, print, or plot. Print = save to file, Plot = print to screen.
    device, img_binary = pcv.binary_threshold(a, darkness_threshold, 255,
                                              'dark', device, debug)
    #                                            ^
    #                                            |
    #                                           adjust this value

    # STEP 7: Fill in small objects (speckles)
    # Inputs:
    #    img    = image object, grayscale. img will be returned after filling
    #    mask   = image object, grayscale. This image will be used to identify contours
    #    size   = minimum object area size in pixels (integer)
    #    device = device number. Used to count steps in the pipeline
    #    debug  = None, print, or plot. Print = save to file, Plot = print to screen.
    mask = np.copy(img_binary)
    device, fill_image = pcv.fill(img_binary, mask, minimum_object_area_pixels,
                                  device, debug)
    #                                               ^
    #                                               |
    #                                               adjust this value

    # STEP 8: Dilate so that you don't lose leaves (just in case)
    # Inputs:
    #    img     = input image
    #    kernel  = integer
    #    i       = iterations, i.e. number of consecutive filtering passes
    #    device  = device number. Used to count steps in the pipeline
    #    debug   = None, print, or plot. Print = save to file, Plot = print to screen.

    device, dilated = pcv.dilate(fill_image, 1, 1, device, debug)

    # STEP 9: Find objects (contours: black-white boundaries)
    # Inputs:
    #    img       = image that the objects will be overlayed
    #    mask      = what is used for object detection
    #    device    = device number.  Used to count steps in the pipeline
    #    debug     = None, print, or plot. Print = save to file, Plot = print to screen.
    device, id_objects, obj_hierarchy = pcv.find_objects(
        img1, dilated, device, debug)

    # STEP 10: Define region of interest (ROI)
    # Inputs:
    #    img       = img to overlay roi
    #    roi       = default (None) or user input ROI image, object area should be white and background should be black,
    #                has not been optimized for more than one ROI
    #    roi_input = type of file roi_base is, either 'binary', 'rgb', or 'default' (no ROI inputted)
    #    shape     = desired shape of final roi, either 'rectangle' or 'circle', if  user inputs rectangular roi but chooses
    #                'circle' for shape then a circle is fitted around rectangular roi (and vice versa)
    #    device    = device number.  Used to count steps in the pipeline
    #    debug     = None, print, or plot. Print = save to file, Plot = print to screen.
    #    adjust    = either 'True' or 'False', if 'True' allows user to adjust ROI
    #    x_adj     = adjust center along x axis
    #    y_adj     = adjust center along y axis
    #    w_adj     = adjust width
    #    h_adj     = adjust height
    # x=0, y=560, h=4040-560, w=3456
    roi_contour, roi_hierarchy = pcv.roi.rectangle(**total_region_of_interest,
                                                   img=img1)
    # device, roi, roi_hierarchy = pcv.define_roi(img1, 'rectangle', device, None, 'default', debug, False,
    #                                             0, 0, 0, 0)
    #                                            ^                ^
    #                                            |________________|
    #                                            adjust these four values

    # STEP 11: Keep objects that overlap with the ROI
    # Inputs:
    #    img            = img to display kept objects
    #    roi_type       = 'cutto' or 'partial' (for partially inside)
    #    roi_contour    = contour of roi, output from "View and Adjust ROI" function
    #    roi_hierarchy  = hierarchy of roi, output from "View and Adjust ROI" function
    #    object_contour = contours of objects, output from "Identifying Objects" function
    #    obj_hierarchy  = hierarchy of objects, output from "Identifying Objects" function
    #    device         = device number.  Used to count steps in the pipeline
    #    debug          = None, print, or plot. Print = save to file, Plot = print to screen.
    device, roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img1, 'partial', roi_contour, roi_hierarchy, id_objects, obj_hierarchy,
        device, debug)

    # print(obj_area)

    #Step 12: This function takes an image with multiple contours and
    # clusters them based on user input of rows and columns

    #Inputs:
    #    img - An RGB image array
    #    roi_objects - object contours in an image that need to be clustered.
    #    nrow - number of rows to cluster (this should be the approximate number of desired rows in the entire image, even if there isn't a literal row of plants)
    #    ncol - number of columns to cluster (this should be the approximate number of desired columns in the entire image, even if there isn't a literal column of plants)
    #    file -  output of filename from read_image function
    #    filenames - input txt file with list of filenames in order from top to bottom left to right
    #    debug - print debugging images

    device, clusters_i, contours = pcv.cluster_contours(
        device, img1, roi_objects, expected_number_of_rows,
        expected_number_of_columns, debug)

    # print(contours)

    #Step 13:This function takes clustered contours and splits them into multiple images,
    #also does a check to make sure that the number of inputted filenames matches the number
    #of clustered contours. If no filenames are given then the objects are just numbered

    #Inputs:
    #    img - ideally a masked RGB image.
    #    grouped_contour_indexes - output of cluster_contours, indexes of clusters of contours
    #    contours - contours to cluster, output of cluster_contours
    #    filenames - input txt file with list of filenames in order from top to bottom left to right (likely list of genotypes)
    #    debug - print debugging images

    out = args.outdir
    names = args.names
    device, output_path = pcv.cluster_contour_splitimg(device,
                                                       img1,
                                                       clusters_i,
                                                       contours,
                                                       out,
                                                       file=filename,
                                                       filenames=names,
                                                       debug=debug)
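# The pipeline above references several module-level settings that are not
# shown in this excerpt. The names below are the ones the code actually uses;
# the example values are assumptions for illustration only (the ROI values
# come from the "x=0, y=560, h=4040-560, w=3456" comment near STEP 10).
white_balance_roi = (400, 800, 200, 200)        # (x, y, box width, box height) of white reference
darkness_threshold = 120                        # binary threshold on the LAB 'a' channel
minimum_object_area_pixels = 10                 # fill() size cutoff for speckles
total_region_of_interest = dict(x=0, y=560, h=4040 - 560, w=3456)
expected_number_of_rows = 4                     # grid rows assumed for clustering
expected_number_of_columns = 6                  # grid columns assumed for clustering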