Code Example #1
def main():
    # Get options
    args = options()

    debug = args.debug

    # Read image
    img, path, filename = pcv.readimage(args.image)

    # Pipeline step
    device = 0

    device, corrected_img = pcv.white_balance(device, img, debug,
                                              (500, 1000, 500, 500))
    img = corrected_img

    device, img_gray_sat = pcv.rgb2gray_lab(img, 'a', device, debug)

    device, img_binary = pcv.binary_threshold(img_gray_sat, 120, 255, 'dark',
                                              device, debug)

    mask = np.copy(img_binary)
    device, fill_image = pcv.fill(img_binary, mask, 300, device, debug)

    device, id_objects, obj_hierarchy = pcv.find_objects(
        img, fill_image, device, debug)

    device, roi, roi_hierarchy = pcv.define_roi(img, 'rectangle', device, None,
                                                'default', debug, True, 1800,
                                                1600, -1500, -500)

    device, roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi, roi_hierarchy, id_objects, obj_hierarchy, device,
        debug)

    device, obj, mask = pcv.object_composition(img, roi_objects,
                                               roi_obj_hierarchy, device,
                                               debug)

    outfile = os.path.join(args.outdir, filename)

    device, color_header, color_data, color_img = pcv.analyze_color(
        img, img, mask, 256, device, debug, None, 'v', 'img', 300, outfile)

    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, "img", obj, mask, device, debug, outfile)

    shapepath = outfile[:-4] + '_shapes.jpg'
    shapepic = cv2.imread(shapepath)
    plantsize = "The plant is " + str(np.sum(mask)) + " pixels large"
    cv2.putText(shapepic, plantsize, (500, 500), cv2.FONT_HERSHEY_SIMPLEX, 5,
                (0, 255, 0), 10)
    pcv.print_image(shapepic, outfile[:-4] + '-out_shapes.jpg')
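Example #1 calls an options() helper and several modules (os, cv2, numpy, PlantCV) that are not part of the excerpt. A minimal sketch of that scaffolding follows; the flag names are assumptions inferred from how args is used above, and the legacy import style matches the device-based API this example relies on.

import os
import argparse
import cv2
import numpy as np
import plantcv as pcv  # legacy (pre-3.0) import assumed here; newer releases use `from plantcv import plantcv as pcv`


def options():
    # Hypothetical argument parser matching the attributes accessed above (args.image, args.outdir, args.debug).
    parser = argparse.ArgumentParser(description="PlantCV single-image pipeline (legacy device-based API)")
    parser.add_argument("-i", "--image", required=True, help="Input image file")
    parser.add_argument("-o", "--outdir", required=True, help="Output directory for result images")
    parser.add_argument("-D", "--debug", default=None, help="Debug mode: 'print', 'plot', or None")
    return parser.parse_args()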
Code Example #2
def calculate(rgb_img, mask, health_point):
    analysis_image = pcv.analyze_color(rgb_img, mask, 'hsv')
    logger.debug("image analyzed by PlantCV")
    all_colors = pcv.outputs.observations['hue_frequencies']['value']
    logger.debug('received all hue frequencies of the image')
    color_report = color_reporter.calculate_color_contribution(all_colors)
    logger.debug('received color report of the image')
    median_color = pcv.outputs.observations['hue_median']['value']
    logger.debug("median color of image calculated")
    logger.info(f'median color: {median_color}')
    health_score = round(1-(abs(median_color-health_point)/health_point), 2)
    logger.debug("health score of image calculated")
    logger.info(f'health score: {health_score}')
    return {
        "color_report": color_report,
        "dominant_color": median_color,
        "score": health_score
    }
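Example #2 depends on module-level logger and color_reporter objects that are not shown. Assuming those are in place, a hypothetical driver (the file name, threshold, and target hue below are illustrative only) could build a plant mask with PlantCV and score the image like this:

from plantcv import plantcv as pcv

rgb_img, _, _ = pcv.readimage(filename="plant.jpg")
a_gray = pcv.rgb2gray_lab(rgb_img=rgb_img, channel='a')          # green-magenta channel
mask = pcv.threshold.binary(gray_img=a_gray, threshold=120,
                            max_value=255, object_type='dark')   # plant pixels are dark in 'a'
report = calculate(rgb_img=rgb_img, mask=mask, health_point=100)
print(report["score"], report["dominant_color"])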
Code Example #3
def draw_plot(x_start, y_start, x_end, y_end, reference_file, save_file):
    """
		Utilizes plantcv (citation below) to count the green pixels (Chlorophyll) of wells containg plants in a 4x6 grid format of the selected tray.
		
		Outputs
		-------
		A csv file containing the green pixel count for each well containing plants within the grid 
		
		Parameters
		----------
		x_start : int
			Contains the x coordinate of the top left of the user selection
		y_start : int
			Contains the y coordinate of the top left of the user selection
		x_end : int
			Contains the x coordinate of the bottom right of the user selection
		y_end : int
			Contains the y coordinate of the bottom right of the user selection
		reference_file : str
			A txt file containing the names of each well of the tray
		save_file : str
			A csv file to output the green pixel count for each well of the tray
		
		Citation
		--------
		Fahlgren N, Feldman M, Gehan MA, Wilson MS, Shyu C, Bryant DW, Hill ST, McEntee CJ, Warnasooriya SN, Kumar I, Ficor T, Turnipseed S, Gilbert KB, Brutnell TP, Carrington JC, Mockler TC, Baxter I. (2015) A versatile phenotyping system and analytics platform reveals diverse temporal responses to water availability in Setaria. Molecular Plant 8: 1520-1535. http://doi.org/10.1016/j.molp.2015.06.005
		
		Website Link
		------------
		https://plantcv.readthedocs.io/en/stable/
	"""

    # Rescale x,y values from the resized preview image back to the raw image coordinates for an accurate pixel count
    x_start = x_start * img_width / dim[0]
    y_start = y_start * img_height / dim[1]
    x_end = x_end * img_width / dim[0]
    y_end = y_end * img_height / dim[1]

    # Crop raw image to selection window
    cropped = pcv.crop(img,
                       x=int(x_start),
                       y=int(y_start),
                       h=int(y_end - y_start),
                       w=int(x_end - x_start))

    # Debug code to display cropped image. Uncomment to see cropped window
    #cropbytes = cv.imencode('.png', cropped)[1].tobytes()
    #graph.DrawImage(data=cropbytes, location=(0, 0))

    # Utilize plantcv code to count green pixels within selection window
    # For further information see : https://plantcv.readthedocs.io/en/latest/multi-plant_tutorial/
    img1 = pcv.white_balance(img=cropped, roi=(0, 0, 50, 50))
    a = pcv.rgb2gray_lab(rgb_img=img1, channel='a')
    img_binary = pcv.threshold.binary(gray_img=a,
                                      threshold=115,
                                      max_value=255,
                                      object_type='dark')
    fill_image = pcv.fill(bin_img=img_binary, size=80)
    dilated = pcv.dilate(gray_img=fill_image, ksize=2, i=1)
    id_objects, obj_hierarchy = pcv.find_objects(img=img1, mask=dilated)
    roi_contour, roi_hierarchy = pcv.roi.rectangle(img=img1,
                                                   x=0,
                                                   y=0,
                                                   h=int(y_end - y_start),
                                                   w=int(x_end - x_start))
    roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img=img1,
        roi_contour=roi_contour,
        roi_hierarchy=roi_hierarchy,
        object_contour=id_objects,
        obj_hierarchy=obj_hierarchy,
        roi_type='partial')
    clusters_i, contours, hierarchies = pcv.cluster_contours(
        img=img1,
        roi_objects=roi_objects,
        roi_obj_hierarchy=roi_obj_hierarchy,
        nrow=4,
        ncol=6,
        show_grid=True)
    output_path, imgs, masks = pcv.cluster_contour_splitimg(
        img1,
        grouped_contour_indexes=clusters_i,
        contours=contours,
        hierarchy=hierarchies,
        file=filename,
        filenames=reference_file)

    # Save green pixel count for each well of the tray to a csv file using the reference file to name each well
    results = []
    for f in range(len(imgs)):
        color_histogram = pcv.analyze_color(rgb_img=imgs[f],
                                            mask=kept_mask,
                                            hist_plot_type='rgb')

        # Access the green channel frequencies stored by analyze_color
        green_frequencies = pcv.outputs.observations['green_frequencies']['value']

        result = [output_path[f].split('_')[1], np.trapz(green_frequencies)]
        results.append(result)

    with open(save_file, "w", newline="") as fil:
        writer = csv.writer(fil)
        writer.writerows(results)
        sg.Popup('Finished Analysis! Please see the .csv file for results!')
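draw_plot() relies on module-level state that the excerpt does not show: the raw tray image, its dimensions, the preview size dim, and the csv, numpy, cv2, PySimpleGUI, and PlantCV imports. A sketch of that assumed setup (names and values are illustrative):

import csv
import numpy as np
import cv2 as cv
import PySimpleGUI as sg
from plantcv import plantcv as pcv

filename = "tray.png"                        # raw tray image selected by the user (assumed)
img, _, _ = pcv.readimage(filename=filename)
img_height, img_width = img.shape[:2]        # raw image dimensions used to rescale the selection
dim = (800, 600)                             # (width, height) of the resized preview the user clicked on (assumed)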
Code Example #4
def plantCVProcess(img, x, y, w, h):

    # Convert RGB to HSV and extract the saturation channel
    s = pcv.rgb2gray_hsv(rgb_img=img, channel='s')

    # Threshold the saturation image
    s_thresh = pcv.threshold.binary(gray_img=s, threshold=85, max_value=255, object_type='light')

    # Median Blur
    s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=5)
    s_cnt = pcv.median_blur(gray_img=s_thresh, ksize=5)

    # Convert RGB to LAB and extract the Blue channel
    b = pcv.rgb2gray_lab(rgb_img=img, channel='b')

    # Threshold the blue image
    b_thresh = pcv.threshold.binary(gray_img=b, threshold=160, max_value=255, object_type='light')
    b_cnt = pcv.threshold.binary(gray_img=b, threshold=160, max_value=255, object_type='light')

    # Fill small objects
    # b_fill = pcv.fill(b_thresh, 10)

    # Join the thresholded saturation and blue-yellow images
    bs = pcv.logical_or(bin_img1=s_mblur, bin_img2=b_cnt)

    # Apply Mask (for VIS images, mask_color=white)
    masked = pcv.apply_mask(img=img, mask=bs, mask_color='white')

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')

    # Threshold the green-magenta and blue images
    maskeda_thresh = pcv.threshold.binary(gray_img=masked_a, threshold=115, max_value=255, object_type='dark')
    maskeda_thresh1 = pcv.threshold.binary(gray_img=masked_a, threshold=135, max_value=255, object_type='light')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b, threshold=128, max_value=255, object_type='light')

    # Join the thresholded saturation and blue-yellow images (OR)
    ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
    ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)

    # Fill small objects
    ab_fill = pcv.fill(bin_img=ab, size=200)

    # Apply mask (for VIS images, mask_color=white)
    masked2 = pcv.apply_mask(img=masked, mask=ab_fill, mask_color='white')

    # Identify objects
    id_objects, obj_hierarchy = pcv.find_objects(img=masked2, mask=ab_fill)

    # Define ROI
    roi1, roi_hierarchy = pcv.roi.rectangle(img=masked2, x=x, y=y, h=h, w=w)

    # Decide which objects to keep
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img=img, roi_contour=roi1,
                                                               roi_hierarchy=roi_hierarchy,
                                                               object_contour=id_objects,
                                                               obj_hierarchy=obj_hierarchy,
                                                               roi_type='partial')

    # Object combine kept objects
    obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3)

    ############### Analysis ################

    # Find shape properties, output shape image (optional)
    shape_imgs = pcv.analyze_object(img=img, obj=obj, mask=mask)

    # Shape properties relative to user boundary line (optional)
    boundary_img1 = pcv.analyze_bound_horizontal(img=img, obj=obj, mask=mask, line_position=1680)

    # Determine color properties: Histograms, Color Slices, output color analyzed histogram (optional)
    color_histogram = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='all')

    # Pseudocolor the grayscale image
    pseudocolored_img = pcv.visualize.pseudocolor(gray_img=s, mask=mask, cmap='jet')

    return print_results()
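plantCVProcess() ends by calling a print_results() helper that is not part of the excerpt. Because each analysis step above stores its measurements in PlantCV's global Outputs class, a minimal stand-in (an assumption, not the project's actual helper) could simply hand those observations back:

def print_results():
    # Hypothetical stand-in: return the measurements PlantCV accumulated during the analysis calls above.
    return pcv.outputs.observations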
Code Example #5
def main():
    args = options()  #create options object for argument parsing
    device = 0  #set device
    params.debug = args.debug  #set debug

    outfile = False
    if args.writeimg:
        outfile = os.path.join(args.outdir, os.path.basename(args.image)[:-4])

    # In[114]:

    img, path, filename = pcv.readimage(filename=args.image,
                                        debug=args.debug)  #read in image
    background = pcv.transform.load_matrix(
        args.npz)  #read in background mask image for subtraction

    # In[115]:

    device, mask = pcv.naive_bayes_classifier(
        img, args.pdf, device, args.debug)  #naive bayes on image

    #if args.writeimg:
    #   pcv.print_image(img=mask["94,104,47"], filename=outfile + "_nb_mask.png")

    # In[116]:

    new_mask = pcv.image_subtract(mask["94,104,47"],
                                  background)  #subtract background noise

    # In[117]:

    #image blurring using scipy median filter
    blurred_img = ndimage.median_filter(new_mask, (7, 1))
    blurred_img = ndimage.median_filter(blurred_img, (1, 7))
    device, cleaned = pcv.fill(np.copy(blurred_img), np.copy(blurred_img), 50,
                               0, args.debug)  #fill leftover noise

    # In[118]:

    #dilate and erode to repair plant breaks from background subtraction
    device, cleaned_dilated = pcv.dilate(cleaned, 6, 1, 0)
    device, cleaned = pcv.erode(cleaned_dilated, 6, 1, 0, args.debug)

    # In[119]:

    device, objects, obj_hierarchy = pcv.find_objects(
        img, cleaned, device, debug=args.debug)  #find objects using mask
    if "TM015" in args.image:
        h = 1620
    elif "TM016" in args.image:
        h = 1555
    else:
        h = 1320
    roi_contour, roi_hierarchy = pcv.roi.rectangle(x=570,
                                                   y=0,
                                                   h=h,
                                                   w=1900 - 550,
                                                   img=img)  #grab ROI

    # In[120]:

    #isolate plant objects within ROI
    device, roi_objects, hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img,
        'partial',
        roi_contour,
        roi_hierarchy,
        objects,
        obj_hierarchy,
        device,
        debug=args.debug)

    #Analyze only images with plants present.
    if len(roi_objects) > 0:
        # In[121]:

        # Object combine kept objects
        device, plant_contour, plant_mask = pcv.object_composition(
            img=img,
            contours=roi_objects,
            hierarchy=hierarchy,
            device=device,
            debug=args.debug)

        if args.writeimg:
            pcv.print_image(img=plant_mask, filename=outfile + "_mask.png")

        # In[122]:

        # Find shape properties, output shape image (optional)
        device, shape_header, shape_data, shape_img = pcv.analyze_object(
            img=img,
            imgname=args.image,
            obj=plant_contour,
            mask=plant_mask,
            device=device,
            debug=args.debug,
            filename=outfile + ".png")

        # In[123]:

        if "TM015" in args.image:
            line_position = 380
        elif "TM016" in args.image:
            line_position = 440
        else:
            line_position = 690

        # Shape properties relative to user boundary line (optional)
        device, boundary_header, boundary_data, boundary_img = pcv.analyze_bound_horizontal(
            img=img,
            obj=plant_contour,
            mask=plant_mask,
            line_position=line_position,
            device=device,
            debug=args.debug,
            filename=outfile + ".png")

        # In[124]:

        # Determine color properties: Histograms, Color Slices and Pseudocolored Images,
        # output color analyzed images (optional)
        device, color_header, color_data, color_img = pcv.analyze_color(
            img=img,
            imgname=args.image,
            mask=plant_mask,
            bins=256,
            device=device,
            debug=args.debug,
            hist_plot_type=None,
            pseudo_channel="v",
            pseudo_bkg="img",
            resolution=300,
            filename=outfile + ".png")

        # In[55]:

        # Output shape and color data
        result = open(args.result, "a")
        result.write('\t'.join(map(str, shape_header)) + "\n")
        result.write('\t'.join(map(str, shape_data)) + "\n")
        for row in shape_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.write('\t'.join(map(str, color_header)) + "\n")
        result.write('\t'.join(map(str, color_data)) + "\n")
        result.write('\t'.join(map(str, boundary_header)) + "\n")
        result.write('\t'.join(map(str, boundary_data)) + "\n")
        result.write('\t'.join(map(str, boundary_img)) + "\n")
        for row in color_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.close()
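Example #5 mixes legacy device-based calls with the newer pcv.params / pcv.roi API and uses SciPy for median filtering. A sketch of the imports and aliases it assumes (the options() flags such as --npz, --pdf, and --result follow from how args is used above):

import os
import numpy as np
from scipy import ndimage
from plantcv import plantcv as pcv

params = pcv.params  # the script sets params.debug directly, so alias PlantCV's global Params object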
Code Example #6
def analyze(image_name):

    # Read image
    img, path, filename = pcv.readimage(filename=image_name)

    # Convert RGB to HSV and extract the saturation channel
    s = pcv.rgb2gray_hsv(rgb_img=img, channel='s')

    # Threshold the saturation image
    s_thresh = pcv.threshold.binary(gray_img=s, threshold=85, max_value=255, object_type='light')

    # Median Blur
    s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=5)
    s_cnt = pcv.median_blur(gray_img=s_thresh, ksize=5)

    # Convert RGB to LAB and extract the Blue channel
    b = pcv.rgb2gray_lab(rgb_img=img, channel='b')

    # Threshold the blue image
    b_thresh = pcv.threshold.binary(gray_img=b, threshold=160, max_value=255, object_type='light')
    b_cnt = pcv.threshold.binary(gray_img=b, threshold=160, max_value=255, object_type='light')

    # Fill small objects
    # b_fill = pcv.fill(b_thresh, 10)

    # Join the thresholded saturation and blue-yellow images
    bs = pcv.logical_or(bin_img1=s_mblur, bin_img2=b_cnt)

    # Apply Mask (for VIS images, mask_color=white)
    masked = pcv.apply_mask(rgb_img=img, mask=bs, mask_color='white')

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')

    # Threshold the green-magenta and blue images
    maskeda_thresh = pcv.threshold.binary(gray_img=masked_a, threshold=115, max_value=255, object_type='dark')
    maskeda_thresh1 = pcv.threshold.binary(gray_img=masked_a, threshold=135, max_value=255, object_type='light')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b, threshold=128, max_value=255, object_type='light')

    # Join the thresholded saturation and blue-yellow images (OR)
    ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
    ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)

    # Fill small objects
    ab_fill = pcv.fill(bin_img=ab, size=200)

    # Apply mask (for VIS images, mask_color=white)
    masked2 = pcv.apply_mask(rgb_img=masked, mask=ab_fill, mask_color='white')

    # Identify objects
    id_objects, obj_hierarchy = pcv.find_objects(img=masked2, mask=ab_fill)

    # Define ROI
    roi1, roi_hierarchy= pcv.roi.rectangle(img=masked2, x=0, y=500, h=2000, w=2000) 

    # Decide which objects to keep
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img=img, roi_contour=roi1,
                                                               roi_hierarchy=roi_hierarchy,
                                                               object_contour=id_objects,
                                                               obj_hierarchy=obj_hierarchy,
                                                               roi_type='partial')

    # Object combine kept objects
    obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3)

    ############### Analysis ################


    # Find shape properties
    shape_imgs = pcv.analyze_object(img=img, obj=obj, mask=mask)

    # Color
    color_histogram = pcv.analyze_color(rgb_img=img, mask=kept_mask)

    # Access data stored out from analyze_object
    plant_observations = pcv.outputs.observations

    return plant_observations
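The observations dictionary returned by analyze() can then be queried for individual traits. A brief usage sketch; the trait key follows the flat layout used elsewhere in these examples, and newer PlantCV releases nest observations under a sample label such as 'default':

obs = analyze("plant.jpg")                    # hypothetical input file
print("projected leaf area (px):", obs['area']['value'])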
Code Example #7
def main():
    # Create input arguments object
    args = options()

    # Set debug mode
    pcv.params.debug = args.debug

    # Open a single image
    img, imgpath, imgname = pcv.readimage(filename=args.image)

    # Visualize colorspaces
    all_cs = pcv.visualize.colorspaces(rgb_img=img)

    # Extract the Blue-Yellow ("b") channel from the LAB colorspace
    gray_img = pcv.rgb2gray_lab(rgb_img=img, channel="b")

    # Plot a histogram of pixel values for the Blue-Yellow ("b") channel.
    hist_plot = pcv.visualize.histogram(gray_img=gray_img)

    # Apply a binary threshold to the Blue-Yellow ("b") grayscale image.
    thresh_img = pcv.threshold.binary(gray_img=gray_img,
                                      threshold=140,
                                      max_value=255,
                                      object_type="light")

    # Apply a dilation with a 5x5 kernel and 3 iterations
    dil_img = pcv.dilate(gray_img=thresh_img, ksize=5, i=3)

    # Fill in small holes in the leaves
    closed_img = pcv.fill_holes(bin_img=dil_img)

    # Erode the plant pixels using a 5x5 kernel and 3 iterations
    er_img = pcv.erode(gray_img=closed_img, ksize=5, i=3)

    # Apply a Gaussian blur with a 5 x 5 kernel.
    blur_img = pcv.gaussian_blur(img=er_img, ksize=(5, 5))

    # Set pixel values less than 255 to 0
    blur_img[np.where(blur_img < 255)] = 0

    # Fill/remove objects less than 300 pixels in area
    cleaned = pcv.fill(bin_img=blur_img, size=300)

    # Create a circular ROI
    roi, roi_str = pcv.roi.circle(img=img, x=1725, y=1155, r=400)

    # Identify objects in the binary image
    cnts, cnts_str = pcv.find_objects(img=img, mask=cleaned)

    # Filter objects by region of interest
    plant_cnt, plant_str, plant_mask, plant_area = pcv.roi_objects(
        img=img,
        roi_contour=roi,
        roi_hierarchy=roi_str,
        object_contour=cnts,
        obj_hierarchy=cnts_str)

    # Combine objects into one
    plant, mask = pcv.object_composition(img=img,
                                         contours=plant_cnt,
                                         hierarchy=plant_str)

    # Measure size and shape properties
    shape_img = pcv.analyze_object(img=img, obj=plant, mask=mask)
    if args.writeimg:
        pcv.print_image(img=shape_img,
                        filename=os.path.join(args.outdir,
                                              "shapes_" + imgname))

    # Analyze color properties
    color_img = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type="hsv")
    if args.writeimg:
        pcv.print_image(img=color_img,
                        filename=os.path.join(args.outdir,
                                              "histogram_" + imgname))

    # Save the measurements to a file
    pcv.print_results(filename=args.result)
Code Example #8
def main():

    # Set variables
    args = options()
    pcv.params.debug = args.debug

    # Read and rotate image
    img, path, filename = pcv.readimage(filename=args.image)
    img = pcv.rotate(img, -90, False)

    # Create mask from LAB b channel
    l = pcv.rgb2gray_lab(rgb_img=img, channel='b')
    l_thresh = pcv.threshold.binary(gray_img=l,
                                    threshold=115,
                                    max_value=255,
                                    object_type='dark')
    l_mblur = pcv.median_blur(gray_img=l_thresh, ksize=5)

    # Apply mask to image
    masked = pcv.apply_mask(img=img, mask=l_mblur, mask_color='white')
    ab_fill = pcv.fill(bin_img=l_mblur, size=50)

    # Extract plant object from image
    id_objects, obj_hierarchy = pcv.find_objects(img=img, mask=ab_fill)
    roi1, roi_hierarchy = pcv.roi.rectangle(img=masked,
                                            x=150,
                                            y=270,
                                            h=100,
                                            w=100)
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img=img,
        roi_contour=roi1,
        roi_hierarchy=roi_hierarchy,
        object_contour=id_objects,
        obj_hierarchy=obj_hierarchy,
        roi_type='partial')
    obj, mask = pcv.object_composition(img=img,
                                       contours=roi_objects,
                                       hierarchy=hierarchy3)

    ############### Analysis ################

    # Analyze shape properties
    analysis_image = pcv.analyze_object(img=img, obj=obj, mask=mask)
    boundary_image2 = pcv.analyze_bound_horizontal(img=img,
                                                   obj=obj,
                                                   mask=mask,
                                                   line_position=370)

    # Analyze colour properties
    color_histogram = pcv.analyze_color(rgb_img=img,
                                        mask=kept_mask,
                                        hist_plot_type='all')

    # Analyze shape independent of size
    top_x, bottom_x, center_v_x = pcv.x_axis_pseudolandmarks(img=img,
                                                             obj=obj,
                                                             mask=mask)
    top_y, bottom_y, center_v_y = pcv.y_axis_pseudolandmarks(img=img,
                                                             obj=obj,
                                                             mask=mask)

    # Print results
    pcv.print_results(filename='{}'.format(args.result))
    pcv.print_image(img=color_histogram,
                    filename='{}_color_hist.jpg'.format(args.outdir))
    pcv.print_image(img=kept_mask, filename='{}_mask.jpg'.format(args.outdir))
Code Example #9
File: vis.py  Project: sureshacs17/firstapp
boundary_image2 = pcv.analyze_bound_horizontal(img=img,
                                               obj=obj,
                                               mask=mask,
                                               line_position=370)

# In[26]:

# Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)

# Inputs:
#   rgb_img - RGB image data
#   mask - Binary mask of selected contours
#   hist_plot_type - None (default), 'all', 'rgb', 'lab', or 'hsv'
#                    This is the data to be printed to the SVG histogram file
color_histogram = pcv.analyze_color(rgb_img=img,
                                    mask=kept_mask,
                                    hist_plot_type='all')
# Print the histogram out to save it
pcv.print_image(img=color_histogram, filename="upload/Results/color_hist.png")

# In[27]:

# Divide plant object into twenty equidistant bins and assign pseudolandmark points based upon their
# actual (not scaled) position. Once this data is scaled this approach may provide some information
# regarding shape independent of size.

# Inputs:
#   img - RGB or grayscale image data
#   obj - Single or grouped contour object
#   mask - Binary mask of selected contours
top_x, bottom_x, center_v_x = pcv.x_axis_pseudolandmarks(img=img,
                                                          obj=obj,
                                                          mask=mask)
Code Example #10
File: tray_vis.py  Project: leonardblaschek/plantcv
def main():
    # Get options
    args = options()

    # Set variables
    pcv.params.debug = args.debug  # Replace the hard-coded debug with the debug flag
    pcv.params.debug_outdir = args.outdir  # set output directory

    ### Main pipeline ###

    # Read image (readimage mode defaults to native but if image is RGBA then specify mode='rgb')
    img, path, filename = pcv.readimage(args.image, mode='rgb')

    # Read reference image for colour correction (currently unused)
    #ref_img, ref_path, ref_filename = pcv.readimage(
    #    "/home/leonard/Dropbox/2020-01_LAC_phenotyping/images/top/renamed/20200128_2.jpg",
    #    mode="rgb")

    # Find colour cards
    #df, start, space = pcv.transform.find_color_card(rgb_img=ref_img)
    #ref_mask = pcv.transform.create_color_card_mask(rgb_img=ref_img, radius=10, start_coord=start, spacing=space, ncols=4, nrows=6)

    df, start, space = pcv.transform.find_color_card(rgb_img=img)
    img_mask = pcv.transform.create_color_card_mask(rgb_img=img,
                                                    radius=10,
                                                    start_coord=start,
                                                    spacing=space,
                                                    ncols=4,
                                                    nrows=6)

    output_directory = "."

    # Correct colour (currently unused)
    #target_matrix, source_matrix, transformation_matrix, corrected_img = pcv.transform.correct_color(ref_img, ref_mask, img, img_mask, output_directory)

    # Check that the colour correction worked (source~target should be strictly linear)
    #pcv.transform.quick_color_check(source_matrix = source_matrix, target_matrix = target_matrix, num_chips = 24)

    # Write the spacing of the colour card to file as size marker
    with open(os.path.join(path, 'output/size_marker_trays.csv'), 'a') as f:
        writer = csv.writer(f)
        writer.writerow([filename, space[0]])

    ### Crop tray ###

    # Define a bounding rectangle around the colour card
    x_cc, y_cc, w_cc, h_cc = cv2.boundingRect(img_mask)
    x_cc = int(round(x_cc - 0.3 * w_cc))
    y_cc = int(round(y_cc - 0.3 * h_cc))
    h_cc = int(round(h_cc * 1.6))
    w_cc = int(round(w_cc * 1.6))

    # Crop out colour card
    start_point = (x_cc, y_cc)
    end_point = (x_cc + w_cc, y_cc + h_cc)
    colour = (0, 0, 0)
    thickness = -1
    card_crop_img = cv2.rectangle(img, start_point, end_point, colour,
                                  thickness)

    # Convert RGB to HSV and extract the value channel
    v = pcv.rgb2gray_hsv(card_crop_img, "v")

    # Threshold the value image
    v_thresh = pcv.threshold.binary(
        v, 100, 255, "light"
    )  # start threshold at 150 with bright corner-markers, 100 without

    # Fill out bright imperfections (siliques and other dirt on the background)
    v_thresh = pcv.fill(
        v_thresh, 100)  # fill at 500 with bright corner-markers, 100 without

    # Create bounding rectangle around the tray
    x, y, w, h = cv2.boundingRect(v_thresh)

    # Crop image to tray
    #crop_img = card_crop_img[y:y+h, x:x+int(w - (w * 0.03))] # crop extra 3% from right because of tray labels
    crop_img = card_crop_img[y:y + h, x:x + w]  # crop symmetrically

    # Save cropped image for quality control
    pcv.print_image(crop_img,
                    filename=path + "/output/" + "cropped" + filename + ".png")

    ### Threshold plants ###

    # Threshold the green-magenta, blue, and hue channels
    a_thresh, _ = pcv.threshold.custom_range(img=crop_img,
                                             lower_thresh=[0, 0, 0],
                                             upper_thresh=[255, 108, 255],
                                             channel='LAB')
    b_thresh, _ = pcv.threshold.custom_range(img=crop_img,
                                             lower_thresh=[0, 0, 135],
                                             upper_thresh=[255, 255, 255],
                                             channel='LAB')
    h_thresh, _ = pcv.threshold.custom_range(img=crop_img,
                                             lower_thresh=[35, 0, 0],
                                             upper_thresh=[70, 255, 255],
                                             channel='HSV')

    # Join the thresholds (AND)
    ab = pcv.logical_and(b_thresh, a_thresh)
    abh = pcv.logical_and(ab, h_thresh)

    # Fill small objects depending on expected plant size based on DPG (make sure to take the correct file suffix jpg/JPG/jpeg...)
    match = re.search("(\d+).(\d)\.jpg$", filename)

    if int(match.group(1)) < 10:
        abh_clean = pcv.fill(abh, 50)
        print("50")
    elif int(match.group(1)) < 15:
        abh_clean = pcv.fill(abh, 200)
        print("200")
    else:
        abh_clean = pcv.fill(abh, 500)
        print("500")

    # Dilate to close broken borders
    abh_dilated = pcv.dilate(abh_clean, 3, 1)

    # Close holes
    # abh_fill = pcv.fill_holes(abh_dilated) # silly -- removed
    abh_fill = abh_dilated

    # Apply mask (for VIS images, mask_color=white)
    masked = pcv.apply_mask(crop_img, abh_fill, "white")

    # Save masked image for quality control
    pcv.print_image(masked,
                    filename=path + "/output/" + "masked" + filename + ".png")

    ### Filter and group contours ###

    # Identify objects
    id_objects, obj_hierarchy = pcv.find_objects(crop_img, abh_fill)

    # Create bounding box with margins to avoid border artifacts
    roi_y = 0 + crop_img.shape[0] * 0.05
    roi_x = 0 + crop_img.shape[0] * 0.05
    roi_h = crop_img.shape[0] - (crop_img.shape[0] * 0.1)
    roi_w = crop_img.shape[1] - (crop_img.shape[0] * 0.1)
    roi_contour, roi_hierarchy = pcv.roi.rectangle(crop_img, roi_y, roi_x,
                                                   roi_h, roi_w)

    # Keep all objects in the bounding box
    roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img=crop_img,
        roi_type='partial',
        roi_contour=roi_contour,
        roi_hierarchy=roi_hierarchy,
        object_contour=id_objects,
        obj_hierarchy=obj_hierarchy)

    # Cluster the objects by plant
    clusters, contours, hierarchies = pcv.cluster_contours(
        crop_img, roi_objects, roi_obj_hierarchy, 3, 5)

    # Split image into single plants
    out = args.outdir
    #output_path, imgs, masks = pcv.cluster_contour_splitimg(crop_img,
    #                                                        clusters,
    #                                                        contours,
    #                                                        hierarchies,
    #                                                        out,
    #                                                        file = filename)

    ### Analysis ###

    # Approximate the position of the top left plant as grid start
    coord_y = int(
        round(((crop_img.shape[0] / 3) * 0.5) + (crop_img.shape[0] * 0.025)))
    coord_x = int(
        round(((crop_img.shape[1] / 5) * 0.5) + (crop_img.shape[1] * 0.025)))

    # Set the ROI spacing relative to image dimensions
    spc_y = int((round(crop_img.shape[0] - (crop_img.shape[0] * 0.05)) / 3))
    spc_x = int((round(crop_img.shape[1] - (crop_img.shape[1] * 0.05)) / 5))

    # Set the ROI radius relative to image width
    if int(match.group(1)) < 16:
        r = int(round(crop_img.shape[1] / 12.5))
    else:
        r = int(round(crop_img.shape[1] / 20))

    # Make a grid of ROIs at the expected positions of plants
    # This allows for gaps due to dead/not germinated plants, without messing up the plant numbering
    imgs, masks = pcv.roi.multi(img=crop_img,
                                nrows=3,
                                ncols=5,
                                coord=(coord_x, coord_y),
                                radius=r,
                                spacing=(spc_x, spc_y))

    # Loop through the ROIs in the grid
    for i in range(0, len(imgs)):
        # Find objects within the ROI
        filtered_contours, filtered_hierarchy, filtered_mask, filtered_area = pcv.roi_objects(
            img=crop_img,
            roi_type="partial",
            roi_contour=imgs[i],
            roi_hierarchy=masks[i],
            object_contour=id_objects,
            obj_hierarchy=obj_hierarchy)
        # Continue only if not empty
        if len(filtered_contours) > 0:
            # Combine objects within each ROI
            plant_contour, plant_mask = pcv.object_composition(
                img=crop_img,
                contours=filtered_contours,
                hierarchy=filtered_hierarchy)

            # Analyse the shape of each plant
            analysis_images = pcv.analyze_object(img=crop_img,
                                                 obj=plant_contour,
                                                 mask=plant_mask)

            pcv.print_image(analysis_images,
                            filename=path + "/output/" + filename + "_" +
                            str(i) + "_analysed.png")

            # Determine color properties
            color_images = pcv.analyze_color(crop_img, plant_mask, "hsv")

            # Watershed plant area to count leaves (computationally intensive, use when needed)
            #watershed_images = pcv.watershed_segmentation(crop_img, plant_mask, 15)

            # Print out a .json file with the analysis data for the plant
            pcv.outputs.save_results(filename=path + "/" + filename + "_" +
                                     str(i) + '.json')

            # Clear the measurements stored globally in the Outputs class
            pcv.outputs.clear()
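The tray pipeline above assumes a few standard-library and OpenCV imports, plus image file names that match the regex used to choose the fill size. A sketch of those assumptions:

import os
import re
import csv
import cv2
from plantcv import plantcv as pcv

# File names are expected to end in "<DPG>.<replicate>.jpg", e.g. "tray_12.3.jpg":
# group(1) = "12" (days post germination) selects the fill size and ROI radius;
# group(2) = "3" is captured but not used in this excerpt.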
Code Example #11
def root():
    				uploaded_file = st.file_uploader("Choose an image...", type="jpg")
    				if uploaded_file is not None:
    					inp = Image.open(uploaded_file)
    					inp.save('input.jpg')
    					img, path, filename = pcv.readimage(filename='input.jpg')
    					image = Image.open('input.jpg')
    					st.image(image, caption='Original Image',use_column_width=True)
                    # Convert RGB to HSV and extract the saturation channel
    # Inputs:
    #   rgb_image - RGB image data 
    #   channel - Split by 'h' (hue), 's' (saturation), or 'v' (value) channel
					
    					s = pcv.rgb2gray_hsv(rgb_img=img, channel='s')
    					pcv.print_image(s, "plant/rgbtohsv.png")
    					image = Image.open('plant/rgbtohsv.png')
    					st.image(image, caption='RGB to HSV', use_column_width=True)
    					s_thresh = pcv.threshold.binary(gray_img=s, threshold=85, max_value=255, object_type='light')
    					pcv.print_image(s_thresh, "plant/binary_threshold.png")
    					image = Image.open('plant/binary_threshold.png')
    					st.image(image, caption='Binary Threshold',use_column_width=True)
                   
    # Median Blur to clean noise 

    # Inputs: 
    #   gray_img - Grayscale image data 
    #   ksize - Kernel size (integer or tuple), (ksize, ksize) box if integer input,
    #           (n, m) box if tuple input 

    					s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=5)
    					pcv.print_image(s_mblur, "plant/Median_blur.png")
    					image = Image.open('plant/Median_blur.png')
    					st.image(image, caption='Median Blur',use_column_width=True)
                    
     # An alternative to using median_blur is gaussian_blur, which applies 
    # a gaussian blur filter to the image. Depending on the image, one 
    # technique may be more effective than others. 

    # Inputs:
    #   img - RGB or grayscale image data
    #   ksize - Tuple of kernel size
    #   sigma_x - Standard deviation in X direction; if 0 (default), 
    #            calculated from kernel size
    #   sigma_y - Standard deviation in Y direction; if sigmaY is 
    #            None (default), sigmaY is taken to equal sigmaX
                
    					gaussian_img = pcv.gaussian_blur(img=s_thresh, ksize=(5, 5), sigma_x=0, sigma_y=None)
    # Convert RGB to LAB and extract the blue channel ('b')

    # Input:
    #   rgb_img - RGB image data 
    #   channel- Split by 'l' (lightness), 'a' (green-magenta), or 'b' (blue-yellow) channel
    					b = pcv.rgb2gray_lab(rgb_img=img, channel='b')
    					b_thresh = pcv.threshold.binary(gray_img=b, threshold=160, max_value=255, 
                                object_type='light')
                     # Join the threshold saturation and blue-yellow images with a logical or operation 

    # Inputs: 
    #   bin_img1 - Binary image data to be compared to bin_img2
    #   bin_img2 - Binary image data to be compared to bin_img1

    
    					bs = pcv.logical_or(bin_img1=s_mblur, bin_img2=b_thresh)
    					pcv.print_image(bs, "plant/threshold comparison.png")
    					image = Image.open('plant/threshold comparison.png')
    					st.image(image, caption='Threshold Comparision',use_column_width=True)
                    
 # Apply Mask (for VIS images, mask_color='white')

    # Inputs:
    #   img - RGB or grayscale image data 
    #   mask - Binary mask image data 
    #   mask_color - 'white' or 'black' 
    
    					masked = pcv.apply_mask(img=img, mask=bs, mask_color='white')
    					pcv.print_image(masked, "plant/Apply_mask.png")
    					image = Image.open('plant/Apply_mask.png')
    					st.image(image, caption='Applied Mask',use_column_width=True)
                   # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    					masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    					masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')
                     # Threshold the green-magenta and blue images
    					maskeda_thresh = pcv.threshold.binary(gray_img=masked_a, threshold=115, max_value=255, object_type='dark')
    					maskeda_thresh1 = pcv.threshold.binary(gray_img=masked_a, threshold=135,max_value=255, object_type='light')
    					maskedb_thresh = pcv.threshold.binary(gray_img=masked_b, threshold=128, max_value=255, object_type='light')
    					pcv.print_image( maskeda_thresh, "plant/maskeda_thresh.png")
    					pcv.print_image(maskeda_thresh1, "plant/maskeda_thresh1.png")
    					pcv.print_image(maskedb_thresh, "plant/maskedb_thresh1.png")
  
    					image = Image.open('plant/maskeda_thresh.png')
    					st.image(image, caption='Threshold green-magneta and blue image',use_column_width=True)


   # Join the thresholded saturation and blue-yellow images (OR)
    					ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
    					ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)
        # Opening filters out bright noise from an image.

# Inputs:
#   gray_img - Grayscale or binary image data
#   kernel - Optional neighborhood, expressed as an array of 1's and 0's. If None (default),
#   uses cross-shaped structuring element.
    					opened_ab = pcv.opening(gray_img=ab)

# Depending on the situation it might be useful to use the 
# exclusive or (pcv.logical_xor) function. 

# Inputs: 
#   bin_img1 - Binary image data to be compared to bin_img2
#   bin_img2 - Binary image data to be compared to bin_img1
    					xor_img = pcv.logical_xor(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
# Fill small objects (reduce image noise) 

# Inputs: 
#   bin_img - Binary image data 
#   size - Minimum object area size in pixels (must be an integer), and smaller objects will be filled
    					ab_fill = pcv.fill(bin_img=ab, size=200)
# Closing filters out dark noise from an image.

# Inputs:
#   gray_img - Grayscale or binary image data
#   kernel - Optional neighborhood, expressed as an array of 1's and 0's. If None (default),
#   uses cross-shaped structuring element.
    					closed_ab = pcv.closing(gray_img=ab_fill)
# Apply mask (for VIS images, mask_color=white)
    					masked2 = pcv.apply_mask(img=masked, mask=ab_fill, mask_color='white')
# Identify objects
# Inputs: 
#   img - RGB or grayscale image data for plotting 
#   mask - Binary mask used for detecting contours 
    					id_objects, obj_hierarchy = pcv.find_objects(img=masked2, mask=ab_fill)
# Define the region of interest (ROI) 
# Inputs: 
#   img - RGB or grayscale image to plot the ROI on 
#   x - The x-coordinate of the upper left corner of the rectangle 
#   y - The y-coordinate of the upper left corner of the rectangle 
#   h - The height of the rectangle 
#   w - The width of the rectangle 
    					roi1, roi_hierarchy= pcv.roi.rectangle(img=masked2, x=50, y=50, h=100, w=100)
# Decide which objects to keep
# Inputs:
#    img            = img to display kept objects
#    roi_contour    = contour of roi, output from any ROI function
#    roi_hierarchy  = contour of roi, output from any ROI function
#    object_contour = contours of objects, output from pcv.find_objects function
#    obj_hierarchy  = hierarchy of objects, output from pcv.find_objects function
#    roi_type       = 'partial' (default, for partially inside the ROI), 'cutto', or 
#                     'largest' (keep only largest contour)
    					roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img=img, roi_contour=roi1, 
                                                               roi_hierarchy=roi_hierarchy, 
                                                               object_contour=id_objects, 
                                                               obj_hierarchy=obj_hierarchy,
                                                               roi_type='partial')
# Object combine kept objects
# Inputs:
#   img - RGB or grayscale image data for plotting 
#   contours - Contour list 
#   hierarchy - Contour hierarchy array 
    					obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3)
############### Analysis ################ 
# Find shape properties, data gets stored to an Outputs class automatically
# Inputs:
#   img - RGB or grayscale image data 
#   obj- Single or grouped contour object
#   mask - Binary image mask to use as mask for moments analysis 
    					analysis_image = pcv.analyze_object(img=img, obj=obj, mask=mask)
    					pcv.print_image(analysis_image, "plant/analysis_image.png")
    					image = Image.open('plant/analysis_image.png')
    					st.image(image, caption='Analysis_image',use_column_width=True)
# Shape properties relative to user boundary line (optional)
# Inputs:
#   img - RGB or grayscale image data 
#   obj - Single or grouped contour object 
#   mask - Binary mask of selected contours 
#   line_position - Position of boundary line (a value of 0 would draw a line 
#                   through the bottom of the image) 
    					boundary_image2 = pcv.analyze_bound_horizontal(img=img, obj=obj, mask=mask, 
                                               line_position=370)
    					pcv.print_image(boundary_image2, "plant/boundary_image2.png")
    					image = Image.open('plant/boundary_image2.png')
    					st.image(image, caption='Boundary Image',use_column_width=True)
# Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
# Inputs:
#   rgb_img - RGB image data
#   mask - Binary mask of selected contours 
#   hist_plot_type - None (default), 'all', 'rgb', 'lab', or 'hsv'
#                    This is the data to be printed to the SVG histogram file  
    					color_histogram = pcv.analyze_color(rgb_img=img, mask=kept_mask, hist_plot_type='all')
# Print the histogram out to save it 
    					pcv.print_image(img=color_histogram, filename="plant/vis_tutorial_color_hist.jpg")
    					image = Image.open('plant/vis_tutorial_color_hist.jpg')
    					st.image(image, caption='Color Histogram',use_column_width=True)
# Divide plant object into twenty equidistant bins and assign pseudolandmark points based upon their 
# actual (not scaled) position. Once this data is scaled this approach may provide some information 
# regarding shape independent of size.
# Inputs:
#   img - RGB or grayscale image data 
#   obj - Single or grouped contour object 
#   mask - Binary mask of selected contours 
    					top_x, bottom_x, center_v_x = pcv.x_axis_pseudolandmarks(img=img, obj=obj, mask=mask)
    					top_y, bottom_y, center_v_y = pcv.y_axis_pseudolandmarks(img=img, obj=obj, mask=mask)
# The print_results function will take the measurements stored when running any (or all) of these functions, format, 
# and print an output text file for data analysis. The Outputs class stores data whenever any of the following functions
# are run: analyze_bound_horizontal, analyze_bound_vertical, analyze_color, analyze_nir_intensity, analyze_object,
# fluor_fvfm, report_size_marker_area, watershed. If no functions have been run, it will print an empty text file 
    					pcv.print_results(filename='vis_tutorial_results.txt')
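The Streamlit page above assumes a few imports and a writable output folder that the excerpt does not show; a minimal sketch:

import streamlit as st
from PIL import Image
from plantcv import plantcv as pcv

# The page writes its intermediate images into a "plant/" directory,
# so that folder must exist before root() runs.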
Code Example #12
def main():
    # Get options
    args = options()

    pcv.params.debug = args.debug  # set debug mode
    pcv.params.debug_outdir = args.outdir  # set output directory

    # Read image
    img, path, filename = pcv.readimage(filename=args.image)

    # Convert RGB to HSV and extract the value channel
    s = pcv.rgb2gray_hsv(rgb_img=img, channel='v')

    # Threshold the value image, removing highs and lows, and join the results
    s_thresh_1 = pcv.threshold.binary(gray_img=s,
                                      threshold=10,
                                      max_value=255,
                                      object_type='light')
    s_thresh_2 = pcv.threshold.binary(gray_img=s,
                                      threshold=245,
                                      max_value=255,
                                      object_type='dark')
    s_thresh = pcv.logical_and(bin_img1=s_thresh_1, bin_img2=s_thresh_2)

    # Median Blur
    s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=5)

    # Convert RGB to LAB and extract the Blue channel
    b = pcv.rgb2gray_lab(rgb_img=img, channel='b')

    # Threshold the blue image
    b_cnt = pcv.threshold.binary(gray_img=b,
                                 threshold=128,
                                 max_value=255,
                                 object_type='light')

    # Fill small objects
    b_fill = pcv.fill(b_cnt, 10)

    # Join the thresholded saturation and blue-yellow images
    bs = pcv.logical_or(bin_img1=s_mblur, bin_img2=b_fill)

    # Apply Mask (for VIS images, mask_color=white)
    masked = pcv.apply_mask(rgb_img=img, mask=bs, mask_color='white')

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')

    # Threshold the green-magenta and blue images
    maskeda_thresh = pcv.threshold.binary(gray_img=masked_a,
                                          threshold=127,
                                          max_value=255,
                                          object_type='dark')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b,
                                          threshold=128,
                                          max_value=255,
                                          object_type='light')

    # Join the thresholded saturation and blue-yellow images (OR)
    ab = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)

    # Fill small objects
    ab_fill = pcv.fill(bin_img=ab, size=200)

    # Apply mask (for VIS images, mask_color=white)
    masked2 = pcv.apply_mask(rgb_img=masked, mask=ab_fill, mask_color='white')

    # Identify objects
    id_objects, obj_hierarchy = pcv.find_objects(img=masked2, mask=ab_fill)

    # Define ROI
    w = 1600
    h = 1200
    pot = 230  #340
    roi1, roi_hierarchy = pcv.roi.rectangle(img=masked2,
                                            x=(2472 - w) / 2,
                                            y=(3296 - h - pot),
                                            h=h,
                                            w=w)

    # Decide which objects to keep
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img=img,
        roi_contour=roi1,
        roi_hierarchy=roi_hierarchy,
        object_contour=id_objects,
        obj_hierarchy=obj_hierarchy,
        roi_type='partial')

    # Object combine kept objects
    obj, mask = pcv.object_composition(img=img,
                                       contours=roi_objects,
                                       hierarchy=hierarchy3)

    # Find shape properties, output shape image (optional)
    shape_imgs = pcv.analyze_object(img=img, obj=obj, mask=mask)

    if args.writeimg == True:
        pcv.print_image(img=shape_imgs,
                        filename="{}_shape.png".format(args.result[:-5]))
        pcv.print_image(img=masked2,
                        filename="{}_obj_on_img.png".format(args.result[:-5]))

    # Shape properties relative to user boundary line (optional)
    #boundary_img1 = pcv.analyze_bound_horizontal(img=img, obj=obj, mask=mask, line_position=1680)

    # Determine color properties: Histograms, Color Slices, output color analyzed histogram (optional)
    color_histogram = pcv.analyze_color(rgb_img=img,
                                        mask=kept_mask,
                                        hist_plot_type='all')

    # Pseudocolor the grayscale image
    pseudocolored_img = pcv.visualize.pseudocolor(gray_img=s,
                                                  mask=kept_mask,
                                                  cmap='jet')

    # Write shape and color data to results file
    pcv.print_results(filename=args.result)
Code Example #13
def main():
    # Get options
    args = options()

    pcv.params.debug = args.debug  # set debug mode
    pcv.params.debug_outdir = args.outdir  # set output directory

    # Read metadata
    with open(args.metadata, 'r', encoding='utf-8') as f:
        md = json.load(f)

    camera_label = md['camera_label']

    # Read image
    img, path, filename = pcv.readimage(filename=args.image)

    # Convert RGB to HSV and extract the value channel
    s = pcv.rgb2gray_hsv(rgb_img=img, channel='v')

    # Threshold the value image, removing highs and lows, and join the results
    s_thresh_1 = pcv.threshold.binary(gray_img=s,
                                      threshold=10,
                                      max_value=255,
                                      object_type='light')
    s_thresh_2 = pcv.threshold.binary(gray_img=s,
                                      threshold=245,
                                      max_value=255,
                                      object_type='dark')
    s_thresh = pcv.logical_and(bin_img1=s_thresh_1, bin_img2=s_thresh_2)

    # Median Blur
    s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=5)

    # Convert RGB to LAB and extract the Blue channel
    b = pcv.rgb2gray_lab(rgb_img=img, channel='b')

    # Threshold the blue image
    b_cnt = pcv.threshold.binary(gray_img=b,
                                 threshold=128,
                                 max_value=255,
                                 object_type='light')

    # Fill small objects
    b_fill = pcv.fill(b_cnt, 10)

    # Join the thresholded saturation and blue-yellow images
    bs = pcv.logical_or(bin_img1=s_mblur, bin_img2=b_fill)

    # Apply Mask (for VIS images, mask_color=white)
    masked = pcv.apply_mask(rgb_img=img, mask=bs, mask_color='white')

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    # Threshold the green-magenta and blue images

    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    maskeda_thresh = pcv.threshold.binary(gray_img=masked_a,
                                          threshold=127,
                                          max_value=255,
                                          object_type='dark')

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    # Threshold the green-magenta and blue images
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b,
                                          threshold=128,
                                          max_value=255,
                                          object_type='light')

    # Join the thresholded saturation and blue-yellow images (OR)
    ab = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)

    # Fill small objects
    ab_fill = pcv.fill(bin_img=ab, size=200)

    # Apply mask (for VIS images, mask_color=white)
    masked2 = pcv.apply_mask(rgb_img=masked, mask=ab_fill, mask_color='white')

    # Identify objects
    id_objects, obj_hierarchy = pcv.find_objects(img=masked2, mask=ab_fill)

    # Define ROI

    W = 2472
    H = 3296

    if "far" in camera_label:
        # SIDE FAR
        w = 1600
        h = 1200
        pot = 230  #340
        roi1, roi_hierarchy = pcv.roi.rectangle(img=masked2,
                                                x=(W - w) / 2,
                                                y=(H - h - pot),
                                                h=h,
                                                w=w)
    elif "lower" in camera_label:
        # SIDE LOWER
        w = 800
        h = 2400
        pot = 340
        roi1, roi_hierarchy = pcv.roi.rectangle(img=masked2,
                                                x=1000 - w / 2,
                                                y=(H - h - pot),
                                                h=h,
                                                w=w)
    elif "upper" in camera_label:
        # SIDE UPPER
        w = 600
        h = 800
        pot = 550
        roi1, roi_hierarchy = pcv.roi.rectangle(img=masked2,
                                                x=1400 - w / 2,
                                                y=(H - h - pot),
                                                h=h,
                                                w=w)
    elif "top" in camera_label:
        # TOP
        w = 450
        h = 450
        roi1, roi_hierarchy = pcv.roi.rectangle(img=masked2,
                                                x=(H - h) / 2,
                                                y=(W - w) / 2,
                                                h=h,
                                                w=w)

    # Decide which objects to keep
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img=img,
        roi_contour=roi1,
        roi_hierarchy=roi_hierarchy,
        object_contour=id_objects,
        obj_hierarchy=obj_hierarchy,
        roi_type='partial')

    # Object combine kept objects
    obj, mask = pcv.object_composition(img=img,
                                       contours=roi_objects,
                                       hierarchy=hierarchy3)

    #TODO: Update for plantCV metadata import
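    # Store each metadata field as an observation; purely numeric values are recorded
    # with an int datatype, everything else as str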
    for key in md.keys():
        if str(md[key]).isdigit():
            pcv.outputs.add_observation(variable=key,
                                        trait=key,
                                        method='',
                                        scale='',
                                        datatype=int,
                                        value=md[key],
                                        label='')
        else:
            pcv.outputs.add_observation(variable=key,
                                        trait=key,
                                        method='',
                                        scale='',
                                        datatype=str,
                                        value=md[key],
                                        label='')

    if obj is not None:

        # Find shape properties, output shape image (optional)
        shape_imgs = pcv.analyze_object(img=img, obj=obj, mask=mask)

        # Shape properties relative to user boundary line (optional)
        #boundary_img1 = pcv.analyze_bound_horizontal(img=img, obj=obj, mask=mask, line_position=1680)

        # Determine color properties: Histograms, Color Slices, output color analyzed histogram (optional)
        color_histogram = pcv.analyze_color(rgb_img=img,
                                            mask=kept_mask,
                                            hist_plot_type='all')

        # Pseudocolor the grayscale image
        #pseudocolored_img = pcv.visualize.pseudocolor(gray_img=s, mask=kept_mask, cmap='jet')

        #print(pcv.outputs.images)
        if args.writeimg:
            for idx, item in enumerate(pcv.outputs.images[0]):
                pcv.print_image(item,
                                "{}_{}.png".format(args.result[:-5], idx))

    # Write shape and color data to results file
    pcv.print_results(filename=args.result)
Code example #14
def main():
    # Get options
    args = options()

    pcv.params.debug = args.debug  # set debug mode
    pcv.params.debug_outdir = args.outdir  # set output directory

    # Read image
    img, path, filename = pcv.readimage(filename=args.image)

    # Convert RGB to HSV and extract the hue channel
    h = pcv.rgb2gray_hsv(rgb_img=img, channel='h')

    # Threshold the hue image
    h_thresh = pcv.threshold.binary(gray_img=h,
                                    threshold=85,
                                    max_value=255,
                                    object_type='dark')

    # Median Blur
    h_mblur = pcv.median_blur(gray_img=h_thresh, ksize=20)

    h_cnt = pcv.median_blur(gray_img=h_thresh, ksize=20)

    # Convert RGB to LAB and extract the blue-yellow channel
    b = pcv.rgb2gray_lab(rgb_img=img, channel='b')

    # Threshold the blue-yellow image
    b_thresh = pcv.threshold.binary(gray_img=b,
                                    threshold=160,
                                    max_value=255,
                                    object_type='light')
    b_cnt = pcv.threshold.binary(gray_img=b,
                                 threshold=160,
                                 max_value=255,
                                 object_type='light')

    # Fill small objects
    # b_fill = pcv.fill(b_thresh, 10)

    # Join the thresholded hue and blue-yellow images
    bs = pcv.logical_or(bin_img1=h_mblur, bin_img2=b_cnt)

    # Apply Mask (for VIS images, mask_color=white)
    masked = pcv.apply_mask(img=img, mask=bs, mask_color='white')

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')

    # Threshold the green-magenta and blue images
    maskeda_thresh = pcv.threshold.binary(gray_img=masked_a,
                                          threshold=115,
                                          max_value=255,
                                          object_type='dark')
    maskeda_thresh1 = pcv.threshold.binary(gray_img=masked_a,
                                           threshold=135,
                                           max_value=255,
                                           object_type='light')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b,
                                          threshold=128,
                                          max_value=255,
                                          object_type='light')

    # Join the thresholded green-magenta and blue-yellow images (OR)
    ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
    ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)

    # Fill small objects
    ab_fill = pcv.fill(bin_img=ab, size=200)

    # Apply mask (for VIS images, mask_color=white)
    masked2 = pcv.apply_mask(img=masked, mask=ab_fill, mask_color='white')

    # Identify objects
    id_objects, obj_hierarchy = pcv.find_objects(img=masked2, mask=ab_fill)

    # Define ROI
    roi1, roi_hierarchy = pcv.roi.rectangle(img=masked2,
                                            x=400,
                                            y=400,
                                            h=200,
                                            w=200)

    # Decide which objects to keep
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img=img,
        roi_contour=roi1,
        roi_hierarchy=roi_hierarchy,
        object_contour=id_objects,
        obj_hierarchy=obj_hierarchy,
        roi_type='partial')

    # Object combine kept objects
    obj, mask = pcv.object_composition(img=img,
                                       contours=roi_objects,
                                       hierarchy=hierarchy3)

    ############### Analysis ################

    outfile = False
    if args.writeimg:
        outfile = args.outdir + "/" + filename

    # Find shape properties, output shape image (optional)
    shape_imgs = pcv.analyze_object(img=img, obj=obj, mask=mask)

    # Shape properties relative to user boundary line (optional)
    boundary_img1 = pcv.analyze_bound_horizontal(img=img,
                                                 obj=obj,
                                                 mask=mask,
                                                 line_position=600)

    # Determine color properties: Histograms, Color Slices, output color analyzed histogram (optional)
    color_histogram = pcv.analyze_color(rgb_img=img,
                                        mask=kept_mask,
                                        hist_plot_type='all')

    # Pseudocolor the grayscale image
    pseudocolored_img = pcv.visualize.pseudocolor(gray_img=h,
                                                  mask=kept_mask,
                                                  cmap='jet')

    # Write shape and color data to results file
    pcv.print_results(filename=args.result)
Code example #15
def main():
    # create options object for argument parsing
    args = options()
    # set device
    device = 0
    # set debug
    pcv.params.debug = args.debug

    outfile = False
    if args.writeimg:
        outfile = os.path.join(args.outdir, os.path.basename(args.image)[:-4])

    # read in image
    img, path, filename = pcv.readimage(filename=args.image, debug=args.debug)

    # read in a background image for each zoom level
    config_file = open(args.bkg, 'r')
    config = json.load(config_file)
    config_file.close()
    if "z1500" in args.image:
        bkg_image = config["z1500"]
    elif "z2500" in args.image:
        bkg_image = config["z2500"]
    else:
        pcv.fatal_error("Image {0} has an unsupported zoom level.".format(args.image))

    bkg, bkg_path, bkg_filename = pcv.readimage(filename=bkg_image, debug=args.debug)
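    # Strategy: edges are detected in this matching empty-background image, closed and filled,
    # and later used to strip fixed-fixture edges (pot, carrier) out of the plant image's edge map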

    # Detect edges in the background image
    device, bkg_sat = pcv.rgb2gray_hsv(img=bkg, channel="s", device=device, debug=args.debug)
    device += 1
    bkg_edges = feature.canny(bkg_sat)
    if args.debug == "print":
        pcv.print_image(img=bkg_edges, filename=str(device) + '_background_edges.png')
    elif args.debug == "plot":
        pcv.plot_image(img=bkg_edges, cmap="gray")

    # Close background edge contours
    bkg_edges_closed = ndi.binary_closing(bkg_edges)
    device += 1
    if args.debug == "print":
        pcv.print_image(img=bkg_edges_closed, filename=str(device) + '_closed_background_edges.png')
    elif args.debug == "plot":
        pcv.plot_image(img=bkg_edges_closed, cmap="gray")

    # Fill in closed contours in background
    bkg_fill_contours = ndi.binary_fill_holes(bkg_edges_closed)
    device += 1
    if args.debug == "print":
        pcv.print_image(img=bkg_fill_contours, filename=str(device) + '_filled_background_edges.png')
    elif args.debug == "plot":
        pcv.plot_image(img=bkg_fill_contours, cmap="gray")

    # Naive Bayes image classification/segmentation
    device, mask = pcv.naive_bayes_classifier(img=img, pdf_file=args.pdf, device=device, debug=args.debug)

    # Do a light cleaning of the plant mask to remove small objects
    cleaned = morphology.remove_small_objects(mask["plant"].astype(bool), 2)
    device += 1
    if args.debug == "print":
        pcv.print_image(img=cleaned, filename=str(device) + '_cleaned_mask.png')
    elif args.debug == "plot":
        pcv.plot_image(img=cleaned, cmap="gray")

    # Convert the input image to a saturation channel grayscale image
    device, sat = pcv.rgb2gray_hsv(img=img, channel="s", device=device, debug=args.debug)

    # Detect edges in the saturation image
    edges = feature.canny(sat)
    device += 1
    if args.debug == "print":
        pcv.print_image(img=edges, filename=str(device) + '_plant_edges.png')
    elif args.debug == "plot":
        pcv.plot_image(img=edges, cmap="gray")

    # Combine pixels that are in both foreground edges and the filled background edges
    device, combined_bkg = pcv.logical_and(img1=edges.astype(np.uint8) * 255,
                                           img2=bkg_fill_contours.astype(np.uint8) * 255, device=device,
                                           debug=args.debug)

    # Remove background pixels from the foreground edges
    device += 1
    filtered = np.copy(edges)
    filtered[np.where(combined_bkg == 255)] = False
    if args.debug == "print":
        pcv.print_image(img=filtered, filename=str(device) + '_filtered_edges.png')
    elif args.debug == "plot":
        pcv.plot_image(img=filtered, cmap="gray")

    # Combine the cleaned naive Bayes mask and the filtered foreground edges
    device += 1
    combined = cleaned + filtered
    if args.debug == "print":
        pcv.print_image(img=combined, filename=str(device) + '_combined_foreground.png')
    elif args.debug == "plot":
        pcv.plot_image(img=combined, cmap="gray")

    # Close off broken edges and other incomplete contours
    device += 1
    closed_features = ndi.binary_closing(combined, structure=np.ones((3, 3)))
    if args.debug == "print":
        pcv.print_image(img=closed_features, filename=str(device) + '_closed_features.png')
    elif args.debug == "plot":
        pcv.plot_image(img=closed_features, cmap="gray")

    # Fill in holes in contours
    # device += 1
    # fill_contours = ndi.binary_fill_holes(closed_features)
    # if args.debug == "print":
    #     pcv.print_image(img=fill_contours, filename=str(device) + '_filled_contours.png')
    # elif args.debug == "plot":
    #     pcv.plot_image(img=fill_contours, cmap="gray")

    # Use median blur to break horizontal and vertical thin edges (pot edges)
    device += 1
    blurred_img = ndi.median_filter(closed_features.astype(np.uint8) * 255, (3, 1))
    blurred_img = ndi.median_filter(blurred_img, (1, 3))
    # Remove small objects left behind by blurring
    cleaned2 = morphology.remove_small_objects(blurred_img.astype(bool), 200)
    if args.debug == "print":
        pcv.print_image(img=cleaned2, filename=str(device) + '_cleaned_by_median_blur.png')
    elif args.debug == "plot":
        pcv.plot_image(img=cleaned2, cmap="gray")

    # Define region of interest based on camera zoom level for masking the naive Bayes classified image
    # if "z1500" in args.image:
    #     h = 1000
    # elif "z2500" in args.image:
    #     h = 1050
    # else:
    #     pcv.fatal_error("Image {0} has an unsupported zoom level.".format(args.image))
    # roi, roi_hierarchy = pcv.roi.rectangle(x=300, y=150, w=1850, h=h, img=img)

    # Mask the classified image to remove noisy areas prior to finding contours
    # side_mask = np.zeros(np.shape(img)[:2], dtype=np.uint8)
    # cv2.drawContours(side_mask, roi, -1, (255), -1)
    # device, masked_img = pcv.apply_mask(img=cv2.merge([mask["plant"], mask["plant"], mask["plant"]]), mask=side_mask,
    #                                     mask_color="black", device=device, debug=args.debug)
    # Convert the masked image back to grayscale
    # masked_img = masked_img[:, :, 0]
    # Close off contours at the base of the plant
    # if "z1500" in args.image:
    #     pt1 = (1100, 1118)
    #     pt2 = (1340, 1120)
    # elif "z2500" in args.image:
    #     pt1 = (1020, 1162)
    #     pt2 = (1390, 1166)
    # else:
    #     pcv.fatal_error("Image {0} has an unsupported zoom level.".format(args.image))
    # masked_img = cv2.rectangle(np.copy(masked_img), pt1, pt2, (255), -1)
    # closed_mask = ndi.binary_closing(masked_img.astype(bool), iterations=3)

    # Find objects in the masked naive Bayes mask
    # device, objects, obj_hierarchy = pcv.find_objects(img=img, mask=np.copy(masked_img), device=device,
    #                                                   debug=args.debug)
    # objects, obj_hierarchy = cv2.findContours(np.copy(closed_mask.astype(np.uint8) * 255), cv2.RETR_CCOMP,
    #                                           cv2.CHAIN_APPROX_NONE)[-2:]

    # Clean up the combined plant edges/mask image by removing filled in gaps/holes
    # device += 1
    # cleaned3 = np.copy(cleaned2)
    # cleaned3 = cleaned3.astype(np.uint8) * 255
    # # Loop over the contours from the naive Bayes mask
    # for c, contour in enumerate(objects):
    #     # Calculate the area of each contour
    #     # area = cv2.contourArea(contour)
    #     # If the contour is a hole (i.e. it has no children and it has a parent)
    #     # And it is not a small hole in a leaf that was not classified
    #     if obj_hierarchy[0][c][2] == -1 and obj_hierarchy[0][c][3] > -1:
    #         # Then fill in the contour (hole) black on the cleaned mask
    #         cv2.drawContours(cleaned3, objects, c, (0), -1, hierarchy=obj_hierarchy)
    # if args.debug == "print":
    #     pcv.print_image(img=cleaned3, filename=str(device) + '_gaps_removed.png')
    # elif args.debug == "plot":
    #     pcv.plot_image(img=cleaned3, cmap="gray")

    # Find contours using the cleaned mask
    device, contours, contour_hierarchy = pcv.find_objects(img=img, mask=np.copy(cleaned2.astype(np.uint8)),
                                                           device=device, debug=args.debug)

    # Define region of interest based on camera zoom level for contour filtering
    if "z1500" in args.image:
        h = 940
    elif "z2500" in args.image:
        h = 980
    else:
        pcv.fatal_error("Image {0} has an unsupported zoom level.".format(args.image))
    roi, roi_hierarchy = pcv.roi.rectangle(x=300, y=150, w=1850, h=h, img=img)

    # Filter contours in the region of interest
    device, roi_objects, hierarchy, kept_mask, obj_area = pcv.roi_objects(img=img, roi_type='partial', roi_contour=roi,
                                                                          roi_hierarchy=roi_hierarchy,
                                                                          object_contour=contours,
                                                                          obj_hierarchy=contour_hierarchy,
                                                                          device=device, debug=args.debug)

    # Analyze only images with plants present
    if len(roi_objects) > 0:
        # Object combine kept objects
        device, plant_contour, plant_mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy,
                                                                   device=device, debug=args.debug)

        if args.writeimg:
            # Save the plant mask if requested
            pcv.print_image(img=plant_mask, filename=outfile + "_mask.png")

        # Find shape properties, output shape image
        device, shape_header, shape_data, shape_img = pcv.analyze_object(img=img, imgname=args.image, obj=plant_contour,
                                                                         mask=plant_mask, device=device,
                                                                         debug=args.debug,
                                                                         filename=outfile)
        # Set the boundary line based on the camera zoom level
        if "z1500" in args.image:
            line_position = 930
        elif "z2500" in args.image:
            line_position = 885
        else:
            pcv.fatal_error("Image {0} has an unsupported zoom level.".format(args.image))

        # Shape properties relative to user boundary line
        device, boundary_header, boundary_data, boundary_img = pcv.analyze_bound_horizontal(img=img, obj=plant_contour,
                                                                                            mask=plant_mask,
                                                                                            line_position=line_position,
                                                                                            device=device,
                                                                                            debug=args.debug,
                                                                                            filename=outfile)

        # Determine color properties: Histograms, Color Slices and Pseudocolored Images,
        # output color analyzed images
        device, color_header, color_data, color_img = pcv.analyze_color(img=img, imgname=args.image, mask=plant_mask,
                                                                        bins=256,
                                                                        device=device, debug=args.debug,
                                                                        hist_plot_type=None,
                                                                        pseudo_channel="v", pseudo_bkg="img",
                                                                        resolution=300,
                                                                        filename=outfile)

        # Output shape and color data
        result = open(args.result, "a")
        result.write('\t'.join(map(str, shape_header)) + "\n")
        result.write('\t'.join(map(str, shape_data)) + "\n")
        for row in shape_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.write('\t'.join(map(str, color_header)) + "\n")
        result.write('\t'.join(map(str, color_data)) + "\n")
        result.write('\t'.join(map(str, boundary_header)) + "\n")
        result.write('\t'.join(map(str, boundary_data)) + "\n")
        result.write('\t'.join(map(str, boundary_img)) + "\n")
        for row in color_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.close()
Code example #16
def main():
    
    # Get options
    args = options()
    
    # Set variables
    pcv.params.debug = args.debug        # Replace the hard-coded debug with the debug flag
    img_file = args.image     # Replace the hard-coded input image with image flag

    ############### Image read-in ################

    # Read target image
    img, path, filename = pcv.readimage(filename = img_file, mode = "rgb")
    
    ############### Find scale and crop ################
    
    # find colour card in the image to be analysed
    df, start, space = pcv.transform.find_color_card(rgb_img = img)
    if int(start[0]) < 2000:
        img = imutils.rotate_bound(img, -90)
        rotated = 1
        df, start, space = pcv.transform.find_color_card(rgb_img = img)
    else:
        rotated = 0
    #if img.shape[0] > 6000:
    #    rotated = 1
    #else: rotated = 0
    img_mask = pcv.transform.create_color_card_mask(rgb_img = img, radius = 10, start_coord = start, spacing = space, ncols = 4, nrows = 6)
    
    # write the spacing of the colour card to file as size marker   
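    # space[0] is the detected chip spacing in pixels; combined with the card's known physical
    # chip spacing it can serve as a pixel-to-length scale factor downstream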
    with open(r'size_marker.csv', 'a') as f:
        writer = csv.writer(f)
        writer.writerow([filename, space[0]])

    # define a bounding rectangle around the colour card
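    # pad the card's bounding box by 30 % of its width/height on each side (160 % overall)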
    x_cc,y_cc,w_cc,h_cc = cv2.boundingRect(img_mask)
    x_cc = int(round(x_cc - 0.3 * w_cc))
    y_cc = int(round(y_cc - 0.3 * h_cc))
    h_cc = int(round(h_cc * 1.6))
    w_cc = int(round(w_cc * 1.6))

    # black out the colour card region so it is not segmented as plant material
    start_point = (x_cc, y_cc)
    end_point = (x_cc+w_cc, y_cc+h_cc)
    colour = (0, 0, 0)
    thickness = -1
    crop_img = cv2.rectangle(img, start_point, end_point, colour, thickness)
    
    ############### Fine segmentation ################
    
    # Threshold A and B channels of the LAB colourspace and the Hue channel of the HSV colourspace
    l_thresh, _ = pcv.threshold.custom_range(img=crop_img, lower_thresh=[70,0,0], upper_thresh=[255,255,255], channel='LAB')
    a_thresh, _ = pcv.threshold.custom_range(img=crop_img, lower_thresh=[0,0,0], upper_thresh=[255,145,255], channel='LAB')
    b_thresh, _ = pcv.threshold.custom_range(img=crop_img, lower_thresh=[0,0,123], upper_thresh=[255,255,255], channel='LAB')
    h_thresh_low, _ = pcv.threshold.custom_range(img=crop_img, lower_thresh=[0,0,0], upper_thresh=[130,255,255], channel='HSV')
    h_thresh_high, _ = pcv.threshold.custom_range(img=crop_img, lower_thresh=[150,0,0], upper_thresh=[255,255,255], channel='HSV')
    h_thresh = pcv.logical_or(h_thresh_low, h_thresh_high)

    # Join the thresholded images to keep only consensus pixels
    ab = pcv.logical_and(b_thresh, a_thresh)
    lab = pcv.logical_and(l_thresh, ab)
    labh = pcv.logical_and(lab, h_thresh)

    # Fill small objects
    labh_clean = pcv.fill(labh, 200)

    # Dilate to close broken borders
    #labh_dilated = pcv.dilate(labh_clean, 4, 1)
    labh_dilated = labh_clean

    # Apply mask (for VIS images, mask_color=white)
    masked = pcv.apply_mask(crop_img, labh_dilated, "white")

    # Identify objects
    contours, hierarchy = pcv.find_objects(crop_img, labh_dilated)

    # Define ROI

    if rotated == 1:
        roi_height = 3000
        roi_lwr_bound = y_cc + (h_cc * 0.5) - roi_height
        roi_contour, roi_hierarchy= pcv.roi.rectangle(x=1000, y=roi_lwr_bound, h=roi_height, w=2000, img=crop_img)
    else:
        roi_height = 1500
        roi_lwr_bound = y_cc + (h_cc * 0.5) - roi_height
        roi_contour, roi_hierarchy= pcv.roi.rectangle(x=2000, y=roi_lwr_bound, h=roi_height, w=2000, img=crop_img)

    # Decide which objects to keep
    filtered_contours, filtered_hierarchy, mask, area = pcv.roi_objects(img = crop_img,
                                                                roi_type = 'partial',
                                                                roi_contour = roi_contour,
                                                                roi_hierarchy = roi_hierarchy,
                                                                object_contour = contours,
                                                                obj_hierarchy = hierarchy)
    # Combine kept objects
    obj, mask = pcv.object_composition(crop_img, filtered_contours, filtered_hierarchy)

    ############### Analysis ################

    # Output paths are only defined when image writing is requested,
    # so guard every print_image call with the same flag
    if args.writeimg:
        outfile_black = args.outdir + "/" + filename + "_black"
        outfile_white = args.outdir + "/" + filename + "_white"
        outfile_analysed = args.outdir + "/" + filename + "_analysed"

    # analyse shape
    shape_img = pcv.analyze_object(crop_img, obj, mask)
    if args.writeimg:
        pcv.print_image(shape_img, outfile_analysed)

    # analyse colour
    colour_img = pcv.analyze_color(crop_img, mask, 'hsv')

    # keep the segmented plant for visualisation
    picture_mask = pcv.apply_mask(crop_img, mask, "black")
    if args.writeimg:
        pcv.print_image(picture_mask, outfile_black)

    picture_mask = pcv.apply_mask(crop_img, mask, "white")
    if args.writeimg:
        pcv.print_image(picture_mask, outfile_white)

    # print out results
    pcv.outputs.save_results(filename=args.result, outformat="json")
Code example #17
def main():

    # Get options
    args = options()

    pcv.params.debug = args.debug  # set debug mode
    pcv.params.debug_outdir = args.outdir  # set output directory

    # Read image (readimage mode defaults to native but if image is RGBA then specify mode='rgb')
    # Inputs:
    #   filename - Image file to be read in
    #   mode - Return mode of image; either 'native' (default), 'rgb', 'gray', or 'csv'
    img, path, filename = pcv.readimage(filename=args.image, mode='rgb')

    ### SELECTING THE PLANT

    ### Attempt 5: combine the filters
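    # The plant is isolated by chaining single-channel masks: hue, saturation and value (HSV)
    # and then green, red and blue (RGB) are thresholded in turn, re-masking the image after each step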
    # Parameters
    hue_lower_tresh = 22  # 24
    hue_higher_tresh = 50  # 50
    saturation_lower_tresh = 138  # 140
    saturation_higher_tresh = 230  # 230
    value_lower_tresh = 120  # 125
    value_higher_tresh = 255  # 255
    # RGB color space
    green_lower_tresh = 105  # 110
    green_higher_tresh = 255  # 255
    red_lower_tresh = 22  # 24
    red_higher_thresh = 98  # 98
    blue_lower_tresh = 85  # 85
    blue_higher_tresh = 253  # 255
    # CIELAB color space
    #lab_blue_lower_tresh = 0            # Blue yellow channel
    #lab_blue_higher_tresh = 255

    s = pcv.rgb2gray_hsv(rgb_img=img, channel='h')
    mask, masked_image = pcv.threshold.custom_range(
        rgb_img=s,
        lower_thresh=[hue_lower_tresh],
        upper_thresh=[hue_higher_tresh],
        channel='gray')
    masked = pcv.apply_mask(rgb_img=img, mask=mask, mask_color='white')
    # Filtered on Hue
    s = pcv.rgb2gray_hsv(rgb_img=masked, channel='s')
    mask, masked_image = pcv.threshold.custom_range(
        rgb_img=s,
        lower_thresh=[saturation_lower_tresh],
        upper_thresh=[saturation_higher_tresh],
        channel='gray')
    masked = pcv.apply_mask(rgb_img=masked, mask=mask, mask_color='white')
    #filtered on saturation
    s = pcv.rgb2gray_hsv(rgb_img=masked, channel='v')
    mask, masked_image = pcv.threshold.custom_range(
        rgb_img=s,
        lower_thresh=[value_lower_tresh],
        upper_thresh=[value_higher_tresh],
        channel='gray')
    masked = pcv.apply_mask(rgb_img=masked, mask=mask, mask_color='white')
    #filtered on value
    mask, masked = pcv.threshold.custom_range(
        rgb_img=masked,
        lower_thresh=[0, green_lower_tresh, 0],
        upper_thresh=[255, green_higher_tresh, 255],
        channel='RGB')
    masked = pcv.apply_mask(rgb_img=masked, mask=mask, mask_color='white')
    #filtered on green
    mask, masked = pcv.threshold.custom_range(
        rgb_img=masked,
        lower_thresh=[red_lower_tresh, 0, 0],
        upper_thresh=[red_higher_thresh, 255, 255],
        channel='RGB')
    masked = pcv.apply_mask(rgb_img=masked, mask=mask, mask_color='white')
    #filtered on red
    mask_old, masked_old = pcv.threshold.custom_range(
        rgb_img=masked,
        lower_thresh=[0, 0, blue_lower_tresh],
        upper_thresh=[255, 255, blue_higher_tresh],
        channel='RGB')
    masked = pcv.apply_mask(rgb_img=masked_old,
                            mask=mask_old,
                            mask_color='white')
    #filtered on blue
    #b = pcv.rgb2gray_lab(rgb_img = masked, channel = 'b')   # Converting toe CIElab blue_yellow image
    #b_thresh =pcv.threshold.binary(gray_img = b, threshold=lab_blue_lower_tresh, max_value = lab_blue_higher_tresh)

    ###_____________________________________ Now to identify objects
    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')

    # Threshold the green-magenta and blue images
    maskeda_thresh = pcv.threshold.binary(
        gray_img=masked_a,
        threshold=125,  # original 115
        max_value=255,
        object_type='dark')
    maskeda_thresh1 = pcv.threshold.binary(
        gray_img=masked_a,
        threshold=140,  # original 135
        max_value=255,
        object_type='light')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b,
                                          threshold=128,
                                          max_value=255,
                                          object_type='light')

    ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
    ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)

    # Fill small objects
    # Inputs:
    #   bin_img - Binary image data
    #   size - Minimum object area size in pixels (must be an integer), and smaller objects will be filled
    ab = pcv.median_blur(gray_img=ab, ksize=3)
    ab_fill = pcv.fill(bin_img=ab, size=1000)
    #print("filled")
    # Apply mask (for VIS images, mask_color=white)
    masked2 = pcv.apply_mask(rgb_img=masked, mask=ab_fill, mask_color='white')
    # ID the objects
    id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill)
    # Let's just take the largest
    roi1, roi_hierarchy = pcv.roi.rectangle(img=masked2,
                                            x=0,
                                            y=0,
                                            h=960,
                                            w=1280)  # Currently hardcoded

    # Decide which objects to keep
    # Inputs:
    #    img            = img to display kept objects
    #    roi_contour    = contour of roi, output from any ROI function
    #    roi_hierarchy  = contour of roi, output from any ROI function
    #    object_contour = contours of objects, output from pcv.find_objects function
    #    obj_hierarchy  = hierarchy of objects, output from pcv.find_objects function
    #    roi_type       = 'partial' (default, for partially inside), 'cutto', or
    #    'largest' (keep only largest contour)
    with HiddenPrints():
        roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
            img=img,
            roi_contour=roi1,
            roi_hierarchy=roi_hierarchy,
            object_contour=id_objects,
            obj_hierarchy=obj_hierarchy,
            roi_type='partial')
    # Object combine kept objects
    # Inputs:
    #   img - RGB or grayscale image data for plotting
    #   contours - Contour list
    #   hierarchy - Contour hierarchy array
    obj, mask = pcv.object_composition(img=img,
                                       contours=roi_objects,
                                       hierarchy=hierarchy3)
    #print("final plant")
    new_im = Image.fromarray(masked2)
    new_im.save("output//" + args.filename + "last_masked.png")

    ##################_________________ Analysis

    outfile = args.outdir + "/" + filename
    # Here come all the analyse functions.
    # pcv.acute_vertex(img, obj, 30, 15, 100)

    color_img = pcv.analyze_color(rgb_img=img,
                                  mask=kept_mask,
                                  hist_plot_type=None)
    #new_im = Image.fromarray(color_img)
    #new_im.save(args.filename + "color_img.png")

    # Find shape properties, output shape image (optional)

    # Inputs:
    #   img - RGB or grayscale image data
    #   obj- Single or grouped contour object
    #   mask - Binary image mask to use as mask for moments analysis
    shape_img = pcv.analyze_object(img=img, obj=obj, mask=mask)
    new_im = Image.fromarray(shape_img)
    new_im.save("output//" + args.filename + "shape_img.png")
    # Shape properties relative to user boundary line (optional)

    # Inputs:
    #   img - RGB or grayscale image data
    #   obj - Single or grouped contour object
    #   mask - Binary mask of selected contours
    #   line_position - Position of boundary line (a value of 0 would draw a line
    #                   through the bottom of the image)
    boundary_img1 = pcv.analyze_bound_horizontal(img=img,
                                                 obj=obj,
                                                 mask=mask,
                                                 line_position=1680)
    new_im = Image.fromarray(boundary_img1)
    new_im.save("output//" + args.filename + "boundary_img.png")
    # Determine color properties: Histograms, Color Slices, output color analyzed histogram (optional)

    # Inputs:
    #   rgb_img - RGB image data
    #   mask - Binary mask of selected contours
    #   hist_plot_type - None (default), 'all', 'rgb', 'lab', or 'hsv'
    #                    This is the data to be printed to the SVG histogram file
    color_histogram = pcv.analyze_color(rgb_img=img,
                                        mask=kept_mask,
                                        hist_plot_type='all')
    #new_im = Image.fromarray(color_histogram)
    #new_im.save(args.filename + "color_histogram_img.png")

    # Pseudocolor the grayscale image

    # Inputs:
    #     gray_img - Grayscale image data
    #     obj - Single or grouped contour object (optional), if provided the pseudocolored image gets
    #           cropped down to the region of interest.
    #     mask - Binary mask (optional)
    #     background - Background color/type. Options are "image" (gray_img, default), "white", or "black". A mask
    #                  must be supplied.
    #     cmap - Colormap
    #     min_value - Minimum value for range of interest
    #     max_value - Maximum value for range of interest
    #     dpi - Dots per inch for image if printed out (optional, if dpi=None then the default is set to 100 dpi).
    #     axes - If False then the title, x-axis, and y-axis won't be displayed (default axes=True).
    #     colorbar - If False then the colorbar won't be displayed (default colorbar=True)
    pseudocolored_img = pcv.visualize.pseudocolor(gray_img=s,
                                                  mask=kept_mask,
                                                  cmap='jet')
    #new_im = Image.fromarray(pseudocolored_img)
    #new_im.save(args.filename + "pseudocolored.png")

    # Write shape and color data to results file
    pcv.print_results(filename=args.result)
Code example #18
def iterate_rois(img, c, h, rc, rh, args, masked=True, gi=False, shape=False, hist=True, hue=False):
    """Analyze each ROI separately and store results

    Parameters
    ----------
    img : ndarray
        rgb image
    c : list
        object contours
    h : list
        object contour hierarchy
    rc : list
        roi contours
    rh : list
        roi contour hierarchy
    args : dict
        commandline arguments and metadata from running the workflow
    masked : boolean
        whether to print masked rgb images for each roi
    gi : boolean
        whether to print greenness index false color
    shape : boolean
        whether to print object shapes on an image
    hist : boolean
        whether to print color histogram
    hue : boolean
        whether to save hsv color info and print the hue false color image

    Returns
    -------
        binary image of plant mask that includes both threshold and roi filter steps : ndarray

    """

    final_mask = np.zeros(shape=np.shape(img)[0:2], dtype='uint8')

    # Compute greenness
    if gi:
        img_gi = cppc.compute.greenness_index(img=img, mask=final_mask+1)

    if hue:
        img_h = pcv.rgb2gray_hsv(img, 'h')

    for i, rc_i in enumerate(rc):
        rh_i = rh[i]

        # Add ROI number to output. Before roi_objects so result has NA if no object.
        pcv.outputs.add_observation(
            sample='default',
            variable='roi',
            trait='roi',
            method='roi',
            scale='int',
            datatype=int,
            value=i,
            label='#')

        roi_obj, hierarchy_obj, submask, obj_area = pcv.roi_objects(
            img, roi_contour=rc_i, roi_hierarchy=rh_i, object_contour=c, obj_hierarchy=h, roi_type='partial')

        if obj_area == 0:

            print('\t!!! No object found in ROI', str(i))
            pcv.outputs.add_observation(
                sample='default',
                variable='plantarea',
                trait='plant area in sq mm',
                method='observations.area*pixelresolution^2',
                scale=cppc.pixelresolution,
                datatype="<class 'float'>",
                value=0,
                label='sq mm')

        else:

            # Combine multiple plant objects within an ROI into a single object
            plant_object, plant_mask = pcv.object_composition(
                img=img, contours=roi_obj, hierarchy=hierarchy_obj)

            final_mask = pcv.image_add(final_mask, plant_mask)

            if gi:
                # Save greenness for individual ROI
                grnindex = cppc.utils.mean(img_gi, plant_mask)
                grnindexstd = cppc.utils.std(img_gi, plant_mask)
                pcv.outputs.add_observation(
                    sample='default',
                    variable='greenness_index',
                    trait='mean normalized greenness index',
                    method='g/sum(b+g+r)',
                    scale='[0,1]',
                    datatype="<class 'float'>",
                    value=float(grnindex),
                    label='/1')

                pcv.outputs.add_observation(
                    sample='default',
                    variable='greenness_index_std',
                    trait='std normalized greenness index',
                    method='g/sum(b+g+r)',
                    scale='[0,1]',
                    datatype="<class 'float'>",
                    value=float(grnindexstd),
                    label='/1')

            # Analyze all colors
            if hist:
                colorhist = pcv.analyze_color(img, plant_mask, 'all')
            elif hue:
                _ = pcv.analyze_color(img, plant_mask, 'hsv')

            # Analyze the shape of the current plant (always do this even if shape is False so you can get plant_area)
            img_shape = pcv.analyze_object(img, plant_object, plant_mask)
            plant_area = pcv.outputs.observations['default']['area']['value'] * cppc.pixelresolution**2
            pcv.outputs.add_observation(
                sample='default',
                variable='plantarea',
                trait='plant area in sq mm',
                method='observations.area*pixelresolution^2',
                scale=cppc.pixelresolution,
                datatype="<class 'float'>",
                value=plant_area,
                label='sq mm')
        # end if-else

        # At this point we have observations for one plant
        # We can write these out to a unique results file
        write_output(args, i)

        if args.writeimg and obj_area != 0:
            if shape:
                imgdir = os.path.join(args.outdir, 'shape_images', args.plantbarcode)
                os.makedirs(imgdir, exist_ok=True)
                pcv.print_image(img_shape, os.path.join(imgdir, args.imagename + '-roi' + str(i) + '-shape.png'))

            if hist:
                imgdir = os.path.join(args.outdir, 'colorhist_images', args.plantbarcode)
                os.makedirs(imgdir, exist_ok=True)
                pcv.print_image(colorhist, os.path.join(imgdir, args.imagename + '-roi' + str(i) + '-colorhist.png'))

            if masked:
                # save masked rgb image for entire tray but only 1 plant
                imgdir = os.path.join(args.outdir, 'maskedrgb_images')
                os.makedirs(imgdir, exist_ok=True)
                img_masked = pcv.apply_mask(img, plant_mask, 'black')
                pcv.print_image(
                    img_masked,
                    os.path.join(imgdir,
                                 args.imagename + '-roi' + str(i) + '-masked.png'))

            if hue:
                # save hue false color image for entire tray but only 1 plant
                imgdir = os.path.join(args.outdir, 'hue_images')
                os.makedirs(imgdir, exist_ok=True)
                fig_hue = pcv.visualize.pseudocolor(img_h*2, obj=None,
                                                    mask=plant_mask,
                                                    cmap=cppc.viz.get_cmap('hue'),
                                                    axes=False,
                                                    min_value=0, max_value=179,
                                                    background='black', obj_padding=0)
                fig_hue = cppc.viz.add_scalebar(fig_hue,
                                    pixelresolution=cppc.pixelresolution,
                                    barwidth=10,
                                    barlabel='1 cm',
                                    barlocation='lower left')
                fig_hue.set_size_inches(6, 6, forward=False)
                fig_hue.savefig(os.path.join(imgdir, args.imagename + '-roi' + str(i) + '-hue.png'),
                                bbox_inches='tight',
                                dpi=300)
                fig_hue.clf()

            if gi:
                # save grnness image of entire tray but only 1 plant
                imgdir = os.path.join(args.outdir, 'grnindex_images')
                os.makedirs(imgdir, exist_ok=True)
                fig_gi = pcv.visualize.pseudocolor(img_gi,
                                                   obj=None,
                                                   mask=plant_mask,
                                                   cmap='viridis',
                                                   axes=False,
                                                   min_value=0.3,
                                                   max_value=0.6,
                                                   background='black',
                                                   obj_padding=0)
                fig_gi = cppc.viz.add_scalebar(
                    fig_gi,
                    pixelresolution=cppc.pixelresolution,
                    barwidth=10,
                    barlabel='1 cm',
                    barlocation='lower left')
                fig_gi.set_size_inches(6, 6, forward=False)
                fig_gi.savefig(os.path.join(
                    imgdir,
                    args.imagename + '-roi' + str(i) + '-greenness.png'),
                               bbox_inches='tight',
                               dpi=300)
                fig_gi.clf()

        # end roi loop
    return final_mask
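
# A minimal sketch (not part of the original workflow) of how iterate_rois() above might be
# driven. It assumes PlantCV 3.x, that the cppc helpers and write_output() referenced inside
# iterate_rois are importable, and that the args namespace carries the fields the function
# reads (writeimg, outdir, plantbarcode, imagename, ...); the threshold value and ROI
# coordinates below are placeholders, not values from the original pipeline.
def run_iterate_rois_example(image_path):
    import argparse
    from plantcv import plantcv as pcv

    args = argparse.Namespace(writeimg=False, outdir='.', plantbarcode='A1',
                              imagename='example', result='example.json')

    # Read the image and build a simple saturation-based plant mask
    img, _, _ = pcv.readimage(filename=image_path)
    s = pcv.rgb2gray_hsv(rgb_img=img, channel='s')
    thresh = pcv.threshold.binary(gray_img=s, threshold=100, max_value=255, object_type='light')
    clean = pcv.fill(bin_img=thresh, size=200)

    # Object contours plus one circular ROI per expected plant position
    c, h = pcv.find_objects(img=img, mask=clean)
    rc, rh = pcv.roi.multi(img=img, coord=[(1300, 900), (1300, 2400)], radius=350)

    # Analyze each ROI and return the combined plant mask
    return iterate_rois(img, c, h, rc, rh, args, masked=False, gi=False,
                        shape=False, hist=False, hue=False)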
Code example #19
def main():
    # Get options
    args = options()

    debug = args.debug

    # Read image
    img, path, filename = pcv.readimage(args.image)

    # Pipeline step
    device = 0

    device, img1 = pcv.white_balance(device, img, debug, roi=(1000, 1000, 500, 500))

    device, a = pcv.rgb2gray_lab(img1, 'a', device, debug)

    device, img_binary = pcv.binary_threshold(a, 116, 255, 'dark', device, debug)

    mask = np.copy(img_binary)
    device, fill_image = pcv.fill(img_binary, mask, 300, device, debug)

    device, id_objects, obj_hierarchy = pcv.find_objects(img1, fill_image, device, debug)

    device, roi, roi_hierarchy = pcv.define_roi(img1, 'rectangle', device, None, 'default', debug, True,
                                                1800, 1600, -1500, -500)

    device, roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(img1, 'partial', roi, roi_hierarchy,
                                                                                  id_objects, obj_hierarchy, device,
                                                                                  debug)

    outfile = os.path.join(args.outdir, filename)

    device, color_header, color_data, color_img = pcv.analyze_color(img1, img1, kept_mask, 256, device, debug, None,
                                                                    'v', 'img', 300, outfile)

    device, masked = pcv.apply_mask(img1, kept_mask, 'white', device, debug)
    device, dilated = pcv.dilate(kept_mask, 10, 2, device, debug)
    device, plant_objects, plant_hierarchy = pcv.find_objects(img1, dilated, device, debug)

    img_copy = np.copy(img1)

    color = [(255, 0, 255), (0, 255, 0), (66, 134, 244), (255, 255, 0)]

    for i in range(0, len(plant_objects)):
        if len(plant_objects[i]) < 100:
            pass
        else:
            background = np.zeros((np.shape(img1)), np.uint8)
            cv2.drawContours(background, plant_objects, i, (255, 255, 255), -1, lineType=8, hierarchy=plant_hierarchy)
            device, grayimg = pcv.rgb2gray(background, device, debug)
            device, masked1 = pcv.apply_mask(masked, grayimg, 'white', device, debug)
            device, a1 = pcv.rgb2gray_lab(masked1, 'a', device, debug)
            device, img_binary1 = pcv.binary_threshold(a1, 116, 255, 'dark', device, debug)
            device, single_object, single_hierarchy = pcv.find_objects(masked1, img_binary1, device, debug)
            device, obj, mask = pcv.object_composition(img1, single_object, single_hierarchy, device, debug)
            device, shape_header, shape_data, shape_img = pcv.analyze_object(img, "img", obj, mask, device, debug)
            # Cycle through the colour list so more than four kept objects do not raise an IndexError
            cv2.drawContours(img_copy, plant_objects, i, color[i % len(color)], -1, lineType=8, hierarchy=plant_hierarchy)
            plantsize = "Plant matching this color is " + str(shape_data[1]) + " pixels large"
            cv2.putText(img_copy, plantsize, (500, (i + 1) * 300), cv2.FONT_HERSHEY_SIMPLEX, 5, color[i % len(color)], 10)

    pcv.print_image(img_copy, os.path.join(args.outdir, "arabidopsis-out_shapes.jpg"))
Code example #20
def main():
    # Get options
    args = options()

    # Set variables
    device = 0
    pcv.params.debug = args.debug
    img_file = args.image

    # Read image
    img, path, filename = pcv.readimage(filename=img_file, mode='rgb')

    # Process saturation channel from HSV colour space
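    # Sharpen the channel by subtracting its Laplacian response, equalize the histogram,
    # then threshold and median-blur the result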
    s = pcv.rgb2gray_hsv(rgb_img=img, channel='s')
    lp_s = pcv.laplace_filter(s, 1, 1)
    shrp_s = pcv.image_subtract(s, lp_s)
    s_eq = pcv.hist_equalization(shrp_s)
    s_thresh = pcv.threshold.binary(gray_img=s_eq,
                                    threshold=215,
                                    max_value=255,
                                    object_type='light')
    s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=5)

    # Process green-magenta channel from LAB colour space
    b = pcv.rgb2gray_lab(rgb_img=img, channel='a')
    b_lp = pcv.laplace_filter(b, 1, 1)
    b_shrp = pcv.image_subtract(b, b_lp)
    b_thresh = pcv.threshold.otsu(b_shrp, 255, object_type='dark')

    # Create and apply mask
    bs = pcv.logical_or(bin_img1=s_mblur, bin_img2=b_thresh)
    filled = pcv.fill_holes(bs)
    masked = pcv.apply_mask(img=img, mask=filled, mask_color='white')

    # Extract colour channels from masked image
    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')

    # Threshold the green-magenta and blue images
    maskeda_thresh = pcv.threshold.binary(gray_img=masked_a,
                                          threshold=115,
                                          max_value=255,
                                          object_type='dark')
    maskeda_thresh1 = pcv.threshold.binary(gray_img=masked_a,
                                           threshold=140,
                                           max_value=255,
                                           object_type='light')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b,
                                          threshold=128,
                                          max_value=255,
                                          object_type='light')

    # Join the thresholded green-magenta and blue-yellow images (OR)
    ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
    ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)

    # Produce and apply a mask
    opened_ab = pcv.opening(gray_img=ab)
    ab_fill = pcv.fill(bin_img=ab, size=200)
    closed_ab = pcv.closing(gray_img=ab_fill)
    masked2 = pcv.apply_mask(img=masked, mask=bs, mask_color='white')

    # Identify objects
    id_objects, obj_hierarchy = pcv.find_objects(img=masked2, mask=ab_fill)

    # Define region of interest (ROI)
    roi1, roi_hierarchy = pcv.roi.rectangle(img=masked2,
                                            x=250,
                                            y=100,
                                            h=200,
                                            w=200)

    # Decide what objects to keep
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img=img,
        roi_contour=roi1,
        roi_hierarchy=roi_hierarchy,
        object_contour=id_objects,
        obj_hierarchy=obj_hierarchy,
        roi_type='partial')

    # Object combine kept objects
    obj, mask = pcv.object_composition(img=img,
                                       contours=roi_objects,
                                       hierarchy=hierarchy3)

    ############### Analysis ################

    outfile = False
    if args.writeimg:
        outfile = args.outdir + "/" + filename

    # Analyze the plant
    analysis_image = pcv.analyze_object(img=img, obj=obj, mask=mask)
    color_histogram = pcv.analyze_color(rgb_img=img,
                                        mask=kept_mask,
                                        hist_plot_type='all')
    top_x, bottom_x, center_v_x = pcv.x_axis_pseudolandmarks(img=img,
                                                             obj=obj,
                                                             mask=mask)
    top_y, bottom_y, center_v_y = pcv.y_axis_pseudolandmarks(img=img,
                                                             obj=obj,
                                                             mask=mask)

    # Print results of the analysis
    pcv.print_results(filename=args.result)
    pcv.output_mask(img,
                    kept_mask,
                    filename,
                    outdir=args.outdir,
                    mask_only=True)
Code example #21
def main():
    # Get options
    args = options()

    if args.debug:
        pcv.params.debug = args.debug  # set debug mode
        if args.debugdir:
            pcv.params.debug_outdir = args.debugdir  # set debug directory
            os.makedirs(args.debugdir, exist_ok=True)

    # pixel_resolution
    # mm
    # see pixel_resolution.xlsx for calibration curve for pixel to mm translation
    pixelresolution = 0.052
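    # e.g. an object covering 10,000 px corresponds to roughly 10,000 * 0.052**2 ≈ 27 sq mm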

    # The result file should exist if plantcv-workflow.py was run
    if os.path.exists(args.result):
        # Open the result file
        results = open(args.result, "r")
        # The result file would have image metadata in it from plantcv-workflow.py, read it into memory
        metadata = json.load(results)
        # Close the file
        results.close()
        # Delete the file, we will create new ones
        os.remove(args.result)
        plantbarcode = metadata['metadata']['plantbarcode']['value']
        print(plantbarcode,
              metadata['metadata']['timestamp']['value'],
              sep=' - ')

    else:
        # If the file did not exist (for testing), initialize metadata as an empty dict
        metadata = {}
        regpat = re.compile(args.regex)
        plantbarcode = re.search(regpat, args.image).groups()[0]

    # read images and create mask
    img, _, fn = pcv.readimage(args.image)
    imagename = os.path.splitext(fn)[0]

    # create mask

    # taf=filters.try_all_threshold(s_img)
    ## remove background
    s_img = pcv.rgb2gray_hsv(img, 's')
    min_s = filters.threshold_minimum(s_img)
    thresh_s = pcv.threshold.binary(s_img, min_s, 255, 'light')
    rm_bkgrd = pcv.fill_holes(thresh_s)

    ## low greenness
    thresh_s = pcv.threshold.binary(s_img, min_s + 15, 255, 'dark')
    # taf = filters.try_all_threshold(s_img)
    c = pcv.logical_xor(rm_bkgrd, thresh_s)
    cinv = pcv.invert(c)
    cinv_f = pcv.fill(cinv, 500)
    cinv_f_c = pcv.closing(cinv_f, np.ones((5, 5)))
    cinv_f_c_e = pcv.erode(cinv_f_c, 2, 1)

    ## high greenness
    a_img = pcv.rgb2gray_lab(img, channel='a')
    # taf = filters.try_all_threshold(a_img)
    t_a = filters.threshold_isodata(a_img)
    thresh_a = pcv.threshold.binary(a_img, t_a, 255, 'dark')
    thresh_a = pcv.closing(thresh_a, np.ones((5, 5)))
    thresh_a_f = pcv.fill(thresh_a, 500)
    ## combined mask
    lor = pcv.logical_or(cinv_f_c_e, thresh_a_f)
    close = pcv.closing(lor, np.ones((2, 2)))
    fill = pcv.fill(close, 800)
    erode = pcv.erode(fill, 3, 1)
    fill2 = pcv.fill(erode, 1200)
    # dilate = pcv.dilate(fill2,2,2)
    mask = fill2

    final_mask = np.zeros_like(mask)

    # Compute greenness
    # split color channels
    b, g, r = cv2.split(img)
    # print green intensity
    # g_img = pcv.visualize.pseudocolor(g, cmap='Greens', background='white', min_value=0, max_value=255, mask=mask, axes=False)

    # convert color channels to int16 so we can add them (values will be greater than 255 which is max of current uint8 format)
    g = g.astype('uint16')
    r = r.astype('uint16')
    b = b.astype('uint16')
    denom = g + r + b

    # greenness index
    out_flt = np.zeros_like(denom, dtype='float32')
    # divide green by sum of channels to compute greenness index with values 0-1
    gi = np.divide(g,
                   denom,
                   out=out_flt,
                   where=np.logical_and(denom != 0, mask > 0))
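    # e.g. a pure-green pixel (b=0, g=255, r=0) gives gi = 1.0, a neutral grey pixel ≈ 0.33,
    # and pixels outside the mask keep the initial value of 0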

    # find objects
    c, h = pcv.find_objects(img, mask)
    rc, rh = pcv.roi.multi(img, coord=[(1300, 900), (1300, 2400)], radius=350)
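    # Two circular ROIs of radius 350 px centred at (1300, 900) and (1300, 2400), presumably one per pot position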
    # Turn off debug temporarily, otherwise there will be a lot of plots
    pcv.params.debug = None
    # Loop over each region of interest
    i = 0
    rc_i = rc[i]
    for i, rc_i in enumerate(rc):
        rh_i = rh[i]

        # Add ROI number to output. Before roi_objects so result has NA if no object.
        pcv.outputs.add_observation(variable='roi',
                                    trait='roi',
                                    method='roi',
                                    scale='int',
                                    datatype=int,
                                    value=i,
                                    label='#')

        roi_obj, hierarchy_obj, submask, obj_area = pcv.roi_objects(
            img,
            roi_contour=rc_i,
            roi_hierarchy=rh_i,
            object_contour=c,
            obj_hierarchy=h,
            roi_type='partial')

        if obj_area == 0:

            print('\t!!! No object found in ROI', str(i))
            pcv.outputs.add_observation(
                variable='plantarea',
                trait='plant area in sq mm',
                method='observations.area*pixelresolution^2',
                scale=pixelresolution,
                datatype="<class 'float'>",
                value=0,
                label='sq mm')

        else:

            # Combine multiple objects
            # ple plant objects within an roi together
            plant_object, plant_mask = pcv.object_composition(
                img=img, contours=roi_obj, hierarchy=hierarchy_obj)

            final_mask = pcv.image_add(final_mask, plant_mask)

            # Save greenness for individual ROI
            grnindex = np.mean(gi[np.where(plant_mask > 0)])
            pcv.outputs.add_observation(
                variable='greenness_index',
                trait='mean normalized greenness index',
                method='g/sum(b+g+r)',
                scale='[0,1]',
                datatype="<class 'float'>",
                value=float(grnindex),
                label='/1')

            # Analyze all colors
            hist = pcv.analyze_color(img, plant_mask, 'all')

            # Analyze the shape of the current plant
            shape_img = pcv.analyze_object(img, plant_object, plant_mask)
            plant_area = pcv.outputs.observations['area'][
                'value'] * pixelresolution**2
            pcv.outputs.add_observation(
                variable='plantarea',
                trait='plant area in sq mm',
                method='observations.area*pixelresolution^2',
                scale=pixelresolution,
                datatype="<class 'float'>",
                value=plant_area,
                label='sq mm')

        # end if-else

        # At this point we have observations for one plant
        # We can write these out to a unique results file
        # Here I will name the results file with the ROI ID combined with the original result filename
        basename, ext = os.path.splitext(args.result)
        filename = basename + "-roi" + str(i) + ext
        # Save the existing metadata to the new file
        with open(filename, "w") as r:
            json.dump(metadata, r)
        pcv.print_results(filename=filename)
        # The results are saved, now clear out the observations so the next loop adds new ones for the next plant
        pcv.outputs.clear()

        if args.writeimg and obj_area != 0:
            imgdir = os.path.join(args.outdir, 'shape_images', plantbarcode)
            os.makedirs(imgdir, exist_ok=True)
            pcv.print_image(
                shape_img,
                os.path.join(imgdir,
                             imagename + '-roi' + str(i) + '-shape.png'))

            imgdir = os.path.join(args.outdir, 'colorhist_images',
                                  plantbarcode)
            os.makedirs(imgdir, exist_ok=True)
            pcv.print_image(
                hist,
                os.path.join(imgdir,
                             imagename + '-roi' + str(i) + '-colorhist.png'))

    # end roi loop

    if args.writeimg:
        # save greenness image of the entire tray
        imgdir = os.path.join(args.outdir, 'pseudocolor_images', plantbarcode)
        os.makedirs(imgdir, exist_ok=True)
        gi_img = pcv.visualize.pseudocolor(gi,
                                           obj=None,
                                           mask=final_mask,
                                           cmap='viridis',
                                           axes=False,
                                           min_value=0.3,
                                           max_value=0.6,
                                           background='black',
                                           obj_padding=0)
        gi_img = add_scalebar(gi_img,
                              pixelresolution=pixelresolution,
                              barwidth=20,
                              barlocation='lower left')
        gi_img.set_size_inches(6, 6, forward=False)
        gi_img.savefig(os.path.join(imgdir, imagename + '-greenness.png'),
                       bbox_inches='tight')
        gi_img.clf()
Code example #22
0
def main():
    args = options()

    os.chdir(args.outdir)

    # Read RGB image
    img, path, filename = pcv.readimage(args.image, mode="native")

    # Get metadata from file name
    geno_name = filename.split("}{")
    geno_name = geno_name[5]
    geno_name = geno_name.split("_")
    geno_name = geno_name[1]

    day = filename.split("}{")
    day = day[7]
    day = day.split("_")
    day = day[1]
    day = day.split("}")
    day = day[0]

    plot = filename.split("}{")
    plot = plot[0]
    plot = plot.split("_")
    plot = plot[1]

    exp_name = filename.split("}{")
    exp_name = exp_name[1]
    exp_name = exp_name.split("_")
    exp_name = exp_name[1]

    treat_name = filename.split("}{")
    treat_name = treat_name[6]

    # Create masks using Naive Bayes Classifier and PDFs file
    masks = pcv.naive_bayes_classifier(img, args.pdfs)
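    # masks is a dictionary of binary masks, one per class defined in the PDFs
    # file (the "healthy", "necrosis", and "stem" classes are used below).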

    # The following code identifies the racks in the image, finds their top edge,
    # and chooses a line along that edge to pick a y coordinate used to trim any
    # soil/pot pixels that were classified as plant material.

    # Convert RGB to HSV and extract the Value channel
    v = pcv.rgb2gray_hsv(img, 'v')

    # Threshold the Value image
    v_thresh = pcv.threshold.binary(v, 98, 255, 'light')

    # Dilate mask to fill holes
    dilate_racks = pcv.dilate(v_thresh, 2, 1)

    # Fill in small objects
    mask = np.copy(dilate_racks)
    fill_racks = pcv.fill(mask, 100000)

    # Edge detection
    edges = cv2.Canny(fill_racks, 60, 180)

    # Detect straight line segments from the edges and draw them on the image
    lines = cv2.HoughLinesP(edges,
                            rho=1,
                            theta=1 * np.pi / 180,
                            threshold=150,
                            minLineLength=50,
                            maxLineGap=15)
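    # rho/theta are the accumulator resolutions (1 px, 1 degree), threshold is the
    # minimum number of votes, and minLineLength/maxLineGap filter the segments.
    # HoughLinesP returns None when nothing is detected; the guard below is an
    # assumed failure-handling choice to avoid an AttributeError on lines.shape.
    if lines is None:
        raise RuntimeError("No rack edges detected by cv2.HoughLinesP")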
    N = lines.shape[0]
    for i in range(N):
        x1 = lines[i][0][0]
        y1 = lines[i][0][1]
        x2 = lines[i][0][2]
        y2 = lines[i][0][3]
        cv2.line(img, (x1, y1), (x2, y2), (255, 0, 0), 2)

    # keep only horizontal lines
    N = lines.shape[0]
    tokeep = []
    for i in range(N):
        want = (abs(lines[i][0][1] - lines[i][0][3])) <= 10
        tokeep.append(want)

    lines = lines[tokeep]

    # keep only lines in lower half of image
    N = lines.shape[0]
    tokeep = []
    for i in range(N):
        want = 3100 > lines[i][0][1] > 2300
        tokeep.append(want)

    lines = lines[tokeep]

    # assign lines to positions around plants
    N = lines.shape[0]
    tokeep = []
    left = []
    mid = []
    right = []

    for i in range(N):
        leftones = lines[i][0][2] <= 2000
        left.append(leftones)

        midones = 3000 > lines[i][0][2] > 2000
        mid.append(midones)

        rightones = lines[i][0][0] >= 3300
        right.append(rightones)

    right = lines[right]
    left = lines[left]
    mid = lines[mid]
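    # Note: the [0] indexing below assumes at least one horizontal line was kept
    # for each of the left/mid/right regions; an empty selection would raise IndexError.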

    # Choose y values for left/mid/right, adding some pixels to go above the pot (subtract because the y axis points downward)
    y_left = left[0][0][3] - 50
    y_mid = mid[0][0][3] - 50
    y_right = right[0][0][3] - 50

    # reload original image to write new lines on
    img, path, filename = pcv.readimage(args.image)

    # write horizontal lines on image
    cv2.line(img, (left[0][0][0], left[0][0][1]),
             (left[0][0][2], left[0][0][3]), (255, 255, 51), 2)
    cv2.line(img, (mid[0][0][0], mid[0][0][1]), (mid[0][0][2], mid[0][0][3]),
             (255, 255, 51), 2)
    cv2.line(img, (right[0][0][0], right[0][0][1]),
             (right[0][0][2], right[0][0][3]), (255, 255, 51), 2)

    # Add masks together
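    # The classifier assigns each pixel to a single most-probable class, so the
    # class masks should not overlap and this sum behaves as a union of the masks.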
    added = masks["healthy"] + masks["necrosis"] + masks["stem"]

    # Dilate mask to fill holes
    dilate_img = pcv.dilate(added, 2, 1)

    # Fill in small objects
    mask = np.copy(dilate_img)
    fill_img = pcv.fill(mask, 400)

    ret, inverted = cv2.threshold(fill_img, 75, 255, cv2.THRESH_BINARY_INV)

    # Dilate mask to fill holes of plant
    dilate_inv = pcv.dilate(inverted, 2, 1)

    # Fill in small objects of plant
    mask2 = np.copy(dilate_inv)
    fill_plant = pcv.fill(mask2, 20)

    inverted_img = pcv.invert(fill_plant)

    # Identify objects
    id_objects, obj_hierarchy = pcv.find_objects(img, inverted_img)

    # Define ROIs
    roi_left, roi_hierarchy_left = pcv.roi.rectangle(280, 1280, 1275, 1200,
                                                     img)
    roi_mid, roi_hierarchy_mid = pcv.roi.rectangle(1900, 1280, 1275, 1200, img)
    roi_right, roi_hierarchy_right = pcv.roi.rectangle(3600, 1280, 1275, 1200,
                                                       img)

    # Decide which objects to keep
    roi_objects_left, roi_obj_hierarchy_left, kept_mask_left, obj_area_left = pcv.roi_objects(
        img, 'partial', roi_left, roi_hierarchy_left, id_objects,
        obj_hierarchy)
    roi_objects_mid, roi_obj_hierarchy_mid, kept_mask_mid, obj_area_mid = pcv.roi_objects(
        img, 'partial', roi_mid, roi_hierarchy_mid, id_objects, obj_hierarchy)
    roi_objects_right, roi_obj_hierarchy_right, kept_mask_right, obj_area_right = pcv.roi_objects(
        img, 'partial', roi_right, roi_hierarchy_right, id_objects,
        obj_hierarchy)

    # Combine objects
    obj_r, mask_r = pcv.object_composition(img, roi_objects_right,
                                           roi_obj_hierarchy_right)
    obj_m, mask_m = pcv.object_composition(img, roi_objects_mid,
                                           roi_obj_hierarchy_mid)
    obj_l, mask_l = pcv.object_composition(img, roi_objects_left,
                                           roi_obj_hierarchy_left)

    def analyze_bound_horizontal2(img,
                                  obj,
                                  mask,
                                  line_position,
                                  filename=False):

        ori_img = np.copy(img)

        # Draw a horizontal line through the bottom of the image, adjusted to the user-specified height
        if len(np.shape(ori_img)) == 3:
            iy, ix, iz = np.shape(ori_img)
        else:
            iy, ix = np.shape(ori_img)
        size = (iy, ix)
        size1 = (iy, ix, 3)
        background = np.zeros(size, dtype=np.uint8)
        wback = np.zeros(size1, dtype=np.uint8)
        x_coor = int(ix)
        y_coor = int(iy) - int(line_position)
        rec_corner = int(iy - 2)
        rec_point1 = (1, rec_corner)
        rec_point2 = (x_coor - 2, y_coor - 2)
        cv2.rectangle(background, rec_point1, rec_point2, (255), 1)
        below_contour, below_hierarchy = cv2.findContours(
            background, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
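        # [-2:] keeps only (contours, hierarchy), so this works with both
        # OpenCV 3 (three return values) and OpenCV 4 (two return values).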

        below = []
        above = []
        mask_nonzerox, mask_nonzeroy = np.nonzero(mask)
        obj_points = np.vstack((mask_nonzeroy, mask_nonzerox))
        obj_points1 = np.transpose(obj_points)

        for i, c in enumerate(obj_points1):
            xy = tuple(c)
            pptest = cv2.pointPolygonTest(below_contour[0],
                                          xy,
                                          measureDist=False)
            if pptest == 1:
                below.append(xy)
                cv2.circle(ori_img, xy, 1, (0, 0, 255))
                cv2.circle(wback, xy, 1, (0, 0, 0))
            else:
                above.append(xy)
                cv2.circle(ori_img, xy, 1, (0, 255, 0))
                cv2.circle(wback, xy, 1, (255, 255, 255))

        return wback

    ori_img = np.copy(img)

    # Draw a horizontal line through the bottom of the image, adjusted to the user-specified height
    if len(np.shape(ori_img)) == 3:
        iy, ix, iz = np.shape(ori_img)
    else:
        iy, ix = np.shape(ori_img)

    if obj_r is not None:
        wback_r = analyze_bound_horizontal2(img, obj_r, mask_r, iy - y_right)
    if obj_m is not None:
        wback_m = analyze_bound_horizontal2(img, obj_m, mask_m, iy - y_mid)
    if obj_l is not None:
        wback_l = analyze_bound_horizontal2(img, obj_l, mask_l, iy - y_left)

    threshold_light = pcv.threshold.binary(img, 1, 1, 'dark')

    if obj_r is not None:
        fgmask_r = pcv.background_subtraction(wback_r, threshold_light)
    if obj_m is not None:
        fgmask_m = pcv.background_subtraction(wback_m, threshold_light)
    if obj_l is not None:
        fgmask_l = pcv.background_subtraction(wback_l, threshold_light)

    if obj_l is not None:
        id_objects_left, obj_hierarchy_left = pcv.find_objects(img, fgmask_l)
    if obj_m is not None:
        id_objects_mid, obj_hierarchy_mid = pcv.find_objects(img, fgmask_m)
    if obj_r is not None:
        id_objects_right, obj_hierarchy_right = pcv.find_objects(img, fgmask_r)

    # Combine objects
    if obj_r is not None:
        obj_r2, mask_r2 = pcv.object_composition(img, id_objects_right,
                                                 obj_hierarchy_right)
    if obj_m is not None:
        obj_m2, mask_m2 = pcv.object_composition(img, id_objects_mid,
                                                 obj_hierarchy_mid)
    if obj_l is not None:
        obj_l2, mask_l2 = pcv.object_composition(img, id_objects_left,
                                                 obj_hierarchy_left)

    # Shape measurements
    if obj_l is not None:
        shape_header_left, shape_data_left, shape_img_left = pcv.analyze_object(
            img, obj_l2, fgmask_l,
            geno_name + '_' + plot + '_' + 'A' + '_' + day + '_' + 'shape.jpg')

    if obj_r is not None:
        shape_header_right, shape_data_right, shape_img_right = pcv.analyze_object(
            img, obj_r2, fgmask_r,
            geno_name + '_' + plot + '_' + 'C' + '_' + day + '_' + 'shape.jpg')

    if obj_m is not None:
        shape_header_mid, shape_data_mid, shape_img_mid = pcv.analyze_object(
            img, obj_m2, fgmask_m,
            geno_name + '_' + plot + '_' + 'B' + '_' + day + '_' + 'shape.jpg')

    # Color data
    if obj_r is not None:
        color_header_right, color_data_right, norm_slice_right = pcv.analyze_color(
            img, fgmask_r, 256, None, 'v', 'img',
            geno_name + '_' + plot + '_' + 'C' + '_' + day + '_' + 'color.jpg')

    if obj_m is not None:
        color_header_mid, color_data_mid, norm_slice_mid = pcv.analyze_color(
            img, fgmask_m, 256, None, 'v', 'img',
            geno_name + '_' + plot + '_' + 'B' + '_' + day + '_' + 'color.jpg')

    if obj_l is not None:
        color_header_left, color_data_left, norm_slice_left = pcv.analyze_color(
            img, fgmask_l, 256, None, 'v', 'img',
            geno_name + '_' + plot + '_' + 'A' + '_' + day + '_' + 'color.jpg')

    new_header = [
        'experiment', 'day', 'genotype', 'treatment', 'plot', 'plant',
        'percent.necrosis', 'area', 'hull-area', 'solidity', 'perimeter',
        'width', 'height', 'longest_axis', 'center-of-mass-x',
        'center-of-mass-y', 'hull_vertices', 'in_bounds', 'ellipse_center_x',
        'ellipse_center_y', 'ellipse_major_axis', 'ellipse_minor_axis',
        'ellipse_angle', 'ellipse_eccentricity', 'bin-number', 'bin-values',
        'blue', 'green', 'red', 'lightness', 'green-magenta', 'blue-yellow',
        'hue', 'saturation', 'value'
    ]
    table = []
    table.append(new_header)

    added2 = masks["healthy"] + masks["stem"]

    # Object combine kept objects
    if obj_l is not None:
        masked_image_healthy_left = pcv.apply_mask(added2, fgmask_l, 'black')
        masked_image_necrosis_left = pcv.apply_mask(masks["necrosis"],
                                                    fgmask_l, 'black')
        added_obj_left = masked_image_healthy_left + masked_image_necrosis_left

        sample = "A"

        # Calculations
        necrosis_left = np.sum(masked_image_necrosis_left)
        necrosis_percent_left = float(necrosis_left) / np.sum(added_obj_left)

        table.append([
            exp_name, day, geno_name, treat_name, plot, sample,
            round(necrosis_percent_left, 5), shape_data_left[1],
            shape_data_left[2], shape_data_left[3], shape_data_left[4],
            shape_data_left[5], shape_data_left[6], shape_data_left[7],
            shape_data_left[8], shape_data_left[9], shape_data_left[10],
            shape_data_left[11], shape_data_left[12], shape_data_left[13],
            shape_data_left[14], shape_data_left[15], shape_data_left[16],
            shape_data_left[17], '"{}"'.format(color_data_left[1]),
            '"{}"'.format(color_data_left[2]), '"{}"'.format(
                color_data_left[3]), '"{}"'.format(color_data_left[4]),
            '"{}"'.format(color_data_left[5]), '"{}"'.format(
                color_data_left[6]), '"{}"'.format(color_data_left[7]),
            '"{}"'.format(color_data_left[8]), '"{}"'.format(
                color_data_left[9]), '"{}"'.format(color_data_left[10]),
            '"{}"'.format(color_data_left[11])
        ])

    # Object combine kept objects
    if obj_m is not None:
        masked_image_healthy_mid = pcv.apply_mask(added2, fgmask_m, 'black')
        masked_image_necrosis_mid = pcv.apply_mask(masks["necrosis"], fgmask_m,
                                                   'black')
        added_obj_mid = masked_image_healthy_mid + masked_image_necrosis_mid

        sample = "B"

        # Calculations
        necrosis_mid = np.sum(masked_image_necrosis_mid)
        necrosis_percent_mid = float(necrosis_mid) / np.sum(added_obj_mid)

        table.append([
            exp_name, day, geno_name, treat_name, plot, sample,
            round(necrosis_percent_mid,
                  5), shape_data_mid[1], shape_data_mid[2], shape_data_mid[3],
            shape_data_mid[4], shape_data_mid[5], shape_data_mid[6],
            shape_data_mid[7], shape_data_mid[8], shape_data_mid[9],
            shape_data_mid[10], shape_data_mid[11], shape_data_mid[12],
            shape_data_mid[13], shape_data_mid[14], shape_data_mid[15],
            shape_data_mid[16], shape_data_mid[17],
            '"{}"'.format(color_data_mid[1]), '"{}"'.format(color_data_mid[2]),
            '"{}"'.format(color_data_mid[3]), '"{}"'.format(color_data_mid[4]),
            '"{}"'.format(color_data_mid[5]), '"{}"'.format(color_data_mid[6]),
            '"{}"'.format(color_data_mid[7]), '"{}"'.format(color_data_mid[8]),
            '"{}"'.format(color_data_mid[9]), '"{}"'.format(
                color_data_mid[10]), '"{}"'.format(color_data_mid[11])
        ])

    # Object combine kept objects
    if obj_r is not None:
        masked_image_healthy_right = pcv.apply_mask(added2, fgmask_r, 'black')
        masked_image_necrosis_right = pcv.apply_mask(masks["necrosis"],
                                                     fgmask_r, 'black')
        added_obj_right = masked_image_healthy_right + masked_image_necrosis_right

        sample = "C"

        # Calculations
        necrosis_right = np.sum(masked_image_necrosis_right)
        necrosis_percent_right = float(necrosis_right) / np.sum(
            added_obj_right)

        table.append([
            exp_name, day, geno_name, treat_name, plot, sample,
            round(necrosis_percent_right, 5), shape_data_right[1],
            shape_data_right[2], shape_data_right[3], shape_data_right[4],
            shape_data_right[5], shape_data_right[6], shape_data_right[7],
            shape_data_right[8], shape_data_right[9], shape_data_right[10],
            shape_data_right[11], shape_data_right[12], shape_data_right[13],
            shape_data_right[14], shape_data_right[15], shape_data_right[16],
            shape_data_right[17], '"{}"'.format(color_data_right[1]),
            '"{}"'.format(color_data_right[2]), '"{}"'.format(
                color_data_right[3]), '"{}"'.format(color_data_right[4]),
            '"{}"'.format(color_data_right[5]), '"{}"'.format(
                color_data_right[6]), '"{}"'.format(color_data_right[7]),
            '"{}"'.format(color_data_right[8]), '"{}"'.format(
                color_data_right[9]), '"{}"'.format(color_data_right[10]),
            '"{}"'.format(color_data_right[11])
        ])

    if obj_l is not None:
        merged2 = cv2.merge([
            masked_image_healthy_left,
            np.zeros(np.shape(masks["healthy"]), dtype=np.uint8),
            masked_image_necrosis_left
        ])  #blue, green, red
        pcv.print_image(
            merged2, geno_name + '_' + plot + '_' + 'A' + '_' + day + '_' +
            'merged.jpg')
    if obj_m is not None:
        merged3 = cv2.merge([
            masked_image_healthy_mid,
            np.zeros(np.shape(masks["healthy"]), dtype=np.uint8),
            masked_image_necrosis_mid
        ])  #blue, green, red
        pcv.print_image(
            merged3, geno_name + '_' + plot + '_' + 'B' + '_' + day + '_' +
            'merged.jpg')
    if obj_r is not None:
        merged4 = cv2.merge([
            masked_image_healthy_right,
            np.zeros(np.shape(masks["healthy"]), dtype=np.uint8),
            masked_image_necrosis_right
        ])  #blue, green, red
        pcv.print_image(
            merged4, geno_name + '_' + plot + '_' + 'C' + '_' + day + '_' +
            'merged.jpg')

    # Save area results to file (one CSV file per image)
    file_name = filename.split("}{")
    file_name = file_name[0] + "}{" + file_name[5] + "}{" + file_name[7]

    # [:-4] strips the original 4-character extension (e.g. '.png'), so add the dot back
    outfile = str(file_name[:-4]) + '.csv'
    with open(outfile, 'w') as f:
        for row in table:
            f.write(','.join(map(str, row)) + '\n')

    print(filename)
Code example #23
0
def main():
    # Get options
    #args = options()
    parser = argparse.ArgumentParser(
        description="Imaging processing with opencv")
    parser.add_argument("-i",
                        "--image",
                        help="Input image file.",
                        required=True)
    parser.add_argument("-o",
                        "--outdir",
                        help="Output directory for image files.",
                        required=False)
    parser.add_argument("-r", "--result", help="result file.", required=False)
    parser.add_argument("-w",
                        "--writeimg",
                        help="write out images.",
                        default=False,
                        action="store_true")
    parser.add_argument("-f",
                        "--fileout",
                        help="output mask file path",
                        required=True)
    parser.add_argument(
        "-D",
        "--debug",
        help="can be set to 'print' or None (or 'plot' if in Jupyter); prints intermediate images.",
        default=None)
    args = parser.parse_args()

    pcv.params.debug = args.debug  # set debug mode
    pcv.params.debug_outdir = args.outdir  # set output directory

    # Read image

    img, path, filename = pcv.readimage(filename=args.image)
    img = cv2.resize(img, (1280, 960), interpolation=cv2.INTER_AREA)
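    # INTER_AREA interpolation is generally the preferred choice when downscaling.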

    # Convert RGB to HSV and extract the saturation channel
    s = pcv.rgb2gray_hsv(rgb_img=img, channel='s')

    # Threshold the saturation image
    s_thresh = pcv.threshold.binary(gray_img=s,
                                    threshold=35,
                                    max_value=255,
                                    object_type='light')

    # Median Blur
    s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=5)
    s_cnt = pcv.median_blur(gray_img=s_thresh, ksize=5)

    # Convert RGB to LAB and extract the Blue channel
    b = pcv.rgb2gray_lab(rgb_img=img, channel='b')

    # Threshold the blue image
    b_thresh = pcv.threshold.binary(gray_img=b,
                                    threshold=180,
                                    max_value=255,
                                    object_type='light')
    b_cnt = pcv.threshold.binary(gray_img=b,
                                 threshold=180,
                                 max_value=255,
                                 object_type='light')

    # Fill small objects
    # b_fill = pcv.fill(b_thresh, 10)

    # Join the thresholded saturation and blue-yellow images
    bs = pcv.logical_or(bin_img1=s_mblur, bin_img2=b_cnt)

    # Apply Mask (for VIS images, mask_color=white)
    masked = pcv.apply_mask(rgb_img=img, mask=bs, mask_color='white')

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')

    # Threshold the green-magenta and blue images
    maskeda_thresh = pcv.threshold.binary(gray_img=masked_a,
                                          threshold=95,
                                          max_value=255,
                                          object_type='dark')
    maskeda_thresh1 = pcv.threshold.binary(gray_img=masked_a,
                                           threshold=135,
                                           max_value=255,
                                           object_type='light')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b,
                                          threshold=128,
                                          max_value=255,
                                          object_type='light')

    # Join the thresholded saturation and blue-yellow images (OR)
    ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
    ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)

    # Fill small objects
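    # (pcv.fill removes connected components smaller than 200 pixels from the binary mask)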
    ab_fill = pcv.fill(bin_img=ab, size=200)

    # Apply mask (for VIS images, mask_color=white)
    masked2 = pcv.apply_mask(rgb_img=masked, mask=ab_fill, mask_color='white')
    # Identify objects
    id_objects, obj_hierarchy = pcv.find_objects(img=masked2, mask=ab_fill)

    # Define ROI
    roi1, roi_hierarchy = pcv.roi.rectangle(img=masked2,
                                            x=0,
                                            y=0,
                                            h=img.shape[0],
                                            w=img.shape[1])
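    # This ROI spans the entire (resized) image, so the 'partial' filter below
    # effectively keeps every detected object.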

    # Decide which objects to keep
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img=img,
        roi_contour=roi1,
        roi_hierarchy=roi_hierarchy,
        object_contour=id_objects,
        obj_hierarchy=obj_hierarchy,
        roi_type='partial')

    # Object combine kept objects
    obj, mask = pcv.object_composition(img=img,
                                       contours=roi_objects,
                                       hierarchy=hierarchy3)

    ############### Analysis ################

    outfile = False
    if args.writeimg:
        outfile = args.outdir + "/" + filename

    # Find shape properties, output shape image (optional)
    shape_imgs = pcv.analyze_object(img=img, obj=obj, mask=mask)

    # Shape properties relative to user boundary line (optional)
    boundary_img1 = pcv.analyze_bound_horizontal(img=img,
                                                 obj=obj,
                                                 mask=mask,
                                                 line_position=1680)

    # Determine color properties: Histograms, Color Slices, output color analyzed histogram (optional)
    color_histogram = pcv.analyze_color(rgb_img=img,
                                        mask=kept_mask,
                                        hist_plot_type='all')

    # Pseudocolor the grayscale image
    pseudocolored_img = pcv.visualize.pseudocolor(gray_img=s,
                                                  mask=kept_mask,
                                                  cmap='jet')
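    # Note: pseudocolored_img is only generated for visualization (debug output);
    # it is not written to disk or used elsewhere in this script.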

    # Write shape and color data to results file
    #pcv.print_results(filename=args.result)

    # Fraction of pixels recovered by the mask (pixels brighter than 128 / total pixels)
    rows, cols = mask.shape
    count = np.count_nonzero(mask > 128)
    re = float(count) / (rows * cols)
    text = "rec_rate:" + str(round(re, 4))
    cv2.putText(mask, text, (40, 50), cv2.FONT_HERSHEY_PLAIN, 2.0, 255, 2)
    #cv2.imshow("tt",mask)
    #cv2.waitKey(0)
    cv2.imwrite(args.fileout, mask)
    print(str(round(re, 4)))