def prune(skel_img, size):
    """Iteratively remove endpoints (tips) from a skeletonized image.

    The pruning algorithm was inspired by Jean-Patrick Pommier:
    https://gist.github.com/jeanpat/5712699
    "Prunes" barbs off a skeleton.

    Inputs:
    skel_img    = Skeletonized image
    size        = Size to get pruned off each branch

    Returns:
    pruned_img  = Pruned image

    :param skel_img: numpy.ndarray
    :param size: int
    :return pruned_img: numpy.ndarray
    """
    # Store debug mode, then silence debugging output from the helper calls below
    debug = params.debug
    params.debug = None

    pruned_img = skel_img.copy()

    # Check to see if the skeleton has multiple objects
    objects, _ = find_objects(pruned_img, pruned_img)
    if len(objects) != 1:
        # Message typo fixed: "difference pieces" -> "different pieces"
        print("Warning: Multiple objects detected! Pruning will further separate the different pieces.")

    # Remove endpoints (tips) one layer at a time, `size` times
    for _ in range(size):
        endpoints = find_tips(pruned_img)
        pruned_img = image_subtract(pruned_img, endpoints)

    # Build a debugging image: original skeleton in red, pruned skeleton in white
    pruned_plot = np.zeros(skel_img.shape[:2], np.uint8)
    pruned_plot = cv2.cvtColor(pruned_plot, cv2.COLOR_GRAY2RGB)
    skel_obj, skel_hierarchy = find_objects(skel_img, skel_img)
    pruned_obj, pruned_hierarchy = find_objects(pruned_img, pruned_img)
    cv2.drawContours(pruned_plot, skel_obj, -1, (0, 0, 255), params.line_thickness,
                     lineType=8, hierarchy=skel_hierarchy)
    cv2.drawContours(pruned_plot, pruned_obj, -1, (255, 255, 255), params.line_thickness,
                     lineType=8, hierarchy=pruned_hierarchy)

    # Restore debug mode and auto-increment the device counter
    params.debug = debug
    params.device += 1
    if params.debug == 'print':
        print_image(pruned_img, os.path.join(params.debug_outdir, str(params.device) + '_pruned.png'))
        print_image(pruned_plot, os.path.join(params.debug_outdir, str(params.device) + '_pruned_debug.png'))
    elif params.debug == 'plot':
        plot_image(pruned_img, cmap='gray')
        plot_image(pruned_plot)

    return pruned_img
def segment_skeleton(skel_img, mask=None):
    """Segment a skeleton image into pieces.

    Inputs:
    skel_img        = Skeletonized image
    mask            = (Optional) binary mask for debugging. If provided,
                      debug image will be overlaid on the mask.

    Returns:
    segmented_img   = Segmented debugging image
    segment_objects = list of contours

    :param skel_img: numpy.ndarray
    :param mask: numpy.ndarray
    :return segmented_img: numpy.ndarray
    :return segment_objects: list
    """
    # Docstring fixed: the function returns two values; the previous doc
    # advertised a third (hierarchy) return and contained a malformed
    # `"return segment_hierarchies` line.

    # Store debug mode, then silence debugging output from the helper calls below
    debug = params.debug
    params.debug = None

    # Find branch points; dilating them guarantees the subtraction below
    # actually disconnects the segments
    bp = find_branch_pts(skel_img)
    bp = dilate(bp, 3, 1)

    # Subtract from the skeleton so that leaves are no longer connected
    segments = image_subtract(skel_img, bp)

    # Gather contours of leaves
    segment_objects, _ = find_objects(segments, segments)

    # Color each segment a different color
    rand_color = color_palette(len(segment_objects))
    if mask is None:
        segmented_img = skel_img.copy()
    else:
        segmented_img = mask.copy()
    segmented_img = cv2.cvtColor(segmented_img, cv2.COLOR_GRAY2RGB)
    for i in range(len(segment_objects)):
        cv2.drawContours(segmented_img, segment_objects, i, rand_color[i],
                         params.line_thickness, lineType=8)

    # Restore debug mode
    params.debug = debug

    # Auto-increment device
    params.device += 1
    if params.debug == 'print':
        print_image(segmented_img, os.path.join(params.debug_outdir, str(params.device) + '_segmented.png'))
    elif params.debug == 'plot':
        plot_image(segmented_img)

    return segmented_img, segment_objects
def _iterative_prune(skel_img, size):
    """Iteratively remove endpoints (tips) from a skeletonized image.

    The pruning algorithm was inspired by Jean-Patrick Pommier:
    https://gist.github.com/jeanpat/5712699
    "Prunes" barbs off a skeleton. Internal helper: produces no debug output.

    Inputs:
    skel_img    = Skeletonized image
    size        = Size to get pruned off each branch

    Returns:
    pruned_img  = Pruned image

    :param skel_img: numpy.ndarray
    :param size: int
    :return pruned_img: numpy.ndarray
    """
    # Dead code removed: the original also called find_objects() for an
    # unused multiple-object check and built a debug plot (pruned_plot +
    # drawContours) that was never returned, printed, or plotted.
    pruned_img = skel_img.copy()

    # Silence debugging output from the helper calls below
    debug = params.debug
    params.debug = None

    # Remove endpoints (tips) one layer at a time, `size` times
    for _ in range(size):
        endpoints = find_tips(pruned_img)
        pruned_img = image_subtract(pruned_img, endpoints)

    # Restore debug mode
    params.debug = debug

    return pruned_img
def main():
    """VIS workflow: segment the plant via HSV/LAB thresholding, then run
    shape, color, and pseudolandmark analyses."""
    # Get options
    args = options()

    # Set variables (unused legacy `device = 0` removed)
    pcv.params.debug = args.debug
    img_file = args.image

    # Read image
    img, path, filename = pcv.readimage(filename=img_file, mode='rgb')

    # Process saturation channel from HSV colour space:
    # Laplacian sharpening, equalization, then binary threshold + median blur
    s = pcv.rgb2gray_hsv(rgb_img=img, channel='s')
    lp_s = pcv.laplace_filter(s, 1, 1)
    shrp_s = pcv.image_subtract(s, lp_s)
    s_eq = pcv.hist_equalization(shrp_s)
    s_thresh = pcv.threshold.binary(gray_img=s_eq, threshold=215, max_value=255, object_type='light')
    s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=5)

    # Process green-magenta ('a') channel from LAB colour space
    b = pcv.rgb2gray_lab(rgb_img=img, channel='a')
    b_lp = pcv.laplace_filter(b, 1, 1)
    b_shrp = pcv.image_subtract(b, b_lp)
    b_thresh = pcv.threshold.otsu(b_shrp, 255, object_type='dark')

    # Create and apply mask
    bs = pcv.logical_or(bin_img1=s_mblur, bin_img2=b_thresh)
    filled = pcv.fill_holes(bs)
    masked = pcv.apply_mask(img=img, mask=filled, mask_color='white')

    # Extract colour channels from masked image
    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')

    # Threshold the green-magenta and blue images
    maskeda_thresh = pcv.threshold.binary(gray_img=masked_a, threshold=115, max_value=255, object_type='dark')
    maskeda_thresh1 = pcv.threshold.binary(gray_img=masked_a, threshold=140, max_value=255, object_type='light')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b, threshold=128, max_value=255, object_type='light')

    # Join the thresholded saturation and blue-yellow images (OR)
    ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
    ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)

    # Produce and apply a mask.
    # NOTE(review): opened_ab / closed_ab results are never used downstream
    # (masked2 is built from `bs`). The calls are kept because pcv functions
    # emit debug images as a side effect — confirm whether this is intended.
    opened_ab = pcv.opening(gray_img=ab)
    ab_fill = pcv.fill(bin_img=ab, size=200)
    closed_ab = pcv.closing(gray_img=ab_fill)
    masked2 = pcv.apply_mask(img=masked, mask=bs, mask_color='white')

    # Identify objects
    id_objects, obj_hierarchy = pcv.find_objects(img=masked2, mask=ab_fill)

    # Define region of interest (ROI)
    roi1, roi_hierarchy = pcv.roi.rectangle(img=masked2, x=250, y=100, h=200, w=200)

    # Decide what objects to keep
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img=img, roi_contour=roi1, roi_hierarchy=roi_hierarchy,
        object_contour=id_objects, obj_hierarchy=obj_hierarchy,
        roi_type='partial')

    # Object combine kept objects
    obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3)

    ############### Analysis ################

    outfile = False
    if args.writeimg:  # idiom fix: was `args.writeimg == True`
        outfile = args.outdir + "/" + filename

    # Analyze the plant
    analysis_image = pcv.analyze_object(img=img, obj=obj, mask=mask)
    color_histogram = pcv.analyze_color(rgb_img=img, mask=kept_mask, hist_plot_type='all')
    top_x, bottom_x, center_v_x = pcv.x_axis_pseudolandmarks(img=img, obj=obj, mask=mask)
    top_y, bottom_y, center_v_y = pcv.y_axis_pseudolandmarks(img=img, obj=obj, mask=mask)

    # Print results of the analysis
    pcv.print_results(filename=args.result)
    pcv.output_mask(img, kept_mask, filename, outdir=args.outdir, mask_only=True)
def segment_skeleton(skel_img, mask=None):
    """Segment a skeleton image into pieces.

    Inputs:
    skel_img        = Skeletonized image
    mask            = (Optional) binary mask for debugging. If provided,
                      debug image will be overlaid on the mask.

    Returns:
    segmented_img   = Segmented debugging image
    segment_objects = list of contours

    :param skel_img: numpy.ndarray
    :param mask: numpy.ndarray
    :return segmented_img: numpy.ndarray
    :return segment_objects: list
    """
    # Silence debugging output while the intermediate helpers run
    stored_debug = params.debug
    params.debug = None

    # Dilated branch points, subtracted from the skeleton, disconnect the
    # individual leaf/stem segments from each other
    branch_pts = find_branch_pts(skel_img)
    branch_pts = dilate(branch_pts, 3, 1)
    segments = image_subtract(skel_img, branch_pts)

    # Contour each disconnected segment
    segment_objects, _ = find_objects(segments, segments)

    # Restore debugging behavior
    params.debug = stored_debug

    # Build a fresh color scale; do not reuse a previously saved palette
    colors = color_palette(num=len(segment_objects), saved=False)

    # Draw each segment in its own color, over the mask when one was given
    segmented_img = (skel_img if mask is None else mask).copy()
    segmented_img = cv2.cvtColor(segmented_img, cv2.COLOR_GRAY2RGB)
    for idx in range(len(segment_objects)):
        cv2.drawContours(segmented_img, segment_objects, idx, colors[idx],
                         params.line_thickness, lineType=8)

    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(
            segmented_img,
            os.path.join(params.debug_outdir, str(params.device) + '_segmented.png'))
    elif params.debug == 'plot':
        plot_image(segmented_img)

    return segmented_img, segment_objects
def main():
    """Legacy (device-based API) workflow: naive Bayes segmentation with
    background subtraction, then shape, boundary, and color analysis."""
    args = options()  # create options object for argument parsing
    device = 0  # device counter threaded through the legacy pcv API calls
    params.debug = args.debug  # set debug

    outfile = False
    if args.writeimg:
        outfile = os.path.join(args.outdir, os.path.basename(args.image)[:-4])

    # Read in image and the background mask image used for subtraction
    img, path, filename = pcv.readimage(filename=args.image, debug=args.debug)
    background = pcv.transform.load_matrix(args.npz)

    # Naive Bayes classification of the image
    device, mask = pcv.naive_bayes_classifier(img, args.pdf, device, args.debug)
    # if args.writeimg:
    #     pcv.print_image(img=mask["94,104,47"], filename=outfile + "_nb_mask.png")

    # Subtract background noise
    new_mask = pcv.image_subtract(mask["94,104,47"], background)

    # Image blurring using scipy median filter (two directional passes)
    blurred_img = ndimage.median_filter(new_mask, (7, 1))
    blurred_img = ndimage.median_filter(blurred_img, (1, 7))
    device, cleaned = pcv.fill(np.copy(blurred_img), np.copy(blurred_img), 50, 0,
                               args.debug)  # fill leftover noise

    # Dilate and erode to repair plant breaks from background subtraction
    device, cleaned_dilated = pcv.dilate(cleaned, 6, 1, 0)
    device, cleaned = pcv.erode(cleaned_dilated, 6, 1, 0, args.debug)

    # Find objects using mask
    device, objects, obj_hierarchy = pcv.find_objects(img, cleaned, device, debug=args.debug)

    # ROI height depends on which image set (chamber) is being processed
    if "TM015" in args.image:
        h = 1620
    elif "TM016" in args.image:
        h = 1555
    else:
        h = 1320
    roi_contour, roi_hierarchy = pcv.roi.rectangle(x=570, y=0, h=h, w=1900 - 550, img=img)  # grab ROI

    # Isolate plant objects within ROI
    device, roi_objects, hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi_contour, roi_hierarchy, objects, obj_hierarchy,
        device, debug=args.debug)

    # Analyze only images with plants present.
    # BUG FIX: roi_objects is a list of contours; `roi_objects > 0` compared a
    # list with an int (TypeError on Python 3). Test the number of kept objects.
    if len(roi_objects) > 0:
        # Object combine kept objects
        device, plant_contour, plant_mask = pcv.object_composition(
            img=img, contours=roi_objects, hierarchy=hierarchy, device=device,
            debug=args.debug)

        if args.writeimg:
            pcv.print_image(img=plant_mask, filename=outfile + "_mask.png")

        # Find shape properties, output shape image (optional)
        device, shape_header, shape_data, shape_img = pcv.analyze_object(
            img=img, imgname=args.image, obj=plant_contour, mask=plant_mask,
            device=device, debug=args.debug, filename=outfile + ".png")

        # Boundary line position depends on which image set is being processed
        if "TM015" in args.image:
            line_position = 380
        elif "TM016" in args.image:
            line_position = 440
        else:
            line_position = 690

        # Shape properties relative to user boundary line (optional)
        device, boundary_header, boundary_data, boundary_img = pcv.analyze_bound_horizontal(
            img=img, obj=plant_contour, mask=plant_mask,
            line_position=line_position, device=device, debug=args.debug,
            filename=outfile + ".png")

        # Determine color properties: histograms, color slices and pseudocolored
        # images, output color analyzed images (optional)
        device, color_header, color_data, color_img = pcv.analyze_color(
            img=img, imgname=args.image, mask=plant_mask, bins=256,
            device=device, debug=args.debug, hist_plot_type=None,
            pseudo_channel="v", pseudo_bkg="img", resolution=300,
            filename=outfile + ".png")

        # Output shape and color data. Context manager guarantees the results
        # file is closed even if a write fails (was a bare open/close pair).
        with open(args.result, "a") as result:
            result.write('\t'.join(map(str, shape_header)) + "\n")
            result.write('\t'.join(map(str, shape_data)) + "\n")
            for row in shape_img:
                result.write('\t'.join(map(str, row)) + "\n")
            result.write('\t'.join(map(str, color_header)) + "\n")
            result.write('\t'.join(map(str, color_data)) + "\n")
            result.write('\t'.join(map(str, boundary_header)) + "\n")
            result.write('\t'.join(map(str, boundary_data)) + "\n")
            result.write('\t'.join(map(str, boundary_img)) + "\n")
            for row in color_img:
                result.write('\t'.join(map(str, row)) + "\n")
def prune(skel_img, size=0, mask=None):
    """Prune short barbs off a skeletonized image.

    The pruning algorithm proposed by https://github.com/karnoldbio
    Segments a skeleton into discrete pieces, prunes off all secondary
    segments less than or equal to a user-specified size, and returns the
    remaining objects as a list along with the pruned skeleton.

    Inputs:
    skel_img        = Skeletonized image
    size            = Size to get pruned off each branch
    mask            = (Optional) binary mask for debugging. If provided,
                      debug image will be overlaid on the mask.

    Returns:
    pruned_img      = Pruned image
    segmented_img   = Segmented debugging image
    segment_objects = List of contours

    :param skel_img: numpy.ndarray
    :param size: int
    :param mask: numpy.ndarray
    :return pruned_img: numpy.ndarray
    :return segmented_img: numpy.ndarray
    :return segment_objects: list
    """
    # Store debug mode, then silence debugging from the helper calls below.
    # (An unused find_objects() call on the skeleton was removed.)
    debug = params.debug
    params.debug = None

    pruned_img = skel_img.copy()

    # Segment the skeleton into individual pieces
    _, objects = segment_skeleton(skel_img)
    kept_segments = []
    removed_segments = []

    if size > 0:
        # If size > 0 then check for segments that are shorter than `size`
        # pixels. Sort segments first: primary segments are never removed.
        secondary_objects, _ = segment_sort(skel_img, objects)

        # Keep secondary segments longer than the specified size
        for segment in secondary_objects:
            if len(segment) > size:
                kept_segments.append(segment)
            else:
                removed_segments.append(segment)

        # Draw the contours that got removed
        removed_barbs = np.zeros(skel_img.shape[:2], np.uint8)
        cv2.drawContours(removed_barbs, removed_segments, -1, 255, 1, lineType=8)

        # Subtract all short segments from the skeleton image
        pruned_img = image_subtract(pruned_img, removed_barbs)
        # One extra tip-pruning pass cleans up stubs left at the junctions
        pruned_img = _iterative_prune(pruned_img, 1)

    # Restore debug mode
    params.debug = debug

    # Make debugging image: removed barbs in red, pruned skeleton in gray
    if mask is None:
        pruned_plot = np.zeros(skel_img.shape[:2], np.uint8)
    else:
        pruned_plot = mask.copy()
    pruned_plot = cv2.cvtColor(pruned_plot, cv2.COLOR_GRAY2RGB)
    pruned_obj, pruned_hierarchy = find_objects(pruned_img, pruned_img)
    cv2.drawContours(pruned_plot, removed_segments, -1, (0, 0, 255),
                     params.line_thickness, lineType=8)
    cv2.drawContours(pruned_plot, pruned_obj, -1, (150, 150, 150),
                     params.line_thickness, lineType=8)

    # Auto-increment device
    params.device += 1
    if params.debug == 'print':
        print_image(pruned_img,
                    os.path.join(params.debug_outdir, str(params.device) + '_pruned.png'))
        print_image(pruned_plot,
                    os.path.join(params.debug_outdir, str(params.device) + '_pruned_debug.png'))
    elif params.debug == 'plot':
        plot_image(pruned_img, cmap='gray')
        plot_image(pruned_plot)

    # Segment the pruned skeleton
    segmented_img, segment_objects = segment_skeleton(pruned_img, mask)

    return pruned_img, segmented_img, segment_objects
def main_side():
    """Side-view image workflow: background subtraction plus Laplacian/Sobel
    edge detection to segment the plant, then shape and NIR intensity analysis."""
    # NOTE(review): `args` is never defined in this function — it is read from
    # module/global scope (presumably parsed by options() elsewhere); confirm.
    pcv.params.debug = args.debug  # set debug mode
    pcv.params.debug_outdir = args.outdir  # set output directory

    # Read image as grayscale (flags=0) and the fixed background image used
    # for subtraction. NOTE(review): "background.png" is resolved relative to
    # the current working directory — confirm that is intended.
    filename = args.image
    img = cv2.imread(args.image, flags=0)
    #img = pcv.invert(img)
    path, img_name = os.path.split(args.image)
    img_bkgrd = cv2.imread("background.png", flags=0)
    #print(img)
    #print(img_bkgrd)

    # Background subtraction, then threshold the difference to a binary mask
    bkg_sub_img = pcv.image_subtract(img_bkgrd, img)
    bkg_sub_thres_img, masked_img = pcv.threshold.custom_range(
        rgb_img=bkg_sub_img, lower_thresh=[50], upper_thresh=[255],
        channel='gray')

    # Laplace filtering (identify edges based on 2nd derivative);
    # ksize = aperture size, scale = multiplier on computed Laplacian values
    lp_img = pcv.laplace_filter(gray_img=img, ksize=1, scale=1)
    # Plot histogram of grayscale values
    pcv.visualize.histogram(gray_img=lp_img)

    # Laplacian image sharpening: enhances the darkness of the edges detected
    lp_shrp_img = pcv.image_subtract(gray_img1=img, gray_img2=lp_img)
    # Histogram helps to determine the thresholding value below
    pcv.visualize.histogram(gray_img=lp_shrp_img)

    # 1st derivative sobel filtering along horizontal axis (kernel = 1)
    sbx_img = pcv.sobel_filter(gray_img=img, dx=1, dy=0, ksize=1)
    # 1st derivative sobel filtering along vertical axis (kernel = 1)
    sby_img = pcv.sobel_filter(gray_img=img, dx=0, dy=1, ksize=1)

    # Combine the effects of both x and y filters through matrix addition;
    # emphasizes edges identified in both planes
    sb_img = pcv.image_add(gray_img1=sbx_img, gray_img2=sby_img)

    # Lowpass (blurring) filter to smooth the sobel image
    mblur_img = pcv.median_blur(gray_img=sb_img, ksize=1)
    mblur_invert_img = pcv.invert(gray_img=mblur_img)

    # Combine the smoothed sobel image with the Laplacian-sharpened image —
    # combines the best features of both methods ("Digital Image Processing",
    # Gonzalez and Woods, pg. 169)
    edge_shrp_img = pcv.image_add(gray_img1=mblur_invert_img, gray_img2=lp_shrp_img)

    # Perform thresholding to generate a binary image
    tr_es_img = pcv.threshold.binary(gray_img=edge_shrp_img, threshold=145,
                                     max_value=255, object_type='dark')

    # Erosion with a 3x3 kernel (ksize=3), one iteration (i=1)
    e1_img = pcv.erode(gray_img=tr_es_img, ksize=3, i=1)

    # Bring the two object-identification approaches together: logical OR of
    # the background-subtraction object and the derivative-filter object
    comb_img = pcv.logical_or(bin_img1=e1_img, bin_img2=bkg_sub_thres_img)

    # Keep only pixels corresponding to the plant
    masked_erd = pcv.apply_mask(rgb_img=img, mask=comb_img, mask_color='black')

    # Remove the edges of the (1280 x 960) image with rectangle masks:
    # one for the bottom of the image, one around the full border
    masked1, box1_img, rect_contour1, hierarchy1 = pcv.rectangle_mask(img=img, p1=(500, 875), p2=(720, 960))
    # mask the edges
    masked2, box2_img, rect_contour2, hierarchy2 = pcv.rectangle_mask(img=img, p1=(1, 1), p2=(1279, 959))
    bx12_img = pcv.logical_or(bin_img1=box1_img, bin_img2=box2_img)
    inv_bx1234_img = bx12_img  # we dont invert
    # NOTE(review): the assignment below is an exact duplicate of the one
    # above — likely left over from the commented-out invert alternative
    inv_bx1234_img = bx12_img
    #inv_bx1234_img = pcv.invert(gray_img=bx12_img)
    edge_masked_img = pcv.apply_mask(rgb_img=masked_erd, mask=inv_bx1234_img,
                                     mask_color='black')

    #print("here we create a mask")
    mask, masked = pcv.threshold.custom_range(rgb_img=edge_masked_img,
                                              lower_thresh=[25],
                                              upper_thresh=[175],
                                              channel='gray')
    masked = pcv.apply_mask(rgb_img=masked, mask=mask, mask_color='white')
    #print("end")

    # Identify objects using the edge-masked image and the binary mask
    id_objects, obj_hierarchy = pcv.find_objects(img=edge_masked_img, mask=mask)

    # Define ROI (x, y = upper-left corner; h, w = height and width)
    roi1, roi_hierarchy = pcv.roi.rectangle(img=edge_masked_img, x=100, y=100,
                                            h=800, w=1000)

    # Decide which objects to keep: roi_type='largest' keeps only the largest
    # contour. HiddenPrints suppresses console output from roi_objects.
    with HiddenPrints():
        roi_objects, hierarchy5, kept_mask, obj_area = pcv.roi_objects(
            img=edge_masked_img, roi_contour=roi1, roi_hierarchy=roi_hierarchy,
            object_contour=id_objects, obj_hierarchy=obj_hierarchy,
            roi_type='largest')

    rgb_img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)

    # Combine kept objects into a single contour (o) and mask (m)
    o, m = pcv.object_composition(img=rgb_img, contours=roi_objects,
                                  hierarchy=hierarchy5)

    ### Analysis ###

    outfile = False
    if args.writeimg == True:
        # NOTE(review): `filename` here is the full image path (args.image),
        # so this concatenation may duplicate directory components — confirm.
        outfile = args.outdir + "/" + filename

    # Perform shape analysis on the grayscale image
    shape_img = pcv.analyze_object(img=img, obj=o, mask=m)
    new_im = Image.fromarray(shape_img)
    # NOTE(review): double slash in "output//" and use of args.filename
    # (args.image is used elsewhere) — confirm both are intended
    new_im.save("output//" + args.filename + "shape_img_side.png")

    # NIR intensity analysis: 256 intensity classes, with histogram plot
    nir_hist = pcv.analyze_nir_intensity(gray_img=img, mask=kept_mask,
                                         bins=256, histplot=True)

    # Pseudocolor the grayscale image to a colormap, restricted to the mask
    pseudocolored_img = pcv.visualize.pseudocolor(gray_img=img, mask=kept_mask,
                                                  cmap='viridis')

    # Perform shape analysis again on the RGB conversion
    # NOTE(review): duplicates the shape analysis above but on rgb_img —
    # confirm both calls are needed
    shape_imgs = pcv.analyze_object(img=rgb_img, obj=o, mask=m)

    # Write shape and nir data to results file
    pcv.print_results(filename=args.result)