def analyze_bound(img, imgname, obj, mask, line_position, device, debug=None, filename=False):
    """User-input boundary line tool

    Inputs:
    img           = image
    imgname       = name of input image
    obj           = single or grouped contour object
    mask          = mask made from selected contours
    line_position = position of boundary line (a value of 0 would draw the line
                    through the bottom of the image)
    device        = device number. Used to count steps in the pipeline
    debug         = None, print, or plot. Print = save to file, Plot = print to screen.
    filename      = False or image name. If defined, print image.

    Returns:
    device          = device number
    bound_header    = data table column headers
    bound_data      = boundary data table
    analysis_images = output image filenames

    :param img: numpy array
    :param imgname: str
    :param obj: list
    :param mask: numpy array
    :param line_position: int
    :param device: int
    :param debug: str
    :param filename: str
    :return device: int
    :return bound_header: tuple
    :return bound_data: tuple
    :return analysis_images: list
    """
    # Note: analyze_bound is now a wrapper for the newer function analyze_bound_horizontal
    sys.stderr.write('analyze_bound will be deprecated in the near future; please use '
                     'analyze_bound_horizontal, which has the same functionality\n')
    device, bound_header, bound_data, analysis_images = analyze_bound_horizontal(
        img, obj, mask, line_position, device, debug, filename)
    return device, bound_header, bound_data, analysis_images
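# Migration sketch for callers of the deprecated wrapper above: call analyze_bound_horizontal
# directly with the same arguments the wrapper forwards (imgname is simply dropped). The
# variables below are placeholders standing in for a caller's own image, contour, and mask;
# this is an assumption about typical usage, not code from the source.
device = 0
device, bound_header, bound_data, analysis_images = analyze_bound_horizontal(
    img, obj, mask, line_position, device, debug, filename)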
def main():
    # Set variables
    args = options()
    pcv.params.debug = args.debug

    # Read and rotate image
    img, path, filename = pcv.readimage(filename=args.image)
    img = pcv.rotate(img, -90, False)

    # Create mask from LAB b channel
    l = pcv.rgb2gray_lab(rgb_img=img, channel='b')
    l_thresh = pcv.threshold.binary(gray_img=l, threshold=115, max_value=255, object_type='dark')
    l_mblur = pcv.median_blur(gray_img=l_thresh, ksize=5)

    # Apply mask to image
    masked = pcv.apply_mask(img=img, mask=l_mblur, mask_color='white')
    ab_fill = pcv.fill(bin_img=l_mblur, size=50)

    # Extract plant object from image
    id_objects, obj_hierarchy = pcv.find_objects(img=img, mask=ab_fill)
    roi1, roi_hierarchy = pcv.roi.rectangle(img=masked, x=150, y=270, h=100, w=100)
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img=img, roi_contour=roi1, roi_hierarchy=roi_hierarchy,
        object_contour=id_objects, obj_hierarchy=obj_hierarchy, roi_type='partial')
    obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3)

    ############### Analysis ################

    # Analyze shape properties
    analysis_image = pcv.analyze_object(img=img, obj=obj, mask=mask)
    boundary_image2 = pcv.analyze_bound_horizontal(img=img, obj=obj, mask=mask, line_position=370)

    # Analyze colour properties
    color_histogram = pcv.analyze_color(rgb_img=img, mask=kept_mask, hist_plot_type='all')

    # Analyze shape independent of size
    top_x, bottom_x, center_v_x = pcv.x_axis_pseudolandmarks(img=img, obj=obj, mask=mask)
    top_y, bottom_y, center_v_y = pcv.y_axis_pseudolandmarks(img=img, obj=obj, mask=mask)

    # Print results
    pcv.print_results(filename='{}'.format(args.result))
    pcv.print_image(img=color_histogram, filename='{}_color_hist.jpg'.format(args.outdir))
    pcv.print_image(img=kept_mask, filename='{}_mask.jpg'.format(args.outdir))
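# The options() helper used by main() above is not shown in this snippet. A minimal sketch of
# what such a parser typically looks like, based only on the attributes the script reads
# (args.image, args.debug, args.result, args.outdir); flag names and help text are assumptions,
# not copied from the source.
import argparse


def options():
    # Hypothetical argument parser matching the attributes used in main() above.
    parser = argparse.ArgumentParser(description="PlantCV workflow.")
    parser.add_argument("-i", "--image", help="Input image file.", required=True)
    parser.add_argument("-o", "--outdir", help="Output file prefix/directory.", required=False)
    parser.add_argument("-r", "--result", help="Results file.", required=False)
    parser.add_argument("-D", "--debug", help="Debug mode: None, 'print', or 'plot'.", default=None)
    return parser.parse_args()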
analysis_image = pcv.analyze_object(img=img, obj=obj, mask=mask)
pcv.print_image(img=analysis_image, filename="upload/output_imgs/object_img.jpg")


# In[25]:


# Shape properties relative to user boundary line (optional)

# Inputs:
#   img - RGB or grayscale image data
#   obj - Single or grouped contour object
#   mask - Binary mask of selected contours
#   line_position - Position of boundary line (a value of 0 would draw a line
#                   through the bottom of the image)
boundary_image2 = pcv.analyze_bound_horizontal(img=img, obj=obj, mask=mask, line_position=370)


# In[26]:


# Determine color properties: Histograms, Color Slices and Pseudocolored Images,
# output color analyzed images (optional)

# Inputs:
#   rgb_img - RGB image data
#   mask - Binary mask of selected contours
#   hist_plot_type - None (default), 'all', 'rgb', 'lab', or 'hsv'
# This is the data to be printed to the SVG histogram file
color_histogram = pcv.analyze_color(rgb_img=img, mask=kept_mask, hist_plot_type='all')

# Print the histogram out to save it
def main():
    # Create options object for argument parsing
    args = options()
    # Set device
    device = 0
    # Set debug
    pcv.params.debug = args.debug

    outfile = False
    if args.writeimg:
        outfile = os.path.join(args.outdir, os.path.basename(args.image)[:-4])

    # Read in image
    img, path, filename = pcv.readimage(filename=args.image, debug=args.debug)

    # Read in a background image for each zoom level
    config_file = open(args.bkg, 'r')
    config = json.load(config_file)
    config_file.close()
    if "z1500" in args.image:
        bkg_image = config["z1500"]
    elif "z2500" in args.image:
        bkg_image = config["z2500"]
    else:
        pcv.fatal_error("Image {0} has an unsupported zoom level.".format(args.image))
    bkg, bkg_path, bkg_filename = pcv.readimage(filename=bkg_image, debug=args.debug)

    # Detect edges in the background image
    device, bkg_sat = pcv.rgb2gray_hsv(img=bkg, channel="s", device=device, debug=args.debug)
    device += 1
    bkg_edges = feature.canny(bkg_sat)
    if args.debug == "print":
        pcv.print_image(img=bkg_edges, filename=str(device) + '_background_edges.png')
    elif args.debug == "plot":
        pcv.plot_image(img=bkg_edges, cmap="gray")

    # Close background edge contours
    bkg_edges_closed = ndi.binary_closing(bkg_edges)
    device += 1
    if args.debug == "print":
        pcv.print_image(img=bkg_edges_closed, filename=str(device) + '_closed_background_edges.png')
    elif args.debug == "plot":
        pcv.plot_image(img=bkg_edges_closed, cmap="gray")

    # Fill in closed contours in background
    bkg_fill_contours = ndi.binary_fill_holes(bkg_edges_closed)
    device += 1
    if args.debug == "print":
        pcv.print_image(img=bkg_fill_contours, filename=str(device) + '_filled_background_edges.png')
    elif args.debug == "plot":
        pcv.plot_image(img=bkg_fill_contours, cmap="gray")

    # Naive Bayes image classification/segmentation
    device, mask = pcv.naive_bayes_classifier(img=img, pdf_file=args.pdf, device=device, debug=args.debug)

    # Do a light cleaning of the plant mask to remove small objects
    cleaned = morphology.remove_small_objects(mask["plant"].astype(bool), 2)
    device += 1
    if args.debug == "print":
        pcv.print_image(img=cleaned, filename=str(device) + '_cleaned_mask.png')
    elif args.debug == "plot":
        pcv.plot_image(img=cleaned, cmap="gray")

    # Convert the input image to a saturation channel grayscale image
    device, sat = pcv.rgb2gray_hsv(img=img, channel="s", device=device, debug=args.debug)

    # Detect edges in the saturation image
    edges = feature.canny(sat)
    device += 1
    if args.debug == "print":
        pcv.print_image(img=edges, filename=str(device) + '_plant_edges.png')
    elif args.debug == "plot":
        pcv.plot_image(img=edges, cmap="gray")

    # Combine pixels that are in both foreground edges and the filled background edges
    device, combined_bkg = pcv.logical_and(img1=edges.astype(np.uint8) * 255,
                                           img2=bkg_fill_contours.astype(np.uint8) * 255,
                                           device=device, debug=args.debug)

    # Remove background pixels from the foreground edges
    device += 1
    filtered = np.copy(edges)
    filtered[np.where(combined_bkg == 255)] = False
    if args.debug == "print":
        pcv.print_image(img=filtered, filename=str(device) + '_filtered_edges.png')
    elif args.debug == "plot":
        pcv.plot_image(img=filtered, cmap="gray")

    # Combine the cleaned naive Bayes mask and the filtered foreground edges
    device += 1
    combined = cleaned + filtered
    if args.debug == "print":
        pcv.print_image(img=combined, filename=str(device) + '_combined_foreground.png')
    elif args.debug == "plot":
        pcv.plot_image(img=combined, cmap="gray")

    # Close off broken edges and other incomplete contours
    device += 1
    closed_features = ndi.binary_closing(combined, structure=np.ones((3, 3)))
    if args.debug == "print":
        pcv.print_image(img=closed_features, filename=str(device) + '_closed_features.png')
    elif args.debug == "plot":
        pcv.plot_image(img=closed_features, cmap="gray")

    # Fill in holes in contours
    # device += 1
    # fill_contours = ndi.binary_fill_holes(closed_features)
    # if args.debug == "print":
    #     pcv.print_image(img=fill_contours, filename=str(device) + '_filled_contours.png')
    # elif args.debug == "plot":
    #     pcv.plot_image(img=fill_contours, cmap="gray")

    # Use median blur to break horizontal and vertical thin edges (pot edges)
    device += 1
    blurred_img = ndi.median_filter(closed_features.astype(np.uint8) * 255, (3, 1))
    blurred_img = ndi.median_filter(blurred_img, (1, 3))
    # Remove small objects left behind by blurring
    cleaned2 = morphology.remove_small_objects(blurred_img.astype(bool), 200)
    if args.debug == "print":
        pcv.print_image(img=cleaned2, filename=str(device) + '_cleaned_by_median_blur.png')
    elif args.debug == "plot":
        pcv.plot_image(img=cleaned2, cmap="gray")

    # Define region of interest based on camera zoom level for masking the naive Bayes classified image
    # if "z1500" in args.image:
    #     h = 1000
    # elif "z2500" in args.image:
    #     h = 1050
    # else:
    #     pcv.fatal_error("Image {0} has an unsupported zoom level.".format(args.image))
    # roi, roi_hierarchy = pcv.roi.rectangle(x=300, y=150, w=1850, h=h, img=img)

    # Mask the classified image to remove noisy areas prior to finding contours
    # side_mask = np.zeros(np.shape(img)[:2], dtype=np.uint8)
    # cv2.drawContours(side_mask, roi, -1, (255), -1)
    # device, masked_img = pcv.apply_mask(img=cv2.merge([mask["plant"], mask["plant"], mask["plant"]]),
    #                                     mask=side_mask, mask_color="black", device=device, debug=args.debug)
    # Convert the masked image back to grayscale
    # masked_img = masked_img[:, :, 0]

    # Close off contours at the base of the plant
    # if "z1500" in args.image:
    #     pt1 = (1100, 1118)
    #     pt2 = (1340, 1120)
    # elif "z2500" in args.image:
    #     pt1 = (1020, 1162)
    #     pt2 = (1390, 1166)
    # else:
    #     pcv.fatal_error("Image {0} has an unsupported zoom level.".format(args.image))
    # masked_img = cv2.rectangle(np.copy(masked_img), pt1, pt2, (255), -1)
    # closed_mask = ndi.binary_closing(masked_img.astype(bool), iterations=3)

    # Find objects in the masked naive Bayes mask
    # device, objects, obj_hierarchy = pcv.find_objects(img=img, mask=np.copy(masked_img), device=device,
    #                                                   debug=args.debug)
    # objects, obj_hierarchy = cv2.findContours(np.copy(closed_mask.astype(np.uint8) * 255), cv2.RETR_CCOMP,
    #                                           cv2.CHAIN_APPROX_NONE)[-2:]

    # Clean up the combined plant edges/mask image by removing filled-in gaps/holes
    # device += 1
    # cleaned3 = np.copy(cleaned2)
    # cleaned3 = cleaned3.astype(np.uint8) * 255
    # # Loop over the contours from the naive Bayes mask
    # for c, contour in enumerate(objects):
    #     # Calculate the area of each contour
    #     # area = cv2.contourArea(contour)
    #     # If the contour is a hole (i.e. it has no children and it has a parent)
    #     # and it is not a small hole in a leaf that was not classified
    #     if obj_hierarchy[0][c][2] == -1 and obj_hierarchy[0][c][3] > -1:
    #         # Then fill in the contour (hole) black on the cleaned mask
    #         cv2.drawContours(cleaned3, objects, c, (0), -1, hierarchy=obj_hierarchy)
    # if args.debug == "print":
    #     pcv.print_image(img=cleaned3, filename=str(device) + '_gaps_removed.png')
    # elif args.debug == "plot":
    #     pcv.plot_image(img=cleaned3, cmap="gray")

    # Find contours using the cleaned mask
    device, contours, contour_hierarchy = pcv.find_objects(img=img, mask=np.copy(cleaned2.astype(np.uint8)),
                                                           device=device, debug=args.debug)

    # Define region of interest based on camera zoom level for contour filtering
    if "z1500" in args.image:
        h = 940
    elif "z2500" in args.image:
        h = 980
    else:
        pcv.fatal_error("Image {0} has an unsupported zoom level.".format(args.image))
    roi, roi_hierarchy = pcv.roi.rectangle(x=300, y=150, w=1850, h=h, img=img)

    # Filter contours in the region of interest
    device, roi_objects, hierarchy, kept_mask, obj_area = pcv.roi_objects(img=img, roi_type='partial',
                                                                          roi_contour=roi,
                                                                          roi_hierarchy=roi_hierarchy,
                                                                          object_contour=contours,
                                                                          obj_hierarchy=contour_hierarchy,
                                                                          device=device, debug=args.debug)

    # Analyze only images with plants present
    if len(roi_objects) > 0:
        # Object combine kept objects
        device, plant_contour, plant_mask = pcv.object_composition(img=img, contours=roi_objects,
                                                                   hierarchy=hierarchy, device=device,
                                                                   debug=args.debug)

        if args.writeimg:
            # Save the plant mask if requested
            pcv.print_image(img=plant_mask, filename=outfile + "_mask.png")

        # Find shape properties, output shape image
        device, shape_header, shape_data, shape_img = pcv.analyze_object(img=img, imgname=args.image,
                                                                         obj=plant_contour, mask=plant_mask,
                                                                         device=device, debug=args.debug,
                                                                         filename=outfile)

        # Set the boundary line based on the camera zoom level
        if "z1500" in args.image:
            line_position = 930
        elif "z2500" in args.image:
            line_position = 885
        else:
            pcv.fatal_error("Image {0} has an unsupported zoom level.".format(args.image))

        # Shape properties relative to user boundary line
        device, boundary_header, boundary_data, boundary_img = pcv.analyze_bound_horizontal(
            img=img, obj=plant_contour, mask=plant_mask, line_position=line_position,
            device=device, debug=args.debug, filename=outfile)

        # Determine color properties: Histograms, Color Slices and Pseudocolored Images,
        # output color analyzed images
        device, color_header, color_data, color_img = pcv.analyze_color(img=img, imgname=args.image,
                                                                        mask=plant_mask, bins=256,
                                                                        device=device, debug=args.debug,
                                                                        hist_plot_type=None,
                                                                        pseudo_channel="v", pseudo_bkg="img",
                                                                        resolution=300, filename=outfile)

        # Output shape and color data
        result = open(args.result, "a")
        result.write('\t'.join(map(str, shape_header)) + "\n")
        result.write('\t'.join(map(str, shape_data)) + "\n")
        for row in shape_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.write('\t'.join(map(str, color_header)) + "\n")
        result.write('\t'.join(map(str, color_data)) + "\n")
        result.write('\t'.join(map(str, boundary_header)) + "\n")
        result.write('\t'.join(map(str, boundary_data)) + "\n")
        result.write('\t'.join(map(str, boundary_img)) + "\n")
        for row in color_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.close()
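# The workflow above mixes the device-counter PlantCV API with scikit-image and SciPy calls
# (feature.canny, morphology.remove_small_objects, ndi.binary_closing, etc.) but its import
# block is not shown. A sketch of the imports implied by those calls; the module aliases and
# the PlantCV import style are assumptions, not copied from the source.
import os
import json

import numpy as np
import cv2
from scipy import ndimage as ndi
from skimage import feature, morphology

# Older device-based scripts typically used "import plantcv as pcv"; newer releases use:
# from plantcv import plantcv as pcv
import plantcv as pcv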
# Outline (blue) of all combined objects.
obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3)


# In[49]:


# Shape analysis.
shape_img = pcv.analyze_object(img=img, obj=obj, mask=mask)


# In[50]:


# Boundary line output.
boundary_img1 = pcv.analyze_bound_horizontal(img=img, obj=obj, mask=mask, line_position=1680)


# In[54]:


# Histogram of color analysis.
color_histogram = pcv.analyze_color(rgb_img=img, mask=kept_mask, hist_plot_type='all')


# In[55]:


# Pseudocolored image based on value channels. This one analyzes saturation but can be
# manipulated to analyze hue or value.
pseudocolored_img = pcv.visualize.pseudocolor(gray_img=s, mask=kept_mask,
                                              cmap='jet')  # call was truncated in the source; cmap='jet'
                                                           # matches the similar calls elsewhere in this file
def plantCVProcess(img, x, y, w, h):
    # Convert RGB to HSV and extract the saturation channel
    s = pcv.rgb2gray_hsv(rgb_img=img, channel='s')

    # Threshold the saturation image
    s_thresh = pcv.threshold.binary(gray_img=s, threshold=85, max_value=255, object_type='light')

    # Median Blur
    s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=5)
    s_cnt = pcv.median_blur(gray_img=s_thresh, ksize=5)

    # Convert RGB to LAB and extract the Blue channel
    b = pcv.rgb2gray_lab(rgb_img=img, channel='b')

    # Threshold the blue image
    b_thresh = pcv.threshold.binary(gray_img=b, threshold=160, max_value=255, object_type='light')
    b_cnt = pcv.threshold.binary(gray_img=b, threshold=160, max_value=255, object_type='light')

    # Fill small objects
    # b_fill = pcv.fill(b_thresh, 10)

    # Join the thresholded saturation and blue-yellow images
    bs = pcv.logical_or(bin_img1=s_mblur, bin_img2=b_cnt)

    # Apply Mask (for VIS images, mask_color=white)
    masked = pcv.apply_mask(img=img, mask=bs, mask_color='white')

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')

    # Threshold the green-magenta and blue images
    maskeda_thresh = pcv.threshold.binary(gray_img=masked_a, threshold=115, max_value=255, object_type='dark')
    maskeda_thresh1 = pcv.threshold.binary(gray_img=masked_a, threshold=135, max_value=255, object_type='light')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b, threshold=128, max_value=255, object_type='light')

    # Join the thresholded saturation and blue-yellow images (OR)
    ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
    ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)

    # Fill small objects
    ab_fill = pcv.fill(bin_img=ab, size=200)

    # Apply mask (for VIS images, mask_color=white)
    masked2 = pcv.apply_mask(img=masked, mask=ab_fill, mask_color='white')

    # Identify objects
    id_objects, obj_hierarchy = pcv.find_objects(img=masked2, mask=ab_fill)

    # Define ROI
    roi1, roi_hierarchy = pcv.roi.rectangle(img=masked2, x=x, y=y, h=h, w=w)

    # Decide which objects to keep
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img=img, roi_contour=roi1,
                                                                   roi_hierarchy=roi_hierarchy,
                                                                   object_contour=id_objects,
                                                                   obj_hierarchy=obj_hierarchy,
                                                                   roi_type='partial')

    # Object combine kept objects
    obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3)

    ############### Analysis ################

    # Find shape properties, output shape image (optional)
    shape_imgs = pcv.analyze_object(img=img, obj=obj, mask=mask)

    # Shape properties relative to user boundary line (optional)
    boundary_img1 = pcv.analyze_bound_horizontal(img=img, obj=obj, mask=mask, line_position=1680)

    # Determine color properties: Histograms, Color Slices, output color analyzed histogram (optional)
    color_histogram = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='all')

    # Pseudocolor the grayscale image
    pseudocolored_img = pcv.visualize.pseudocolor(gray_img=s, mask=mask, cmap='jet')

    return print_results()
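# A minimal sketch of how plantCVProcess() might be driven. The image path and ROI
# coordinates are placeholders, and print_results() is assumed to be a helper defined
# elsewhere in the surrounding module; none of this driver comes from the source.
from plantcv import plantcv as pcv

rgb, _, _ = pcv.readimage(filename="plant.jpg")  # placeholder input image
results = plantCVProcess(rgb, x=100, y=100, w=800, h=600)  # placeholder ROI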
def silhouette_top():
    """First we draw the picture from the 3D data"""
    ####################################################################################
    x = []
    y = []
    z = []
    image_top = Image.new("RGB", (width, height), color='white')
    draw = ImageDraw.Draw(image_top)
    data_3d = open(args.image, "r")
    original_file = args.image
    for line in data_3d:
        line = line.split(",")
        y.append(int(line[0]))
        x.append(int(line[1]))
        z.append(int(line[2]))
    i = 0
    for point_x in x:
        point_y = y[i]
        # rectangle takes input [x0, y0, x1, y1]
        draw.rectangle([point_x, point_y, point_x + 1, point_y + 1], fill="black")
        i += 1
    image_top.save("top_temp.png")

    image_side = Image.new("RGB", (1280, 960), color='white')
    draw = ImageDraw.Draw(image_side)
    i = 0
    for point_y in y:
        point_z = z[i]
        # rectangle takes input [x0, y0, x1, y1]
        draw.rectangle([point_z, point_y, point_z + 1, point_y + 1], fill="black")
        i += 1
    image_side.save("side_temp.png")

    ####################################################################################
    args.image = "top_temp.png"

    # Get options
    pcv.params.debug = args.debug  # set debug mode
    pcv.params.debug_outdir = args.outdir  # set output directory

    # Read image
    img, path, filename = pcv.readimage(filename=args.image)
    v = pcv.rgb2gray_hsv(rgb_img=img, channel='v')
    v_thresh, maskedv_image = pcv.threshold.custom_range(rgb_img=v, lower_thresh=[0],
                                                         upper_thresh=[200], channel='gray')
    id_objects, obj_hierarchy = pcv.find_objects(img=maskedv_image, mask=v_thresh)

    # Define ROI
    roi1, roi_hierarchy = pcv.roi.rectangle(img=maskedv_image, x=0, y=0, h=height, w=width)

    # Decide which objects to keep
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img=img, roi_contour=roi1, roi_hierarchy=roi_hierarchy,
        object_contour=id_objects, obj_hierarchy=obj_hierarchy, roi_type='partial')
    obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3)
    outfile = args.outdir + "/" + filename

    # Shape properties relative to user boundary line (optional)
    boundary_img1 = pcv.analyze_bound_horizontal(img=img, obj=obj, mask=mask, line_position=1680)
    new_im = Image.fromarray(boundary_img1)
    new_im.save("output//" + args.filename + "_top_boundary.png")

    # Find shape properties, output shape image (optional)
    shape_img = pcv.analyze_object(img=img, obj=obj, mask=mask)
    new_im = Image.fromarray(shape_img)
    new_im.save("output//" + args.filename + "_top_shape.png")
    new_im.save("output//" + args.filename + "shape_img.png")

    GT = re.sub(pattern, replacement, files_names[file_counter])
    pcv.outputs.add_observation(variable="genotype", trait="genotype",
                                method="Regexed from the filename", scale=None,
                                datatype=str, value=int(GT), label="GT")

    # Write shape and color data to results file
    pcv.print_results(filename=args.result)

    ####################################################################################
    args.image = "side_temp.png"

    # Get options
    pcv.params.debug = args.debug  # set debug mode
    pcv.params.debug_outdir = args.outdir  # set output directory

    # Read image
    img, path, filename = pcv.readimage(filename=args.image)
    v = pcv.rgb2gray_hsv(rgb_img=img, channel='v')
    v_thresh, maskedv_image = pcv.threshold.custom_range(rgb_img=v, lower_thresh=[0],
                                                         upper_thresh=[200], channel='gray')
    id_objects, obj_hierarchy = pcv.find_objects(img=maskedv_image, mask=v_thresh)

    # Define ROI
    roi1, roi_hierarchy = pcv.roi.rectangle(img=maskedv_image, x=0, y=0, h=height, w=width)

    # Decide which objects to keep
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img=img, roi_contour=roi1, roi_hierarchy=roi_hierarchy,
        object_contour=id_objects, obj_hierarchy=obj_hierarchy, roi_type='partial')
    obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3)
    outfile = args.outdir + "/" + filename

    # Shape properties relative to user boundary line (optional)
    boundary_img1 = pcv.analyze_bound_horizontal(img=img, obj=obj, mask=mask, line_position=1680)
    new_im = Image.fromarray(boundary_img1)
    new_im.save("output//" + args.filename + "_side_boundary.png")

    # Find shape properties, output shape image (optional)
    shape_img = pcv.analyze_object(img=img, obj=obj, mask=mask)
    new_im = Image.fromarray(shape_img)
    new_im.save("output//" + args.filename + "_side_shape.png")

    GT = re.sub(pattern, replacement, files_names[file_counter])
    pcv.outputs.add_observation(variable="genotype", trait="genotype",
                                method="Regexed from the filename", scale=None,
                                datatype=str, value=int(GT), label="GT")

    # Write shape and color data to results file
    pcv.print_results(filename=args.result_side)
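# silhouette_top() assumes args.image points at a plain-text point cloud in which every row
# holds comma-separated integers that are read as y, x, z (in that column order). A tiny
# sketch of writing a file in the assumed layout; the coordinate values and file name are
# placeholders, not data from the source.
def write_example_cloud(path="example_3d.txt"):
    points = [(12, 345, 80), (13, 346, 81), (14, 347, 82)]  # placeholder (y, x, z) rows
    with open(path, "w") as fh:
        for py, px, pz in points:
            fh.write("{0},{1},{2}\n".format(py, px, pz))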
def main():
    args = options()  # create options object for argument parsing
    device = 0  # set device
    params.debug = args.debug  # set debug

    outfile = False
    if args.writeimg:
        outfile = os.path.join(args.outdir, os.path.basename(args.image)[:-4])

    # In[114]:
    img, path, filename = pcv.readimage(filename=args.image, debug=args.debug)  # read in image
    background = pcv.transform.load_matrix(args.npz)  # read in background mask image for subtraction

    # In[115]:
    device, mask = pcv.naive_bayes_classifier(img, args.pdf, device, args.debug)  # naive bayes on image
    # if args.writeimg:
    #     pcv.print_image(img=mask["94,104,47"], filename=outfile + "_nb_mask.png")

    # In[116]:
    new_mask = pcv.image_subtract(mask["94,104,47"], background)  # subtract background noise

    # In[117]:
    # Image blurring using scipy median filter
    blurred_img = ndimage.median_filter(new_mask, (7, 1))
    blurred_img = ndimage.median_filter(blurred_img, (1, 7))
    device, cleaned = pcv.fill(np.copy(blurred_img), np.copy(blurred_img), 50, 0, args.debug)  # fill leftover noise

    # In[118]:
    # Dilate and erode to repair plant breaks from background subtraction
    device, cleaned_dilated = pcv.dilate(cleaned, 6, 1, 0)
    device, cleaned = pcv.erode(cleaned_dilated, 6, 1, 0, args.debug)

    # In[119]:
    device, objects, obj_hierarchy = pcv.find_objects(img, cleaned, device, debug=args.debug)  # find objects using mask
    if "TM015" in args.image:
        h = 1620
    elif "TM016" in args.image:
        h = 1555
    else:
        h = 1320
    roi_contour, roi_hierarchy = pcv.roi.rectangle(x=570, y=0, h=h, w=1900 - 550, img=img)  # grab ROI

    # In[120]:
    # Isolate plant objects within ROI
    device, roi_objects, hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi_contour, roi_hierarchy, objects, obj_hierarchy, device, debug=args.debug)

    # Analyze only images with plants present (comparing the list itself to 0 was a bug)
    if len(roi_objects) > 0:
        # In[121]:
        # Object combine kept objects
        device, plant_contour, plant_mask = pcv.object_composition(img=img, contours=roi_objects,
                                                                   hierarchy=hierarchy, device=device,
                                                                   debug=args.debug)

        if args.writeimg:
            pcv.print_image(img=plant_mask, filename=outfile + "_mask.png")

        # In[122]:
        # Find shape properties, output shape image (optional)
        device, shape_header, shape_data, shape_img = pcv.analyze_object(img=img, imgname=args.image,
                                                                         obj=plant_contour, mask=plant_mask,
                                                                         device=device, debug=args.debug,
                                                                         filename=outfile + ".png")

        # In[123]:
        if "TM015" in args.image:
            line_position = 380
        elif "TM016" in args.image:
            line_position = 440
        else:
            line_position = 690

        # Shape properties relative to user boundary line (optional)
        device, boundary_header, boundary_data, boundary_img = pcv.analyze_bound_horizontal(
            img=img, obj=plant_contour, mask=plant_mask, line_position=line_position,
            device=device, debug=args.debug, filename=outfile + ".png")

        # In[124]:
        # Determine color properties: Histograms, Color Slices and Pseudocolored Images,
        # output color analyzed images (optional)
        device, color_header, color_data, color_img = pcv.analyze_color(img=img, imgname=args.image,
                                                                        mask=plant_mask, bins=256,
                                                                        device=device, debug=args.debug,
                                                                        hist_plot_type=None,
                                                                        pseudo_channel="v", pseudo_bkg="img",
                                                                        resolution=300,
                                                                        filename=outfile + ".png")

        # In[55]:
        # Output shape and color data
        result = open(args.result, "a")
        result.write('\t'.join(map(str, shape_header)) + "\n")
        result.write('\t'.join(map(str, shape_data)) + "\n")
        for row in shape_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.write('\t'.join(map(str, color_header)) + "\n")
        result.write('\t'.join(map(str, color_data)) + "\n")
        result.write('\t'.join(map(str, boundary_header)) + "\n")
        result.write('\t'.join(map(str, boundary_data)) + "\n")
        result.write('\t'.join(map(str, boundary_img)) + "\n")
        for row in color_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.close()
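# The results file written above (and in the similar workflow earlier in this file) is an
# append-only, tab-delimited log: a header row followed by a data row for each analysis.
# A minimal sketch (an assumption, not a PlantCV utility) for reading those rows back:
import csv


def read_result_rows(path):
    # Returns a list of rows, each row a list of tab-separated fields.
    with open(path, newline="") as fh:
        return [row for row in csv.reader(fh, delimiter="\t")]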
def main():
    # Get options
    pcv.params.debug = args.debug  # set debug mode
    pcv.params.debug_outdir = args.outdir  # set output directory

    # Read image (readimage mode defaults to native, but if image is RGBA then specify mode='rgb')
    # Inputs:
    #   filename - Image file to be read in
    #   mode - Return mode of image; either 'native' (default), 'rgb', 'gray', or 'csv'
    img, path, filename = pcv.readimage(filename=args.image, mode='rgb')

    ### SELECTING THE PLANT ### Attempt 5: combining the filters
    # Parameters
    hue_lower_tresh = 22  # 24
    hue_higher_tresh = 50  # 50
    saturation_lower_tresh = 138  # 140
    saturation_higher_tresh = 230  # 230
    value_lower_tresh = 120  # 125
    value_higher_tresh = 255  # 255
    # RGB color space
    green_lower_tresh = 105  # 110
    green_higher_tresh = 255  # 255
    red_lower_tresh = 22  # 24
    red_higher_thresh = 98  # 98
    blue_lower_tresh = 85  # 85
    blue_higher_tresh = 253  # 255
    # CIELAB color space
    # lab_blue_lower_tresh = 0  # Blue-yellow channel
    # lab_blue_higher_tresh = 255

    s = pcv.rgb2gray_hsv(rgb_img=img, channel='h')
    mask, masked_image = pcv.threshold.custom_range(rgb_img=s, lower_thresh=[hue_lower_tresh],
                                                    upper_thresh=[hue_higher_tresh], channel='gray')
    masked = pcv.apply_mask(rgb_img=img, mask=mask, mask_color='white')  # filtered on hue

    s = pcv.rgb2gray_hsv(rgb_img=masked, channel='s')
    mask, masked_image = pcv.threshold.custom_range(rgb_img=s, lower_thresh=[saturation_lower_tresh],
                                                    upper_thresh=[saturation_higher_tresh], channel='gray')
    masked = pcv.apply_mask(rgb_img=masked, mask=mask, mask_color='white')  # filtered on saturation

    s = pcv.rgb2gray_hsv(rgb_img=masked, channel='v')
    mask, masked_image = pcv.threshold.custom_range(rgb_img=s, lower_thresh=[value_lower_tresh],
                                                    upper_thresh=[value_higher_tresh], channel='gray')
    masked = pcv.apply_mask(rgb_img=masked, mask=mask, mask_color='white')  # filtered on value

    mask, masked = pcv.threshold.custom_range(rgb_img=masked, lower_thresh=[0, green_lower_tresh, 0],
                                              upper_thresh=[255, green_higher_tresh, 255], channel='RGB')
    masked = pcv.apply_mask(rgb_img=masked, mask=mask, mask_color='white')  # filtered on green

    mask, masked = pcv.threshold.custom_range(rgb_img=masked, lower_thresh=[red_lower_tresh, 0, 0],
                                              upper_thresh=[red_higher_thresh, 255, 255], channel='RGB')
    masked = pcv.apply_mask(rgb_img=masked, mask=mask, mask_color='white')  # filtered on red

    mask_old, masked_old = pcv.threshold.custom_range(rgb_img=masked, lower_thresh=[0, 0, blue_lower_tresh],
                                                      upper_thresh=[255, 255, blue_higher_tresh], channel='RGB')
    masked = pcv.apply_mask(rgb_img=masked_old, mask=mask_old, mask_color='white')  # filtered on blue

    # b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')  # Converting to CIELAB blue-yellow image
    # b_thresh = pcv.threshold.binary(gray_img=b, threshold=lab_blue_lower_tresh, max_value=lab_blue_higher_tresh)

    # _____________________________________ Now to identify objects
    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')

    # Threshold the green-magenta and blue images
    maskeda_thresh = pcv.threshold.binary(gray_img=masked_a, threshold=125,  # original 115
                                          max_value=255, object_type='dark')
    maskeda_thresh1 = pcv.threshold.binary(gray_img=masked_a, threshold=140,  # original 135
                                           max_value=255, object_type='light')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b, threshold=128, max_value=255,
                                          object_type='light')

    ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
    ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)

    # Fill small objects
    # Inputs:
    #   bin_img - Binary image data
    #   size - Minimum object area size in pixels (must be an integer); smaller objects will be filled
    ab = pcv.median_blur(gray_img=ab, ksize=3)
    ab_fill = pcv.fill(bin_img=ab, size=1000)
    # print("filled")

    # Apply mask (for VIS images, mask_color=white)
    masked2 = pcv.apply_mask(rgb_img=masked, mask=ab_fill, mask_color='white')

    # ID the objects
    id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill)

    # Let's just take the largest
    roi1, roi_hierarchy = pcv.roi.rectangle(img=masked2, x=0, y=0, h=960, w=1280)  # Currently hardcoded

    # Decide which objects to keep
    # Inputs:
    #   img = img to display kept objects
    #   roi_contour = contour of roi, output from any ROI function
    #   roi_hierarchy = contour of roi, output from any ROI function
    #   object_contour = contours of objects, output from pcv.find_objects function
    #   obj_hierarchy = hierarchy of objects, output from pcv.find_objects function
    #   roi_type = 'partial' (default, for partially inside), 'cutto', or
    #              'largest' (keep only largest contour)
    with HiddenPrints():
        roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
            img=img, roi_contour=roi1, roi_hierarchy=roi_hierarchy,
            object_contour=id_objects, obj_hierarchy=obj_hierarchy, roi_type='partial')

    # Object combine kept objects
    # Inputs:
    #   img - RGB or grayscale image data for plotting
    #   contours - Contour list
    #   hierarchy - Contour hierarchy array
    obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3)
    # print("final plant")
    new_im = Image.fromarray(masked2)
    new_im.save("output//" + args.filename + "last_masked.png")

    ##################_________________ Analysis
    outfile = args.outdir + "/" + filename

    # Here come all the analysis functions.
    # pcv.acute_vertex(img, obj, 30, 15, 100)
    color_img = pcv.analyze_color(rgb_img=img, mask=kept_mask, hist_plot_type=None)
    # new_im = Image.fromarray(color_img)
    # new_im.save(args.filename + "color_img.png")

    # Find shape properties, output shape image (optional)
    # Inputs:
    #   img - RGB or grayscale image data
    #   obj - Single or grouped contour object
    #   mask - Binary image mask to use as mask for moments analysis
    shape_img = pcv.analyze_object(img=img, obj=obj, mask=mask)
    new_im = Image.fromarray(shape_img)
    new_im.save("output//" + args.filename + "shape_img.png")

    # Shape properties relative to user boundary line (optional)
    # Inputs:
    #   img - RGB or grayscale image data
    #   obj - Single or grouped contour object
    #   mask - Binary mask of selected contours
    #   line_position - Position of boundary line (a value of 0 would draw a line
    #                   through the bottom of the image)
    boundary_img1 = pcv.analyze_bound_horizontal(img=img, obj=obj, mask=mask, line_position=1680)
    new_im = Image.fromarray(boundary_img1)
    new_im.save("output//" + args.filename + "boundary_img.png")

    # Determine color properties: Histograms, Color Slices, output color analyzed histogram (optional)
    # Inputs:
    #   rgb_img - RGB image data
    #   mask - Binary mask of selected contours
    #   hist_plot_type - None (default), 'all', 'rgb', 'lab', or 'hsv'
    # This is the data to be printed to the SVG histogram file
    color_histogram = pcv.analyze_color(rgb_img=img, mask=kept_mask, hist_plot_type='all')
    # new_im = Image.fromarray(color_histogram)
    # new_im.save(args.filename + "color_histogram_img.png")

    # Pseudocolor the grayscale image
    # Inputs:
    #   gray_img - Grayscale image data
    #   obj - Single or grouped contour object (optional); if provided, the pseudocolored image
    #         gets cropped down to the region of interest
    #   mask - Binary mask (optional)
    #   background - Background color/type. Options are "image" (gray_img, default), "white", or
    #                "black". A mask must be supplied.
    #   cmap - Colormap
    #   min_value - Minimum value for range of interest
    #   max_value - Maximum value for range of interest
    #   dpi - Dots per inch for image if printed out (optional; if dpi=None the default is 100 dpi)
    #   axes - If False then the title, x-axis, and y-axis won't be displayed (default axes=True)
    #   colorbar - If False then the colorbar won't be displayed (default colorbar=True)
    pseudocolored_img = pcv.visualize.pseudocolor(gray_img=s, mask=kept_mask, cmap='jet')
    # new_im = Image.fromarray(pseudocolored_img)
    # new_im.save(args.filename + "pseudocolored.png")

    # Write shape and color data to results file
    pcv.print_results(filename=args.result)
def root():
    uploaded_file = st.file_uploader("Choose an image...", type="jpg")
    if uploaded_file is not None:
        inp = Image.open(uploaded_file)
        inp.save('input.jpg')
        img, path, filename = pcv.readimage(filename='input.jpg')
        image = Image.open('input.jpg')
        st.image(image, caption='Original Image', use_column_width=True)

        # Convert RGB to HSV and extract the saturation channel
        # Inputs:
        #   rgb_img - RGB image data
        #   channel - Split by 'h' (hue), 's' (saturation), or 'v' (value) channel
        s = pcv.rgb2gray_hsv(rgb_img=img, channel='s')
        pcv.print_image(s, "plant/rgbtohsv.png")
        image = Image.open('plant/rgbtohsv.png')
        st.image(image, caption='RGB to HSV', use_column_width=True)

        s_thresh = pcv.threshold.binary(gray_img=s, threshold=85, max_value=255, object_type='light')
        pcv.print_image(s_thresh, "plant/binary_threshold.png")
        image = Image.open('plant/binary_threshold.png')
        st.image(image, caption='Binary Threshold', use_column_width=True)

        # Median blur to clean noise
        # Inputs:
        #   gray_img - Grayscale image data
        #   ksize - Kernel size (integer or tuple), (ksize, ksize) box if integer input,
        #           (n, m) box if tuple input
        s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=5)
        pcv.print_image(s_mblur, "plant/Median_blur.png")
        image = Image.open('plant/Median_blur.png')
        st.image(image, caption='Median Blur', use_column_width=True)

        # An alternative to using median_blur is gaussian_blur, which applies
        # a gaussian blur filter to the image. Depending on the image, one
        # technique may be more effective than the other.
        # Inputs:
        #   img - RGB or grayscale image data
        #   ksize - Tuple of kernel size
        #   sigma_x - Standard deviation in X direction; if 0 (default), calculated from kernel size
        #   sigma_y - Standard deviation in Y direction; if sigma_y is None (default), sigma_y is
        #             taken to equal sigma_x
        gaussian_img = pcv.gaussian_blur(img=s_thresh, ksize=(5, 5), sigma_x=0, sigma_y=None)

        # Convert RGB to LAB and extract the blue channel ('b')
        # Inputs:
        #   rgb_img - RGB image data
        #   channel - Split by 'l' (lightness), 'a' (green-magenta), or 'b' (blue-yellow) channel
        b = pcv.rgb2gray_lab(rgb_img=img, channel='b')
        b_thresh = pcv.threshold.binary(gray_img=b, threshold=160, max_value=255, object_type='light')

        # Join the thresholded saturation and blue-yellow images with a logical or operation
        # Inputs:
        #   bin_img1 - Binary image data to be compared to bin_img2
        #   bin_img2 - Binary image data to be compared to bin_img1
        bs = pcv.logical_or(bin_img1=s_mblur, bin_img2=b_thresh)
        pcv.print_image(bs, "plant/threshold comparison.png")
        image = Image.open('plant/threshold comparison.png')
        st.image(image, caption='Threshold Comparison', use_column_width=True)

        # Apply mask (for VIS images, mask_color='white')
        # Inputs:
        #   img - RGB or grayscale image data
        #   mask - Binary mask image data
        #   mask_color - 'white' or 'black'
        masked = pcv.apply_mask(img=img, mask=bs, mask_color='white')
        pcv.print_image(masked, "plant/Apply_mask.png")
        image = Image.open('plant/Apply_mask.png')
        st.image(image, caption='Applied Mask', use_column_width=True)

        # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
        masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
        masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')

        # Threshold the green-magenta and blue images
        maskeda_thresh = pcv.threshold.binary(gray_img=masked_a, threshold=115, max_value=255,
                                              object_type='dark')
        maskeda_thresh1 = pcv.threshold.binary(gray_img=masked_a, threshold=135, max_value=255,
                                               object_type='light')
        maskedb_thresh = pcv.threshold.binary(gray_img=masked_b, threshold=128, max_value=255,
                                              object_type='light')
        pcv.print_image(maskeda_thresh, "plant/maskeda_thresh.png")
        pcv.print_image(maskeda_thresh1, "plant/maskeda_thresh1.png")
        pcv.print_image(maskedb_thresh, "plant/maskedb_thresh1.png")
        image = Image.open('plant/maskeda_thresh.png')
        st.image(image, caption='Threshold green-magenta and blue image', use_column_width=True)

        # Join the thresholded saturation and blue-yellow images (OR)
        ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
        ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)

        # Opening filters out bright noise from an image.
        # Inputs:
        #   gray_img - Grayscale or binary image data
        #   kernel - Optional neighborhood, expressed as an array of 1's and 0's. If None (default),
        #            uses a cross-shaped structuring element.
        opened_ab = pcv.opening(gray_img=ab)

        # Depending on the situation it might be useful to use the
        # exclusive or (pcv.logical_xor) function.
        # Inputs:
        #   bin_img1 - Binary image data to be compared to bin_img2
        #   bin_img2 - Binary image data to be compared to bin_img1
        xor_img = pcv.logical_xor(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)

        # Fill small objects (reduce image noise)
        # Inputs:
        #   bin_img - Binary image data
        #   size - Minimum object area size in pixels (must be an integer); smaller objects will be filled
        ab_fill = pcv.fill(bin_img=ab, size=200)

        # Closing filters out dark noise from an image.
        # Inputs:
        #   gray_img - Grayscale or binary image data
        #   kernel - Optional neighborhood, expressed as an array of 1's and 0's. If None (default),
        #            uses a cross-shaped structuring element.
        closed_ab = pcv.closing(gray_img=ab_fill)

        # Apply mask (for VIS images, mask_color=white)
        masked2 = pcv.apply_mask(img=masked, mask=ab_fill, mask_color='white')

        # Identify objects
        # Inputs:
        #   img - RGB or grayscale image data for plotting
        #   mask - Binary mask used for detecting contours
        id_objects, obj_hierarchy = pcv.find_objects(img=masked2, mask=ab_fill)

        # Define the region of interest (ROI)
        # Inputs:
        #   img - RGB or grayscale image to plot the ROI on
        #   x - The x-coordinate of the upper left corner of the rectangle
        #   y - The y-coordinate of the upper left corner of the rectangle
        #   h - The height of the rectangle
        #   w - The width of the rectangle
        roi1, roi_hierarchy = pcv.roi.rectangle(img=masked2, x=50, y=50, h=100, w=100)

        # Decide which objects to keep
        # Inputs:
        #   img = img to display kept objects
        #   roi_contour = contour of roi, output from any ROI function
        #   roi_hierarchy = contour of roi, output from any ROI function
        #   object_contour = contours of objects, output from pcv.find_objects function
        #   obj_hierarchy = hierarchy of objects, output from pcv.find_objects function
        #   roi_type = 'partial' (default, for partially inside the ROI), 'cutto', or
        #              'largest' (keep only largest contour)
        roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img=img, roi_contour=roi1,
                                                                       roi_hierarchy=roi_hierarchy,
                                                                       object_contour=id_objects,
                                                                       obj_hierarchy=obj_hierarchy,
                                                                       roi_type='partial')

        # Object combine kept objects
        # Inputs:
        #   img - RGB or grayscale image data for plotting
        #   contours - Contour list
        #   hierarchy - Contour hierarchy array
        obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3)

        ############### Analysis ################

        # Find shape properties; data gets stored to an Outputs class automatically
        # Inputs:
        #   img - RGB or grayscale image data
        #   obj - Single or grouped contour object
        #   mask - Binary image mask to use as mask for moments analysis
        analysis_image = pcv.analyze_object(img=img, obj=obj, mask=mask)
        pcv.print_image(analysis_image, "plant/analysis_image.png")
        image = Image.open('plant/analysis_image.png')
        st.image(image, caption='Analysis Image', use_column_width=True)

        # Shape properties relative to user boundary line (optional)
        # Inputs:
        #   img - RGB or grayscale image data
        #   obj - Single or grouped contour object
        #   mask - Binary mask of selected contours
        #   line_position - Position of boundary line (a value of 0 would draw a line
        #                   through the bottom of the image)
        boundary_image2 = pcv.analyze_bound_horizontal(img=img, obj=obj, mask=mask, line_position=370)
        pcv.print_image(boundary_image2, "plant/boundary_image2.png")
        image = Image.open('plant/boundary_image2.png')
        st.image(image, caption='Boundary Image', use_column_width=True)

        # Determine color properties: Histograms, Color Slices and Pseudocolored Images,
        # output color analyzed images (optional)
        # Inputs:
        #   rgb_img - RGB image data
        #   mask - Binary mask of selected contours
        #   hist_plot_type - None (default), 'all', 'rgb', 'lab', or 'hsv'
        # This is the data to be printed to the SVG histogram file
        color_histogram = pcv.analyze_color(rgb_img=img, mask=kept_mask, hist_plot_type='all')

        # Print the histogram out to save it
        pcv.print_image(img=color_histogram, filename="plant/vis_tutorial_color_hist.jpg")
        image = Image.open('plant/vis_tutorial_color_hist.jpg')
        st.image(image, caption='Color Histogram', use_column_width=True)

        # Divide the plant object into twenty equidistant bins and assign pseudolandmark points based
        # upon their actual (not scaled) position. Once this data is scaled, this approach may provide
        # some information regarding shape independent of size.
        # Inputs:
        #   img - RGB or grayscale image data
        #   obj - Single or grouped contour object
        #   mask - Binary mask of selected contours
        top_x, bottom_x, center_v_x = pcv.x_axis_pseudolandmarks(img=img, obj=obj, mask=mask)
        top_y, bottom_y, center_v_y = pcv.y_axis_pseudolandmarks(img=img, obj=obj, mask=mask)

        # The print_results function will take the measurements stored when running any (or all) of
        # these functions, format them, and print an output text file for data analysis. The Outputs
        # class stores data whenever any of the following functions are run: analyze_bound_horizontal,
        # analyze_bound_vertical, analyze_color, analyze_nir_intensity, analyze_object, fluor_fvfm,
        # report_size_marker_area, watershed. If no functions have been run, it will print an empty
        # text file.
        pcv.print_results(filename='vis_tutorial_results.txt')
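# The root() function above is a Streamlit front end, so it has no main() entry point of its
# own. It is typically launched from the command line (the script name below is a placeholder,
# not from the source):
#
#   streamlit run plantcv_streamlit_app.py
#
# with root() invoked at module scope so the uploader widget renders when the page loads:
root()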
def main():
    # Get options
    args = options()
    pcv.params.debug = args.debug  # set debug mode
    pcv.params.debug_outdir = args.outdir  # set output directory

    # Read image
    img, path, filename = pcv.readimage(filename=args.image)

    # Convert RGB to HSV and extract the hue channel
    h = pcv.rgb2gray_hsv(rgb_img=img, channel='h')

    # Threshold the hue image
    h_thresh = pcv.threshold.binary(gray_img=h, threshold=85, max_value=255, object_type='dark')

    # Median Blur
    h_mblur = pcv.median_blur(gray_img=h_thresh, ksize=20)
    h_cnt = pcv.median_blur(gray_img=h_thresh, ksize=20)

    # Convert RGB to LAB and extract the Blue channel
    b = pcv.rgb2gray_lab(rgb_img=img, channel='b')

    # Threshold the blue image
    b_thresh = pcv.threshold.binary(gray_img=b, threshold=160, max_value=255, object_type='light')
    b_cnt = pcv.threshold.binary(gray_img=b, threshold=160, max_value=255, object_type='light')

    # Fill small objects
    # b_fill = pcv.fill(b_thresh, 10)

    # Join the thresholded hue and blue-yellow images
    bs = pcv.logical_or(bin_img1=h_mblur, bin_img2=b_cnt)

    # Apply Mask (for VIS images, mask_color=white)
    masked = pcv.apply_mask(img=img, mask=bs, mask_color='white')

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')

    # Threshold the green-magenta and blue images
    maskeda_thresh = pcv.threshold.binary(gray_img=masked_a, threshold=115, max_value=255, object_type='dark')
    maskeda_thresh1 = pcv.threshold.binary(gray_img=masked_a, threshold=135, max_value=255, object_type='light')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b, threshold=128, max_value=255, object_type='light')

    # Join the thresholded green-magenta and blue-yellow images (OR)
    ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
    ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)

    # Fill small objects
    ab_fill = pcv.fill(bin_img=ab, size=200)

    # Apply mask (for VIS images, mask_color=white)
    masked2 = pcv.apply_mask(img=masked, mask=ab_fill, mask_color='white')

    # Identify objects
    id_objects, obj_hierarchy = pcv.find_objects(img=masked2, mask=ab_fill)

    # Define ROI
    roi1, roi_hierarchy = pcv.roi.rectangle(img=masked2, x=400, y=400, h=200, w=200)

    # Decide which objects to keep
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img=img, roi_contour=roi1, roi_hierarchy=roi_hierarchy,
        object_contour=id_objects, obj_hierarchy=obj_hierarchy, roi_type='partial')

    # Object combine kept objects
    obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3)

    ############### Analysis ################

    outfile = False
    if args.writeimg:
        outfile = args.outdir + "/" + filename

    # Find shape properties, output shape image (optional)
    shape_imgs = pcv.analyze_object(img=img, obj=obj, mask=mask)

    # Shape properties relative to user boundary line (optional)
    boundary_img1 = pcv.analyze_bound_horizontal(img=img, obj=obj, mask=mask, line_position=600)

    # Determine color properties: Histograms, Color Slices, output color analyzed histogram (optional)
    color_histogram = pcv.analyze_color(rgb_img=img, mask=kept_mask, hist_plot_type='all')

    # Pseudocolor the grayscale image
    pseudocolored_img = pcv.visualize.pseudocolor(gray_img=h, mask=kept_mask, cmap='jet')

    # Write shape and color data to results file
    pcv.print_results(filename=args.result)
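# The entry point for the main()-style workflows in this file is not shown. PlantCV workflow
# scripts conventionally end with a guard like the one below; this is an assumption about the
# surrounding scripts, not code copied from the source.
if __name__ == '__main__':
    main()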
def main():
    # Get options
    # args = options()
    parser = argparse.ArgumentParser(description="Imaging processing with opencv")
    parser.add_argument("-i", "--image", help="Input image file.", required=True)
    parser.add_argument("-o", "--outdir", help="Output directory for image files.", required=False)
    parser.add_argument("-r", "--result", help="Result file.", required=False)
    parser.add_argument("-w", "--writeimg", help="Write out images.", default=False, action="store_true")
    parser.add_argument("-f", "--fileout", help="Output mask file path.", required=True)
    parser.add_argument("-D", "--debug",
                        help="Can be set to 'print' or None (or 'plot' if in jupyter); prints intermediate images.",
                        default=None)
    args = parser.parse_args()

    pcv.params.debug = args.debug  # set debug mode
    pcv.params.debug_outdir = args.outdir  # set output directory

    # Read image
    img, path, filename = pcv.readimage(filename=args.image)
    img = cv2.resize(img, (1280, 960), interpolation=cv2.INTER_AREA)

    # Convert RGB to HSV and extract the saturation channel
    s = pcv.rgb2gray_hsv(rgb_img=img, channel='s')

    # Threshold the saturation image
    s_thresh = pcv.threshold.binary(gray_img=s, threshold=35, max_value=255, object_type='light')

    # Median Blur
    s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=5)
    s_cnt = pcv.median_blur(gray_img=s_thresh, ksize=5)

    # Convert RGB to LAB and extract the Blue channel
    b = pcv.rgb2gray_lab(rgb_img=img, channel='b')

    # Threshold the blue image
    b_thresh = pcv.threshold.binary(gray_img=b, threshold=180, max_value=255, object_type='light')
    b_cnt = pcv.threshold.binary(gray_img=b, threshold=180, max_value=255, object_type='light')

    # Fill small objects
    # b_fill = pcv.fill(b_thresh, 10)

    # Join the thresholded saturation and blue-yellow images
    bs = pcv.logical_or(bin_img1=s_mblur, bin_img2=b_cnt)

    # Apply Mask (for VIS images, mask_color=white)
    masked = pcv.apply_mask(rgb_img=img, mask=bs, mask_color='white')

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')

    # Threshold the green-magenta and blue images
    maskeda_thresh = pcv.threshold.binary(gray_img=masked_a, threshold=95, max_value=255, object_type='dark')
    maskeda_thresh1 = pcv.threshold.binary(gray_img=masked_a, threshold=135, max_value=255, object_type='light')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b, threshold=128, max_value=255, object_type='light')

    # Join the thresholded green-magenta and blue-yellow images (OR)
    ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
    ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)

    # Fill small objects
    ab_fill = pcv.fill(bin_img=ab, size=200)

    # Apply mask (for VIS images, mask_color=white)
    masked2 = pcv.apply_mask(rgb_img=masked, mask=ab_fill, mask_color='white')

    # Identify objects
    id_objects, obj_hierarchy = pcv.find_objects(img=masked2, mask=ab_fill)

    # Define ROI
    roi1, roi_hierarchy = pcv.roi.rectangle(img=masked2, x=0, y=0, h=img.shape[0], w=img.shape[1])

    # Decide which objects to keep
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img=img, roi_contour=roi1, roi_hierarchy=roi_hierarchy,
        object_contour=id_objects, obj_hierarchy=obj_hierarchy, roi_type='partial')

    # Object combine kept objects
    obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3)

    ############### Analysis ################

    outfile = False
    if args.writeimg:
        outfile = args.outdir + "/" + filename

    # Find shape properties, output shape image (optional)
    shape_imgs = pcv.analyze_object(img=img, obj=obj, mask=mask)

    # Shape properties relative to user boundary line (optional)
    boundary_img1 = pcv.analyze_bound_horizontal(img=img, obj=obj, mask=mask, line_position=1680)

    # Determine color properties: Histograms, Color Slices, output color analyzed histogram (optional)
    color_histogram = pcv.analyze_color(rgb_img=img, mask=kept_mask, hist_plot_type='all')

    # Pseudocolor the grayscale image
    pseudocolored_img = pcv.visualize.pseudocolor(gray_img=s, mask=kept_mask, cmap='jet')

    # Write shape and color data to results file
    # pcv.print_results(filename=args.result)

    # Compute the fraction of mask pixels above 128 ("rec_rate"), stamp it on the mask, and save it
    count = 0
    [rows, cols] = mask.shape
    for i in range(rows):
        for j in range(cols):
            if mask[i, j] > 128:
                count += 1
    re = float(count) / (rows * cols)
    text = "rec_rate:" + str(round(re, 4))
    cv2.putText(mask, text, (40, 50), cv2.FONT_HERSHEY_PLAIN, 2.0, 255, 2)
    # cv2.imshow("tt", mask)
    # cv2.waitKey(0)
    cv2.imwrite(args.fileout, mask)
    print(str(round(re, 4)))
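# The per-pixel Python loop at the end of main() above can be replaced by an equivalent and
# much faster NumPy reduction. A sketch of the same "recovered-pixel rate" computation
# (the function name is hypothetical, not from the source):
import numpy as np


def rec_rate(mask, threshold=128):
    # Fraction of pixels whose mask value is strictly greater than the threshold,
    # matching the mask[i, j] > 128 test in the loop above.
    return float(np.count_nonzero(mask > threshold)) / mask.size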