def platCV(imagePath):
    img, path, filename = pcv.readimage(imagePath)

    # Pipeline step
    device = 0
    debug = 'print'

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 85, 255, 'light', device, debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 130, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 130, 255, 'light', device, debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, debug)

    # Join the thresholded saturation and blue-yellow images
    # device, bs = pcv.logical_or(s_mblur, b_cnt, device, debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, b_fill, 'white', device, debug)
def main():
    # Get options
    args = options()

    # Read image (converting fmax and track to 8 bit just to create a mask, use 16-bit for all the math)
    mask, path, filename = pcv.readimage(args.fmax)
    # mask = cv2.imread(args.fmax)
    track = cv2.imread(args.track)

    mask1, mask2, mask3 = cv2.split(mask)

    # Pipeline step
    device = 0

    # Mask pesky track autofluor
    device, track1 = pcv.rgb2gray_hsv(track, 'v', device, args.debug)
    device, track_thresh = pcv.binary_threshold(track1, 0, 255, 'light', device, args.debug)
    device, track_inv = pcv.invert(track_thresh, device, args.debug)
    device, track_masked = pcv.apply_mask(mask1, track_inv, 'black', device, args.debug)

    # Threshold the Saturation image
    device, fmax_thresh = pcv.binary_threshold(track_masked, 20, 255, 'light', device, args.debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(fmax_thresh, 0, device, args.debug)
    device, s_cnt = pcv.median_blur(fmax_thresh, 0, device, args.debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 5, device, args.debug)
    device, sfill_cnt = pcv.fill(s_mblur, s_cnt, 5, device, args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(mask, sfill_cnt, device, args.debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(mask, 'circle', device, None, 'default', args.debug, True, 0, 0, -100, -100)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(mask, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug)

    # Object combine kept objects
    device, obj, masked = pcv.object_composition(mask, roi_objects, hierarchy3, device, args.debug)

    ################ Analysis ################

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(mask, args.fmax, obj, masked, device, args.debug, args.outdir + '/' + filename)

    # Fluorescence Measurement (read in 16-bit images)
    fdark = cv2.imread(args.fdark, -1)
    fmin = cv2.imread(args.fmin, -1)
    fmax = cv2.imread(args.fmax, -1)

    device, fvfm_header, fvfm_data = pcv.fluor_fvfm(fdark, fmin, fmax, kept_mask, device, args.outdir + '/' + filename, 1000, args.debug)

    # Output shape and color data
    pcv.print_results(args.fmax, shape_header, shape_data)
    pcv.print_results(args.fmax, fvfm_header, fvfm_data)
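# NOTE: The main() pipelines in this file call an options() helper that is not
# included here. The block below is a minimal sketch (an assumption, not the
# original implementation) of what it could look like for the fluorescence
# pipeline above; the flag names simply mirror the attributes accessed through
# args (fmax, fmin, fdark, track, outdir, debug).
import argparse


def options():
    parser = argparse.ArgumentParser(description="FLU image processing with PlantCV (sketch).")
    parser.add_argument("--fmax", required=True, help="Fmax fluorescence image file.")
    parser.add_argument("--fmin", required=True, help="Fmin fluorescence image file.")
    parser.add_argument("--fdark", required=True, help="Fdark fluorescence image file.")
    parser.add_argument("--track", required=True, help="Track/autofluorescence mask image file.")
    parser.add_argument("--outdir", default=".", help="Output directory for images.")
    parser.add_argument("--debug", default=None, help="Debug mode: None, 'print', or 'plot'.")
    return parser.parse_args()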
def test_plantcv_binary_threshold():
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    device, binary_img = pcv.binary_threshold(img=img, threshold=25, maxValue=255, object_type="light",
                                              device=0, debug=None)
    # Assert that the output image has the dimensions of the input image
    if all([i == j for i, j in zip(np.shape(binary_img), TEST_GRAY_DIM)]):
        # Assert that the image is binary
        if all([i == j for i, j in zip(np.unique(binary_img), [0, 255])]):
            assert 1
        else:
            assert 0
    else:
        assert 0
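# The test above relies on module-level fixtures defined elsewhere in the
# suite. The lines below are a minimal sketch of those assumptions; the file
# name and image dimensions are illustrative placeholders, not values taken
# from the original test data. The test can then be run with, for example,
# `pytest -k test_plantcv_binary_threshold`.
import os

import cv2
import numpy as np
import plantcv as pcv

TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
TEST_INPUT_GRAY = "input_gray_image.png"  # hypothetical file name
TEST_GRAY_DIM = (2056, 2454)              # hypothetical (rows, cols) of the grayscale test image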
def find_object(image, rand_int):
    head, tail = os.path.split(image)
    print("Finding object in image " + tail + "...")

    # Read image
    img, path, filename = pcv.readimage(image)

    # Pipeline step
    device = rand_int
    debug = "print"  # or "plot"

    # Convert RGB to HSV and extract the Saturation channel
    # hue, saturation, value
    device, h = pcv.rgb2gray_hsv(img, 'h', device)

    # Threshold the Saturation image
    device, h_thresh = pcv.binary_threshold(h, 30, 255, 'light', device)
    device, h_mblur = pcv.median_blur(h_thresh, 5, device)
    device, h_cnt = pcv.median_blur(h_thresh, 5, device)

    # Fill small objects
    device, ab_fill = pcv.fill(h_mblur, h_mblur, 200, device)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, h_mblur, 'white', device)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked, h_mblur, device)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(masked, 'rectangle', device, None, 'default', False, 0, 0, 0, 0)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, debug)
    return
def binary_mask():
    if request.method == 'POST':
        imagefile = request.files['image']
        i = os.path.join(app.config['UPLOAD_FOLDER'], imagefile.filename)
        imagefile.save(i)
        print("Files uploaded successfully")

        img, path, filename = pcv.readimage(i)

        names = {"h": "hue", "s": "saturation", "v": "value"}
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(hsv)
        channels = {"h": h, "s": s, "v": v}
        s = channels["s"]

        # Creating the binary threshold image using the channel, the threshold, the max value, and the object type
        device, s_thresh = pcv.binary_threshold(s, 85, 255, 'light', device=0, debug=None)

        # Converting the image from Numpy array to Base64 string. The image is not BGR,
        # so it does not need to be converted to RGB this time
        im = Image.fromarray(s_thresh.astype("uint8"))
        rawBytes = io.BytesIO()
        im.save(rawBytes, "PNG")
        rawBytes.seek(0)
        outimage = base64.b64encode(rawBytes.read())

        # Returning the template and image
        return render_template("result.html", image=outimage)
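# The binary_mask() view above assumes a Flask application context that is not
# shown. The scaffolding below is a minimal sketch of those assumptions; the
# upload-folder path and route registration are illustrative, not the original
# configuration.
import os
import io
import base64

import cv2
from PIL import Image
from flask import Flask, request, render_template
import plantcv as pcv

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = os.path.join(os.getcwd(), "uploads")

# A route registration along these lines is assumed:
# app.add_url_rule("/binary_mask", view_func=binary_mask, methods=["GET", "POST"])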
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)
    # roi = cv2.imread(args.roi)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, args.debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 150, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, args.debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 122, 255, 'dark', device, args.debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 133, 255, 'light', device, args.debug)

    # Join the thresholded saturation and blue-yellow images (OR)
    device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)

    # Fill small objects
    device, ab_fill = pcv.fill(ab, ab_cnt, 200, device, args.debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(masked, ab_fill, 'white', device, args.debug)

    # Select area with black bars and find overlapping plant material
    device, roi1, roi_hierarchy1 = pcv.define_roi(masked2, 'rectangle', device, None, 'default', args.debug, True, 0, 0, -1900, 0)
    device, id_objects1, obj_hierarchy1 = pcv.find_objects(masked2, ab_fill, device, args.debug)
    device, roi_objects1, hierarchy1, kept_mask1, obj_area1 = pcv.roi_objects(masked2, 'cutto', roi1, roi_hierarchy1, id_objects1, obj_hierarchy1, device, args.debug)
    device, masked3 = pcv.apply_mask(masked2, kept_mask1, 'white', device, args.debug)
    device, masked_a1 = pcv.rgb2gray_lab(masked3, 'a', device, args.debug)
    device, masked_b1 = pcv.rgb2gray_lab(masked3, 'b', device, args.debug)
    device, maskeda_thresh1 = pcv.binary_threshold(masked_a1, 122, 255, 'dark', device, args.debug)
    device, maskedb_thresh1 = pcv.binary_threshold(masked_b1, 170, 255, 'light', device, args.debug)
    device, ab1 = pcv.logical_or(maskeda_thresh1, maskedb_thresh1, device, args.debug)
    device, ab_cnt1 = pcv.logical_or(maskeda_thresh1, maskedb_thresh1, device, args.debug)
    device, ab_fill1 = pcv.fill(ab1, ab_cnt1, 300, device, args.debug)

    device, roi2, roi_hierarchy2 = pcv.define_roi(masked2, 'rectangle', device, None, 'default', args.debug, True, 1900, 0, 0, 0)
    device, id_objects2, obj_hierarchy2 = pcv.find_objects(masked2, ab_fill, device, args.debug)
    device, roi_objects2, hierarchy2, kept_mask2, obj_area2 = pcv.roi_objects(masked2, 'cutto', roi2, roi_hierarchy2, id_objects2, obj_hierarchy2, device, args.debug)
    device, masked4 = pcv.apply_mask(masked2, kept_mask2, 'white', device, args.debug)
    device, masked_a2 = pcv.rgb2gray_lab(masked4, 'a', device, args.debug)
    device, masked_b2 = pcv.rgb2gray_lab(masked4, 'b', device, args.debug)
    device, maskeda_thresh2 = pcv.binary_threshold(masked_a2, 122, 255, 'dark', device, args.debug)
    device, maskedb_thresh2 = pcv.binary_threshold(masked_b2, 170, 255, 'light', device, args.debug)
    device, ab2 = pcv.logical_or(maskeda_thresh2, maskedb_thresh2, device, args.debug)
    device, ab_cnt2 = pcv.logical_or(maskeda_thresh2, maskedb_thresh2, device, args.debug)
    device, ab_fill2 = pcv.fill(ab2, ab_cnt2, 200, device, args.debug)

    device, ab_cnt3 = pcv.logical_or(ab_fill1, ab_fill2, device, args.debug)
    device, masked3 = pcv.apply_mask(masked2, ab_cnt3, 'white', device, args.debug)

    # Identify objects
    device, id_objects3, obj_hierarchy3 = pcv.find_objects(masked2, ab_fill, device, args.debug)

    # Define ROI
    device, roi3, roi_hierarchy3 = pcv.define_roi(masked2, 'rectangle', device, None, 'default', args.debug, True, 500, 0, -450, -530)

    # Decide which objects to keep and combine with objects overlapping with black bars
    device, roi_objects3, hierarchy3, kept_mask3, obj_area1 = pcv.roi_objects(img, 'cutto', roi3, roi_hierarchy3, id_objects3, obj_hierarchy3, device, args.debug)
    device, kept_mask4_1 = pcv.logical_or(ab_cnt3, kept_mask3, device, args.debug)
    device, kept_cnt = pcv.logical_or(ab_cnt3, kept_mask3, device, args.debug)
    device, kept_mask4 = pcv.fill(kept_mask4_1, kept_cnt, 200, device, args.debug)
    device, masked5 = pcv.apply_mask(masked2, kept_mask4, 'white', device, args.debug)
    device, id_objects4, obj_hierarchy4 = pcv.find_objects(masked5, kept_mask4, device, args.debug)
    device, roi4, roi_hierarchy4 = pcv.define_roi(masked2, 'rectangle', device, None, 'default', args.debug, False, 0, 0, 0, 0)
    device, roi_objects4, hierarchy4, kept_mask4, obj_area = pcv.roi_objects(img, 'partial', roi4, roi_hierarchy4, id_objects4, obj_hierarchy4, device, args.debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects4, hierarchy4, device, args.debug)

    ############## Analysis ################

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(img, args.image, obj, mask, device, args.debug, args.outdir + '/' + filename)

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(img, args.image, obj, mask, 950, device, args.debug, args.outdir + '/' + filename)

    # Tiller Tool Test
    device, tillering_header, tillering_data, tillering_img = pcv.tiller_count(img, args.image, obj, mask, 965, device, args.debug, args.outdir + '/' + filename)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, norm_slice = pcv.analyze_color(img, args.image, kept_mask4, 256, device, args.debug, 'all', 'rgb', 'v', args.outdir + '/' + filename)

    # Output shape and color data
    pcv.print_results(args.image, shape_header, shape_data)
    pcv.print_results(args.image, color_header, color_data)
    pcv.print_results(args.image, boundary_header, boundary_data)
    pcv.print_results(args.image, tillering_header, tillering_data)
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)
    # roi = cv2.imread(args.roi)

    # Pipeline step
    device = 0

    # Convert RGB to LAB and extract the Lightness channel
    device, s = pcv.rgb2gray_lab(img, 'l', device, args.debug)

    # Threshold the lightness image
    device, s_thresh = pcv.binary_threshold(s, 100, 255, 'light', device, args.debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 0, device, args.debug)
    device, s_cnt = pcv.median_blur(s_thresh, 0, device, args.debug)

    # Fill small objects
    # device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 145, 255, 'light', device, args.debug)
    device, b_cnt = pcv.binary_threshold(b, 145, 255, 'light', device, args.debug)

    # Fill small objects
    # device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_mblur, b_cnt, device, args.debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, args.debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark', device, args.debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, args.debug)

    # Join the thresholded saturation and blue-yellow images (OR)
    device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)

    # Fill small objects
    device, ab_fill = pcv.fill(ab, ab_cnt, 20, device, args.debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(masked, ab_fill, 'white', device, args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, args.debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(img, 'rectangle', device, None, 'default', args.debug, True, 30, 25, -10, -15)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)

    ############## Analysis ################

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(img, args.image, obj, mask, device, args.debug, args.outdir + '/' + filename)

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(img, args.image, obj, mask, 25, device, args.debug, args.outdir + '/' + filename)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, norm_slice = pcv.analyze_color(img, args.image, kept_mask, 256, device, args.debug, 'all', 'rgb', 'v', 'img', 300, args.outdir + '/' + filename)

    # Output shape and color data
    pcv.print_results(args.image, shape_header, shape_data)
    pcv.print_results(args.image, color_header, color_data)
    pcv.print_results(args.image, boundary_header, boundary_data)
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)
    brass_mask = cv2.imread(args.roi)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 49, 255, 'light', device, args.debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, s_mblur, 'white', device, args.debug)

    # Convert RGB to LAB and extract the Green-Magenta
    device, soil_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug)

    # Threshold the green-magenta
    device, soila_thresh = pcv.binary_threshold(soil_a, 133, 255, 'light', device, args.debug)
    device, soila_cnt = pcv.binary_threshold(soil_a, 133, 255, 'light', device, args.debug)

    # Fill small objects
    device, soil_fill = pcv.fill(soila_thresh, soila_cnt, 200, device, args.debug)

    # Median Filter
    device, soil_mblur = pcv.median_blur(soil_fill, 13, device, args.debug)
    device, soil_cnt = pcv.median_blur(soil_fill, 13, device, args.debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_mblur, soil_cnt, 'white', device, args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, args.debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(img, 'rectangle', device, None, 'default', args.debug, True, 400, 400, -400, -400)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)

    ############## Analysis ################

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(img, args.image, obj, mask, device, args.debug, args.outdir + '/' + filename)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, norm_slice = pcv.analyze_color(img, args.image, mask, 256, device, args.debug, 'all', 'rgb', 'v', 'img', 300, args.outdir + '/' + filename)

    # Output shape and color data
    pcv.print_results(args.image, shape_header, shape_data)
    pcv.print_results(args.image, color_header, color_data)
def main():
    # Get options
    args = options()
    if args.debug:
        print("Analyzing your image dude...")

    # Read image
    img = cv2.imread(args.image, flags=0)
    # if a region of interest is specified read it in
    roi = cv2.imread(args.roi)

    # Pipeline step
    device = 0

    # Start by examining the distribution of pixel intensity values
    if args.debug:
        pcv.plot_hist(img, 'hist_img')

    # Will intensity transformation enhance your ability to isolate the object of interest by thresholding?
    device, he_img = pcv.HistEqualization(img, device, args.debug)
    if args.debug:
        pcv.plot_hist(he_img, 'hist_img_he')

    # Laplace filtering (identify edges based on 2nd derivative)
    device, lp_img = pcv.laplace_filter(img, 1, 1, device, args.debug)
    if args.debug:
        pcv.plot_hist(lp_img, 'hist_lp')

    # Laplacian image sharpening, this step will enhance the darkness of the edges detected
    device, lp_shrp_img = pcv.image_subtract(img, lp_img, device, args.debug)
    if args.debug:
        pcv.plot_hist(lp_shrp_img, 'hist_lp_shrp')

    # Sobel filtering
    # 1st derivative sobel filtering along horizontal axis, kernel = 1, unscaled
    device, sbx_img = pcv.sobel_filter(img, 1, 0, 1, 1, device, args.debug)
    if args.debug:
        pcv.plot_hist(sbx_img, 'hist_sbx')

    # 1st derivative sobel filtering along vertical axis, kernel = 1, unscaled
    device, sby_img = pcv.sobel_filter(img, 0, 1, 1, 1, device, args.debug)
    if args.debug:
        pcv.plot_hist(sby_img, 'hist_sby')

    # Combine the effects of both x and y filters through matrix addition
    # This will capture edges identified within each plane and emphasize edges found in both images
    device, sb_img = pcv.image_add(sbx_img, sby_img, device, args.debug)
    if args.debug:
        pcv.plot_hist(sb_img, 'hist_sb_comb_img')

    # Use a lowpass (blurring) filter to smooth the sobel image
    device, mblur_img = pcv.median_blur(sb_img, 1, device, args.debug)
    device, mblur_invert_img = pcv.invert(mblur_img, device, args.debug)

    # Combine the smoothed sobel image with the laplacian sharpened image
    # combines the best features of both methods as described in "Digital Image Processing" by Gonzalez and Woods pg. 169
    device, edge_shrp_img = pcv.image_add(mblur_invert_img, lp_shrp_img, device, args.debug)
    if args.debug:
        pcv.plot_hist(edge_shrp_img, 'hist_edge_shrp_img')

    # Perform thresholding to generate a binary image
    device, tr_es_img = pcv.binary_threshold(edge_shrp_img, 145, 255, 'dark', device, args.debug)

    # Prepare a few small kernels for morphological filtering
    kern = np.zeros((3, 3), dtype=np.uint8)
    kern1 = np.copy(kern)
    kern1[1, 1:3] = 1
    kern2 = np.copy(kern)
    kern2[1, 0:2] = 1
    kern3 = np.copy(kern)
    kern3[0:2, 1] = 1
    kern4 = np.copy(kern)
    kern4[1:3, 1] = 1

    # Prepare a larger kernel for dilation
    kern[1, 0:3] = 1
    kern[0:3, 1] = 1

    # Perform erosion with 4 small kernels
    device, e1_img = pcv.erode(tr_es_img, kern1, 1, device, args.debug)
    device, e2_img = pcv.erode(tr_es_img, kern2, 1, device, args.debug)
    device, e3_img = pcv.erode(tr_es_img, kern3, 1, device, args.debug)
    device, e4_img = pcv.erode(tr_es_img, kern4, 1, device, args.debug)

    # Combine eroded images
    device, c12_img = pcv.logical_or(e1_img, e2_img, device, args.debug)
    device, c123_img = pcv.logical_or(c12_img, e3_img, device, args.debug)
    device, c1234_img = pcv.logical_or(c123_img, e4_img, device, args.debug)

    # Perform dilation
    device, dil_img = pcv.dilate(c1234_img, kern, 1, device, args.debug)

    # Get masked image
    # The dilated image may contain some pixels which are not plant
    device, masked_erd = pcv.apply_mask(img, c1234_img, 'black', device, args.debug)
    device, masked_erd_dil = pcv.apply_mask(img, dil_img, 'black', device, args.debug)

    # Need to remove the edges of the image, we did that by generating a set of rectangles to mask the edges
    # img is (254 X 320)
    device, box1_img, rect_contour1, hierarchy1 = pcv.rectangle_mask(img, (1, 1), (64, 252), device, args.debug)
    device, box2_img, rect_contour2, hierarchy2 = pcv.rectangle_mask(img, (256, 1), (318, 252), device, args.debug)
    device, box3_img, rect_contour3, hierarchy3 = pcv.rectangle_mask(img, (1, 184), (318, 252), device, args.debug)
    device, box4_img, rect_contour4, hierarchy4 = pcv.border_mask(img, (1, 1), (318, 252), device, args.debug)

    # Combine boxes to filter the edges and car out of the photo
    device, bx12_img = pcv.logical_or(box1_img, box2_img, device, args.debug)
    device, bx123_img = pcv.logical_or(bx12_img, box3_img, device, args.debug)
    device, bx1234_img = pcv.logical_or(bx123_img, box4_img, device, args.debug)
    device, inv_bx1234_img = pcv.invert(bx1234_img, device, args.debug)

    # Apply the box mask to the image
    device, masked_img = pcv.apply_mask(masked_erd_dil, inv_bx1234_img, 'black', device, args.debug)

    # Generate a binary to send to the analysis function
    device, mask = pcv.binary_threshold(masked_img, 1, 255, 'light', device, args.debug)
    pcv.analyze_NIR_intensity(img, args.image, mask, 256, device, args.debug, 'example')
def back_for_ground_sub(img, sliders):
    args = options()
    debug = args.debug
    stop = 0
    sat_thresh = 85
    blue_thresh = 135
    green_magenta_dark_thresh = 117
    green_magenta_light_thresh = 180
    blue_yellow_thresh = 128

    def nothing(x):
        pass

    if sliders == True:
        Stop = np.zeros((100, 512, 3), np.uint8)
        cv2.namedWindow('Saturation', cv2.WINDOW_NORMAL)
        cv2.namedWindow('Blue', cv2.WINDOW_NORMAL)
        cv2.namedWindow('Green_magenta_dark', cv2.WINDOW_NORMAL)
        cv2.namedWindow('Green_magenta_light', cv2.WINDOW_NORMAL)
        cv2.namedWindow('Blue_yellow_light', cv2.WINDOW_NORMAL)
        cv2.namedWindow('Stop')
        cv2.createTrackbar('sat_thresh', 'Saturation', 85, 255, nothing)
        cv2.createTrackbar('blue_thresh', 'Blue', 135, 255, nothing)
        cv2.createTrackbar('green_magenta_dark_thresh', 'Green_magenta_dark', 117, 255, nothing)
        cv2.createTrackbar('green_magenta_light_thresh', 'Green_magenta_light', 180, 255, nothing)
        cv2.createTrackbar('blue_yellow_thresh', 'Blue_yellow_light', 128, 255, nothing)
        cv2.createTrackbar('stop', 'Stop', 0, 1, nothing)

    while stop == 0:
        if sliders == True:
            # get current positions of five trackbars
            sat_thresh = cv2.getTrackbarPos('sat_thresh', 'Saturation')
            blue_thresh = cv2.getTrackbarPos('blue_thresh', 'Blue')
            green_magenta_dark_thresh = cv2.getTrackbarPos('green_magenta_dark_thresh', 'Green_magenta_dark')
            green_magenta_light_thresh = cv2.getTrackbarPos('green_magenta_light_thresh', 'Green_magenta_light')
            blue_yellow_thresh = cv2.getTrackbarPos('blue_yellow_thresh', 'Blue_yellow_light')

        # Pipeline step
        device = 0

        # Convert RGB to HSV and extract the Saturation channel
        # Extract the light and dark from the image
        device, s = pcv.rgb2gray_hsv(img, 's', device)
        # device, s_thresh = pcv.binary_threshold(s, sat_thresh, 255, 'light', device)
        device, s_thresh = pcv.otsu_auto_threshold(s, 255, 'light', device, debug="plot")
        device, s_mblur = pcv.median_blur(s_thresh, 5, device)
        device, s_cnt = pcv.median_blur(s_thresh, 5, device)

        # Convert RGB to LAB and extract the Blue channel
        # Threshold the blue image
        # Combine the thresholded saturation and blue images with a logical or
        device, b = pcv.rgb2gray_lab(img, 'b', device)
        device, b_thresh = pcv.otsu_auto_threshold(b, 255, 'light', device, debug="plot")
        device, b_cnt = pcv.otsu_auto_threshold(b, 255, 'light', device, debug="plot")
        device, b_cnt_2 = pcv.binary_threshold(b, 135, 255, 'light', device, debug="plot")
        device, bs = pcv.logical_or(s_mblur, b_cnt, device)

        # Mask the original image with the thresholded combination of the blue & saturation channels
        device, masked = pcv.apply_mask(img, bs, 'white', device, debug="plot")

        # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
        device, masked_a = pcv.rgb2gray_lab(masked, 'a', device)
        device, masked_b = pcv.rgb2gray_lab(masked, 'b', device)

        # Focus on capturing the plant from the masked image 'masked'
        # Extract plant green-magenta and blue-yellow channels
        # Channels are thresholded to capture different portions of the plant
        # Threshold the green-magenta and blue images
        # Images joined together
        # device, maskeda_thresh = pcv.binary_threshold(masked_a, 115, 255, 'dark', device)
        device, maskeda_thresh = pcv.binary_threshold(masked_a, green_magenta_dark_thresh, 255, 'dark', device, debug="plot")  # Original 115 New 125
        device, maskeda_thresh1 = pcv.binary_threshold(masked_a, green_magenta_light_thresh, 255, 'light', device, debug="plot")  # Original 135 New 170
        device, maskedb_thresh = pcv.binary_threshold(masked_b, blue_yellow_thresh, 255, 'light', device, debug="plot")  # Original 150 New 165
        device, maskeda_thresh2 = pcv.binary_threshold(masked_a, green_magenta_dark_thresh, 255, 'dark', device, debug="plot")  # Original 115 New 125

        # Join the thresholded saturation and blue-yellow images (OR)
        device, ab1 = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug="plot")
        device, ab = pcv.logical_or(maskeda_thresh1, ab1, device, debug="plot")
        device, ab_cnt = pcv.logical_or(maskeda_thresh1, ab1, device, debug="plot")
        device, ab_cnt_2 = pcv.logical_and(b_cnt_2, maskeda_thresh2, device, debug="plot")

        # Fill small objects
        device, ab_fill = pcv.fill(ab, ab_cnt, 200, device, debug="plot")  # Original 200 New: 120

        # Apply mask (for vis images, mask_color=white)
        device, masked2 = pcv.apply_mask(masked, ab_fill, 'white', device, debug="plot")
        device, masked3 = pcv.apply_mask(masked, ab_cnt_2, 'white', device, debug="plot")

        # Identify objects
        device, id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, debug="plot")

        # Define ROI
        # Plant extraction done -----------------------------------------------------------------------------------
        if sliders == True:
            stop = cv2.getTrackbarPos('stop', 'Stop')
            cv2.imshow('Stop', Stop)
            cv2.imshow('Saturation', s_thresh)
            cv2.imshow('Blue', b_thresh)
            cv2.imshow('Green_magenta_dark', maskeda_thresh)
            cv2.imshow('Green_magenta_light', maskeda_thresh1)
            cv2.imshow('Blue_yellow_light', maskedb_thresh)
            cv2.imshow('Mask', masked)
            cv2.imshow('Mask2', masked2)
            cv2.imshow('Mask3', masked3)
            cv2.imshow('masked_a', masked_a)
            cv2.imshow('masked_b', masked_b)
            cv2.imshow('fill', ab_fill)
            cv2.imshow('ab_cnt', ab)
            cv2.imshow('ab1', ab1)
            cv2.imshow('ab_cnt2', ab_cnt_2)
            k = cv2.waitKey(1) & 0xFF
            if k == 27:
                break
        else:
            stop = 1

    device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device, roi=None, roi_input='default', debug=False, adjust=True, x_adj=100, y_adj=50, w_adj=-150, h_adj=-50)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, debug=False)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, debug=False)

    return device, ab_fill, mask, obj
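# Illustrative call to back_for_ground_sub() (an assumption; the image path is a
# placeholder). With sliders=False the hard-coded threshold defaults are used and
# no OpenCV trackbar windows are opened.
if __name__ == "__main__":
    example_img = cv2.imread("plant_image.png")
    device, ab_fill, mask, obj = back_for_ground_sub(example_img, sliders=False)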
def main():
    # Initialize device
    device = 0

    # Parse command-line options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(filename=args.image, debug=args.debug)

    # Convert RGB to LAB and extract the Blue-Yellow channel
    device, blue_channel = pcv.rgb2gray_lab(img=img, channel="b", device=device, debug=args.debug)

    # Threshold the blue image using the triangle autothreshold method
    device, blue_tri = pcv.triangle_auto_threshold(device=device, img=blue_channel, maxvalue=255, object_type="light", xstep=1, debug=args.debug)

    # Extract core plant region from the image to preserve delicate plant features during filtering
    device += 1
    plant_region = blue_tri[0:1750, 600:2080]
    if args.debug is not None:
        pcv.print_image(filename=str(device) + "_extract_plant_region.png", img=plant_region)

    # Use a Gaussian blur to disrupt the strong edge features in the cabinet
    device, blur_gaussian = pcv.gaussian_blur(device=device, img=blue_tri, ksize=(3, 3), sigmax=0, sigmay=None, debug=args.debug)

    # Threshold the blurred image to remove features that were blurred
    device, blur_thresholded = pcv.binary_threshold(img=blur_gaussian, threshold=250, maxValue=255, object_type="light", device=device, debug=args.debug)

    # Add the plant region back in to the filtered image
    device += 1
    blur_thresholded[0:1750, 600:2080] = plant_region
    if args.debug is not None:
        pcv.print_image(filename=str(device) + "_replace_plant_region.png", img=blur_thresholded)

    # Fill small noise
    device, blue_fill_50 = pcv.fill(img=np.copy(blur_thresholded), mask=np.copy(blur_thresholded), size=50, device=device, debug=args.debug)

    # Identify objects
    device, contours, contour_hierarchy = pcv.find_objects(img=img, mask=blue_fill_50, device=device, debug=args.debug)

    # Define ROI
    device, roi, roi_hierarchy = pcv.define_roi(img=img, shape="rectangle", device=device, roi=None, roi_input="default", debug=args.debug, adjust=True, x_adj=565, y_adj=0, w_adj=-490, h_adj=-250)

    # Decide which objects to keep
    device, roi_contours, roi_contour_hierarchy, _, _ = pcv.roi_objects(img=img, roi_type="partial", roi_contour=roi, roi_hierarchy=roi_hierarchy, object_contour=contours, obj_hierarchy=contour_hierarchy, device=device, debug=args.debug)

    # If there are no contours left we cannot measure anything
    if len(roi_contours) > 0:
        # Object combine kept objects
        device, plant_contour, plant_mask = pcv.object_composition(img=img, contours=roi_contours, hierarchy=roi_contour_hierarchy, device=device, debug=args.debug)

        outfile = False
        if args.writeimg:
            outfile = args.outdir + "/" + filename

        # Find shape properties, output shape image (optional)
        device, shape_header, shape_data, shape_img = pcv.analyze_object(img=img, imgname=args.image, obj=plant_contour, mask=plant_mask, device=device, debug=args.debug, filename=outfile)

        # Shape properties relative to user boundary line (optional)
        device, boundary_header, boundary_data, boundary_img = pcv.analyze_bound(img=img, imgname=args.image, obj=plant_contour, mask=plant_mask, line_position=440, device=device, debug=args.debug, filename=outfile)

        # Determine color properties: Histograms, Color Slices and Pseudocolored Images,
        # output color analyzed images (optional)
        device, color_header, color_data, color_img = pcv.analyze_color(img=img, imgname=args.image, mask=plant_mask, bins=256, device=device, debug=args.debug, hist_plot_type=None, pseudo_channel="v", pseudo_bkg="img", resolution=300, filename=outfile)

        # Output shape and color data
        result = open(args.result, "a")
        result.write('\t'.join(map(str, shape_header)) + "\n")
        result.write('\t'.join(map(str, shape_data)) + "\n")
        for row in shape_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.write('\t'.join(map(str, color_header)) + "\n")
        result.write('\t'.join(map(str, color_data)) + "\n")
        result.write('\t'.join(map(str, boundary_header)) + "\n")
        result.write('\t'.join(map(str, boundary_data)) + "\n")
        result.write('\t'.join(map(str, boundary_img)) + "\n")
        for row in color_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.close()

        # Find matching NIR image
        device, nirpath = pcv.get_nir(path=path, filename=filename, device=device, debug=args.debug)
        nir_rgb, nir_path, nir_filename = pcv.readimage(nirpath)
        nir_img = cv2.imread(nirpath, 0)

        # Make mask glovelike in proportions via dilation
        device, d_mask = pcv.dilate(plant_mask, kernel=1, i=0, device=device, debug=args.debug)

        # Resize mask
        prop2, prop1 = conv_ratio()
        device, nmask = pcv.resize(img=d_mask, resize_x=prop1, resize_y=prop2, device=device, debug=args.debug)

        # Convert the resized mask to a binary mask
        device, bmask = pcv.binary_threshold(img=nmask, threshold=0, maxValue=255, object_type="light", device=device, debug=args.debug)

        device, crop_img = crop_sides_equally(mask=bmask, nir=nir_img, device=device, debug=args.debug)

        # position, and crop mask
        device, newmask = pcv.crop_position_mask(img=nir_img, mask=crop_img, device=device, x=34, y=9, v_pos="top", h_pos="right", debug=args.debug)

        # Identify objects
        device, nir_objects, nir_hierarchy = pcv.find_objects(img=nir_rgb, mask=newmask, device=device, debug=args.debug)

        # Object combine kept objects
        device, nir_combined, nir_combinedmask = pcv.object_composition(img=nir_rgb, contours=nir_objects, hierarchy=nir_hierarchy, device=device, debug=args.debug)

        if args.writeimg:
            outfile = args.outdir + "/" + nir_filename

        # Analyze NIR signal data
        device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(img=nir_img, rgbimg=nir_rgb, mask=nir_combinedmask, bins=256, device=device, histplot=False, debug=args.debug, filename=outfile)

        # Analyze the shape of the plant contour from the NIR image
        device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(img=nir_img, imgname=nir_filename, obj=nir_combined, mask=nir_combinedmask, device=device, debug=args.debug, filename=outfile)

        # Write NIR data to co-results file
        coresult = open(args.coresult, "a")
        coresult.write('\t'.join(map(str, nhist_header)) + "\n")
        coresult.write('\t'.join(map(str, nhist_data)) + "\n")
        for row in nir_imgs:
            coresult.write('\t'.join(map(str, row)) + "\n")
        coresult.write('\t'.join(map(str, nshape_header)) + "\n")
        coresult.write('\t'.join(map(str, nshape_data)) + "\n")
        coresult.write('\t'.join(map(str, nir_shape)) + "\n")
        coresult.close()
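# The NIR branch above calls two helpers, conv_ratio() and crop_sides_equally(),
# that are defined elsewhere. The stubs below are assumptions reconstructed only
# from how they are used (a VIS-to-NIR scale-factor pair, and cropping the
# resized mask to the NIR frame); the ratio values are placeholders.
def conv_ratio():
    # Assumed to return (resize_y, resize_x) scale factors from VIS to NIR resolution.
    return 0.1154905775, 0.1154905775  # placeholder values


def crop_sides_equally(mask, nir, device, debug=None):
    # Assumed to trim the mask equally on each side so its shape matches the NIR image.
    device += 1
    nir_h, nir_w = nir.shape[:2]
    mask_h, mask_w = mask.shape[:2]
    top = max((mask_h - nir_h) // 2, 0)
    left = max((mask_w - nir_w) // 2, 0)
    cropped = mask[top:top + nir_h, left:left + nir_w]
    return device, cropped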
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)
    brass_mask = cv2.imread(args.roi)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 49, 255, 'light', device, args.debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, args.debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, args.debug)
    device, brass_inv = pcv.invert(brass_thresh, device, args.debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device, args.debug)

    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, args.debug)
    device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, 'dark', device, args.debug)
    device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, 'light', device, args.debug)
    device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, args.debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white', device, args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, args.debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, args.debug)

    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, 'dark', device, args.debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, 'light', device, args.debug)

    # Join the thresholded saturation and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)

    # Fill small objects
    device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 150, device, args.debug)

    # Median Filter
    # device, soil_mblur = pcv.median_blur(soil_fill, 5, device, args.debug)
    # device, soil_cnt = pcv.median_blur(soil_fill, 5, device, args.debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device, args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, args.debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(img, 'rectangle', device, None, 'default', args.debug, True, 600, 450, -600, -350)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)

    ############## VIS Analysis ################

    outfile = False
    if args.writeimg == True:
        outfile = args.outdir + "/" + filename

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(img, args.image, obj, mask, device, args.debug, outfile)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(img, args.image, mask, 256, device, args.debug, None, 'v', 'img', 300, outfile)

    # Output shape and color data
    result = open(args.result, "a")
    result.write('\t'.join(map(str, shape_header)))
    result.write("\n")
    result.write('\t'.join(map(str, shape_data)))
    result.write("\n")
    for row in shape_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.write('\t'.join(map(str, color_header)))
    result.write("\n")
    result.write('\t'.join(map(str, color_data)))
    result.write("\n")
    for row in color_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.close()

    ############################# Use VIS image mask for NIR image #########################

    # Find matching NIR image
    device, nirpath = pcv.get_nir(path, filename, device, args.debug)
    nir, path1, filename1 = pcv.readimage(nirpath)
    nir2 = cv2.imread(nirpath, -1)

    # Flip mask
    device, f_mask = pcv.flip(mask, "horizontal", device, args.debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.116148, 0.116148, device, args.debug)

    # position, and crop mask
    device, newmask = pcv.crop_position_mask(nir, nmask, device, 15, 5, "top", "right", args.debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir, newmask, device, args.debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir, nir_objects, nir_hierarchy, device, args.debug)

    ####################################### Analysis #############################################

    outfile1 = False
    if args.writeimg == True:
        outfile1 = args.outdir + "/" + filename1

    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir2, filename1, nir_combinedmask, 256, device, False, args.debug, outfile1)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir2, filename1, nir_combined, nir_combinedmask, device, args.debug, outfile1)

    coresult = open(args.coresult, "a")
    coresult.write('\t'.join(map(str, nhist_header)))
    coresult.write("\n")
    coresult.write('\t'.join(map(str, nhist_data)))
    coresult.write("\n")
    for row in nir_imgs:
        coresult.write('\t'.join(map(str, row)))
        coresult.write("\n")
    coresult.write('\t'.join(map(str, nshape_header)))
    coresult.write("\n")
    coresult.write('\t'.join(map(str, nshape_data)))
    coresult.write("\n")
    coresult.write('\t'.join(map(str, nir_shape)))
    coresult.write("\n")
    coresult.close()
def process_sv_images(session, url, vis_id, nir_id, traits, debug=None):
    """Process side-view images from Clowder.

    Inputs:
    session = requests session object
    url     = Clowder URL
    vis_id  = The Clowder ID of an RGB image
    nir_id  = The Clowder ID of an NIR grayscale image
    traits  = traits table (dictionary)
    debug   = None, print, or plot. Print = save to file, Plot = print to screen

    :param session: requests session object
    :param url: str
    :param vis_id: str
    :param nir_id: str
    :param traits: dict
    :param debug: str
    :return traits: dict
    """
    # Read VIS image from Clowder
    vis_r = session.get(posixpath.join(url, "api/files", vis_id), stream=True)
    img_array = np.asarray(bytearray(vis_r.content), dtype="uint8")
    img = cv2.imdecode(img_array, -1)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)

    # Fill small objects
    # device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device, debug)

    # Fill small objects
    # device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_mblur, b_cnt, device, debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark', device, debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, debug)

    # Join the thresholded saturation and blue-yellow images (OR)
    device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)

    # Fill small noise
    device, ab_fill1 = pcv.fill(ab, ab_cnt, 200, device, debug)

    # Dilate to join small objects with larger ones
    device, ab_cnt1 = pcv.dilate(ab_fill1, 3, 2, device, debug)
    device, ab_cnt2 = pcv.dilate(ab_fill1, 3, 2, device, debug)

    # Fill dilated image mask
    device, ab_cnt3 = pcv.fill(ab_cnt2, ab_cnt1, 150, device, debug)
    device, masked2 = pcv.apply_mask(masked, ab_cnt3, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, debug)
    device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, 'dark', device, debug)
    device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255, 'light', device, debug)
    device, masked2a_thresh_blur = pcv.median_blur(masked2a_thresh, 5, device, debug)
    device, masked2b_thresh_blur = pcv.median_blur(masked2b_thresh, 13, device, debug)
    device, ab_fill = pcv.logical_or(masked2a_thresh_blur, masked2b_thresh_blur, device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device, None, 'default', debug, True, 700, 0, -600, -300)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, debug)

    ############## VIS Analysis ################

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(img, vis_id, obj, mask, device, debug)

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(img, vis_id, obj, mask, 384, device, debug)

    # Determine color properties: Histograms, Color Slices and
    # Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(img, vis_id, mask, 256, device, debug, None, 'v', 'img', 300)

    # Output shape and color data
    vis_traits = {}
    for i in range(1, len(shape_header)):
        vis_traits[shape_header[i]] = shape_data[i]
    for i in range(1, len(boundary_header)):
        vis_traits[boundary_header[i]] = boundary_data[i]
    for i in range(2, len(color_header)):
        vis_traits[color_header[i]] = serialize_color_data(color_data[i])
    # print(vis_traits)
    add_plantcv_metadata(session, url, vis_id, vis_traits)

    ############################# Use VIS image mask for NIR image #########################

    # Read NIR image from Clowder
    nir_r = session.get(posixpath.join(url, "api/files", nir_id), stream=True)
    nir_array = np.asarray(bytearray(nir_r.content), dtype="uint8")
    nir = cv2.imdecode(nir_array, -1)
    nir_rgb = cv2.cvtColor(nir, cv2.COLOR_GRAY2BGR)

    # Flip mask
    device, f_mask = pcv.flip(mask, "vertical", device, debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.1154905775, 0.1154905775, device, debug)

    # position, and crop mask
    device, newmask = pcv.crop_position_mask(nir_rgb, nmask, device, 30, 4, "top", "right", debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir_rgb, newmask, device, debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir_rgb, nir_objects, nir_hierarchy, device, debug)

    ####################################### Analysis #############################################

    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir, nir_id, nir_combinedmask, 256, device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir, nir_id, nir_combined, nir_combinedmask, device, debug)

    nir_traits = {}
    for i in range(1, len(nshape_header)):
        nir_traits[nshape_header[i]] = nshape_data[i]
    for i in range(2, len(nhist_header)):
        nir_traits[nhist_header[i]] = serialize_color_data(nhist_data[i])
    # print(nir_traits)
    add_plantcv_metadata(session, url, nir_id, nir_traits)

    # Add data to traits table
    traits['sv_area'].append(vis_traits['area'])
    traits['hull_area'].append(vis_traits['hull-area'])
    traits['solidity'].append(vis_traits['solidity'])
    traits['height'].append(vis_traits['height_above_bound'])
    traits['perimeter'].append(vis_traits['perimeter'])

    return traits
def process_sv_images_core(vis_id, vis_img, nir_id, nir_rgb, nir_cv2, traits, debug=None):
    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(vis_img, 's', device, debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)

    # Fill small objects
    # device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(vis_img, 'b', device, debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device, debug)

    # Fill small objects
    # device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_mblur, b_cnt, device, debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(vis_img, bs, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark', device, debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, debug)

    # Join the thresholded saturation and blue-yellow images (OR)
    device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)

    # Fill small noise
    device, ab_fill1 = pcv.fill(ab, ab_cnt, 200, device, debug)

    # Dilate to join small objects with larger ones
    device, ab_cnt1 = pcv.dilate(ab_fill1, 3, 2, device, debug)
    device, ab_cnt2 = pcv.dilate(ab_fill1, 3, 2, device, debug)

    # Fill dilated image mask
    device, ab_cnt3 = pcv.fill(ab_cnt2, ab_cnt1, 150, device, debug)
    device, masked2 = pcv.apply_mask(masked, ab_cnt3, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, debug)
    device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, 'dark', device, debug)
    device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255, 'light', device, debug)
    device, masked2a_thresh_blur = pcv.median_blur(masked2a_thresh, 5, device, debug)
    device, masked2b_thresh_blur = pcv.median_blur(masked2b_thresh, 13, device, debug)
    device, ab_fill = pcv.logical_or(masked2a_thresh_blur, masked2b_thresh_blur, device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device, None, 'default', debug, True, 700, 0, -600, -300)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(vis_img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(vis_img, roi_objects, hierarchy3, device, debug)

    ############## VIS Analysis ################

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(vis_img, vis_id, obj, mask, device, debug)

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(vis_img, vis_id, obj, mask, 384, device, debug)

    # Determine color properties: Histograms, Color Slices and
    # Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(vis_img, vis_id, mask, 256, device, debug, None, 'v', 'img', 300)

    # Output shape and color data
    vis_traits = {}
    for i in range(1, len(shape_header)):
        vis_traits[shape_header[i]] = shape_data[i]
    for i in range(1, len(boundary_header)):
        vis_traits[boundary_header[i]] = boundary_data[i]
    for i in range(2, len(color_header)):
        vis_traits[color_header[i]] = serialize_color_data(color_data[i])

    ############################# Use VIS image mask for NIR image #########################

    # Flip mask
    device, f_mask = pcv.flip(mask, "vertical", device, debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.1154905775, 0.1154905775, device, debug)

    # position, and crop mask
    device, newmask = pcv.crop_position_mask(nir_rgb, nmask, device, 30, 4, "top", "right", debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir_rgb, newmask, device, debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir_rgb, nir_objects, nir_hierarchy, device, debug)

    ####################################### Analysis #############################################

    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir_cv2, nir_id, nir_combinedmask, 256, device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir_cv2, nir_id, nir_combined, nir_combinedmask, device, debug)

    nir_traits = {}
    for i in range(1, len(nshape_header)):
        nir_traits[nshape_header[i]] = nshape_data[i]
    for i in range(2, len(nhist_header)):
        nir_traits[nhist_header[i]] = serialize_color_data(nhist_data[i])

    # Add data to traits table
    traits['sv_area'].append(vis_traits['area'])
    traits['hull_area'].append(vis_traits['hull-area'])
    traits['solidity'].append(vis_traits['solidity'])
    traits['height'].append(vis_traits['height_above_bound'])
    traits['perimeter'].append(vis_traits['perimeter'])

    return [vis_traits, nir_traits]
def process_sv_images(vis_img, nir_img, debug=None): """Process side-view images. Inputs: vis_img = An RGB image. nir_img = An NIR grayscale image. debug = None, print, or plot. Print = save to file, Plot = print to screen. :param vis_img: str :param nir_img: str :param debug: str :return: """ # Read VIS image img, path, filename = pcv.readimage(vis_img) # Pipeline step device = 0 # Convert RGB to HSV and extract the Saturation channel device, s = pcv.rgb2gray_hsv(img, 's', device, debug) # Threshold the Saturation image device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, debug) # Median Filter device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug) device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug) # Fill small objects # device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug) # Convert RGB to LAB and extract the Blue channel device, b = pcv.rgb2gray_lab(img, 'b', device, debug) # Threshold the blue image device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device, debug) device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device, debug) # Fill small objects # device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug) # Join the thresholded saturation and blue-yellow images device, bs = pcv.logical_and(s_mblur, b_cnt, device, debug) # Apply Mask (for vis images, mask_color=white) device, masked = pcv.apply_mask(img, bs, 'white', device, debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, debug) device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, debug) # Threshold the green-magenta and blue images device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark', device, debug) device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, debug) # Join the thresholded saturation and blue-yellow images (OR) device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug) device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug) # Fill small noise device, ab_fill1 = pcv.fill(ab, ab_cnt, 200, device, debug) # Dilate to join small objects with larger ones device, ab_cnt1 = pcv.dilate(ab_fill1, 3, 2, device, debug) device, ab_cnt2 = pcv.dilate(ab_fill1, 3, 2, device, debug) # Fill dilated image mask device, ab_cnt3 = pcv.fill(ab_cnt2, ab_cnt1, 150, device, debug) device, masked2 = pcv.apply_mask(masked, ab_cnt3, 'white', device, debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, debug) device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, debug) # Threshold the green-magenta and blue images device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, 'dark', device, debug) device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255, 'light', device, debug) device, masked2a_thresh_blur = pcv.median_blur(masked2a_thresh, 5, device, debug) device, masked2b_thresh_blur = pcv.median_blur(masked2b_thresh, 13, device, debug) device, ab_fill = pcv.logical_or(masked2a_thresh_blur, masked2b_thresh_blur, device, debug) # Identify objects device, id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, debug) # Define ROI device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device, None, 'default', debug, True, 700, 0, -600, -300) # Decide which objects to keep device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, 
roi_hierarchy, id_objects, obj_hierarchy, device, debug) # Object combine kept objects device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, debug) ############## VIS Analysis ################ # Find shape properties, output shape image (optional) device, shape_header, shape_data, shape_img = pcv.analyze_object(img, vis_img, obj, mask, device, debug) # Shape properties relative to user boundary line (optional) device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(img, vis_img, obj, mask, 384, device, debug) # Determine color properties: Histograms, Color Slices and # Pseudocolored Images, output color analyzed images (optional) device, color_header, color_data, color_img = pcv.analyze_color(img, vis_img, mask, 256, device, debug, None, 'v', 'img', 300) # Output shape and color data print('\t'.join(map(str, shape_header)) + '\n') print('\t'.join(map(str, shape_data)) + '\n') for row in shape_img: print('\t'.join(map(str, row)) + '\n') print('\t'.join(map(str, color_header)) + '\n') print('\t'.join(map(str, color_data)) + '\n') print('\t'.join(map(str, boundary_header)) + '\n') print('\t'.join(map(str, boundary_data)) + '\n') print('\t'.join(map(str, boundary_img1)) + '\n') for row in color_img: print('\t'.join(map(str, row)) + '\n') ############################# Use VIS image mask for NIR image######################### # Read NIR image nir, path1, filename1 = pcv.readimage(nir_img) nir2 = cv2.imread(nir_img, -1) # Flip mask device, f_mask = pcv.flip(mask, "vertical", device, debug) # Reize mask device, nmask = pcv.resize(f_mask, 0.1154905775, 0.1154905775, device, debug) # position, and crop mask device, newmask = pcv.crop_position_mask(nir, nmask, device, 30, 4, "top", "right", debug) # Identify objects device, nir_objects, nir_hierarchy = pcv.find_objects(nir, newmask, device, debug) # Object combine kept objects device, nir_combined, nir_combinedmask = pcv.object_composition(nir, nir_objects, nir_hierarchy, device, debug) ####################################### Analysis ############################################# device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir2, filename1, nir_combinedmask, 256, device, False, debug) device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir2, filename1, nir_combined, nir_combinedmask, device, debug) print('\t'.join(map(str, nhist_header)) + '\n') print('\t'.join(map(str, nhist_data)) + '\n') for row in nir_imgs: print('\t'.join(map(str, row)) + '\n') print('\t'.join(map(str, nshape_header)) + '\n') print('\t'.join(map(str, nshape_data)) + '\n') print('\t'.join(map(str, nir_shape)) + '\n')
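# A small helper (not part of the original script) that factors out the
# tab-delimited printing pattern repeated above; header and data are the
# header/value lists returned by the pcv.analyze_* functions.
def print_record(header, data):
    """Print one header row and one data row as tab-separated text."""
    print('\t'.join(map(str, header)))
    print('\t'.join(map(str, data)))

# Example (hypothetical): print_record(shape_header, shape_data)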
def process_tv_images(vis_img, nir_img, debug=False): """Process top-view images. Inputs: vis_img = An RGB image. nir_img = An NIR grayscale image. debug = None, print, or plot. Print = save to file, Plot = print to screen. :param vis_img: str :param nir_img: str :param debug: str :return: """ # Read image img, path, filename = pcv.readimage(vis_img) brass_mask = cv2.imread('mask_brass_tv_z1_L1.png') device = 0 # Convert RGB to HSV and extract the Saturation channel device, s = pcv.rgb2gray_hsv(img, 's', device, debug) # Threshold the Saturation image device, s_thresh = pcv.binary_threshold(s, 75, 255, 'light', device, debug) # Median Filter device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug) device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug) # Fill small objects device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, debug) # Convert RGB to LAB and extract the Blue channel device, b = pcv.rgb2gray_lab(img, 'b', device, debug) # Threshold the blue image device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, debug) device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, debug) # Fill small objects device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, debug) # Join the thresholded saturation and blue-yellow images device, bs = pcv.logical_and(s_fill, b_fill, device, debug) # Apply Mask (for vis images, mask_color=white) device, masked = pcv.apply_mask(img, bs, 'white', device, debug) # Mask pesky brass piece device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, debug) device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, debug) device, brass_inv = pcv.invert(brass_thresh, device, debug) device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device, debug) # Further mask soil and car device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, debug) device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, 'dark', device, debug) device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, 'light', device, debug) device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, debug) device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white', device, debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, debug) device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, debug) # Threshold the green-magenta and blue images device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, 'dark', device, debug) device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, 'light', device, debug) # Join the thresholded saturation and blue-yellow images (OR) device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, debug) device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, debug) # Fill small objects device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 300, device, debug) # Apply mask (for vis images, mask_color=white) device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device, debug) # Identify objects device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, debug) # Define ROI device, roi1, roi_hierarchy = pcv.define_roi(img, 'rectangle', device, None, 'default', debug, True, 600, 450, -600, -350) # Decide which objects to keep device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, debug) # Object combine kept objects device, obj, mask = 
pcv.object_composition(img, roi_objects, hierarchy3, device, debug) # Find shape properties, output shape image (optional) device, shape_header, shape_data, shape_img = pcv.analyze_object(img, vis_img, obj, mask, device, debug) # Determine color properties device, color_header, color_data, color_img = pcv.analyze_color(img, vis_img, mask, 256, device, debug, None, 'v', 'img', 300) print('\t'.join(map(str, shape_header)) + '\n') print('\t'.join(map(str, shape_data)) + '\n') for row in shape_img: print('\t'.join(map(str, row)) + '\n') print('\t'.join(map(str, color_header)) + '\n') print('\t'.join(map(str, color_data)) + '\n') for row in color_img: print('\t'.join(map(str, row)) + '\n') ############################# Use VIS image mask for NIR image######################### # Read NIR image nir, path1, filename1 = pcv.readimage(nir_img) nir2 = cv2.imread(nir_img, -1) # Flip mask device, f_mask = pcv.flip(mask, "horizontal", device, debug) # Reize mask device, nmask = pcv.resize(f_mask, 0.116148, 0.116148, device, debug) # position, and crop mask device, newmask = pcv.crop_position_mask(nir, nmask, device, 15, 5, "top", "right", debug) # Identify objects device, nir_objects, nir_hierarchy = pcv.find_objects(nir, newmask, device, debug) # Object combine kept objects device, nir_combined, nir_combinedmask = pcv.object_composition(nir, nir_objects, nir_hierarchy, device, debug) ####################################### Analysis ############################################# device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir2, filename1, nir_combinedmask, 256, device, False, debug) device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir2, filename1, nir_combined, nir_combinedmask, device, debug) print('\t'.join(map(str,nhist_header)) + '\n') print('\t'.join(map(str,nhist_data)) + '\n') for row in nir_imgs: print('\t'.join(map(str,row)) + '\n') print('\t'.join(map(str,nshape_header)) + '\n') print('\t'.join(map(str,nshape_data)) + '\n') print('\t'.join(map(str,nir_shape)) + '\n')
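# Illustrative only: a rough OpenCV/NumPy restatement of what the flip ->
# resize -> crop_position_mask chain above accomplishes geometrically. The
# scale factor and offsets reuse the top-view values from the pipeline; the
# simple top-left placement is an assumption and ignores the "top"/"right"
# anchoring options of pcv.crop_position_mask.
import cv2
import numpy as np

def transfer_mask_sketch(vis_mask, nir_shape, scale=0.116148, x_off=15, y_off=5):
    flipped = cv2.flip(vis_mask, 1)  # horizontal flip
    small = cv2.resize(flipped, None, fx=scale, fy=scale,
                       interpolation=cv2.INTER_NEAREST)  # keep the mask binary
    placed = np.zeros(nir_shape[:2], dtype=np.uint8)
    h = min(small.shape[0], placed.shape[0] - y_off)
    w = min(small.shape[1], placed.shape[1] - x_off)
    placed[y_off:y_off + h, x_off:x_off + w] = small[:h, :w]
    return placed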
def main():
    # Step 1: Get options
    args = options()
    # Step 2: Read image
    img, path, filename = pcv.readimage(args.image)
    # cv2.imshow("imagen", img)
    # Step 3: Pipeline step counter
    device = 0
    debug = args.debug
    # Step 4: Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, debug)
    # cv2.imshow("rgb a hsv y extraer saturacion 4", s)
    # Step 5: Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 85, 255, 'light', device, debug)
    # cv2.imshow("imagen binaria de hsv", s_thresh)
    # Step 6: Median filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)
    # cv2.imshow("s_mblur", s_mblur)
    # cv2.imshow("s_cnt", s_cnt)
    # Step 7: Convert RGB (original image) to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, debug)
    # cv2.imshow("convertir RGB a LAB", b)
    # Step 8: Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 160, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 160, 255, 'light', device, debug)
    # cv2.imshow("imagen binaria de LAB", b_thresh)
    # cv2.imshow("imagen binaria", b_cnt)
    # Fill small objects
    # device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, debug)
    # Step 9: Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_or(s_mblur, b_cnt, device, debug)
    # cv2.imshow("suma logica s_mblur and b_cnt", bs)
    # Step 10: Apply mask (for VIS images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, debug)
    # cv2.imshow("aplicar mascara masked", masked)
    # Step 11: Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, debug)
    # cv2.imshow("canal verde-magenta", masked_a)
    # cv2.imshow("canal azul-amarillo", masked_b)
    # Step 12: Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 115, 255, 'dark', device, debug)
    device, maskeda_thresh1 = pcv.binary_threshold(masked_a, 135, 255, 'light', device, debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, debug)
    # cv2.imshow("threshold de canal verde-magenta dark", maskeda_thresh)
    # cv2.imshow("threshold de canal verde-magenta light", maskeda_thresh1)
    # cv2.imshow("threshold de canal azul-amarillo", maskedb_thresh)
    # Step 13: Join the thresholded images (OR)
    device, ab1 = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)
    device, ab = pcv.logical_or(maskeda_thresh1, ab1, device, debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh1, ab1, device, debug)
    # cv2.imshow("suma logica or 1", ab1)
    # cv2.imshow("suma logica or 2 ab", ab)
    # cv2.imshow("suma logica or 3 ab_cnt", ab_cnt)
    # Step 14: Fill small objects
    device, ab_fill = pcv.fill(ab, ab_cnt, 200, device, debug)
    # cv2.imshow("ab_fill", ab_fill)
    # Step 15: Apply mask (for VIS images, mask_color=white)
    device, masked2 = pcv.apply_mask(masked, ab_fill, 'white', device, debug)
    # cv2.imshow("aplicar maskara2 white", masked2)
    # Step 16: Identify objects (plant material) in the image.
    # With debug='print' the image is saved to disk; with 'plot' it is only displayed.
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, debug)
    # Step 17: Define ROI. This encloses the detected object, but the ROI is
    # still set manually rather than automatically.
    device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device, None, 'default', debug, True, 92, 80, -127, -343)
    # Step 18: Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, debug)
    # Step 19: Combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, debug)

    ############### Analysis ################
    outfile = False
    if args.writeimg == True:
        outfile = args.outdir + "/" + filename
    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(img, args.image, obj, mask, device, debug, outfile)
    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(img, args.image, obj, mask, 1680, device, debug, outfile)
    # Determine color properties: Histograms, Color Slices and Pseudocolored Images,
    # output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(img, args.image, kept_mask, 256, device, debug, 'all', 'v', 'img', 300, outfile)
    # Write shape and color data to results file
    result = open(args.result, "a")
    result.write('\t'.join(map(str, shape_header)))
    result.write("\n")
    result.write('\t'.join(map(str, shape_data)))
    result.write("\n")
    for row in shape_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.write('\t'.join(map(str, color_header)))
    result.write("\n")
    result.write('\t'.join(map(str, color_data)))
    result.write("\n")
    for row in color_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.close()
    cv2.waitKey()
    cv2.destroyAllWindows()
    args = parser.parse_args()
    return args


args = options()
debug = args.debug
device = 0
img = cv2.imread("/home/matthijs/Downloads/tomaat_3.jpg")
img_2 = cv2.imread("/home/matthijs/Downloads/tomaat_3.jpg", 0)
cimg = cv2.cvtColor(img_2, cv2.COLOR_BAYER_BG2BGR)
device, s = pcv.rgb2gray_hsv(img, 's', device)
# The LAB a channel looks most promising for segmentation
device, a = pcv.rgb2gray_lab(img, 'a', device, debug)
device, a_thresh = pcv.binary_threshold(a, 135, 255, 'light', device, debug)
device, a_mblur = pcv.median_blur(a_thresh, 5, device, debug)
kernel = np.zeros((3, 3), dtype=np.uint8)
device, mask_watershed = pcv.erode(a_mblur, 5, 1, device, debug)
# cv2.cv.CV_HOUGH_GRADIENT is the OpenCV 2.x constant; on OpenCV 3+ use cv2.HOUGH_GRADIENT
circles = cv2.HoughCircles(img_2, cv2.cv.CV_HOUGH_GRADIENT, 1, 30,
                           param1=50, param2=20, minRadius=130, maxRadius=220)
print("watt?")
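# The snippet above stops right after the Hough transform. A short follow-up
# (not part of the original) that draws whatever circles were found onto the
# demosaiced image for inspection; the output filename is hypothetical.
if circles is not None:
    for x, y, r in np.round(circles[0, :]).astype("int"):
        cv2.circle(cimg, (x, y), r, (0, 255, 0), 2)   # circle outline
        cv2.circle(cimg, (x, y), 2, (0, 0, 255), 3)   # circle centre
    cv2.imwrite("tomaat_3_circles.png", cimg)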
def process_tv_images(session, url, vis_id, nir_id, traits, debug=False): """Process top-view images. Inputs: session = requests session object url = Clowder URL vis_id = The Clowder ID of an RGB image nir_img = The Clowder ID of an NIR grayscale image traits = traits table (dictionary) debug = None, print, or plot. Print = save to file, Plot = print to screen. :param session: requests session object :param url: str :param vis_id: str :param nir_id: str :param traits: dict :param debug: str :return traits: dict """ # Read VIS image from Clowder vis_r = session.get(posixpath.join(url, "api/files", vis_id), stream=True) img_array = np.asarray(bytearray(vis_r.content), dtype="uint8") img = cv2.imdecode(img_array, -1) # Read the VIS top-view image mask for zoom = 1 from Clowder mask_r = session.get(posixpath.join(url, "api/files/57451b28e4b0efbe2dc3d4d5"), stream=True) mask_array = np.asarray(bytearray(mask_r.content), dtype="uint8") brass_mask = cv2.imdecode(mask_array, -1) device = 0 # Convert RGB to HSV and extract the Saturation channel device, s = pcv.rgb2gray_hsv(img, 's', device, debug) # Threshold the Saturation image device, s_thresh = pcv.binary_threshold(s, 75, 255, 'light', device, debug) # Median Filter device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug) device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug) # Fill small objects device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, debug) # Convert RGB to LAB and extract the Blue channel device, b = pcv.rgb2gray_lab(img, 'b', device, debug) # Threshold the blue image device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, debug) device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, debug) # Fill small objects device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, debug) # Join the thresholded saturation and blue-yellow images device, bs = pcv.logical_and(s_fill, b_fill, device, debug) # Apply Mask (for vis images, mask_color=white) device, masked = pcv.apply_mask(img, bs, 'white', device, debug) # Mask pesky brass piece device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, debug) device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, debug) device, brass_inv = pcv.invert(brass_thresh, device, debug) device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device, debug) # Further mask soil and car device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, debug) device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, 'dark', device, debug) device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, 'light', device, debug) device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, debug) device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white', device, debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, debug) device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, debug) # Threshold the green-magenta and blue images device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, 'dark', device, debug) device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, 'light', device, debug) # Join the thresholded saturation and blue-yellow images (OR) device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, debug) device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, debug) # Fill small objects device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 300, device, debug) # Apply mask (for vis images, 
mask_color=white) device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device, debug) # Identify objects device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, debug) # Define ROI device, roi1, roi_hierarchy = pcv.define_roi(img, 'rectangle', device, None, 'default', debug, True, 600, 450, -600, -350) # Decide which objects to keep device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, debug) # Object combine kept objects device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, debug) # Find shape properties, output shape image (optional) device, shape_header, shape_data, shape_img = pcv.analyze_object(img, vis_id, obj, mask, device, debug) # Determine color properties device, color_header, color_data, color_img = pcv.analyze_color(img, vis_id, mask, 256, device, debug, None, 'v', 'img', 300) # Output shape and color data vis_traits = {} for i in range(1, len(shape_header)): vis_traits[shape_header[i]] = shape_data[i] for i in range(2, len(color_header)): vis_traits[color_header[i]] = serialize_color_data(color_data[i]) #print(vis_traits) add_plantcv_metadata(session, url, vis_id, vis_traits) ############################# Use VIS image mask for NIR image######################### # Read NIR image from Clowder nir_r = session.get(posixpath.join(url, "api/files", nir_id), stream=True) nir_array = np.asarray(bytearray(nir_r.content), dtype="uint8") nir = cv2.imdecode(nir_array, -1) nir_rgb = cv2.cvtColor(nir, cv2.COLOR_GRAY2BGR) # Flip mask device, f_mask = pcv.flip(mask, "horizontal", device, debug) # Reize mask device, nmask = pcv.resize(f_mask, 0.116148, 0.116148, device, debug) # position, and crop mask device, newmask = pcv.crop_position_mask(nir_rgb, nmask, device, 15, 5, "top", "right", debug) # Identify objects device, nir_objects, nir_hierarchy = pcv.find_objects(nir_rgb, newmask, device, debug) # Object combine kept objects device, nir_combined, nir_combinedmask = pcv.object_composition(nir_rgb, nir_objects, nir_hierarchy, device, debug) ####################################### Analysis ############################################# device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir, nir_id, nir_combinedmask, 256, device, False, debug) device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir, nir_id, nir_combined, nir_combinedmask, device, debug) nir_traits = {} for i in range(1, len(nshape_header)): nir_traits[nshape_header[i]] = nshape_data[i] for i in range(2, len(nhist_header)): nir_traits[nhist_header[i]] = serialize_color_data(nhist_data[i]) #print(nir_traits) add_plantcv_metadata(session, url, nir_id, nir_traits) # Add data to traits table traits['tv_area'] = vis_traits['area'] return traits
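# Hypothetical usage of process_tv_images(); the server URL, credentials, and
# file IDs are placeholders, only the traits keys actually used above are
# initialised, and the helper functions it relies on (serialize_color_data,
# add_plantcv_metadata) are assumed to be importable from the same module.
import requests

def run_tv_example():
    session = requests.Session()
    session.auth = ("user@example.org", "password")      # placeholder credentials
    url = "https://clowder.example.org/clowder"          # placeholder Clowder URL
    traits = {"sv_area": [], "hull_area": [], "solidity": [],
              "height": [], "perimeter": [], "tv_area": ""}
    traits = process_tv_images(session, url, vis_id="VIS_FILE_ID",
                               nir_id="NIR_FILE_ID", traits=traits, debug=None)
    print(traits["tv_area"])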
def main(): # Get options args = options() # Read image img, path, filename = pcv.readimage(args.image) # Pipeline step device = 0 # Convert RGB to HSV and extract the Saturation channel device, s = pcv.rgb2gray_hsv(img, "s", device, args.debug) # Threshold the Saturation image device, s_thresh = pcv.binary_threshold(s, 36, 255, "light", device, args.debug) # Median Filter device, s_mblur = pcv.median_blur(s_thresh, 0, device, args.debug) device, s_cnt = pcv.median_blur(s_thresh, 0, device, args.debug) # Fill small objects # device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug) # Convert RGB to LAB and extract the Blue channel device, b = pcv.rgb2gray_lab(img, "b", device, args.debug) # Threshold the blue image device, b_thresh = pcv.binary_threshold(b, 137, 255, "light", device, args.debug) device, b_cnt = pcv.binary_threshold(b, 137, 255, "light", device, args.debug) # Fill small objects # device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug) # Join the thresholded saturation and blue-yellow images device, bs = pcv.logical_and(s_mblur, b_cnt, device, args.debug) # Apply Mask (for vis images, mask_color=white) device, masked = pcv.apply_mask(img, bs, "white", device, args.debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels device, masked_a = pcv.rgb2gray_lab(masked, "a", device, args.debug) device, masked_b = pcv.rgb2gray_lab(masked, "b", device, args.debug) # Threshold the green-magenta and blue images device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, "dark", device, args.debug) device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, "light", device, args.debug) # Join the thresholded saturation and blue-yellow images (OR) device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug) device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug) # Fill small noise device, ab_fill1 = pcv.fill(ab, ab_cnt, 2, device, args.debug) # Dilate to join small objects with larger ones device, ab_cnt1 = pcv.dilate(ab_fill1, 3, 2, device, args.debug) device, ab_cnt2 = pcv.dilate(ab_fill1, 3, 2, device, args.debug) # Fill dilated image mask device, ab_cnt3 = pcv.fill(ab_cnt2, ab_cnt1, 150, device, args.debug) device, masked2 = pcv.apply_mask(masked, ab_cnt3, "white", device, args.debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels device, masked2_a = pcv.rgb2gray_lab(masked2, "a", device, args.debug) device, masked2_b = pcv.rgb2gray_lab(masked2, "b", device, args.debug) # Threshold the green-magenta and blue images device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, "dark", device, args.debug) device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255, "light", device, args.debug) device, ab_fill = pcv.logical_or(masked2a_thresh, masked2b_thresh, device, args.debug) # Identify objects device, id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, args.debug) # Define ROI device, roi1, roi_hierarchy = pcv.define_roi( masked2, "rectangle", device, None, "default", args.debug, True, 500, 0, -600, -885 ) # Decide which objects to keep device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects( img, "partial", roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug ) # Object combine kept objects device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug) ############## VIS Analysis ################ outfile = False if args.writeimg == True: outfile = args.outdir + 
"/" + filename # Find shape properties, output shape image (optional) device, shape_header, shape_data, shape_img = pcv.analyze_object( img, args.image, obj, mask, device, args.debug, outfile ) # Shape properties relative to user boundary line (optional) device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound( img, args.image, obj, mask, 845, device, args.debug, outfile ) # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional) device, color_header, color_data, color_img = pcv.analyze_color( img, args.image, mask, 256, device, args.debug, None, "v", "img", 300, outfile ) # Output shape and color data result = open(args.result, "a") result.write("\t".join(map(str, shape_header))) result.write("\n") result.write("\t".join(map(str, shape_data))) result.write("\n") for row in shape_img: result.write("\t".join(map(str, row))) result.write("\n") result.write("\t".join(map(str, color_header))) result.write("\n") result.write("\t".join(map(str, color_data))) result.write("\n") result.write("\t".join(map(str, boundary_header))) result.write("\n") result.write("\t".join(map(str, boundary_data))) result.write("\n") result.write("\t".join(map(str, boundary_img1))) result.write("\n") for row in color_img: result.write("\t".join(map(str, row))) result.write("\n") result.close() ############################# Use VIS image mask for NIR image######################### # Find matching NIR image device, nirpath = pcv.get_nir(path, filename, device, args.debug) nir, path1, filename1 = pcv.readimage(nirpath) nir2 = cv2.imread(nirpath, -1) # Flip mask device, f_mask = pcv.flip(mask, "vertical", device, args.debug) # Reize mask device, nmask = pcv.resize(f_mask, 0.1304, 0.1304, device, args.debug) # position, and crop mask device, newmask = pcv.crop_position_mask(nir, nmask, device, 65, 0, "top", "left", args.debug) # Identify objects device, nir_objects, nir_hierarchy = pcv.find_objects(nir, newmask, device, args.debug) # Object combine kept objects device, nir_combined, nir_combinedmask = pcv.object_composition(nir, nir_objects, nir_hierarchy, device, args.debug) ####################################### Analysis ############################################# outfile1 = False if args.writeimg == True: outfile1 = args.outdir + "/" + filename1 device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity( nir2, filename1, nir_combinedmask, 256, device, False, args.debug, outfile1 ) device, nshape_header, nshape_data, nir_shape = pcv.analyze_object( nir2, filename1, nir_combined, nir_combinedmask, device, args.debug, outfile1 ) coresult = open(args.coresult, "a") coresult.write("\t".join(map(str, nhist_header))) coresult.write("\n") coresult.write("\t".join(map(str, nhist_data))) coresult.write("\n") for row in nir_imgs: coresult.write("\t".join(map(str, row))) coresult.write("\n") coresult.write("\t".join(map(str, nshape_header))) coresult.write("\n") coresult.write("\t".join(map(str, nshape_data))) coresult.write("\n") coresult.write("\t".join(map(str, nir_shape))) coresult.write("\n") coresult.close()
def process_sv_images_core(vis_id, vis_img, nir_id, nir_rgb, nir_cv2, traits, debug=None): # Pipeline step device = 0 # Convert RGB to HSV and extract the Saturation channel device, s = pcv.rgb2gray_hsv(vis_img, 's', device, debug) # Threshold the Saturation image device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, debug) # Median Filter device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug) device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug) # Fill small objects # device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug) # Convert RGB to LAB and extract the Blue channel device, b = pcv.rgb2gray_lab(vis_img, 'b', device, debug) # Threshold the blue image device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device, debug) device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device, debug) # Fill small objects # device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug) # Join the thresholded saturation and blue-yellow images device, bs = pcv.logical_and(s_mblur, b_cnt, device, debug) # Apply Mask (for vis images, mask_color=white) device, masked = pcv.apply_mask(vis_img, bs, 'white', device, debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, debug) device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, debug) # Threshold the green-magenta and blue images device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark', device, debug) device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, debug) # Join the thresholded saturation and blue-yellow images (OR) device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug) device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug) # Fill small noise device, ab_fill1 = pcv.fill(ab, ab_cnt, 200, device, debug) # Dilate to join small objects with larger ones device, ab_cnt1 = pcv.dilate(ab_fill1, 3, 2, device, debug) device, ab_cnt2 = pcv.dilate(ab_fill1, 3, 2, device, debug) # Fill dilated image mask device, ab_cnt3 = pcv.fill(ab_cnt2, ab_cnt1, 150, device, debug) device, masked2 = pcv.apply_mask(masked, ab_cnt3, 'white', device, debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, debug) device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, debug) # Threshold the green-magenta and blue images device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, 'dark', device, debug) device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255, 'light', device, debug) device, masked2a_thresh_blur = pcv.median_blur(masked2a_thresh, 5, device, debug) device, masked2b_thresh_blur = pcv.median_blur(masked2b_thresh, 13, device, debug) device, ab_fill = pcv.logical_or(masked2a_thresh_blur, masked2b_thresh_blur, device, debug) # Identify objects device, id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, debug) # Define ROI device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device, None, 'default', debug, True, 700, 0, -600, -300) # Decide which objects to keep device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(vis_img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, debug) # Object combine kept objects device, obj, mask = pcv.object_composition(vis_img, roi_objects, hierarchy3, device, debug) ############## VIS Analysis ################ # Find shape properties, output 
shape image (optional) device, shape_header, shape_data, shape_img = pcv.analyze_object(vis_img, vis_id, obj, mask, device, debug) # Shape properties relative to user boundary line (optional) device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(vis_img, vis_id, obj, mask, 384, device, debug) # Determine color properties: Histograms, Color Slices and # Pseudocolored Images, output color analyzed images (optional) device, color_header, color_data, color_img = pcv.analyze_color(vis_img, vis_id, mask, 256, device, debug, None, 'v', 'img', 300) # Output shape and color data vis_traits = {} for i in range(1, len(shape_header)): vis_traits[shape_header[i]] = shape_data[i] for i in range(1, len(boundary_header)): vis_traits[boundary_header[i]] = boundary_data[i] for i in range(2, len(color_header)): vis_traits[color_header[i]] = serialize_color_data(color_data[i]) ############################# Use VIS image mask for NIR image######################### # Flip mask device, f_mask = pcv.flip(mask, "vertical", device, debug) # Reize mask device, nmask = pcv.resize(f_mask, 0.1154905775, 0.1154905775, device, debug) # position, and crop mask device, newmask = pcv.crop_position_mask(nir_rgb, nmask, device, 30, 4, "top", "right", debug) # Identify objects device, nir_objects, nir_hierarchy = pcv.find_objects(nir_rgb, newmask, device, debug) # Object combine kept objects device, nir_combined, nir_combinedmask = pcv.object_composition(nir_rgb, nir_objects, nir_hierarchy, device, debug) ####################################### Analysis ############################################# device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir_cv2, nir_id, nir_combinedmask, 256, device, False, debug) device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir_cv2, nir_id, nir_combined, nir_combinedmask, device, debug) nir_traits = {} for i in range(1, len(nshape_header)): nir_traits[nshape_header[i]] = nshape_data[i] for i in range(2, len(nhist_header)): nir_traits[nhist_header[i]] = serialize_color_data(nhist_data[i]) # Add data to traits table traits['sv_area'].append(vis_traits['area']) traits['hull_area'].append(vis_traits['hull-area']) traits['solidity'].append(vis_traits['solidity']) traits['height'].append(vis_traits['height_above_bound']) traits['perimeter'].append(vis_traits['perimeter']) return [vis_traits, nir_traits]
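# Sketch of how the side-view traits table accumulates across rotation angles;
# side_view_frames is a hypothetical iterable of pre-loaded images, and the
# averaging step is an assumption about how the per-angle lists are summarised.
def summarise_side_views(side_view_frames):
    traits = {"sv_area": [], "hull_area": [], "solidity": [], "height": [], "perimeter": []}
    for vis_id, vis_img, nir_id, nir_rgb, nir_cv2 in side_view_frames:
        process_sv_images_core(vis_id, vis_img, nir_id, nir_rgb, nir_cv2, traits)
    if traits["sv_area"]:
        return {key: sum(vals) / len(vals) for key, vals in traits.items()}
    return traits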
def main():
    # Get image options
    args = options()
    if args.debug:
        print("Debug mode turned on...")
    # Read the image; flags=0 loads it as grayscale
    img = cv2.imread(args.image, flags=0)
    # cv2.imshow("imagen original", img)
    # Get directory path and image name from command line arguments
    path, img_name = os.path.split(args.image)
    # Read in image which is the pixelwise average of background images
    img_bkgrd = cv2.imread("background_average.jpg", flags=0)
    # cv2.imshow("ventana del fondo", img_bkgrd)
    # Image-processing step counter
    device = 0
    # Subtract the background image from the image with the plant
    device, bkg_sub_img = pcv.image_subtract(img, img_bkgrd, device, args.debug)
    # cv2.imshow("imagen resta", bkg_sub_img)
    # Threshold the image of interest using the two-sided cv2.inRange function (keep what is between 50-190)
    bkg_sub_thres_img = cv2.inRange(bkg_sub_img, 50, 190)
    if args.debug:
        cv2.imwrite('bkgrd_sub_thres.png', bkg_sub_thres_img)
    # Laplacian filtering (identify edges based on the second derivative)
    device, lp_img = pcv.laplace_filter(img, 1, 1, device, args.debug)
    # cv2.imshow("imagen de filtrado", lp_img)
    if args.debug:
        pcv.plot_hist(lp_img, 'histograma_lp')
    # Laplacian image sharpening; this step enhances the darkness of the detected edges
    device, lp_shrp_img = pcv.image_subtract(img, lp_img, device, args.debug)
    # cv2.imshow("imagen de borde lapacian", lp_shrp_img)
    if args.debug:
        pcv.plot_hist(lp_shrp_img, 'histograma_lp_shrp')
    # Sobel filtering
    # First-derivative Sobel filter along the horizontal axis (kernel = 1, no scaling).
    # Note: the extra scale argument was dropped and only the aperture size kept;
    # see sobel_filter in the docs.
    # device, sbx_img = pcv.sobel_filter(img, 1, 0, 1, 1, device, args.debug)
    device, sbx_img = pcv.sobel_filter(img, 1, 0, 1, device, args.debug)
    # cv2.imshow("imagen sobel-eje horizontal", sbx_img)
    if args.debug:
        pcv.plot_hist(sbx_img, 'histograma_sbx')
    # First-derivative Sobel filter along the vertical axis (kernel = 1, no scaling)
    device, sby_img = pcv.sobel_filter(img, 0, 1, 1, device, args.debug)
    # cv2.imshow("imagen sobel-ejevertical", sby_img)
    if args.debug:
        pcv.plot_hist(sby_img, 'histograma_sby')
    # Combine the effects of both x and y filters through matrix addition.
    # This captures the edges identified within each plane and emphasizes edges found in both images.
    device, sb_img = pcv.image_add(sbx_img, sby_img, device, args.debug)
    # cv2.imshow("imagen suma de sobel", sb_img)
    if args.debug:
        pcv.plot_hist(sb_img, 'histograma_sb_comb_img')
    # Use a median blur (low-pass filter) to smooth the Sobel image
    device, mblur_img = pcv.median_blur(sb_img, 1, device, args.debug)
    # cv2.imshow("imagen blur", mblur_img)
    device, mblur_invert_img = pcv.invert(mblur_img, device, args.debug)
    # cv2.imshow("imagen blur-invertido", mblur_invert_img)
    # Combine the smoothed Sobel image with the sharpened Laplacian image; this
    # combines the best features of both methods as described in "Digital Image
    # Processing" by Gonzalez and Woods, pg. 169
    device, edge_shrp_img = pcv.image_add(mblur_invert_img, lp_shrp_img, device, args.debug)
    # cv2.imshow("imagen-combinacion-sobel-laplacian", edge_shrp_img)
    if args.debug:
        pcv.plot_hist(edge_shrp_img, 'hist_edge_shrp_img')
    # Apply a threshold to generate a binary image
    device, tr_es_img = pcv.binary_threshold(edge_shrp_img, 125, 255, 'dark', device, args.debug)
    # cv2.imshow("imagen binaria de combinacion", tr_es_img)
    # Prepare a few small kernels for morphological filtering
    kern = np.zeros((3, 3), dtype=np.uint8)
    kern1 = np.copy(kern)
    kern1[1, 1:3] = 1
    kern2 = np.copy(kern)
    kern2[1, 0:2] = 1
    kern3 = np.copy(kern)
    kern3[0:2, 1] = 1
    kern4 = np.copy(kern)
    kern4[1:3, 1] = 1
    # Prepare a larger kernel for dilation
    kern[1, 0:3] = 1
    kern[0:3, 1] = 1
    # Perform erosion with the four small kernels
    device, e1_img = pcv.erode(tr_es_img, 1, 1, device, args.debug)
    # cv2.imshow("erosion 1", e1_img)
    device, e2_img = pcv.erode(tr_es_img, 1, 1, device, args.debug)
    # cv2.imshow("erosion 2", e2_img)
    device, e3_img = pcv.erode(tr_es_img, 1, 1, device, args.debug)
    # cv2.imshow("erosion 3", e3_img)
    device, e4_img = pcv.erode(tr_es_img, 1, 1, device, args.debug)
    # cv2.imshow("erosion 4", e4_img)
    # Combine eroded images
    device, c12_img = pcv.logical_or(e1_img, e2_img, device, args.debug)
    # cv2.imshow("c12", c12_img)
    device, c123_img = pcv.logical_or(c12_img, e3_img, device, args.debug)
    # cv2.imshow("c123", c123_img)
    device, c1234_img = pcv.logical_or(c123_img, e4_img, device, args.debug)
    # cv2.imshow("c1234", c1234_img)
    # Bring the two object identification approaches together: use a logical OR to
    # combine the object identified by background subtraction with the object
    # identified by the derivative filters.
    device, comb_img = pcv.logical_or(c1234_img, bkg_sub_thres_img, device, args.debug)
    # cv2.imshow("comb_img", comb_img)
    # Get masked image; essentially identify pixels corresponding to the plant and keep those.
    device, masked_erd = pcv.apply_mask(img, comb_img, 'black', device, args.debug)
    # cv2.imshow("masked_erd", masked_erd)
    # cv2.imshow("imagen original chkar", img)
    # Remove the edges of the image by generating a set of rectangles to mask them.
    # img is (254 x 320)
    # Mask for the bottom of the image
    device, im2, box1_img, rect_contour1, hierarchy1 = pcv.rectangle_mask(img, (120, 184), (215, 252), device, args.debug, color='white')
    # cv2.imshow("im2", box1_img)
    # Mask for the left side of the image
    device, im3, box2_img, rect_contour2, hierarchy2 = pcv.rectangle_mask(img, (1, 1), (85, 252), device, args.debug, color='white')
    # cv2.imshow("im3", box2_img)
    # Mask for the right side of the image
    device, im4, box3_img, rect_contour3, hierarchy3 = pcv.rectangle_mask(img, (240, 1), (318, 252), device, args.debug, color='white')
    # cv2.imshow("im4", box3_img)
    # Mask the edges
    device, im5, box4_img, rect_contour4, hierarchy4 = pcv.rectangle_mask(img, (1, 1), (318, 252), device, args.debug)
    # cv2.imshow("im5", box4_img)
    # Combine boxes to filter the edges and the carrier out of the photo
    device, bx12_img = pcv.logical_or(box1_img, box2_img, device, args.debug)
    device, bx123_img = pcv.logical_or(bx12_img, box3_img, device, args.debug)
    device, bx1234_img = pcv.logical_or(bx123_img, box4_img, device, args.debug)
    # cv2.imshow("combinacion logica or", bx1234_img)
    # Invert this mask and then apply it to the masked image
    device, inv_bx1234_img = pcv.invert(bx1234_img, device, args.debug)
    # cv2.imshow("combinacion logica or invertida", inv_bx1234_img)
    device, edge_masked_img = pcv.apply_mask(masked_erd, inv_bx1234_img, 'black', device, args.debug)
    # cv2.imshow("edge_masked_img", edge_masked_img)
    # Assign the coordinates of an area of interest (rectangle around the area the plant is expected to be in)
    device, im6, roi_img, roi_contour, roi_hierarchy = pcv.rectangle_mask(img, (120, 75), (200, 184), device, args.debug)
    # cv2.imshow("im6", roi_img)
    # Get the coordinates of the plant from the masked object
    plant_objects, plant_hierarchy = cv2.findContours(edge_masked_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # Obtain the coordinates of the plant object which are partially within the area of interest
    device, roi_objects, hierarchy5, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi_contour, roi_hierarchy, plant_objects, plant_hierarchy, device, args.debug)
    # Apply the box mask to the image to ensure no background remains
    device, masked_img = pcv.apply_mask(kept_mask, inv_bx1234_img, 'black', device, args.debug)
    # cv2.imshow("mascara final", masked_img)
    # Obtain a 3-dimensional representation of this grayscale image (for pseudocoloring)
    rgb = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    # cv2.imshow("rgb", rgb)
    # Generate a binary mask to send to the analysis functions
    device, mask = pcv.binary_threshold(masked_img, 1, 255, 'light', device, args.debug)
    # cv2.imshow("mask", mask)
    # Make a copy of this mask for pseudocoloring
    mask3d = np.copy(mask)
    # Extract coordinates of the plant for pseudocoloring
    plant_objects_2, plant_hierarchy_2 = cv2.findContours(mask3d, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    device, o, m = pcv.object_composition(rgb, roi_objects, hierarchy5, device, args.debug)
    # Get final masked image
    device, masked_img = pcv.apply_mask(kept_mask, inv_bx1234_img, 'black', device, args.debug)
    # cv2.imshow("maskara final2", masked_img)

    ####################### Analysis #######################
    # Perform signal analysis. Note: debug= and filename= must be passed as
    # keyword arguments for this call to behave as intended.
    device, hist_header, hist_data, h_norm = pcv.analyze_NIR_intensity(
        img, rgb, mask, 256, device, debug=args.debug, filename=args.outdir + '/' + img_name)
    # Perform shape analysis
    device, shape_header, shape_data, ori_img = pcv.analyze_object(
        rgb, args.image, o, m, device, debug=args.debug, filename=args.outdir + '/' + img_name)
    # Print the results to STDOUT
    pcv.print_results(args.image, hist_header, hist_data)
    pcv.print_results(args.image, shape_header, shape_data)
    cv2.waitKey()
    cv2.destroyAllWindows()
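# Not part of the pipeline above: an alternative to adding the x- and y-Sobel
# responses (pcv.image_add) is the gradient magnitude, which weights both
# directions equally and avoids saturating uint8 values during the addition.
import cv2
import numpy as np

def sobel_magnitude(gray):
    gx = cv2.Sobel(gray, cv2.CV_32F, 1, 0, ksize=1)
    gy = cv2.Sobel(gray, cv2.CV_32F, 0, 1, ksize=1)
    return np.uint8(np.clip(cv2.magnitude(gx, gy), 0, 255))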
def process_tv_images_core(vis_id, vis_img, nir_id, nir_rgb, nir_cv2, brass_mask, traits, debug=None): device = 0 # Convert RGB to HSV and extract the Saturation channel device, s = pcv.rgb2gray_hsv(vis_img, 's', device, debug) # Threshold the Saturation image device, s_thresh = pcv.binary_threshold(s, 75, 255, 'light', device, debug) # Median Filter device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug) device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug) # Fill small objects device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, debug) # Convert RGB to LAB and extract the Blue channel device, b = pcv.rgb2gray_lab(vis_img, 'b', device, debug) # Threshold the blue image device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, debug) device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, debug) # Fill small objects device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, debug) # Join the thresholded saturation and blue-yellow images device, bs = pcv.logical_and(s_fill, b_fill, device, debug) # Apply Mask (for vis images, mask_color=white) device, masked = pcv.apply_mask(vis_img, bs, 'white', device, debug) # Mask pesky brass piece device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, debug) device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, debug) device, brass_inv = pcv.invert(brass_thresh, device, debug) device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device, debug) # Further mask soil and car device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, debug) device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, 'dark', device, debug) device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, 'light', device, debug) device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, debug) device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white', device, debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, debug) device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, debug) # Threshold the green-magenta and blue images device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, 'dark', device, debug) device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, 'light', device, debug) # Join the thresholded saturation and blue-yellow images (OR) device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, debug) device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, debug) # Fill small objects device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 300, device, debug) # Apply mask (for vis images, mask_color=white) device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device, debug) # Identify objects device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, debug) # Define ROI device, roi1, roi_hierarchy = pcv.define_roi(vis_img, 'rectangle', device, None, 'default', debug, True, 600, 450, -600, -350) # Decide which objects to keep device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(vis_img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, debug) # Object combine kept objects device, obj, mask = pcv.object_composition(vis_img, roi_objects, hierarchy3, device, debug) # Find shape properties, output shape image (optional) device, shape_header, shape_data, shape_img = pcv.analyze_object(vis_img, vis_id, obj, mask, device, debug) # Determine color properties device, color_header, color_data, 
color_img = pcv.analyze_color(vis_img, vis_id, mask, 256, device, debug, None, 'v', 'img', 300) # Output shape and color data vis_traits = {} for i in range(1, len(shape_header)): vis_traits[shape_header[i]] = shape_data[i] for i in range(2, len(color_header)): vis_traits[color_header[i]] = serialize_color_data(color_data[i]) ############################# Use VIS image mask for NIR image######################### # Flip mask device, f_mask = pcv.flip(mask, "horizontal", device, debug) # Reize mask device, nmask = pcv.resize(f_mask, 0.116148, 0.116148, device, debug) # position, and crop mask device, newmask = pcv.crop_position_mask(nir_rgb, nmask, device, 15, 5, "top", "right", debug) # Identify objects device, nir_objects, nir_hierarchy = pcv.find_objects(nir_rgb, newmask, device, debug) # Object combine kept objects device, nir_combined, nir_combinedmask = pcv.object_composition(nir_rgb, nir_objects, nir_hierarchy, device, debug) ####################################### Analysis ############################################# device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir_cv2, nir_id, nir_combinedmask, 256, device, False, debug) device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir_cv2, nir_id, nir_combined, nir_combinedmask, device, debug) nir_traits = {} for i in range(1, len(nshape_header)): nir_traits[nshape_header[i]] = nshape_data[i] for i in range(2, len(nhist_header)): nir_traits[nhist_header[i]] = serialize_color_data(nhist_data[i]) # Add data to traits table traits['tv_area'] = vis_traits['area'] return [vis_traits, nir_traits]
def main(): # Get options args = options() # Read image img, path, filename = pcv.readimage(args.image) brass_mask = cv2.imread(args.roi) # Pipeline step device = 0 # Convert RGB to HSV and extract the Saturation channel device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug) # Threshold the Saturation image device, s_thresh = pcv.binary_threshold(s, 49, 255, 'light', device, args.debug) # Median Filter device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug) device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug) # Fill small objects device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, args.debug) # Convert RGB to LAB and extract the Blue channel device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug) # Threshold the blue image device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug) device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug) # Fill small objects device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, args.debug) # Join the thresholded saturation and blue-yellow images device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug) # Apply Mask (for vis images, mask_color=white) device, masked = pcv.apply_mask(img, bs,'white', device, args.debug) # Mask pesky brass piece device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, args.debug) device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, args.debug) device, brass_inv=pcv.invert(brass_thresh, device, args.debug) device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device, args.debug) # Further mask soil and car device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, args.debug) device, soil_car = pcv.binary_threshold(masked_a, 128, 255, 'dark', device, args.debug) device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white', device, args.debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, args.debug) device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, args.debug) # Threshold the green-magenta and blue images device, soila_thresh = pcv.binary_threshold(soil_a, 118, 255, 'dark', device, args.debug) device, soilb_thresh = pcv.binary_threshold(soil_b, 155, 255, 'light', device, args.debug) # Join the thresholded saturation and blue-yellow images (OR) device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug) device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug) # Fill small objects device, soil_fill = pcv.fill(soil_ab, soil_ab_cnt, 75, device, args.debug) # Median Filter device, soil_mblur = pcv.median_blur(soil_fill, 5, device, args.debug) device, soil_cnt = pcv.median_blur(soil_fill, 5, device, args.debug) # Apply mask (for vis images, mask_color=white) device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device, args.debug) # Identify objects device, id_objects,obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, args.debug) # Define ROI device, roi1, roi_hierarchy= pcv.define_roi(img,'circle', device, None, 'default', args.debug,True, 0,0,-50,-50) # Decide which objects to keep device,roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img,'partial',roi1,roi_hierarchy,id_objects,obj_hierarchy,device, args.debug) # Object combine kept objects device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug) ############## Analysis ################ # Find shape properties, output 
shape image (optional) device, shape_header,shape_data,shape_img = pcv.analyze_object(img, args.image, obj, mask, device,args.debug,args.outdir+'/'+filename) # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional) device, color_header,color_data,color_img= pcv.analyze_color(img, args.image, kept_mask, 256, device, args.debug,'all','v','img',300,args.outdir+'/'+filename) # Output shape and color data pcv.print_results(args.image, shape_header, shape_data) pcv.print_results(args.image, color_header, color_data)
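# options() is referenced by the main() functions in this section but never
# shown. A minimal sketch, assuming only the arguments this particular pipeline
# uses (image, ROI/brass-mask image, output directory, debug); the real helper
# may accept more:
import argparse

def options():
    parser = argparse.ArgumentParser(description="PlantCV top-view image processing pipeline.")
    parser.add_argument("-i", "--image", required=True, help="Input VIS image file.")
    parser.add_argument("-r", "--roi", required=True, help="Brass-mask / ROI image file.")
    parser.add_argument("-o", "--outdir", required=True, help="Output directory for analysis images.")
    parser.add_argument("-D", "--debug", default=None, help="Debug mode: None, 'print', or 'plot'.")
    return parser.parse_args()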
import plantcv as pcv

# Read image
img, path, img_filename = pcv.readimage("/home/fitosmartplatform/plantCV/prueba/plan.jpg")
# Image-processing step counter
device = 0
# Create a binary image from a gray image based on threshold values;
# here 'dark' targets dark objects in the image.
device, threshold_light = pcv.binary_threshold(img, 36, 255, 'dark', device, debug="print")
device, h_channel = pcv.rgb2gray_hsv(img, 'h', device, debug="print")
pcv.print_image(h_channel, "/home/fitosmartplatform/plantCV/prueba/image-gray.jpg")
pcv.print_image(threshold_light, "/home/fitosmartplatform/plantCV/prueba/test-image.jpg")

# Notes:
# - debug="plot" displays the image data but does not save them.
# - debug="print" saves the image to a file named after the function.
# - object_type 'light' targets light objects in the image; 'dark' targets dark objects.
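# Illustration of the object_type argument discussed in the notes above, using
# the hue channel extracted earlier; the threshold value 100 is arbitrary and
# only for comparison. With 'light' the pixels above the threshold become
# foreground (255); with 'dark' the pixels at or below the threshold do.
device, mask_light = pcv.binary_threshold(h_channel, 100, 255, 'light', device, debug=None)
device, mask_dark = pcv.binary_threshold(h_channel, 100, 255, 'dark', device, debug=None)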
def main(): # Sets variables from input arguments args = options() device = 0 # Workflow step counter debug = args.debug # Option to display debug images to the notebook rgb_img = args.image # Name of seed Image writeimg = args.writeimg outfile = str(args.result) outdir = str(args.outdir) # Reads in RGB image img, path, filename = pcv.readimage(rgb_img) if writeimg is True: pcv.print_image(img, outfile + "_original.jpg") # Converts RGB to HSV and extract the Saturation channel and inverts image device, img_gray_sat = pcv.rgb2gray_hsv(img, 's', device, debug) img_gray_sat = 255 - img_gray_sat # Corrects saturation image background brightness sat_img2 = 255 - correct_white_background(img_gray_sat) # Convert RGB to HSV and extract the Value channel device, img_gray_val = pcv.rgb2gray_hsv(img, 'v', device, debug) # Corrects value image background brightness val_img2 = 255 - correct_white_background(img_gray_val) # Convert RGB to HSV and extract the Hue channel device, img_hue = pcv.rgb2gray_hsv(img, 'h', device, debug) # Corrects Hue Image Based on standard mask = np.zeros(img.shape[:2], np.uint8) mask[1050: 1150, 3750: 3850] = 255 huehist = cv2.calcHist([img_hue], [0], mask, [256], [0, 256]) correction_factor = 155 - np.argmax(huehist) hue_channel = np.add(img_hue, correction_factor) if correction_factor > 0: hue_channel = np.where(hue_channel > 179, hue_channel - 180, hue_channel) elif correction_factor < 0: hue_channel = np.where(hue_channel < 0, 180 + hue_channel, hue_channel) # Thresholds the Saturation image device, sat_img_binary = pcv.binary_threshold(sat_img2, 35, 255, 'light', device, debug) # Threshold the Value image device, val_img_binary = pcv.binary_threshold(val_img2, 35, 255, 'light', device, debug) # Combines masks img_binary = np.where(sat_img_binary < 255, val_img_binary, sat_img_binary) # Fills in speckles smaller than 200 pixels mask = np.copy(img_binary) device, fill_image = pcv.fill(img_binary, mask, 200, device, debug) if writeimg is True: pcv.print_image(mask, outfile + "_mask.jpg") pcv.print_image(img_binary, outfile + "_binary.jpg") # Identifies objects using filled binary image as a mask device, id_objects, obj_hierarchy = pcv.find_objects(img, fill_image, device, debug) # Defines rectangular ROI device, roi, roi_hierarchy = \ pcv.define_roi(img, 'rectangle', device, None, 'default', debug, True, 300, 1000, -1250, -425) # Keeps only objects within or partially within ROI device, roi_objects, roi_obj_hierarchy, kept_mask, obj_area = \ pcv.roi_objects(img, 'partial', roi, roi_hierarchy, id_objects, obj_hierarchy, device, debug) # Randomly colors the individual seeds img_copy = np.copy(img) for i in range(0, len(roi_objects)): rand_color = color_palette(1) cv2.drawContours(img_copy, roi_objects, i, rand_color[0], -1, lineType=8, hierarchy=roi_obj_hierarchy) if writeimg is True: pcv.print_image(img_copy, outfile + "_coloredseeds.jpg") # Gets the area of each seed, saved in shape_data shape_header = [] table = [] for i in range(0, len(roi_objects)): if roi_obj_hierarchy[0][i][3] == -1: # Checks if shape is a parent contour # Object combine kept objects device, obj, mask2 = pcv.object_composition(img, [roi_objects[i]], np.array([[roi_obj_hierarchy[0][i]]]), device, debug) if obj is not None: device, shape_header, shape_data, shape_img = \ pcv.analyze_object(img, rgb_img, obj, mask2, device, debug) if shape_header is not None: shape_header.append('hue') shape_header.append('saturation') if shape_data is not None: darkval = float(np.sum(np.multiply(sat_img2, mask2))) / 
np.sum(mask2) huehist = cv2.calcHist([hue_channel], [0], mask2, [256], [0, 256]) hueval = np.argmax(huehist) shape_data.append(hueval) shape_data.append(darkval) table.append(shape_data) # Finds the area of the size marker in pixels and saves to "marker data" device, marker_header, marker_data, analysis_images =\ pcv.report_size_marker_area(img, 'rectangle', device, debug, "detect", 3525, 850, -200, -1700, "black", "light", "h", 120) # shape_header.append("marker_area") # Saves seed and marker shape data results to file metadata = open(posixpath.join(outdir, outfile), 'r').read() os.remove(posixpath.join(outdir, outfile)) for seed, row in enumerate(table): prefix = posixpath.join(outdir, outfile[0:-4]) results = open(prefix + '_' + str(seed + 1) + '.txt', 'w') results.write(metadata) results.write('\t'.join(map(str, shape_header)) + '\n') # row.append(marker_data[1]) results.write('\t'.join(map(str, row)) + '\n') results.write('\t'.join(map(str, marker_header)) + '\n') results.write('\t'.join(map(str, marker_data)) + '\n') results.close()
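The hue-correction block above shifts the whole hue channel so that a known color-standard patch peaks at a fixed hue, wrapping values around OpenCV's 0-179 hue range. A minimal sketch of the same idea as a reusable helper (the function name is hypothetical; the patch coordinates and target hue of 155 are taken from the workflow above):

import cv2
import numpy as np

def correct_hue(img_hue, ref_slice, target_hue=155):
    """Shift an OpenCV hue channel (0-179) so a reference patch peaks at target_hue.

    ref_slice is a (row_slice, col_slice) tuple covering the color-standard patch.
    """
    mask = np.zeros(img_hue.shape[:2], np.uint8)
    mask[ref_slice] = 255
    hist = cv2.calcHist([img_hue], [0], mask, [256], [0, 256])
    shift = target_hue - int(np.argmax(hist))
    # Work in a signed type, then wrap back into the 0-179 hue range
    corrected = np.mod(img_hue.astype(np.int16) + shift, 180)
    return corrected.astype(np.uint8)

# Usage with the patch coordinates used above:
# hue_fixed = correct_hue(img_hue, (slice(1050, 1150), slice(3750, 3850)))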
def main(): # Get options args = options() path_mask = '/home/mfeldman/tester/mask/mask_brass_tv_z1_L0.png' # Read image img, path, filename = pcv.readimage(args.image) brass_mask = cv2.imread(path_mask) # Pipeline step device = 0 # Mask pesky brass piece device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, args.debug) device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, args.debug) device, brass_inv=pcv.invert(brass_thresh, device, args.debug) device, masked_image = pcv.apply_mask(img, brass_inv, 'white', device, args.debug) # We can do a pretty good job of identifying the plant from the s channel device, s = pcv.rgb2gray_hsv(masked_image, 's', device, args.debug) s_thresh = cv2.inRange(s, 100, 190) # Lets blur the result a bit to get rid of unwanted noise s_blur = cv2.medianBlur(s_thresh,5) # The a channel is good too device, a = pcv.rgb2gray_lab(masked_image, 'a', device, args.debug) a_thresh = cv2.inRange(a, 90, 120) a_blur = cv2.medianBlur(a_thresh,5) # Now lets set of a series of filters to remove unwanted background plant_shape = cv2.bitwise_and(a_blur, s_blur) # Lets remove all the crap on the sides of the image plant_shape[:,:330] = 0 plant_shape[:,2100:] = 0 plant_shape[:200,:] = 0 # Now remove all remaining small points using erosion with a 3 x 3 kernel kernel = np.ones((3,3),np.uint8) erosion = cv2.erode(plant_shape ,kernel,iterations = 1) # Now dilate to fill in small holes kernel = np.ones((3,3),np.uint8) dilation = cv2.dilate(erosion ,kernel,iterations = 1) # Apply mask to the background image device, masked = pcv.apply_mask(masked_image, plant_shape, 'white', device, args.debug) # Identify objects device, id_objects, obj_hierarchy = pcv.find_objects(masked, dilation, device, args.debug) # Get ROI contours device, roi, roi_hierarchy = pcv.define_roi(masked_image, 'circle', device, None, 'default', args.debug, True, x_adj=0, y_adj=0, w_adj=0, h_adj=-1200) # ROI device,roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(masked_image,'partial',roi, roi_hierarchy, id_objects,obj_hierarchy,device, args.debug) # Get object contour and masked object device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug) ############## Landmarks ################ device, points = pcv.acute_vertex(obj, 40, 40, 40, img, device, args.debug) boundary_line = 'NA' # Use acute fxn to estimate tips device, points_r, centroid_r, bline_r = pcv.scale_features(obj, mask, points, boundary_line, device, args.debug) # Get number of points tips = len(points_r) # Use turgor_proxy fxn to get distances device, vert_ave_c, hori_ave_c, euc_ave_c, ang_ave_c, vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b = pcv.turgor_proxy(points_r, centroid_r, bline_r, device, args.debug) # Get pseudomarkers along the y-axis device, left, right, center_h = pcv.y_axis_pseudolandmarks(obj, mask, img, device, args.debug) # Re-scale the points device, left_r, left_cr, left_br = pcv.scale_features(obj, mask, left, boundary_line, device, args.debug) device, right_r, right_cr, right_br = pcv.scale_features(obj, mask, right, boundary_line, device, args.debug) device, center_hr, center_hcr, center_hbr = pcv.scale_features(obj, mask, center_h, boundary_line, device, args.debug) # Get pseudomarkers along the x-axis device, top, bottom, center_v = pcv.x_axis_pseudolandmarks(obj, mask, img, device, args.debug) # Re-scale the points device, top_r, top_cr, top_br = pcv.scale_features(obj, mask, top, boundary_line, device, args.debug) device, bottom_r, bottom_cr, 
bottom_br = pcv.scale_features(obj, mask, bottom, boundary_line, device, args.debug) device, center_vr, center_vcr, center_vbr = pcv.scale_features(obj, mask, center_v, boundary_line, device, args.debug) ## Need to convert the points into a list of tuples format to match the scaled points points = points.reshape(len(points),2) points = points.tolist() temp_out = [] for p in points: p = tuple(p) temp_out.append(p) points = temp_out left = left.reshape(20,2) left = left.tolist() temp_out = [] for l in left: l = tuple(l) temp_out.append(l) left = temp_out right = right.reshape(20,2) right = right.tolist() temp_out = [] for r in right: r = tuple(r) temp_out.append(r) right = temp_out center_h = center_h.reshape(20,2) center_h = center_h.tolist() temp_out = [] for ch in center_h: ch = tuple(ch) temp_out.append(ch) center_h = temp_out ## Need to convert the points into a list of tuples format to match the scaled points top = top.reshape(20,2) top = top.tolist() temp_out = [] for t in top: t = tuple(t) temp_out.append(t) top = temp_out bottom = bottom.reshape(20,2) bottom = bottom.tolist() temp_out = [] for b in bottom: b = tuple(b) temp_out.append(b) bottom = temp_out center_v = center_v.reshape(20,2) center_v = center_v.tolist() temp_out = [] for cvr in center_v: cvr = tuple(cvr) temp_out.append(cvr) center_v = temp_out #Store Landmark Data landmark_header=( 'HEADER_LANDMARK', 'tip_points', 'tip_points_r', 'centroid_r', 'baseline_r', 'tip_number', 'vert_ave_c', 'hori_ave_c', 'euc_ave_c', 'ang_ave_c', 'vert_ave_b', 'hori_ave_b', 'euc_ave_b', 'ang_ave_b', 'left_lmk', 'right_lmk', 'center_h_lmk', 'left_lmk_r', 'right_lmk_r', 'center_h_lmk_r', 'top_lmk', 'bottom_lmk', 'center_v_lmk', 'top_lmk_r', 'bottom_lmk_r', 'center_v_lmk_r' ) landmark_data = ( 'LANDMARK_DATA', points, points_r, centroid_r, bline_r, tips, vert_ave_c, hori_ave_c, euc_ave_c, ang_ave_c, vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b, left, right, center_h, left_r, right_r, center_hr, top, bottom, center_v, top_r, bottom_r, center_vr ) ############## VIS Analysis ################ outfile=False #if args.writeimg==True: # outfile=args.outdir+"/"+filename # Find shape properties, output shape image (optional) device, shape_header,shape_data,shape_img = pcv.analyze_object(img, args.image, obj, mask, device,args.debug,outfile) # Shape properties relative to user boundary line (optional) device, boundary_header,boundary_data, boundary_img1= pcv.analyze_bound(img, args.image,obj, mask, 330, device,args.debug,outfile) # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional) device, color_header,color_data,color_img= pcv.analyze_color(img, args.image, mask, 256, device, args.debug,None,'v','img',300,outfile) # Output shape and color data result=open(args.result,"a") result.write('\t'.join(map(str,shape_header))) result.write("\n") result.write('\t'.join(map(str,shape_data))) result.write("\n") for row in shape_img: result.write('\t'.join(map(str,row))) result.write("\n") result.write('\t'.join(map(str,color_header))) result.write("\n") result.write('\t'.join(map(str,color_data))) result.write("\n") result.write('\t'.join(map(str,boundary_header))) result.write("\n") result.write('\t'.join(map(str,boundary_data))) result.write("\n") result.write('\t'.join(map(str,boundary_img1))) result.write("\n") for row in color_img: result.write('\t'.join(map(str,row))) result.write("\n") result.write('\t'.join(map(str,landmark_header))) result.write("\n") 
result.write('\t'.join(map(str,landmark_data))) result.write("\n") result.close()
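The six reshape-and-loop blocks that turn landmark arrays into lists of (x, y) tuples differ only in the variable name; a small helper could replace all of them. A sketch under that assumption (the helper name is hypothetical):

import numpy as np

def to_tuple_list(points, n_points=None):
    """Convert an OpenCV/PlantCV point array to a list of (x, y) tuples.

    n_points is optional and only used as a sanity check on the landmark count.
    """
    pts = np.asarray(points).reshape(-1, 2)
    if n_points is not None and len(pts) != n_points:
        raise ValueError("unexpected number of landmark points")
    return [tuple(p) for p in pts.tolist()]

# e.g. left = to_tuple_list(left, 20); points = to_tuple_list(points)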
def main(): # Get options args = options() # Read image img, path, filename = pcv.readimage(args.image) # Pipeline step device = 0 # Convert RGB to HSV and extract the Saturation channel device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug) # Threshold the Saturation image device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, args.debug) # Median Filter device, s_mblur = pcv.median_blur(s_thresh, 0, device, args.debug) device, s_cnt = pcv.median_blur(s_thresh, 0, device, args.debug) # Fill small objects #device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug) # Convert RGB to LAB and extract the Blue channel device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug) # Threshold the blue image device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device, args.debug) device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device, args.debug) # Fill small objects #device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug) # Join the thresholded saturation and blue-yellow images device, bs = pcv.logical_and(s_mblur, b_cnt, device, args.debug) # Apply Mask (for vis images, mask_color=white) device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug) device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, args.debug) # Threshold the green-magenta and blue images device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark', device, args.debug) device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, args.debug) # Join the thresholded saturation and blue-yellow images (OR) device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug) device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug) # Fill small noise device, ab_fill1 = pcv.fill(ab, ab_cnt, 2, device, args.debug) # Dilate to join small objects with larger ones device, ab_cnt1 = pcv.dilate(ab_fill1, 3, 2, device, args.debug) device, ab_cnt2 = pcv.dilate(ab_fill1, 3, 2, device, args.debug) # Fill dilated image mask device, ab_cnt3 = pcv.fill(ab_cnt2, ab_cnt1, 150, device, args.debug) img2 = np.copy(img) device, masked2 = pcv.apply_mask(img2, ab_cnt3, 'white', device, args.debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, args.debug) device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, args.debug) # Threshold the green-magenta and blue images device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, 'dark', device, args.debug) device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255, 'light', device, args.debug) device, ab_fill = pcv.logical_or(masked2a_thresh, masked2b_thresh, device, args.debug) # Identify objects device, id_objects, obj_hierarchy = pcv.find_objects( masked2, ab_fill, device, args.debug) # Define ROI device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device, None, 'default', args.debug, True, 550, 10, -600, -907) # Decide which objects to keep device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects( img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug) # Object combine kept objects device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug) ############## Landmarks ################ device, points = pcv.acute_vertex(obj, 40, 40, 40, 
img, device, args.debug) boundary_line = 900 # Use acute fxn to estimate tips device, points_r, centroid_r, bline_r = pcv.scale_features( obj, mask, points, boundary_line, device, args.debug) # Get number of points tips = len(points_r) # Use turgor_proxy fxn to get distances device, vert_ave_c, hori_ave_c, euc_ave_c, ang_ave_c, vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b = pcv.turgor_proxy( points_r, centroid_r, bline_r, device, args.debug) # Get pseudomarkers along the y-axis device, left, right, center_h = pcv.y_axis_pseudolandmarks( obj, mask, img, device, args.debug) # Re-scale the points device, left_r, left_cr, left_br = pcv.scale_features( obj, mask, left, boundary_line, device, args.debug) device, right_r, right_cr, right_br = pcv.scale_features( obj, mask, right, boundary_line, device, args.debug) device, center_hr, center_hcr, center_hbr = pcv.scale_features( obj, mask, center_h, boundary_line, device, args.debug) # Get pseudomarkers along the x-axis device, top, bottom, center_v = pcv.x_axis_pseudolandmarks( obj, mask, img, device, args.debug) # Re-scale the points device, top_r, top_cr, top_br = pcv.scale_features(obj, mask, top, boundary_line, device, args.debug) device, bottom_r, bottom_cr, bottom_br = pcv.scale_features( obj, mask, bottom, boundary_line, device, args.debug) device, center_vr, center_vcr, center_vbr = pcv.scale_features( obj, mask, center_v, boundary_line, device, args.debug) ## Need to convert the points into a list of tuples format to match the scaled points points = points.reshape(len(points), 2) points = points.tolist() temp_out = [] for p in points: p = tuple(p) temp_out.append(p) points = temp_out left = left.reshape(20, 2) left = left.tolist() temp_out = [] for l in left: l = tuple(l) temp_out.append(l) left = temp_out right = right.reshape(20, 2) right = right.tolist() temp_out = [] for r in right: r = tuple(r) temp_out.append(r) right = temp_out center_h = center_h.reshape(20, 2) center_h = center_h.tolist() temp_out = [] for ch in center_h: ch = tuple(ch) temp_out.append(ch) center_h = temp_out ## Need to convert the points into a list of tuples format to match the scaled points top = top.reshape(20, 2) top = top.tolist() temp_out = [] for t in top: t = tuple(t) temp_out.append(t) top = temp_out bottom = bottom.reshape(20, 2) bottom = bottom.tolist() temp_out = [] for b in bottom: b = tuple(b) temp_out.append(b) bottom = temp_out center_v = center_v.reshape(20, 2) center_v = center_v.tolist() temp_out = [] for cvr in center_v: cvr = tuple(cvr) temp_out.append(cvr) center_v = temp_out #Store Landmark Data landmark_header = ('HEADER_LANDMARK', 'tip_points', 'tip_points_r', 'centroid_r', 'baseline_r', 'tip_number', 'vert_ave_c', 'hori_ave_c', 'euc_ave_c', 'ang_ave_c', 'vert_ave_b', 'hori_ave_b', 'euc_ave_b', 'ang_ave_b', 'left_lmk', 'right_lmk', 'center_h_lmk', 'left_lmk_r', 'right_lmk_r', 'center_h_lmk_r', 'top_lmk', 'bottom_lmk', 'center_v_lmk', 'top_lmk_r', 'bottom_lmk_r', 'center_v_lmk_r') landmark_data = ('LANDMARK_DATA', points, points_r, centroid_r, bline_r, tips, vert_ave_c, hori_ave_c, euc_ave_c, ang_ave_c, vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b, left, right, center_h, left_r, right_r, center_hr, top, bottom, center_v, top_r, bottom_r, center_vr) ############## VIS Analysis ################ outfile = False #if args.writeimg==True: #outfile=args.outdir+"/"+filename # Find shape properties, output shape image (optional) device, shape_header, shape_data, shape_img = pcv.analyze_object( img, args.image, obj, mask, device, args.debug, 
outfile) # Shape properties relative to user boundary line (optional) device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound( img, args.image, obj, mask, 935, device, args.debug, outfile) # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional) device, color_header, color_data, color_img = pcv.analyze_color( img, args.image, mask, 256, device, args.debug, None, 'v', 'img', 300, outfile) # Output shape and color data result = open(args.result, "a") result.write('\t'.join(map(str, shape_header))) result.write("\n") result.write('\t'.join(map(str, shape_data))) result.write("\n") for row in shape_img: result.write('\t'.join(map(str, row))) result.write("\n") result.write('\t'.join(map(str, color_header))) result.write("\n") result.write('\t'.join(map(str, color_data))) result.write("\n") result.write('\t'.join(map(str, boundary_header))) result.write("\n") result.write('\t'.join(map(str, boundary_data))) result.write("\n") result.write('\t'.join(map(str, boundary_img1))) result.write("\n") for row in color_img: result.write('\t'.join(map(str, row))) result.write("\n") result.write('\t'.join(map(str, landmark_header))) result.write("\n") result.write('\t'.join(map(str, landmark_data))) result.write("\n") result.close()
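Both landmark workflows end with the same long run of result.write calls appending tab-separated header and data rows; the pattern could be wrapped once. A minimal sketch (the function name is hypothetical):

def write_rows(result_path, *rows):
    """Append each row to the results file as one tab-separated line."""
    with open(result_path, "a") as result:
        for row in rows:
            result.write("\t".join(map(str, row)) + "\n")

# e.g. write_rows(args.result, shape_header, shape_data, color_header, color_data,
#                 boundary_header, boundary_data, landmark_header, landmark_data)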
def main(): # Get options args = options() # Read image img, path, filename = pcv.readimage(args.image) # Pipeline step device = 0 # Convert RGB to HSV and extract the Saturation channel device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug) # Threshold the Saturation image device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, args.debug) # Median Filter device, s_mblur = pcv.median_blur(s_thresh, 0, device, args.debug) device, s_cnt = pcv.median_blur(s_thresh, 0, device, args.debug) # Fill small objects #device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug) # Convert RGB to LAB and extract the Blue channel device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug) # Threshold the blue image device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device, args.debug) device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device, args.debug) # Fill small objects #device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug) # Join the thresholded saturation and blue-yellow images device, bs = pcv.logical_and(s_mblur, b_cnt, device, args.debug) # Apply Mask (for vis images, mask_color=white) device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug) device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, args.debug) # Threshold the green-magenta and blue images device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark', device, args.debug) device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, args.debug) # Join the thresholded saturation and blue-yellow images (OR) device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug) device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug) # Fill small noise device, ab_fill1 = pcv.fill(ab, ab_cnt, 2, device, args.debug) # Dilate to join small objects with larger ones device, ab_cnt1=pcv.dilate(ab_fill1, 3, 2, device, args.debug) device, ab_cnt2=pcv.dilate(ab_fill1, 3, 2, device, args.debug) # Fill dilated image mask device, ab_cnt3=pcv.fill(ab_cnt2,ab_cnt1,150,device,args.debug) device, masked2 = pcv.apply_mask(masked, ab_cnt3, 'white', device, args.debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, args.debug) device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, args.debug) # Threshold the green-magenta and blue images device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, 'dark', device, args.debug) device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255, 'light', device, args.debug) device, ab_fill = pcv.logical_or(masked2a_thresh, masked2b_thresh, device, args.debug) # Identify objects device, id_objects,obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, args.debug) # Define ROI device, roi1, roi_hierarchy= pcv.define_roi(masked2,'rectangle', device, None, 'default', args.debug,True, 525, 0,-490,-150) # Decide which objects to keep device,roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img,'partial',roi1,roi_hierarchy,id_objects,obj_hierarchy,device, args.debug) # Object combine kept objects device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug) ############## VIS Analysis ################ outfile=False if args.writeimg==True: outfile=args.outdir+"/"+filename # Find shape properties, 
output shape image (optional) device, shape_header,shape_data,shape_img = pcv.analyze_object(img, args.image, obj, mask, device,args.debug,outfile) # Shape properties relative to user boundary line (optional) device, boundary_header,boundary_data, boundary_img1= pcv.analyze_bound(img, args.image,obj, mask, 325, device,args.debug,outfile) # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional) device, color_header,color_data,color_img= pcv.analyze_color(img, args.image, mask, 256, device, args.debug,None,'v','img',300,outfile) # Output shape and color data result=open(args.result,"a") result.write('\t'.join(map(str,shape_header))) result.write("\n") result.write('\t'.join(map(str,shape_data))) result.write("\n") for row in shape_img: result.write('\t'.join(map(str,row))) result.write("\n") result.write('\t'.join(map(str,color_header))) result.write("\n") result.write('\t'.join(map(str,color_data))) result.write("\n") result.write('\t'.join(map(str,boundary_header))) result.write("\n") result.write('\t'.join(map(str,boundary_data))) result.write("\n") result.write('\t'.join(map(str,boundary_img1))) result.write("\n") for row in color_img: result.write('\t'.join(map(str,row))) result.write("\n") result.close()
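Several steps above call the same PlantCV function twice with identical arguments (for example b_thresh/b_cnt and s_mblur/s_cnt), apparently only to obtain two independent arrays for functions that modify their inputs in place. If that is the only reason, copying the first result is equivalent and avoids recomputation; a sketch under that assumption:

import numpy as np

def independent_copy(mask):
    """Return an independent copy of a binary mask.

    Alternative to re-running the same threshold/blur call, assuming the second
    call exists only to protect against in-place modification downstream.
    """
    return np.copy(mask)

# e.g. b_cnt = independent_copy(b_thresh) instead of a second pcv.binary_threshold call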
def main(): # Get options args = options() if args.debug: print("Analyzing your image dude...") # Read image device = 0 img = cv2.imread(args.image, flags=0) path, img_name = os.path.split(args.image) # Read in image which is average of average of backgrounds img_bkgrd = cv2.imread("bkgrd_ave_z3500.png", flags=0) # NIR images for burnin2 are up-side down. This may be fixed in later experiments img = ndimage.rotate(img, 180) img_bkgrd = ndimage.rotate(img_bkgrd, 180) # Subtract the image from the image background to make the plant more prominent device, bkg_sub_img = pcv.image_subtract(img, img_bkgrd, device, args.debug) if args.debug: pcv.plot_hist(bkg_sub_img, 'bkg_sub_img') device, bkg_sub_thres_img = pcv.binary_threshold(bkg_sub_img, 145, 255, 'dark', device, args.debug) bkg_sub_thres_img = cv2.inRange(bkg_sub_img, 30, 220) if args.debug: cv2.imwrite('bkgrd_sub_thres.png', bkg_sub_thres_img) #device, bkg_sub_thres_img = pcv.binary_threshold_2_sided(img_bkgrd, 50, 190, device, args.debug) # if a region of interest is specified read it in roi = cv2.imread(args.roi) # Start by examining the distribution of pixel intensity values if args.debug: pcv.plot_hist(img, 'hist_img') # Will intensity transformation enhance your ability to isolate object of interest by thesholding? device, he_img = pcv.HistEqualization(img, device, args.debug) if args.debug: pcv.plot_hist(he_img, 'hist_img_he') # Laplace filtering (identify edges based on 2nd derivative) device, lp_img = pcv.laplace_filter(img, 1, 1, device, args.debug) if args.debug: pcv.plot_hist(lp_img, 'hist_lp') # Lapacian image sharpening, this step will enhance the darkness of the edges detected device, lp_shrp_img = pcv.image_subtract(img, lp_img, device, args.debug) if args.debug: pcv.plot_hist(lp_shrp_img, 'hist_lp_shrp') # Sobel filtering # 1st derivative sobel filtering along horizontal axis, kernel = 1, unscaled) device, sbx_img = pcv.sobel_filter(img, 1, 0, 1, 1, device, args.debug) if args.debug: pcv.plot_hist(sbx_img, 'hist_sbx') # 1st derivative sobel filtering along vertical axis, kernel = 1, unscaled) device, sby_img = pcv.sobel_filter(img, 0, 1, 1, 1, device, args.debug) if args.debug: pcv.plot_hist(sby_img, 'hist_sby') # Combine the effects of both x and y filters through matrix addition # This will capture edges identified within each plane and emphesize edges found in both images device, sb_img = pcv.image_add(sbx_img, sby_img, device, args.debug) if args.debug: pcv.plot_hist(sb_img, 'hist_sb_comb_img') # Use a lowpass (blurring) filter to smooth sobel image device, mblur_img = pcv.median_blur(sb_img, 1, device, args.debug) device, mblur_invert_img = pcv.invert(mblur_img, device, args.debug) # combine the smoothed sobel image with the laplacian sharpened image # combines the best features of both methods as described in "Digital Image Processing" by Gonzalez and Woods pg. 
169 device, edge_shrp_img = pcv.image_add(mblur_invert_img, lp_shrp_img, device, args.debug) if args.debug: pcv.plot_hist(edge_shrp_img, 'hist_edge_shrp_img') # Perform thresholding to generate a binary image device, tr_es_img = pcv.binary_threshold(edge_shrp_img, 145, 255, 'dark', device, args.debug) # Prepare a few small kernels for morphological filtering kern = np.zeros((3,3), dtype=np.uint8) kern1 = np.copy(kern) kern1[1,1:3]=1 kern2 = np.copy(kern) kern2[1,0:2]=1 kern3 = np.copy(kern) kern3[0:2,1]=1 kern4 = np.copy(kern) kern4[1:3,1]=1 # Prepare a larger kernel for dilation kern[1,0:3]=1 kern[0:3,1]=1 # Perform erosion with 4 small kernels device, e1_img = pcv.erode(tr_es_img, kern1, 1, device, args.debug) device, e2_img = pcv.erode(tr_es_img, kern2, 1, device, args.debug) device, e3_img = pcv.erode(tr_es_img, kern3, 1, device, args.debug) device, e4_img = pcv.erode(tr_es_img, kern4, 1, device, args.debug) # Combine eroded images device, c12_img = pcv.logical_or(e1_img, e2_img, device, args.debug) device, c123_img = pcv.logical_or(c12_img, e3_img, device, args.debug) device, c1234_img = pcv.logical_or(c123_img, e4_img, device, args.debug) # Perform dilation # device, dil_img = pcv.dilate(c1234_img, kern, 1, device, args.debug) device, comb_img = pcv.logical_or(c1234_img, bkg_sub_thres_img, device, args.debug) # Get masked image # The dilated image may contain some pixels which are not plant device, masked_erd = pcv.apply_mask(img, comb_img, 'black', device, args.debug) # device, masked_erd_dil = pcv.apply_mask(img, dil_img, 'black', device, args.debug) # Need to remove the edges of the image, we did that by generating a set of rectangles to mask the edges # img is (254 X 320) # mask for the bottom of the image device, box1_img, rect_contour1, hierarchy1 = pcv.rectangle_mask(img, (100,210), (230,252), device, args.debug) # mask for the left side of the image device, box2_img, rect_contour2, hierarchy2 = pcv.rectangle_mask(img, (1,1), (85,252), device, args.debug) # mask for the right side of the image device, box3_img, rect_contour3, hierarchy3 = pcv.rectangle_mask(img, (240,1), (318,252), device, args.debug) # mask the edges device, box4_img, rect_contour4, hierarchy4 = pcv.border_mask(img, (1,1), (318,252), device, args.debug) # combine boxes to filter the edges and car out of the photo device, bx12_img = pcv.logical_or(box1_img, box2_img, device, args.debug) device, bx123_img = pcv.logical_or(bx12_img, box3_img, device, args.debug) device, bx1234_img = pcv.logical_or(bx123_img, box4_img, device, args.debug) device, inv_bx1234_img = pcv.invert(bx1234_img, device, args.debug) # Make a ROI around the plant, include connected objects # Apply the box mask to the image # device, masked_img = pcv.apply_mask(masked_erd_dil, inv_bx1234_img, 'black', device, args.debug) device, edge_masked_img = pcv.apply_mask(masked_erd, inv_bx1234_img, 'black', device, args.debug) device, roi_img, roi_contour, roi_hierarchy = pcv.rectangle_mask(img, (100,75), (220,208), device, args.debug) plant_objects, plant_hierarchy = cv2.findContours(edge_masked_img,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE) device, roi_objects, hierarchy5, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi_contour, roi_hierarchy, plant_objects, plant_hierarchy, device, args.debug) # Apply the box mask to the image # device, masked_img = pcv.apply_mask(masked_erd_dil, inv_bx1234_img, 'black', device, args.debug) device, masked_img = pcv.apply_mask(kept_mask, inv_bx1234_img, 'black', device, args.debug) rgb = 
cv2.cvtColor(img,cv2.COLOR_GRAY2RGB) # Generate a binary to send to the analysis function device, mask = pcv.binary_threshold(masked_img, 1, 255, 'light', device, args.debug) mask3d = np.copy(mask) plant_objects_2, plant_hierarchy_2 = cv2.findContours(mask3d,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE) device, o, m = pcv.object_composition(rgb, roi_objects, hierarchy5, device, args.debug) ### Analysis ### device, hist_header, hist_data, h_norm = pcv.analyze_NIR_intensity(img, args.image, mask, 256, device, args.debug, args.outdir + '/' + img_name) device, shape_header, shape_data, ori_img = pcv.analyze_object(rgb, args.image, o, m, device, args.debug, args.outdir + '/' + img_name) pcv.print_results(args.image, hist_header, hist_data) pcv.print_results(args.image, shape_header, shape_data)
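The edge-enhancement sequence above (Laplacian sharpening combined with summed Sobel edges, after Gonzalez and Woods) can be expressed compactly in plain OpenCV. A sketch assuming an 8-bit grayscale input; the median-blur kernel is changed from the workflow's 1 (effectively no smoothing) to an illustrative 5:

import cv2

def edge_enhance(gray):
    """Sharpen with the Laplacian and reinforce with combined Sobel edges."""
    # Laplacian sharpening: subtracting the Laplacian darkens/strengthens edges
    lap = cv2.convertScaleAbs(cv2.Laplacian(gray, cv2.CV_16S, ksize=1))
    sharpened = cv2.subtract(gray, lap)

    # First-derivative edges along each axis, combined by (saturating) addition
    sbx = cv2.convertScaleAbs(cv2.Sobel(gray, cv2.CV_16S, 1, 0, ksize=1))
    sby = cv2.convertScaleAbs(cv2.Sobel(gray, cv2.CV_16S, 0, 1, ksize=1))
    edges = cv2.add(sbx, sby)

    # Smooth the Sobel result, invert it, and add it to the sharpened image
    smoothed = cv2.medianBlur(edges, 5)
    inverted = cv2.bitwise_not(smoothed)
    return cv2.add(inverted, sharpened)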
def main(): # Get options args = options() if args.debug: print("Analyzing your image dude...") # Read image device = 0 img = cv2.imread(args.image, flags=0) path, img_name = os.path.split(args.image) # Read in image which is average of average of backgrounds img_bkgrd = cv2.imread("bkgrd_ave_z2500.png", flags=0) # NIR images for burnin2 are up-side down. This may be fixed in later experiments img = ndimage.rotate(img, 180) img_bkgrd = ndimage.rotate(img_bkgrd, 180) # Subtract the image from the image background to make the plant more prominent device, bkg_sub_img = pcv.image_subtract(img, img_bkgrd, device, args.debug) if args.debug: pcv.plot_hist(bkg_sub_img, "bkg_sub_img") device, bkg_sub_thres_img = pcv.binary_threshold(bkg_sub_img, 145, 255, "dark", device, args.debug) bkg_sub_thres_img = cv2.inRange(bkg_sub_img, 30, 220) if args.debug: cv2.imwrite("bkgrd_sub_thres.png", bkg_sub_thres_img) # device, bkg_sub_thres_img = pcv.binary_threshold_2_sided(img_bkgrd, 50, 190, device, args.debug) # if a region of interest is specified read it in roi = cv2.imread(args.roi) # Start by examining the distribution of pixel intensity values if args.debug: pcv.plot_hist(img, "hist_img") # Will intensity transformation enhance your ability to isolate object of interest by thesholding? device, he_img = pcv.HistEqualization(img, device, args.debug) if args.debug: pcv.plot_hist(he_img, "hist_img_he") # Laplace filtering (identify edges based on 2nd derivative) device, lp_img = pcv.laplace_filter(img, 1, 1, device, args.debug) if args.debug: pcv.plot_hist(lp_img, "hist_lp") # Lapacian image sharpening, this step will enhance the darkness of the edges detected device, lp_shrp_img = pcv.image_subtract(img, lp_img, device, args.debug) if args.debug: pcv.plot_hist(lp_shrp_img, "hist_lp_shrp") # Sobel filtering # 1st derivative sobel filtering along horizontal axis, kernel = 1, unscaled) device, sbx_img = pcv.sobel_filter(img, 1, 0, 1, 1, device, args.debug) if args.debug: pcv.plot_hist(sbx_img, "hist_sbx") # 1st derivative sobel filtering along vertical axis, kernel = 1, unscaled) device, sby_img = pcv.sobel_filter(img, 0, 1, 1, 1, device, args.debug) if args.debug: pcv.plot_hist(sby_img, "hist_sby") # Combine the effects of both x and y filters through matrix addition # This will capture edges identified within each plane and emphesize edges found in both images device, sb_img = pcv.image_add(sbx_img, sby_img, device, args.debug) if args.debug: pcv.plot_hist(sb_img, "hist_sb_comb_img") # Use a lowpass (blurring) filter to smooth sobel image device, mblur_img = pcv.median_blur(sb_img, 1, device, args.debug) device, mblur_invert_img = pcv.invert(mblur_img, device, args.debug) # combine the smoothed sobel image with the laplacian sharpened image # combines the best features of both methods as described in "Digital Image Processing" by Gonzalez and Woods pg. 
169 device, edge_shrp_img = pcv.image_add(mblur_invert_img, lp_shrp_img, device, args.debug) if args.debug: pcv.plot_hist(edge_shrp_img, "hist_edge_shrp_img") # Perform thresholding to generate a binary image device, tr_es_img = pcv.binary_threshold(edge_shrp_img, 145, 255, "dark", device, args.debug) # Prepare a few small kernels for morphological filtering kern = np.zeros((3, 3), dtype=np.uint8) kern1 = np.copy(kern) kern1[1, 1:3] = 1 kern2 = np.copy(kern) kern2[1, 0:2] = 1 kern3 = np.copy(kern) kern3[0:2, 1] = 1 kern4 = np.copy(kern) kern4[1:3, 1] = 1 # Prepare a larger kernel for dilation kern[1, 0:3] = 1 kern[0:3, 1] = 1 # Perform erosion with 4 small kernels device, e1_img = pcv.erode(tr_es_img, kern1, 1, device, args.debug) device, e2_img = pcv.erode(tr_es_img, kern2, 1, device, args.debug) device, e3_img = pcv.erode(tr_es_img, kern3, 1, device, args.debug) device, e4_img = pcv.erode(tr_es_img, kern4, 1, device, args.debug) # Combine eroded images device, c12_img = pcv.logical_or(e1_img, e2_img, device, args.debug) device, c123_img = pcv.logical_or(c12_img, e3_img, device, args.debug) device, c1234_img = pcv.logical_or(c123_img, e4_img, device, args.debug) # Perform dilation # device, dil_img = pcv.dilate(c1234_img, kern, 1, device, args.debug) device, comb_img = pcv.logical_or(c1234_img, bkg_sub_thres_img, device, args.debug) # Get masked image # The dilated image may contain some pixels which are not plant device, masked_erd = pcv.apply_mask(img, comb_img, "black", device, args.debug) # device, masked_erd_dil = pcv.apply_mask(img, dil_img, 'black', device, args.debug) # Need to remove the edges of the image, we did that by generating a set of rectangles to mask the edges # img is (254 X 320) # mask for the bottom of the image device, box1_img, rect_contour1, hierarchy1 = pcv.rectangle_mask(img, (120, 184), (215, 252), device, args.debug) # mask for the left side of the image device, box2_img, rect_contour2, hierarchy2 = pcv.rectangle_mask(img, (1, 1), (85, 252), device, args.debug) # mask for the right side of the image device, box3_img, rect_contour3, hierarchy3 = pcv.rectangle_mask(img, (240, 1), (318, 252), device, args.debug) # mask the edges device, box4_img, rect_contour4, hierarchy4 = pcv.border_mask(img, (1, 1), (318, 252), device, args.debug) # combine boxes to filter the edges and car out of the photo device, bx12_img = pcv.logical_or(box1_img, box2_img, device, args.debug) device, bx123_img = pcv.logical_or(bx12_img, box3_img, device, args.debug) device, bx1234_img = pcv.logical_or(bx123_img, box4_img, device, args.debug) device, inv_bx1234_img = pcv.invert(bx1234_img, device, args.debug) # Make a ROI around the plant, include connected objects # Apply the box mask to the image # device, masked_img = pcv.apply_mask(masked_erd_dil, inv_bx1234_img, 'black', device, args.debug) device, edge_masked_img = pcv.apply_mask(masked_erd, inv_bx1234_img, "black", device, args.debug) device, roi_img, roi_contour, roi_hierarchy = pcv.rectangle_mask(img, (120, 75), (200, 184), device, args.debug) plant_objects, plant_hierarchy = cv2.findContours(edge_masked_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) device, roi_objects, hierarchy5, kept_mask, obj_area = pcv.roi_objects( img, "partial", roi_contour, roi_hierarchy, plant_objects, plant_hierarchy, device, args.debug ) # Apply the box mask to the image # device, masked_img = pcv.apply_mask(masked_erd_dil, inv_bx1234_img, 'black', device, args.debug) device, masked_img = pcv.apply_mask(kept_mask, inv_bx1234_img, "black", device, 
args.debug) rgb = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) # Generate a binary to send to the analysis function device, mask = pcv.binary_threshold(masked_img, 1, 255, "light", device, args.debug) mask3d = np.copy(mask) plant_objects_2, plant_hierarchy_2 = cv2.findContours(mask3d, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) device, o, m = pcv.object_composition(rgb, roi_objects, hierarchy5, device, args.debug) ### Analysis ### device, hist_header, hist_data, h_norm = pcv.analyze_NIR_intensity( img, args.image, mask, 256, device, args.debug, args.outdir + "/" + img_name ) device, shape_header, shape_data, ori_img = pcv.analyze_object( rgb, args.image, o, m, device, args.debug, args.outdir + "/" + img_name ) pcv.print_results(args.image, hist_header, hist_data) pcv.print_results(args.image, shape_header, shape_data)
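The four small erosion kernels built above (kern1 through kern4) each pair the center pixel with one 4-connected neighbor, so OR-ing the four eroded images removes isolated single pixels while keeping thin structures that have at least one set neighbor. A sketch of the same construction:

import cv2
import numpy as np

def directional_erosions(binary):
    """Erode with four 2-pixel directional kernels and OR the results."""
    base = np.zeros((3, 3), np.uint8)
    kernels = []
    for rows, cols in [((1, 2), (1, 3)),   # center + right  (kern1)
                       ((1, 2), (0, 2)),   # center + left   (kern2)
                       ((0, 2), (1, 2)),   # center + up     (kern3)
                       ((1, 3), (1, 2))]:  # center + down   (kern4)
        k = base.copy()
        k[rows[0]:rows[1], cols[0]:cols[1]] = 1
        kernels.append(k)
    eroded = [cv2.erode(binary, k, iterations=1) for k in kernels]
    combined = eroded[0]
    for e in eroded[1:]:
        combined = cv2.bitwise_or(combined, e)
    return combined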
def main(): # Get options args = options() # Read image img, path, filename = pcv.readimage(args.image) brass_mask = cv2.imread(args.roi) # Pipeline step device = 0 # Convert RGB to HSV and extract the Saturation channel device, s = pcv.rgb2gray_hsv(img, "s", device, args.debug) # Threshold the Saturation image device, s_thresh = pcv.binary_threshold(s, 49, 255, "light", device, args.debug) # Median Filter device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug) device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug) # Fill small objects device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, args.debug) # Convert RGB to LAB and extract the Blue channel device, b = pcv.rgb2gray_lab(img, "b", device, args.debug) # Threshold the blue image device, b_thresh = pcv.binary_threshold(b, 138, 255, "light", device, args.debug) device, b_cnt = pcv.binary_threshold(b, 138, 255, "light", device, args.debug) # Fill small objects device, b_fill = pcv.fill(b_thresh, b_cnt, 150, device, args.debug) # Join the thresholded saturation and blue-yellow images device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug) # Apply Mask (for vis images, mask_color=white) device, masked = pcv.apply_mask(img, bs, "white", device, args.debug) # Mask pesky brass piece device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, "v", device, args.debug) device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, "light", device, args.debug) device, brass_inv = pcv.invert(brass_thresh, device, args.debug) device, brass_masked = pcv.apply_mask(masked, brass_inv, "white", device, args.debug) # Further mask soil and car device, masked_a = pcv.rgb2gray_lab(brass_masked, "a", device, args.debug) device, soil_car = pcv.binary_threshold(masked_a, 128, 255, "dark", device, args.debug) device, soil_masked = pcv.apply_mask(brass_masked, soil_car, "white", device, args.debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels device, soil_a = pcv.rgb2gray_lab(soil_masked, "a", device, args.debug) device, soil_b = pcv.rgb2gray_lab(soil_masked, "b", device, args.debug) # Threshold the green-magenta and blue images device, soila_thresh = pcv.binary_threshold(soil_a, 118, 255, "dark", device, args.debug) device, soilb_thresh = pcv.binary_threshold(soil_b, 150, 255, "light", device, args.debug) # Join the thresholded saturation and blue-yellow images (OR) device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug) device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug) # Fill small objects device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 75, device, args.debug) # Median Filter # device, soil_mblur = pcv.median_blur(soil_fill, 5, device, args.debug) # device, soil_cnt = pcv.median_blur(soil_fill, 5, device, args.debug) # Apply mask (for vis images, mask_color=white) device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, "white", device, args.debug) # Identify objects device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, args.debug) # Define ROI device, roi1, roi_hierarchy = pcv.define_roi( img, "circle", device, None, "default", args.debug, True, 0, 0, -200, -200 ) # Decide which objects to keep device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects( img, "partial", roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug ) # Object combine kept objects device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug) ############## VIS Analysis ################ 
outfile = False if args.writeimg == True: outfile = args.outdir + "/" + filename # Find shape properties, output shape image (optional) device, shape_header, shape_data, shape_img = pcv.analyze_object( img, args.image, obj, mask, device, args.debug, outfile ) # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional) device, color_header, color_data, color_img = pcv.analyze_color( img, args.image, mask, 256, device, args.debug, None, "v", "img", 300, outfile ) # Output shape and color data result = open(args.result, "a") result.write("\t".join(map(str, shape_header))) result.write("\n") result.write("\t".join(map(str, shape_data))) result.write("\n") for row in shape_img: result.write("\t".join(map(str, row))) result.write("\n") result.write("\t".join(map(str, color_header))) result.write("\n") result.write("\t".join(map(str, color_data))) result.write("\n") for row in color_img: result.write("\t".join(map(str, row))) result.write("\n") result.close() ############################# Use VIS image mask for NIR image######################### # Find matching NIR image device, nirpath = pcv.get_nir(path, filename, device, args.debug) nir, path1, filename1 = pcv.readimage(nirpath) nir2 = cv2.imread(nirpath, -1) # Flip mask device, f_mask = pcv.flip(mask, "horizontal", device, args.debug) # Reize mask device, nmask = pcv.resize(f_mask, 0.1304, 0.1304, device, args.debug) # position, and crop mask device, newmask = pcv.crop_position_mask(nir, nmask, device, 9, 12, "top", "left", args.debug) # Identify objects device, nir_objects, nir_hierarchy = pcv.find_objects(nir, newmask, device, args.debug) # Object combine kept objects device, nir_combined, nir_combinedmask = pcv.object_composition(nir, nir_objects, nir_hierarchy, device, args.debug) ####################################### Analysis ############################################# outfile1 = False if args.writeimg == True: outfile1 = args.outdir + "/" + filename1 device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity( nir2, filename1, nir_combinedmask, 256, device, False, args.debug, outfile1 ) device, nshape_header, nshape_data, nir_shape = pcv.analyze_object( nir2, filename1, nir_combined, nir_combinedmask, device, args.debug, outfile1 ) coresult = open(args.coresult, "a") coresult.write("\t".join(map(str, nhist_header))) coresult.write("\n") coresult.write("\t".join(map(str, nhist_data))) coresult.write("\n") for row in nir_imgs: coresult.write("\t".join(map(str, row))) coresult.write("\n") coresult.write("\t".join(map(str, nshape_header))) coresult.write("\n") coresult.write("\t".join(map(str, nshape_data))) coresult.write("\n") coresult.write("\t".join(map(str, nir_shape))) coresult.write("\n") coresult.close()
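The VIS-to-NIR hand-off above (flip the mask, rescale it by a camera-specific factor, then position it on the NIR frame) can be approximated with plain OpenCV and NumPy. A loose sketch of the pcv.flip / pcv.resize / pcv.crop_position_mask sequence; the scale factor and offsets are rig-specific calibration values, such as the 0.1304 and (9, 12) used above:

import cv2
import numpy as np

def transfer_mask(vis_mask, nir_shape, scale, x_offset, y_offset, flip_horizontal=True):
    """Flip, rescale, and place a VIS-derived mask onto the NIR image frame."""
    mask = cv2.flip(vis_mask, 1) if flip_horizontal else vis_mask
    small = cv2.resize(mask, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
    placed = np.zeros(nir_shape[:2], np.uint8)
    h = min(small.shape[0], nir_shape[0] - y_offset)
    w = min(small.shape[1], nir_shape[1] - x_offset)
    placed[y_offset:y_offset + h, x_offset:x_offset + w] = small[:h, :w]
    return placed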
def main(): # Get options args = options() # Read image img, path, filename = pcv.readimage(args.image) #roi = cv2.imread(args.roi) # Pipeline step device = 0 # Convert RGB to HSV and extract the Saturation channel device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug) # Threshold the Saturation image device, s_thresh = pcv.binary_threshold(s, 34, 255, 'light', device, args.debug) # Median Filter #device, s_mblur = pcv.median_blur(s_thresh, 0, device, args.debug) #device, s_cnt = pcv.median_blur(s_thresh, 0, device, args.debug) # Fill small objects #device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug) # Convert RGB to LAB and extract the Blue channel device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug) # Threshold the blue image device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug) device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug) # Fill small objects device, b_fill = pcv.fill(b_thresh, b_cnt, 150, device, args.debug) # Join the thresholded saturation and blue-yellow images device, bs = pcv.logical_and(s_thresh, b_fill, device, args.debug) # Apply Mask (for vis images, mask_color=white) device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug) device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, args.debug) # Threshold the green-magenta and blue images device, maskeda_thresh = pcv.binary_threshold(masked_a, 122, 255, 'dark', device, args.debug) device, maskedb_thresh = pcv.binary_threshold(masked_b, 133, 255, 'light', device, args.debug) # Join the thresholded saturation and blue-yellow images (OR) device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug) device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug) # Fill small objects device, ab_fill = pcv.fill(ab, ab_cnt, 200, device, args.debug) # Apply mask (for vis images, mask_color=white) device, masked2 = pcv.apply_mask(masked, ab_fill, 'white', device, args.debug) # Select area with black bars and find overlapping plant material device, roi1, roi_hierarchy1 = pcv.define_roi(masked2, 'rectangle', device, None, 'default', args.debug, True, 0, 0, -1700, 0) device, id_objects1, obj_hierarchy1 = pcv.find_objects( masked2, ab_fill, device, args.debug) device, roi_objects1, hierarchy1, kept_mask1, obj_area1 = pcv.roi_objects( masked2, 'cutto', roi1, roi_hierarchy1, id_objects1, obj_hierarchy1, device, args.debug) device, masked3 = pcv.apply_mask(masked2, kept_mask1, 'white', device, args.debug) device, masked_a1 = pcv.rgb2gray_lab(masked3, 'a', device, args.debug) device, masked_b1 = pcv.rgb2gray_lab(masked3, 'b', device, args.debug) device, maskeda_thresh1 = pcv.binary_threshold(masked_a1, 110, 255, 'dark', device, args.debug) device, maskedb_thresh1 = pcv.binary_threshold(masked_b1, 170, 255, 'light', device, args.debug) device, ab1 = pcv.logical_or(maskeda_thresh1, maskedb_thresh1, device, args.debug) device, ab_cnt1 = pcv.logical_or(maskeda_thresh1, maskedb_thresh1, device, args.debug) device, ab_fill1 = pcv.fill(ab1, ab_cnt1, 300, device, args.debug) device, roi2, roi_hierarchy2 = pcv.define_roi(masked2, 'rectangle', device, None, 'default', args.debug, True, 1700, 0, 0, 0) device, id_objects2, obj_hierarchy2 = pcv.find_objects( masked2, ab_fill, device, args.debug) device, roi_objects2, hierarchy2, kept_mask2, obj_area2 = pcv.roi_objects( masked2, 
'cutto', roi2, roi_hierarchy2, id_objects2, obj_hierarchy2, device, args.debug) device, masked4 = pcv.apply_mask(masked2, kept_mask2, 'white', device, args.debug) device, masked_a2 = pcv.rgb2gray_lab(masked4, 'a', device, args.debug) device, masked_b2 = pcv.rgb2gray_lab(masked4, 'b', device, args.debug) device, maskeda_thresh2 = pcv.binary_threshold(masked_a2, 110, 255, 'dark', device, args.debug) device, maskedb_thresh2 = pcv.binary_threshold(masked_b2, 170, 255, 'light', device, args.debug) device, ab2 = pcv.logical_or(maskeda_thresh2, maskedb_thresh2, device, args.debug) device, ab_cnt2 = pcv.logical_or(maskeda_thresh2, maskedb_thresh2, device, args.debug) device, ab_fill2 = pcv.fill(ab2, ab_cnt2, 200, device, args.debug) device, ab_cnt3 = pcv.logical_or(ab_fill1, ab_fill2, device, args.debug) device, masked3 = pcv.apply_mask(masked2, ab_cnt3, 'white', device, args.debug) # Identify objects device, id_objects3, obj_hierarchy3 = pcv.find_objects( masked2, ab_fill, device, args.debug) # Define ROI device, roi3, roi_hierarchy3 = pcv.define_roi(masked2, 'rectangle', device, None, 'default', args.debug, True, 650, 0, -450, -300) # Decide which objects to keep and combine with objects overlapping with black bars device, roi_objects3, hierarchy3, kept_mask3, obj_area1 = pcv.roi_objects( img, 'cutto', roi3, roi_hierarchy3, id_objects3, obj_hierarchy3, device, args.debug) device, kept_mask4_1 = pcv.logical_or(ab_cnt3, kept_mask3, device, args.debug) device, kept_cnt = pcv.logical_or(ab_cnt3, kept_mask3, device, args.debug) device, kept_mask4 = pcv.fill(kept_mask4_1, kept_cnt, 200, device, args.debug) device, masked5 = pcv.apply_mask(masked2, kept_mask4, 'white', device, args.debug) device, id_objects4, obj_hierarchy4 = pcv.find_objects( masked5, kept_mask4, device, args.debug) device, roi4, roi_hierarchy4 = pcv.define_roi(masked2, 'rectangle', device, None, 'default', args.debug, False, 0, 0, 0, 0) device, roi_objects4, hierarchy4, kept_mask4, obj_area = pcv.roi_objects( img, 'partial', roi4, roi_hierarchy4, id_objects4, obj_hierarchy4, device, args.debug) # Object combine kept objects device, obj, mask = pcv.object_composition(img, roi_objects4, hierarchy4, device, args.debug) ############## Analysis ################ # Find shape properties, output shape image (optional) device, shape_header, shape_data, shape_img = pcv.analyze_object( img, args.image, obj, mask, device, args.debug, args.outdir + '/' + filename) # Shape properties relative to user boundary line (optional) device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound( img, args.image, obj, mask, 375, device, args.debug, args.outdir + '/' + filename) # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional) device, color_header, color_data, norm_slice = pcv.analyze_color( img, args.image, kept_mask4, 256, device, args.debug, 'all', 'rgb', 'v', 'img', 300, args.outdir + '/' + filename) # Output shape and color data pcv.print_results(args.image, shape_header, shape_data) pcv.print_results(args.image, color_header, color_data) pcv.print_results(args.image, boundary_header, boundary_data)
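This workflow handles the regions behind the black bars separately from the center of the pot and then ORs the partial masks back together. A loose plain-OpenCV sketch of that split-region idea; the region tuples, threshold value, and threshold direction are placeholders rather than the exact values used above:

import cv2
import numpy as np

def threshold_in_regions(gray, regions, thresh, maxval=255):
    """Threshold a grayscale image separately inside rectangular regions and OR the results.

    regions is a list of (y0, y1, x0, x1) tuples in pixel coordinates.
    """
    combined = np.zeros(gray.shape, np.uint8)
    for y0, y1, x0, x1 in regions:
        roi = gray[y0:y1, x0:x1]
        _, m = cv2.threshold(roi, thresh, maxval, cv2.THRESH_BINARY)
        combined[y0:y1, x0:x1] = cv2.bitwise_or(combined[y0:y1, x0:x1], m)
    return combined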
def main(): # Get options args = options() # Read image img, path, filename = pcv.readimage(args.image) # roi = cv2.imread(args.roi) # Pipeline step device = 0 ## Convert RGB to HSV and extract the Saturation channel # device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug) # ## Threshold the Saturation image # device, s_thresh = pcv.binary_threshold(s, 90, 255, 'dark', device, args.debug) # ## Median Filter # device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug) # device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug) # ## Fill small objects ##device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug) # ## Convert RGB to LAB and extract the Blue channel # device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug) # ## Threshold the blue image # device, b_thresh = pcv.binary_threshold(b, 135, 255, 'light', device, args.debug) # device, b_cnt = pcv.binary_threshold(b, 135, 255, 'light', device, args.debug) # ##Fill small objects # device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug) # ## Join the thresholded saturation and blue-yellow images # device, bs = pcv.logical_or(s_mblur, b_cnt, device, args.debug) # ## Apply Mask (for vis images, mask_color=white) # device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels device, masked_a = pcv.rgb2gray_lab(img, "a", device, args.debug) device, masked_b = pcv.rgb2gray_lab(img, "b", device, args.debug) # Threshold the green-magenta and blue images device, maskeda_thresh = pcv.binary_threshold(masked_a, 135, 255, "dark", device, args.debug) device, maskedb_thresh = pcv.binary_threshold(masked_b, 140, 255, "light", device, args.debug) # # # Join the thresholded saturation and blue-yellow images (OR) device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug) device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug) # Fill small objects device, ab_fill = pcv.fill(ab, ab_cnt, 1000, device, args.debug) # Apply mask (for vis images, mask_color=white) device, masked2 = pcv.apply_mask(img, ab_fill, "white", device, args.debug) # Identify objects device, id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, args.debug) # Define ROI device, roi1, roi_hierarchy = pcv.define_roi( masked2, "rectangle", device, None, "default", args.debug, True, 550, 0, -500, -300 ) # Decide which objects to keep device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects( img, "partial", roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug ) # Object combine kept objects device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug) ############### Analysis ################ # Find shape properties, output shape image (optional) device, shape_header, shape_data, shape_img = pcv.analyze_object( img, args.image, obj, mask, device, args.debug, args.outdir + "/" + filename ) # Shape properties relative to user boundary line (optional) # device, boundary_header,boundary_data, boundary_img1= pcv.analyze_bound(img, args.image,obj, mask, 1680, device,args.debug,args.outdir+'/'+filename) # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional) device, color_header, color_data, norm_slice = pcv.analyze_color( img, args.image, kept_mask, 256, device, args.debug, "all", "rgb", "v", "img", 300, args.outdir + "/" + filename ) # Output shape and color data 
pcv.print_results(args.image, shape_header, shape_data) pcv.print_results(args.image, color_header, color_data)
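The pcv.fill steps used throughout these workflows drop connected regions smaller than a pixel-area cutoff; the same cleanup can be done in plain OpenCV with connected components. A sketch assuming a 0/255 uint8 mask:

import cv2
import numpy as np

def fill_small_objects(mask, min_size):
    """Remove connected components smaller than min_size pixels from a binary mask."""
    n, labels, stats, _ = cv2.connectedComponentsWithStats(mask, connectivity=8)
    cleaned = np.zeros_like(mask)
    for i in range(1, n):  # label 0 is the background
        if stats[i, cv2.CC_STAT_AREA] >= min_size:
            cleaned[labels == i] = 255
    return cleaned

# e.g. ab_clean = fill_small_objects(ab_mask, 1000)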
def process_tv_images_core(vis_id, vis_img, nir_id, nir_rgb, nir_cv2, brass_mask, traits, debug=None): device = 0 # Convert RGB to HSV and extract the Saturation channel device, s = pcv.rgb2gray_hsv(vis_img, 's', device, debug) # Threshold the Saturation image device, s_thresh = pcv.binary_threshold(s, 75, 255, 'light', device, debug) # Median Filter device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug) device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug) # Fill small objects device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, debug) # Convert RGB to LAB and extract the Blue channel device, b = pcv.rgb2gray_lab(vis_img, 'b', device, debug) # Threshold the blue image device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, debug) device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, debug) # Fill small objects device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, debug) # Join the thresholded saturation and blue-yellow images device, bs = pcv.logical_and(s_fill, b_fill, device, debug) # Apply Mask (for vis images, mask_color=white) device, masked = pcv.apply_mask(vis_img, bs, 'white', device, debug) # Mask pesky brass piece device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, debug) device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, debug) device, brass_inv = pcv.invert(brass_thresh, device, debug) device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device, debug) # Further mask soil and car device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, debug) device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, 'dark', device, debug) device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, 'light', device, debug) device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, debug) device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white', device, debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, debug) device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, debug) # Threshold the green-magenta and blue images device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, 'dark', device, debug) device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, 'light', device, debug) # Join the thresholded saturation and blue-yellow images (OR) device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, debug) device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, debug) # Fill small objects device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 300, device, debug) # Apply mask (for vis images, mask_color=white) device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device, debug) # Identify objects device, id_objects, obj_hierarchy = pcv.find_objects( masked2, soil_cnt, device, debug) # Define ROI device, roi1, roi_hierarchy = pcv.define_roi(vis_img, 'rectangle', device, None, 'default', debug, True, 600, 450, -600, -350) # Decide which objects to keep device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects( vis_img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, debug) # Object combine kept objects device, obj, mask = pcv.object_composition(vis_img, roi_objects, hierarchy3, device, debug) # Find shape properties, output shape image (optional) device, shape_header, shape_data, shape_img = pcv.analyze_object( vis_img, vis_id, obj, mask, device, debug) # Determine color properties device, color_header, 
color_data, color_img = pcv.analyze_color( vis_img, vis_id, mask, 256, device, debug, None, 'v', 'img', 300) # Output shape and color data vis_traits = {} for i in range(1, len(shape_header)): vis_traits[shape_header[i]] = shape_data[i] for i in range(2, len(color_header)): vis_traits[color_header[i]] = serialize_color_data(color_data[i]) ############################# Use VIS image mask for NIR image######################### # Flip mask device, f_mask = pcv.flip(mask, "horizontal", device, debug) # Reize mask device, nmask = pcv.resize(f_mask, 0.116148, 0.116148, device, debug) # position, and crop mask device, newmask = pcv.crop_position_mask(nir_rgb, nmask, device, 15, 5, "top", "right", debug) # Identify objects device, nir_objects, nir_hierarchy = pcv.find_objects( nir_rgb, newmask, device, debug) # Object combine kept objects device, nir_combined, nir_combinedmask = pcv.object_composition( nir_rgb, nir_objects, nir_hierarchy, device, debug) ####################################### Analysis ############################################# device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity( nir_cv2, nir_id, nir_combinedmask, 256, device, False, debug) device, nshape_header, nshape_data, nir_shape = pcv.analyze_object( nir_cv2, nir_id, nir_combined, nir_combinedmask, device, debug) nir_traits = {} for i in range(1, len(nshape_header)): nir_traits[nshape_header[i]] = nshape_data[i] for i in range(2, len(nhist_header)): nir_traits[nhist_header[i]] = serialize_color_data(nhist_data[i]) # Add data to traits table traits['tv_area'] = vis_traits['area'] return [vis_traits, nir_traits]
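The index loops in process_tv_images_core that build vis_traits and nir_traits pair a header row with a data row while skipping the leading label columns; the same mapping can be written with zip. A sketch (the function name is hypothetical; serialize_color_data is the helper already used above):

def rows_to_traits(header, data, skip=1, transform=None):
    """Zip a PlantCV header row and data row into a {trait: value} dict.

    skip drops the leading label column(s); transform, when given, is applied
    to each value (e.g. serialize_color_data for color histograms).
    """
    values = data[skip:] if transform is None else [transform(v) for v in data[skip:]]
    return dict(zip(header[skip:], values))

# e.g. vis_traits = rows_to_traits(shape_header, shape_data)
#      vis_traits.update(rows_to_traits(color_header, color_data, skip=2,
#                                       transform=serialize_color_data))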
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)
    brass_mask = cv2.imread(args.roi)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 49, 255, 'light', device, args.debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 150, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, args.debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, args.debug)
    device, brass_inv = pcv.invert(brass_thresh, device, args.debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device, args.debug)

    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, args.debug)
    device, soil_car = pcv.binary_threshold(masked_a, 128, 255, 'dark', device, args.debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white', device, args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, args.debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, args.debug)

    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 118, 255, 'dark', device, args.debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 155, 255, 'light', device, args.debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)

    # Fill small objects
    device, soil_fill = pcv.fill(soil_ab, soil_ab_cnt, 200, device, args.debug)

    # Median Filter
    device, soil_mblur = pcv.median_blur(soil_fill, 5, device, args.debug)
    device, soil_cnt = pcv.median_blur(soil_fill, 5, device, args.debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device, args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, args.debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(img, 'circle', device, None, 'default', args.debug,
                                                 True, 0, 0, -50, -50)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)

    # ############# Analysis ################
    # Output mask
    device, maskpath, mask_images = pcv.output_mask(device, img, mask, filename, args.outdir, True, args.debug)

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, obj, mask, device, args.debug)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images,
    # output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        img, args.image, mask, 256, device, args.debug, None, 'v', 'img', 300)

    # Write shape, mask, and color data to the results file
    result = open(args.result, "a")
    result.write('\t'.join(map(str, shape_header)))
    result.write("\n")
    result.write('\t'.join(map(str, shape_data)))
    result.write("\n")
    for row in mask_images:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.write('\t'.join(map(str, color_header)))
    result.write("\n")
    result.write('\t'.join(map(str, color_data)))
    result.write("\n")
    result.close()
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)

    # Pipeline step
    device = 0
    debug = args.debug
    # print('Original image')
    # pcv.plot_image(img)

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, debug)
    # print('Convert RGB to HSV and extract the Saturation channel')
    # plt.imshow(s)
    # plt.show()

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 100, 255, 'light', device, debug)
    # print('Threshold the Saturation image')
    # plt.imshow(s_thresh)
    # plt.show()

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)
    # print('Median Filter')
    # plt.imshow(s_mblur)
    # plt.show()

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, debug)
    # print('Convert RGB to LAB and extract the Blue channel')
    # plt.imshow(b)
    # plt.show()

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 160, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 160, 255, 'light', device, debug)
    # print('Threshold the blue image')
    # plt.imshow(b_cnt)
    # plt.show()

    # Fill small objects
    # device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_or(s_mblur, b_cnt, device, debug)
    # print('Join the thresholded saturation and blue-yellow images')
    # plt.imshow(bs)
    # plt.show()

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, debug)
    # print('Apply Mask 1 (for vis images, mask_color=white)')
    # plt.imshow(masked)
    # plt.show()

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 115, 255, 'dark', device, debug)
    device, maskeda_thresh1 = pcv.binary_threshold(masked_a, 135, 255, 'light', device, debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, ab1 = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)
    device, ab = pcv.logical_or(maskeda_thresh1, ab1, device, debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh1, ab1, device, debug)

    # Fill small objects
    device, ab_fill = pcv.fill(ab, ab_cnt, 200, device, debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(masked, ab_fill, 'white', device, debug)
    # print('Apply Mask 2 (for vis images, mask_color=white)')
    # plt.imshow(masked2)
    # plt.show()

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, debug)

    # Define ROI
    # device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device, None, 'default', debug,
    #                                              True, 67, 377, -125, -368)
    device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device, None, 'default', debug,
                                                 True, 1, 1, -1, -1)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, debug)

    # ############## Analysis ################
    outfile = False
    if args.writeimg:
        outfile = args.outdir + "/" + filename

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, obj, mask, device, debug, args.outdir + '/' + filename)

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(
        img, args.image, obj, mask, 1680, device, debug, args.outdir + '/' + filename)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images,
    # output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        img, args.image, kept_mask, 256, device, debug, 'all', 'v', 'img', 300,
        args.outdir + '/' + filename)

    # Write shape and color data to results file
    result = open(args.result, "a")
    result.write('\t'.join(map(str, shape_header)))
    result.write("\n")
    result.write('\t'.join(map(str, shape_data)))
    result.write("\n")
    for row in shape_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.write('\t'.join(map(str, color_header)))
    result.write("\n")
    result.write('\t'.join(map(str, color_data)))
    result.write("\n")
    for row in color_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.close()
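# Every pipeline in this collection starts by calling options(), which is not shown here.
# The sketch below is an assumption based on the argparse-style attributes the scripts use
# (args.image, args.roi, args.outdir, args.result, args.coresult, args.writeimg, args.debug);
# the real parser may use different flag names, help text, or defaults.
import argparse

def options():
    parser = argparse.ArgumentParser(description="PlantCV VIS/NIR image processing pipeline.")
    parser.add_argument("-i", "--image", help="Input image file.", required=True)
    parser.add_argument("-r", "--result", help="Results text file to append to.", required=True)
    parser.add_argument("-o", "--outdir", help="Output directory for analysis images.", default=".")
    parser.add_argument("--roi", help="Optional background/ROI mask image (e.g. brass mask).", default=None)
    parser.add_argument("--coresult", help="Co-results file for the matching NIR image.", default=None)
    parser.add_argument("-w", "--writeimg", help="Write analysis images to outdir.", action="store_true")
    parser.add_argument("-D", "--debug", help="Debug mode: None, 'print', or 'plot'.", default=None)
    return parser.parse_args()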
def main():
    # Initialize device
    device = 0

    # Parse command-line options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(filename=args.image, debug=args.debug)

    # Convert RGB to LAB and extract the Blue-Yellow channel
    device, blue_channel = pcv.rgb2gray_lab(img=img, channel="b", device=device, debug=args.debug)

    # Threshold the blue image using the triangle autothreshold method
    device, blue_tri = pcv.triangle_auto_threshold(device=device, img=blue_channel, maxvalue=255,
                                                   object_type="light", xstep=1, debug=args.debug)

    # Extract core plant region from the image to preserve delicate plant features during filtering
    device += 1
    plant_region = blue_tri[0:1750, 600:2080]
    if args.debug is not None:
        pcv.print_image(filename=str(device) + "_extract_plant_region.png", img=plant_region)

    # Use a Gaussian blur to disrupt the strong edge features in the cabinet
    device, blur_gaussian = pcv.gaussian_blur(device=device, img=blue_tri, ksize=(3, 3), sigmax=0,
                                              sigmay=None, debug=args.debug)

    # Threshold the blurred image to remove features that were blurred
    device, blur_thresholded = pcv.binary_threshold(img=blur_gaussian, threshold=250, maxValue=255,
                                                    object_type="light", device=device, debug=args.debug)

    # Add the plant region back in to the filtered image
    device += 1
    blur_thresholded[0:1750, 600:2080] = plant_region
    if args.debug is not None:
        pcv.print_image(filename=str(device) + "_replace_plant_region.png", img=blur_thresholded)

    # Fill small noise
    device, blue_fill_50 = pcv.fill(img=np.copy(blur_thresholded), mask=np.copy(blur_thresholded),
                                    size=50, device=device, debug=args.debug)

    # Apply a small median blur to break up pot edges
    device, med_blur = pcv.median_blur(img=np.copy(blue_fill_50), ksize=3, device=device, debug=args.debug)

    # Define an ROI for the barcode label
    device, label_roi, label_hierarchy = pcv.define_roi(img=img, shape="rectangle", device=device, roi=None,
                                                        roi_input="default", debug=args.debug, adjust=True,
                                                        x_adj=1100, y_adj=1350, w_adj=-1070, h_adj=-590)

    # Identify all remaining contours in the binary image
    device, contours, hierarchy = pcv.find_objects(img=img, mask=np.copy(med_blur), device=device,
                                                   debug=args.debug)

    # Remove contours completely contained within the label region of interest
    device, remove_label_mask = remove_countors_roi(mask=med_blur, contours=contours, hierarchy=hierarchy,
                                                    roi=label_roi, device=device, debug=args.debug)

    # Identify objects
    device, contours, contour_hierarchy = pcv.find_objects(img=img, mask=remove_label_mask, device=device,
                                                           debug=args.debug)

    # Define ROI
    device, roi, roi_hierarchy = pcv.define_roi(img=img, shape="rectangle", device=device, roi=None,
                                                roi_input="default", debug=args.debug, adjust=True,
                                                x_adj=565, y_adj=0, w_adj=-490, h_adj=-600)

    # Decide which objects to keep
    device, roi_contours, roi_contour_hierarchy, _, _ = pcv.roi_objects(
        img=img, roi_type="partial", roi_contour=roi, roi_hierarchy=roi_hierarchy,
        object_contour=contours, obj_hierarchy=contour_hierarchy, device=device, debug=args.debug)

    # If there are no contours left we cannot measure anything
    if len(roi_contours) > 0:
        # Object combine kept objects
        device, plant_contour, plant_mask = pcv.object_composition(
            img=img, contours=roi_contours, hierarchy=roi_contour_hierarchy, device=device, debug=args.debug)

        outfile = False
        if args.writeimg:
            outfile = args.outdir + "/" + filename

        # Find shape properties, output shape image (optional)
        device, shape_header, shape_data, shape_img = pcv.analyze_object(
            img=img, imgname=args.image, obj=plant_contour, mask=plant_mask, device=device,
            debug=args.debug, filename=outfile)

        # Shape properties relative to user boundary line (optional)
        device, boundary_header, boundary_data, boundary_img = pcv.analyze_bound(
            img=img, imgname=args.image, obj=plant_contour, mask=plant_mask, line_position=690,
            device=device, debug=args.debug, filename=outfile)

        # Determine color properties: Histograms, Color Slices and Pseudocolored Images,
        # output color analyzed images (optional)
        device, color_header, color_data, color_img = pcv.analyze_color(
            img=img, imgname=args.image, mask=plant_mask, bins=256, device=device, debug=args.debug,
            hist_plot_type=None, pseudo_channel="v", pseudo_bkg="img", resolution=300, filename=outfile)

        # Output shape and color data
        result = open(args.result, "a")
        result.write('\t'.join(map(str, shape_header)) + "\n")
        result.write('\t'.join(map(str, shape_data)) + "\n")
        for row in shape_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.write('\t'.join(map(str, color_header)) + "\n")
        result.write('\t'.join(map(str, color_data)) + "\n")
        result.write('\t'.join(map(str, boundary_header)) + "\n")
        result.write('\t'.join(map(str, boundary_data)) + "\n")
        result.write('\t'.join(map(str, boundary_img)) + "\n")
        for row in color_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.close()
def main():
    # Initialize device
    device = 0

    # Parse command-line options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(filename=args.image, debug=args.debug)

    # Convert RGB to LAB and extract the Green-Magenta channel
    device, green_channel = pcv.rgb2gray_lab(img=img, channel="a", device=device, debug=args.debug)

    # Invert the Green-Magenta image because the plant is dark green
    device, green_inv = pcv.invert(img=green_channel, device=device, debug=args.debug)

    # Threshold the inverted Green-Magenta image to mostly isolate green pixels
    device, green_thresh = pcv.binary_threshold(img=green_inv, threshold=134, maxValue=255,
                                                object_type="light", device=device, debug=args.debug)

    # Extract core plant region from the image to preserve delicate plant features during filtering
    device += 1
    plant_region = green_thresh[100:2000, 250:2250]
    if args.debug is not None:
        pcv.print_image(filename=str(device) + "_extract_plant_region.png", img=plant_region)

    # Use a Gaussian blur to disrupt the strong edge features in the cabinet
    device, blur_gaussian = pcv.gaussian_blur(device=device, img=green_thresh, ksize=(7, 7), sigmax=0,
                                              sigmay=None, debug=args.debug)

    # Threshold the blurred image to remove features that were blurred
    device, blur_thresholded = pcv.binary_threshold(img=blur_gaussian, threshold=250, maxValue=255,
                                                    object_type="light", device=device, debug=args.debug)

    # Add the plant region back in to the filtered image
    device += 1
    blur_thresholded[100:2000, 250:2250] = plant_region
    if args.debug is not None:
        pcv.print_image(filename=str(device) + "_replace_plant_region.png", img=blur_thresholded)

    # Use a median blur to break up the horizontal and vertical lines caused by shadows from the track edges
    device, med_blur = pcv.median_blur(img=blur_thresholded, ksize=5, device=device, debug=args.debug)

    # Fill in small contours
    device, green_fill_50 = pcv.fill(img=np.copy(med_blur), mask=np.copy(med_blur), size=50,
                                     device=device, debug=args.debug)

    # Define an ROI for the brass stopper
    device, stopper_roi, stopper_hierarchy = pcv.define_roi(
        img=img, shape="rectangle", device=device, roi=None, roi_input="default", debug=args.debug,
        adjust=True, x_adj=1420, y_adj=890, w_adj=-920, h_adj=-1040)

    # Identify all remaining contours in the binary image
    device, contours, hierarchy = pcv.find_objects(img=img, mask=np.copy(green_fill_50), device=device,
                                                   debug=args.debug)

    # Remove contours completely contained within the stopper region of interest
    device, remove_stopper_mask = remove_countors_roi(mask=green_fill_50, contours=contours,
                                                      hierarchy=hierarchy, roi=stopper_roi,
                                                      device=device, debug=args.debug)

    # Define an ROI for a screw hole
    device, screw_roi, screw_hierarchy = pcv.define_roi(img=img, shape="rectangle", device=device, roi=None,
                                                        roi_input="default", debug=args.debug, adjust=True,
                                                        x_adj=1870, y_adj=1010, w_adj=-485, h_adj=-960)

    # Remove contours completely contained within the screw region of interest
    device, remove_screw_mask = remove_countors_roi(mask=remove_stopper_mask, contours=contours,
                                                    hierarchy=hierarchy, roi=screw_roi,
                                                    device=device, debug=args.debug)

    # Identify objects
    device, contours, contour_hierarchy = pcv.find_objects(img=img, mask=remove_screw_mask, device=device,
                                                           debug=args.debug)

    # Define ROI
    device, roi, roi_hierarchy = pcv.define_roi(img=img, shape="rectangle", device=device, roi=None,
                                                roi_input="default", debug=args.debug, adjust=True,
                                                x_adj=565, y_adj=200, w_adj=-490, h_adj=-250)

    # Decide which objects to keep
    device, roi_contours, roi_contour_hierarchy, _, _ = pcv.roi_objects(
        img=img, roi_type="partial", roi_contour=roi, roi_hierarchy=roi_hierarchy,
        object_contour=contours, obj_hierarchy=contour_hierarchy, device=device, debug=args.debug)

    # If there are no contours left we cannot measure anything
    if len(roi_contours) > 0:
        # Object combine kept objects
        device, plant_contour, plant_mask = pcv.object_composition(
            img=img, contours=roi_contours, hierarchy=roi_contour_hierarchy, device=device, debug=args.debug)

        outfile = False
        if args.writeimg:
            outfile = args.outdir + "/" + filename

        # Find shape properties, output shape image (optional)
        device, shape_header, shape_data, shape_img = pcv.analyze_object(
            img=img, imgname=args.image, obj=plant_contour, mask=plant_mask, device=device,
            debug=args.debug, filename=outfile)

        # Determine color properties: Histograms, Color Slices and Pseudocolored Images,
        # output color analyzed images (optional)
        device, color_header, color_data, color_img = pcv.analyze_color(
            img=img, imgname=args.image, mask=plant_mask, bins=256, device=device, debug=args.debug,
            hist_plot_type=None, pseudo_channel="v", pseudo_bkg="img", resolution=300, filename=outfile)

        # Output shape and color data
        result = open(args.result, "a")
        result.write('\t'.join(map(str, shape_header)) + "\n")
        result.write('\t'.join(map(str, shape_data)) + "\n")
        for row in shape_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.write('\t'.join(map(str, color_header)) + "\n")
        result.write('\t'.join(map(str, color_data)) + "\n")
        for row in color_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.close()

        # Find matching NIR image
        device, nirpath = pcv.get_nir(path=path, filename=filename, device=device, debug=args.debug)
        nir_rgb, nir_path, nir_filename = pcv.readimage(nirpath)
        nir_img = cv2.imread(nirpath, 0)

        # Make mask glovelike in proportions via dilation
        device, d_mask = pcv.dilate(plant_mask, kernel=1, i=0, device=device, debug=args.debug)

        # Resize mask
        prop2, prop1 = conv_ratio()
        device, nmask = pcv.resize(img=d_mask, resize_x=prop1, resize_y=prop2, device=device, debug=args.debug)

        # Convert the resized mask to a binary mask
        device, bmask = pcv.binary_threshold(img=nmask, threshold=0, maxValue=255, object_type="light",
                                             device=device, debug=args.debug)
        device, crop_img = crop_sides_equally(mask=bmask, nir=nir_img, device=device, debug=args.debug)

        # Position and crop mask
        device, newmask = pcv.crop_position_mask(img=nir_img, mask=crop_img, device=device, x=0, y=1,
                                                 v_pos="bottom", h_pos="right", debug=args.debug)

        # Identify objects
        device, nir_objects, nir_hierarchy = pcv.find_objects(img=nir_rgb, mask=newmask, device=device,
                                                              debug=args.debug)

        # Object combine kept objects
        device, nir_combined, nir_combinedmask = pcv.object_composition(
            img=nir_rgb, contours=nir_objects, hierarchy=nir_hierarchy, device=device, debug=args.debug)

        if args.writeimg:
            outfile = args.outdir + "/" + nir_filename

        # Analyze NIR signal data
        device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(
            img=nir_img, rgbimg=nir_rgb, mask=nir_combinedmask, bins=256, device=device, histplot=False,
            debug=args.debug, filename=outfile)

        # Analyze the shape of the plant contour from the NIR image
        device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(
            img=nir_img, imgname=nir_filename, obj=nir_combined, mask=nir_combinedmask, device=device,
            debug=args.debug, filename=outfile)

        # Write NIR data to co-results file
        coresult = open(args.coresult, "a")
        coresult.write('\t'.join(map(str, nhist_header)) + "\n")
        coresult.write('\t'.join(map(str, nhist_data)) + "\n")
        for row in nir_imgs:
            coresult.write('\t'.join(map(str, row)) + "\n")
        coresult.write('\t'.join(map(str, nshape_header)) + "\n")
        coresult.write('\t'.join(map(str, nshape_data)) + "\n")
        coresult.write('\t'.join(map(str, nir_shape)) + "\n")
        coresult.close()
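# The two pipelines above call remove_countors_roi(), which is not defined in this section.
# The sketch below is an assumption about its behaviour: blank out every contour that lies
# completely inside the supplied ROI contour (e.g. the barcode label, brass stopper, or screw
# hole regions) and return the updated device counter plus the cleaned mask.
import cv2
import numpy as np

def remove_countors_roi(mask, contours, hierarchy, roi, device, debug=None):
    device += 1
    cleaned = np.copy(mask)
    roi_contour = roi[0]  # pcv.define_roi returns a list holding a single ROI contour
    for contour in contours:
        # A contour is "inside" the ROI if every one of its points tests inside or on the ROI polygon
        inside = all(cv2.pointPolygonTest(roi_contour, (float(pt[0][0]), float(pt[0][1])), False) >= 0
                     for pt in contour)
        if inside:
            # Erase the contour from the mask by filling it with black
            cv2.drawContours(cleaned, [contour], -1, 0, -1)
    if debug is not None:
        # Debug output written with OpenCV to avoid assuming a particular pcv.print_image signature
        cv2.imwrite(str(device) + "_remove_contours_roi.png", cleaned)
    return device, cleaned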
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)
    brass_mask = cv2.imread(args.roi)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, "s", device, args.debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 49, 255, "light", device, args.debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, "b", device, args.debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, "light", device, args.debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, "light", device, args.debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 150, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, "white", device, args.debug)

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, "v", device, args.debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, "light", device, args.debug)
    device, brass_inv = pcv.invert(brass_thresh, device, args.debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, "white", device, args.debug)

    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, "a", device, args.debug)
    device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, "dark", device, args.debug)
    device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, "light", device, args.debug)
    device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, args.debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, "white", device, args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, "a", device, args.debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, "b", device, args.debug)

    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, "dark", device, args.debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, "light", device, args.debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)

    # Fill small objects
    device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 250, device, args.debug)

    # Median Filter
    # device, soil_mblur = pcv.median_blur(soil_fill, 5, device, args.debug)
    # device, soil_cnt = pcv.median_blur(soil_fill, 5, device, args.debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, "white", device, args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, args.debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(
        img, "rectangle", device, None, "default", args.debug, True, 600, 450, -600, -350)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, "partial", roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)

    # ############# VIS Analysis ################
    outfile = False
    if args.writeimg:
        outfile = args.outdir + "/" + filename

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, obj, mask, device, args.debug, outfile)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images,
    # output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        img, args.image, mask, 256, device, args.debug, None, "v", "img", 300, outfile)

    # Output shape and color data
    result = open(args.result, "a")
    result.write("\t".join(map(str, shape_header)))
    result.write("\n")
    result.write("\t".join(map(str, shape_data)))
    result.write("\n")
    for row in shape_img:
        result.write("\t".join(map(str, row)))
        result.write("\n")
    result.write("\t".join(map(str, color_header)))
    result.write("\n")
    result.write("\t".join(map(str, color_data)))
    result.write("\n")
    for row in color_img:
        result.write("\t".join(map(str, row)))
        result.write("\n")
    result.close()
def main():
    # Get options
    args = options()

    # Read image
    img = cv2.imread(args.image)
    roi = cv2.imread(args.roi)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, args.debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 150, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, args.debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 122, 255, 'dark', device, args.debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 133, 255, 'light', device, args.debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)

    # Fill small objects
    device, ab_fill = pcv.fill(ab, ab_cnt, 200, device, args.debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(masked, ab_fill, 'white', device, args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, args.debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(img, 'rectangle', device, None, 'default', args.debug,
                                                 False, 0, 0, 0, 0)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)

    # ############# Analysis ################
    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(img, args.image, obj, mask, device,
                                                                     args.debug, True)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images,
    # output color analyzed images (optional)
    device, color_header, color_data, norm_slice = pcv.analyze_color(img, args.image, kept_mask, 256, device,
                                                                     args.debug, 'all', 'rgb', 'v')

    # Output shape and color data
    pcv.print_results(args.image, shape_header, shape_data)
    pcv.print_results(args.image, color_header, color_data)
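# Like the other pipelines in this collection, this script is intended to be run from the
# command line on a single image. A standard entry point, assuming the options() parser
# sketched earlier in this document, would be:
if __name__ == "__main__":
    main()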
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)

    # Pipeline step
    device = 0
    debug = args.debug

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 85, 255, 'light', device, debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 160, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 160, 255, 'light', device, debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_or(s_mblur, b_cnt, device, debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 115, 255, 'dark', device, debug)
    device, maskeda_thresh1 = pcv.binary_threshold(masked_a, 135, 255, 'light', device, debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, ab1 = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)
    device, ab = pcv.logical_or(maskeda_thresh1, ab1, device, debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh1, ab1, device, debug)

    # Fill small objects
    device, ab_fill = pcv.fill(ab, ab_cnt, 200, device, debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(masked, ab_fill, 'white', device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device, None, 'default', debug,
                                                 True, 550, 0, -500, -1900)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, debug)