def main():
    """Fv/Fm fluorescence pipeline.

    Builds a plant mask from the Fmax image (8-bit, mask-making only),
    removes track autofluorescence, measures object shape, then computes
    Fv/Fm from the 16-bit fdark/fmin/fmax images and prints the results.
    """
    # Get options
    args = options()
    # Read image (converting fmax and track to 8 bit just to create a mask,
    # use 16-bit for all the math)
    mask, path, filename = pcv.readimage(args.fmax)
    # mask = cv2.imread(args.fmax)
    track = cv2.imread(args.track)
    # Only the first channel of the split is used below (as a grayscale mask base)
    mask1, mask2, mask3 = cv2.split(mask)
    # Pipeline step counter, threaded through (and incremented by) every pcv call
    device = 0
    # Mask pesky track autofluor
    device, track1 = pcv.rgb2gray_hsv(track, 'v', device, args.debug)
    device, track_thresh = pcv.binary_threshold(track1, 0, 255, 'light', device, args.debug)
    device, track_inv = pcv.invert(track_thresh, device, args.debug)
    device, track_masked = pcv.apply_mask(mask1, track_inv, 'black', device, args.debug)
    # Threshold the Saturation image
    device, fmax_thresh = pcv.binary_threshold(track_masked, 20, 255, 'light', device, args.debug)
    # Median Filter
    # NOTE(review): kernel size 0 looks unusual — confirm pcv.median_blur accepts 0
    device, s_mblur = pcv.median_blur(fmax_thresh, 0, device, args.debug)
    device, s_cnt = pcv.median_blur(fmax_thresh, 0, device, args.debug)
    # Fill small objects (called twice with identical inputs; second result feeds find_objects)
    device, s_fill = pcv.fill(s_mblur, s_cnt, 5, device, args.debug)
    device, sfill_cnt = pcv.fill(s_mblur, s_cnt, 5, device, args.debug)
    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(mask, sfill_cnt, device, args.debug)
    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(mask, 'circle', device, None, 'default', args.debug, True, 0, 0, -100, -100)
    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(mask, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug)
    # Object combine kept objects
    device, obj, masked = pcv.object_composition(mask, roi_objects, hierarchy3, device, args.debug)

    ################ Analysis ################
    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(mask, args.fmax, obj, masked, device, args.debug, args.outdir + '/' + filename)
    # Fluorescence Measurement (read in 16-bit images)
    fdark = cv2.imread(args.fdark, -1)
    fmin = cv2.imread(args.fmin, -1)
    fmax = cv2.imread(args.fmax, -1)
    device, fvfm_header, fvfm_data = pcv.fluor_fvfm(fdark, fmin, fmax, kept_mask, device, args.outdir + '/' + filename, 1000, args.debug)
    # Output shape and fluorescence data
    pcv.print_results(args.fmax, shape_header, shape_data)
    pcv.print_results(args.fmax, fvfm_header, fvfm_data)
def test_plantcv_invert():
    """Test pcv.invert: output must match the input's dimensions and be binary.

    BUG FIX: the original used ``all([i == j] for i, j in zip(...))``.  Each
    element produced by that generator is the one-element *list* ``[i == j]``,
    which is truthy regardless of the comparison, so ``all(...)`` was always
    True and the test could never fail.  Dropping the brackets makes ``all``
    see the boolean comparisons themselves.
    """
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    device, inverted_img = pcv.invert(img=img, device=0, debug=None)
    # Assert that the output image has the dimensions of the input image
    if all(i == j for i, j in zip(np.shape(inverted_img), TEST_BINARY_DIM)):
        # Assert that the image is binary
        if all(i == j for i, j in zip(np.unique(inverted_img), [0, 255])):
            assert 1
        else:
            assert 0
    else:
        assert 0
def _landmarks_as_tuples(arr):
    """Flatten an (n, 1, 2) or (n, 2) landmark array into a list of (x, y) tuples.

    Replaces seven copy-pasted reshape/tolist/tuple stanzas in main().
    ``reshape(-1, 2)`` is equivalent to the original ``reshape(len(a), 2)`` /
    ``reshape(20, 2)`` calls for two-column landmark data.
    """
    return [tuple(p) for p in arr.reshape(-1, 2).tolist()]


def main():
    """VIS top-view landmark pipeline.

    Segments the plant (a fixed brass-piece mask file is removed first),
    estimates acute tips and x/y pseudolandmarks, then appends shape,
    boundary, color, and landmark rows to ``args.result``.
    """
    # Get options
    args = options()
    # NOTE(review): machine-specific hard-coded mask path — consider making it an option
    path_mask = '/home/mfeldman/tester/mask/mask_brass_tv_z1_L0.png'
    # Read image
    img, path, filename = pcv.readimage(args.image)
    brass_mask = cv2.imread(path_mask)
    # Pipeline step counter threaded through every pcv call
    device = 0
    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, args.debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, args.debug)
    device, brass_inv = pcv.invert(brass_thresh, device, args.debug)
    device, masked_image = pcv.apply_mask(img, brass_inv, 'white', device, args.debug)
    # We can do a pretty good job of identifying the plant from the s channel
    device, s = pcv.rgb2gray_hsv(masked_image, 's', device, args.debug)
    s_thresh = cv2.inRange(s, 100, 190)
    # Lets blur the result a bit to get rid of unwanted noise
    s_blur = cv2.medianBlur(s_thresh, 5)
    # The a channel is good too
    device, a = pcv.rgb2gray_lab(masked_image, 'a', device, args.debug)
    a_thresh = cv2.inRange(a, 90, 120)
    a_blur = cv2.medianBlur(a_thresh, 5)
    # Now lets set of a series of filters to remove unwanted background
    plant_shape = cv2.bitwise_and(a_blur, s_blur)
    # Lets remove all the crap on the sides of the image
    plant_shape[:, :330] = 0
    plant_shape[:, 2100:] = 0
    plant_shape[:200, :] = 0
    # Erode with a 3x3 kernel to remove small points, then dilate to fill small holes
    # (original built the identical kernel twice; once is enough)
    kernel = np.ones((3, 3), np.uint8)
    erosion = cv2.erode(plant_shape, kernel, iterations=1)
    dilation = cv2.dilate(erosion, kernel, iterations=1)
    # Apply mask to the background image
    device, masked = pcv.apply_mask(masked_image, plant_shape, 'white', device, args.debug)
    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked, dilation, device, args.debug)
    # Get ROI contours
    device, roi, roi_hierarchy = pcv.define_roi(masked_image, 'circle', device, None, 'default', args.debug, True,
                                                x_adj=0, y_adj=0, w_adj=0, h_adj=-1200)
    # ROI: keep objects overlapping the ROI
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        masked_image, 'partial', roi, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug)
    # Get object contour and masked object
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)

    ############## Landmarks ################
    # Use acute fxn to estimate tips
    device, points = pcv.acute_vertex(obj, 40, 40, 40, img, device, args.debug)
    boundary_line = 'NA'
    device, points_r, centroid_r, bline_r = pcv.scale_features(obj, mask, points, boundary_line, device, args.debug)
    # Get number of points
    tips = len(points_r)
    # Use turgor_proxy fxn to get distances
    device, vert_ave_c, hori_ave_c, euc_ave_c, ang_ave_c, vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b = \
        pcv.turgor_proxy(points_r, centroid_r, bline_r, device, args.debug)
    # Get pseudomarkers along the y-axis and re-scale them
    device, left, right, center_h = pcv.y_axis_pseudolandmarks(obj, mask, img, device, args.debug)
    device, left_r, left_cr, left_br = pcv.scale_features(obj, mask, left, boundary_line, device, args.debug)
    device, right_r, right_cr, right_br = pcv.scale_features(obj, mask, right, boundary_line, device, args.debug)
    device, center_hr, center_hcr, center_hbr = pcv.scale_features(obj, mask, center_h, boundary_line, device, args.debug)
    # Get pseudomarkers along the x-axis and re-scale them
    device, top, bottom, center_v = pcv.x_axis_pseudolandmarks(obj, mask, img, device, args.debug)
    device, top_r, top_cr, top_br = pcv.scale_features(obj, mask, top, boundary_line, device, args.debug)
    device, bottom_r, bottom_cr, bottom_br = pcv.scale_features(obj, mask, bottom, boundary_line, device, args.debug)
    device, center_vr, center_vcr, center_vbr = pcv.scale_features(obj, mask, center_v, boundary_line, device, args.debug)
    # Convert the raw landmark arrays into lists of tuples to match the scaled points
    points = _landmarks_as_tuples(points)
    left = _landmarks_as_tuples(left)
    right = _landmarks_as_tuples(right)
    center_h = _landmarks_as_tuples(center_h)
    top = _landmarks_as_tuples(top)
    bottom = _landmarks_as_tuples(bottom)
    center_v = _landmarks_as_tuples(center_v)
    # Store Landmark Data
    landmark_header = (
        'HEADER_LANDMARK', 'tip_points', 'tip_points_r', 'centroid_r', 'baseline_r', 'tip_number',
        'vert_ave_c', 'hori_ave_c', 'euc_ave_c', 'ang_ave_c',
        'vert_ave_b', 'hori_ave_b', 'euc_ave_b', 'ang_ave_b',
        'left_lmk', 'right_lmk', 'center_h_lmk', 'left_lmk_r', 'right_lmk_r', 'center_h_lmk_r',
        'top_lmk', 'bottom_lmk', 'center_v_lmk', 'top_lmk_r', 'bottom_lmk_r', 'center_v_lmk_r'
    )
    landmark_data = (
        'LANDMARK_DATA', points, points_r, centroid_r, bline_r, tips,
        vert_ave_c, hori_ave_c, euc_ave_c, ang_ave_c,
        vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b,
        left, right, center_h, left_r, right_r, center_hr,
        top, bottom, center_v, top_r, bottom_r, center_vr
    )

    ############## VIS Analysis ################
    outfile = False
    # if args.writeimg == True:
    #     outfile = args.outdir + "/" + filename
    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(img, args.image, obj, mask, device, args.debug, outfile)
    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(img, args.image, obj, mask, 330, device, args.debug, outfile)
    # Determine color properties: Histograms, Color Slices and Pseudocolored
    # Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(img, args.image, mask, 256, device, args.debug, None, 'v', 'img', 300, outfile)

    # Output shape and color data; `with` guarantees the handle closes even on error
    with open(args.result, "a") as result:
        def writeln(values):
            # One tab-delimited row (identical bytes to the original write pattern).
            result.write('\t'.join(map(str, values)))
            result.write("\n")
        writeln(shape_header)
        writeln(shape_data)
        for row in shape_img:
            writeln(row)
        writeln(color_header)
        writeln(color_data)
        writeln(boundary_header)
        writeln(boundary_data)
        writeln(boundary_img1)
        for row in color_img:
            writeln(row)
        writeln(landmark_header)
        writeln(landmark_data)
def main():
    """VIS + NIR side-view pipeline.

    Segments the plant in the VIS image and appends shape/color rows to
    ``args.result``; then flips, resizes, and repositions the VIS mask onto
    the matching NIR image and appends NIR intensity/shape rows to
    ``args.coresult``.

    Fix: both result files are now opened with ``with`` so the handles are
    closed even if a write raises (the original relied on bare ``close()``).
    """
    # Get options
    args = options()
    # Read image
    img, path, filename = pcv.readimage(args.image)
    brass_mask = cv2.imread(args.roi)
    # Pipeline step counter threaded through every pcv call
    device = 0

    def writeln(fh, values):
        # One tab-delimited row (identical bytes to the original write pattern).
        fh.write("\t".join(map(str, values)))
        fh.write("\n")

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, "s", device, args.debug)
    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 49, 255, "light", device, args.debug)
    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)
    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, args.debug)
    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, "b", device, args.debug)
    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, "light", device, args.debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, "light", device, args.debug)
    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 150, device, args.debug)
    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug)
    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, "white", device, args.debug)
    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, "v", device, args.debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, "light", device, args.debug)
    device, brass_inv = pcv.invert(brass_thresh, device, args.debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, "white", device, args.debug)
    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, "a", device, args.debug)
    device, soil_car = pcv.binary_threshold(masked_a, 128, 255, "dark", device, args.debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, "white", device, args.debug)
    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, "a", device, args.debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, "b", device, args.debug)
    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 118, 255, "dark", device, args.debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 150, 255, "light", device, args.debug)
    # Join the thresholded saturation and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)
    # Fill small objects
    device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 75, device, args.debug)
    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, "white", device, args.debug)
    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, args.debug)
    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(
        img, "circle", device, None, "default", args.debug, True, 0, 0, -200, -200
    )
    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, "partial", roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug
    )
    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)

    ############## VIS Analysis ################
    outfile = False
    if args.writeimg == True:
        outfile = args.outdir + "/" + filename
    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, obj, mask, device, args.debug, outfile
    )
    # Determine color properties: Histograms, Color Slices and Pseudocolored
    # Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        img, args.image, mask, 256, device, args.debug, None, "v", "img", 300, outfile
    )
    # Output shape and color data
    with open(args.result, "a") as result:
        writeln(result, shape_header)
        writeln(result, shape_data)
        for row in shape_img:
            writeln(result, row)
        writeln(result, color_header)
        writeln(result, color_data)
        for row in color_img:
            writeln(result, row)

    ################### Use VIS image mask for NIR image ###################
    # Find matching NIR image
    device, nirpath = pcv.get_nir(path, filename, device, args.debug)
    nir, path1, filename1 = pcv.readimage(nirpath)
    nir2 = cv2.imread(nirpath, -1)
    # Flip mask
    device, f_mask = pcv.flip(mask, "horizontal", device, args.debug)
    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.1304, 0.1304, device, args.debug)
    # Position, and crop mask
    device, newmask = pcv.crop_position_mask(nir, nmask, device, 9, 12, "top", "left", args.debug)
    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir, newmask, device, args.debug)
    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir, nir_objects, nir_hierarchy, device, args.debug)

    ############################ Analysis ############################
    outfile1 = False
    if args.writeimg == True:
        outfile1 = args.outdir + "/" + filename1
    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(
        nir2, filename1, nir_combinedmask, 256, device, False, args.debug, outfile1
    )
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(
        nir2, filename1, nir_combined, nir_combinedmask, device, args.debug, outfile1
    )
    with open(args.coresult, "a") as coresult:
        writeln(coresult, nhist_header)
        writeln(coresult, nhist_data)
        for row in nir_imgs:
            writeln(coresult, row)
        writeln(coresult, nshape_header)
        writeln(coresult, nshape_data)
        writeln(coresult, nir_shape)
def main():
    """NIR background-subtraction pipeline (z3500 zoom).

    Subtracts an averaged background image, enhances edges (Laplacian
    sharpening + Sobel), thresholds and cleans the mask with directional
    erosions, masks off the image borders, then reports NIR intensity and
    shape results via pcv.print_results.
    """
    # Get options
    args = options()
    if args.debug:
        print("Analyzing your image dude...")
    # Read image
    device = 0
    img = cv2.imread(args.image, flags=0)
    path, img_name = os.path.split(args.image)
    # Read in image which is average of average of backgrounds
    img_bkgrd = cv2.imread("bkgrd_ave_z3500.png", flags=0)
    # NIR images for burnin2 are up-side down. This may be fixed in later experiments
    img = ndimage.rotate(img, 180)
    img_bkgrd = ndimage.rotate(img_bkgrd, 180)
    # Subtract the image from the image background to make the plant more prominent
    device, bkg_sub_img = pcv.image_subtract(img, img_bkgrd, device, args.debug)
    if args.debug:
        pcv.plot_hist(bkg_sub_img, 'bkg_sub_img')
    # NOTE(review): this threshold result is immediately overwritten by the
    # cv2.inRange call below — the binary_threshold only advances `device`.
    # Confirm whether this dead store is intentional before removing it.
    device, bkg_sub_thres_img = pcv.binary_threshold(bkg_sub_img, 145, 255, 'dark', device, args.debug)
    bkg_sub_thres_img = cv2.inRange(bkg_sub_img, 30, 220)
    if args.debug:
        cv2.imwrite('bkgrd_sub_thres.png', bkg_sub_thres_img)
    # device, bkg_sub_thres_img = pcv.binary_threshold_2_sided(img_bkgrd, 50, 190, device, args.debug)
    # if a region of interest is specified read it in
    # NOTE(review): `roi` is read but never used below
    roi = cv2.imread(args.roi)
    # Start by examining the distribution of pixel intensity values
    if args.debug:
        pcv.plot_hist(img, 'hist_img')
    # Will intensity transformation enhance your ability to isolate object of interest by thesholding?
    # NOTE(review): `he_img` is computed but never used below
    device, he_img = pcv.HistEqualization(img, device, args.debug)
    if args.debug:
        pcv.plot_hist(he_img, 'hist_img_he')
    # Laplace filtering (identify edges based on 2nd derivative)
    device, lp_img = pcv.laplace_filter(img, 1, 1, device, args.debug)
    if args.debug:
        pcv.plot_hist(lp_img, 'hist_lp')
    # Lapacian image sharpening, this step will enhance the darkness of the edges detected
    device, lp_shrp_img = pcv.image_subtract(img, lp_img, device, args.debug)
    if args.debug:
        pcv.plot_hist(lp_shrp_img, 'hist_lp_shrp')
    # Sobel filtering
    # 1st derivative sobel filtering along horizontal axis, kernel = 1, unscaled
    device, sbx_img = pcv.sobel_filter(img, 1, 0, 1, 1, device, args.debug)
    if args.debug:
        pcv.plot_hist(sbx_img, 'hist_sbx')
    # 1st derivative sobel filtering along vertical axis, kernel = 1, unscaled
    device, sby_img = pcv.sobel_filter(img, 0, 1, 1, 1, device, args.debug)
    if args.debug:
        pcv.plot_hist(sby_img, 'hist_sby')
    # Combine the effects of both x and y filters through matrix addition
    # This will capture edges identified within each plane and emphesize edges found in both images
    device, sb_img = pcv.image_add(sbx_img, sby_img, device, args.debug)
    if args.debug:
        pcv.plot_hist(sb_img, 'hist_sb_comb_img')
    # Use a lowpass (blurring) filter to smooth sobel image
    device, mblur_img = pcv.median_blur(sb_img, 1, device, args.debug)
    device, mblur_invert_img = pcv.invert(mblur_img, device, args.debug)
    # combine the smoothed sobel image with the laplacian sharpened image
    # combines the best features of both methods as described in
    # "Digital Image Processing" by Gonzalez and Woods pg. 169
    device, edge_shrp_img = pcv.image_add(mblur_invert_img, lp_shrp_img, device, args.debug)
    if args.debug:
        pcv.plot_hist(edge_shrp_img, 'hist_edge_shrp_img')
    # Perform thresholding to generate a binary image
    device, tr_es_img = pcv.binary_threshold(edge_shrp_img, 145, 255, 'dark', device, args.debug)
    # Prepare a few small kernels for morphological filtering
    kern = np.zeros((3,3), dtype=np.uint8)
    kern1 = np.copy(kern)
    kern1[1,1:3]=1   # middle row, right two cells
    kern2 = np.copy(kern)
    kern2[1,0:2]=1   # middle row, left two cells
    kern3 = np.copy(kern)
    kern3[0:2,1]=1   # middle column, top two cells
    kern4 = np.copy(kern)
    kern4[1:3,1]=1   # middle column, bottom two cells
    # Prepare a larger kernel for dilation (plus-shaped: full middle row and column)
    kern[1,0:3]=1
    kern[0:3,1]=1
    # Perform erosion with 4 small kernels
    device, e1_img = pcv.erode(tr_es_img, kern1, 1, device, args.debug)
    device, e2_img = pcv.erode(tr_es_img, kern2, 1, device, args.debug)
    device, e3_img = pcv.erode(tr_es_img, kern3, 1, device, args.debug)
    device, e4_img = pcv.erode(tr_es_img, kern4, 1, device, args.debug)
    # Combine eroded images
    device, c12_img = pcv.logical_or(e1_img, e2_img, device, args.debug)
    device, c123_img = pcv.logical_or(c12_img, e3_img, device, args.debug)
    device, c1234_img = pcv.logical_or(c123_img, e4_img, device, args.debug)
    # Perform dilation
    # device, dil_img = pcv.dilate(c1234_img, kern, 1, device, args.debug)
    device, comb_img = pcv.logical_or(c1234_img, bkg_sub_thres_img, device, args.debug)
    # Get masked image
    # The dilated image may contain some pixels which are not plant
    device, masked_erd = pcv.apply_mask(img, comb_img, 'black', device, args.debug)
    # device, masked_erd_dil = pcv.apply_mask(img, dil_img, 'black', device, args.debug)
    # Need to remove the edges of the image, we did that by generating a set of rectangles to mask the edges
    # img is (254 X 320)
    # mask for the bottom of the image
    device, box1_img, rect_contour1, hierarchy1 = pcv.rectangle_mask(img, (100,210), (230,252), device, args.debug)
    # mask for the left side of the image
    device, box2_img, rect_contour2, hierarchy2 = pcv.rectangle_mask(img, (1,1), (85,252), device, args.debug)
    # mask for the right side of the image
    device, box3_img, rect_contour3, hierarchy3 = pcv.rectangle_mask(img, (240,1), (318,252), device, args.debug)
    # mask the edges
    device, box4_img, rect_contour4, hierarchy4 = pcv.border_mask(img, (1,1), (318,252), device, args.debug)
    # combine boxes to filter the edges and car out of the photo
    device, bx12_img = pcv.logical_or(box1_img, box2_img, device, args.debug)
    device, bx123_img = pcv.logical_or(bx12_img, box3_img, device, args.debug)
    device, bx1234_img = pcv.logical_or(bx123_img, box4_img, device, args.debug)
    device, inv_bx1234_img = pcv.invert(bx1234_img, device, args.debug)
    # Make a ROI around the plant, include connected objects
    # Apply the box mask to the image
    # device, masked_img = pcv.apply_mask(masked_erd_dil, inv_bx1234_img, 'black', device, args.debug)
    device, edge_masked_img = pcv.apply_mask(masked_erd, inv_bx1234_img, 'black', device, args.debug)
    device, roi_img, roi_contour, roi_hierarchy = pcv.rectangle_mask(img, (100,75), (220,208), device, args.debug)
    # NOTE(review): two-value findContours unpacking — ties this code to
    # OpenCV versions whose findContours returns 2 values; confirm cv2 version.
    plant_objects, plant_hierarchy = cv2.findContours(edge_masked_img,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    device, roi_objects, hierarchy5, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi_contour, roi_hierarchy, plant_objects, plant_hierarchy, device, args.debug)
    # Apply the box mask to the image
    # device, masked_img = pcv.apply_mask(masked_erd_dil, inv_bx1234_img, 'black', device, args.debug)
    device, masked_img = pcv.apply_mask(kept_mask, inv_bx1234_img, 'black', device, args.debug)
    rgb = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
    # Generate a binary to send to the analysis function
    device, mask = pcv.binary_threshold(masked_img, 1, 255, 'light', device, args.debug)
    mask3d = np.copy(mask)
    # NOTE(review): these contours are computed but never used below
    plant_objects_2, plant_hierarchy_2 = cv2.findContours(mask3d,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    device, o, m = pcv.object_composition(rgb, roi_objects, hierarchy5, device, args.debug)

    ### Analysis ###
    device, hist_header, hist_data, h_norm = pcv.analyze_NIR_intensity(img, args.image, mask, 256, device, args.debug, args.outdir + '/' + img_name)
    device, shape_header, shape_data, ori_img = pcv.analyze_object(rgb, args.image, o, m, device, args.debug, args.outdir + '/' + img_name)
    pcv.print_results(args.image, hist_header, hist_data)
    pcv.print_results(args.image, shape_header, shape_data)
def main():
    """NIR edge-based segmentation pipeline (writes 'example'-prefixed output).

    Enhances edges (Laplacian sharpening + Sobel), thresholds, cleans the
    mask with directional erosions plus dilation, masks off the image
    borders, and runs pcv.analyze_NIR_intensity on the result.
    """
    # Get options
    args = options()
    if args.debug:
        print("Analyzing your image dude...")
    # Read image
    img = cv2.imread(args.image, flags=0)
    # if a region of interest is specified read it in
    # NOTE(review): `roi` is read but never used below
    roi = cv2.imread(args.roi)
    # Pipeline step counter threaded through every pcv call
    device = 0
    # Start by examining the distribution of pixel intensity values
    if args.debug:
        pcv.plot_hist(img, 'hist_img')
    # Will intensity transformation enhance your ability to isolate object of interest by thesholding?
    # NOTE(review): `he_img` is computed but never used below
    device, he_img = pcv.HistEqualization(img, device, args.debug)
    if args.debug:
        pcv.plot_hist(he_img, 'hist_img_he')
    # Laplace filtering (identify edges based on 2nd derivative)
    device, lp_img = pcv.laplace_filter(img, 1, 1, device, args.debug)
    if args.debug:
        pcv.plot_hist(lp_img, 'hist_lp')
    # Lapacian image sharpening, this step will enhance the darkness of the edges detected
    device, lp_shrp_img = pcv.image_subtract(img, lp_img, device, args.debug)
    if args.debug:
        pcv.plot_hist(lp_shrp_img, 'hist_lp_shrp')
    # Sobel filtering
    # 1st derivative sobel filtering along horizontal axis, kernel = 1, unscaled
    device, sbx_img = pcv.sobel_filter(img, 1, 0, 1, 1, device, args.debug)
    if args.debug:
        pcv.plot_hist(sbx_img, 'hist_sbx')
    # 1st derivative sobel filtering along vertical axis, kernel = 1, unscaled
    device, sby_img = pcv.sobel_filter(img, 0, 1, 1, 1, device, args.debug)
    if args.debug:
        pcv.plot_hist(sby_img, 'hist_sby')
    # Combine the effects of both x and y filters through matrix addition
    # This will capture edges identified within each plane and emphesize edges found in both images
    device, sb_img = pcv.image_add(sbx_img, sby_img, device, args.debug)
    if args.debug:
        pcv.plot_hist(sb_img, 'hist_sb_comb_img')
    # Use a lowpass (blurring) filter to smooth sobel image
    device, mblur_img = pcv.median_blur(sb_img, 1, device, args.debug)
    device, mblur_invert_img = pcv.invert(mblur_img, device, args.debug)
    # combine the smoothed sobel image with the laplacian sharpened image
    # combines the best features of both methods as described in
    # "Digital Image Processing" by Gonzalez and Woods pg. 169
    device, edge_shrp_img = pcv.image_add(mblur_invert_img, lp_shrp_img, device, args.debug)
    if args.debug:
        pcv.plot_hist(edge_shrp_img, 'hist_edge_shrp_img')
    # Perform thresholding to generate a binary image
    device, tr_es_img = pcv.binary_threshold(edge_shrp_img, 145, 255, 'dark', device, args.debug)
    # Prepare a few small kernels for morphological filtering
    kern = np.zeros((3,3), dtype=np.uint8)
    kern1 = np.copy(kern)
    kern1[1,1:3]=1   # middle row, right two cells
    kern2 = np.copy(kern)
    kern2[1,0:2]=1   # middle row, left two cells
    kern3 = np.copy(kern)
    kern3[0:2,1]=1   # middle column, top two cells
    kern4 = np.copy(kern)
    kern4[1:3,1]=1   # middle column, bottom two cells
    # Prepare a larger kernel for dilation (plus-shaped: full middle row and column)
    kern[1,0:3]=1
    kern[0:3,1]=1
    # Perform erosion with 4 small kernels
    device, e1_img = pcv.erode(tr_es_img, kern1, 1, device, args.debug)
    device, e2_img = pcv.erode(tr_es_img, kern2, 1, device, args.debug)
    device, e3_img = pcv.erode(tr_es_img, kern3, 1, device, args.debug)
    device, e4_img = pcv.erode(tr_es_img, kern4, 1, device, args.debug)
    # Combine eroded images
    device, c12_img = pcv.logical_or(e1_img, e2_img, device, args.debug)
    device, c123_img = pcv.logical_or(c12_img, e3_img, device, args.debug)
    device, c1234_img = pcv.logical_or(c123_img, e4_img, device, args.debug)
    # Perform dilation
    device, dil_img = pcv.dilate(c1234_img, kern, 1, device, args.debug)
    # Get masked image
    # The dilated image may contain some pixels which are not plant
    # NOTE(review): `masked_erd` is computed but never used below
    device, masked_erd = pcv.apply_mask(img, c1234_img, 'black', device, args.debug)
    device, masked_erd_dil = pcv.apply_mask(img, dil_img, 'black', device, args.debug)
    # Need to remove the edges of the image, we did that by generating a set of rectangles to mask the edges
    # img is (254 X 320)
    device, box1_img, rect_contour1, hierarchy1 = pcv.rectangle_mask(img, (1,1), (64,252), device, args.debug)
    device, box2_img, rect_contour2, hierarchy2 = pcv.rectangle_mask(img, (256,1), (318,252), device, args.debug)
    device, box3_img, rect_contour3, hierarchy3 = pcv.rectangle_mask(img, (1,184), (318,252), device, args.debug)
    device, box4_img, rect_contour4, hierarchy4 = pcv.border_mask(img, (1,1), (318,252), device, args.debug)
    # combine boxes to filter the edges and car out of the photo
    device, bx12_img = pcv.logical_or(box1_img, box2_img, device, args.debug)
    device, bx123_img = pcv.logical_or(bx12_img, box3_img, device, args.debug)
    device, bx1234_img = pcv.logical_or(bx123_img, box4_img, device, args.debug)
    device, inv_bx1234_img = pcv.invert(bx1234_img, device, args.debug)
    # Apply the box mask to the image
    device, masked_img = pcv.apply_mask(masked_erd_dil, inv_bx1234_img, 'black', device, args.debug)
    # Generate a binary to send to the analysis function
    device, mask = pcv.binary_threshold(masked_img, 1, 255, 'light', device, args.debug)
    pcv.analyze_NIR_intensity(img, args.image, mask, 256, device, args.debug, 'example')
def main():
    """VIS side-view pipeline with brass/soil/car masking.

    Segments the plant in the VIS image and appends shape and color rows to
    ``args.result``.

    Fix: the original opened ``args.result`` and never closed it (no
    ``close()`` before the function returned) — the handle is now managed
    with a ``with`` block so it is always closed.
    """
    # Get options
    args = options()
    # Read image
    img, path, filename = pcv.readimage(args.image)
    brass_mask = cv2.imread(args.roi)
    # Pipeline step counter threaded through every pcv call
    device = 0
    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, "s", device, args.debug)
    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 49, 255, "light", device, args.debug)
    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)
    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, args.debug)
    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, "b", device, args.debug)
    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, "light", device, args.debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, "light", device, args.debug)
    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 150, device, args.debug)
    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug)
    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, "white", device, args.debug)
    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, "v", device, args.debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, "light", device, args.debug)
    device, brass_inv = pcv.invert(brass_thresh, device, args.debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, "white", device, args.debug)
    # Further mask soil and car (both sides of the a-channel midpoint)
    device, masked_a = pcv.rgb2gray_lab(brass_masked, "a", device, args.debug)
    device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, "dark", device, args.debug)
    device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, "light", device, args.debug)
    device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, args.debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, "white", device, args.debug)
    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, "a", device, args.debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, "b", device, args.debug)
    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, "dark", device, args.debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, "light", device, args.debug)
    # Join the thresholded saturation and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)
    # Fill small objects
    device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 250, device, args.debug)
    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, "white", device, args.debug)
    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, args.debug)
    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(
        img, "rectangle", device, None, "default", args.debug, True, 600, 450, -600, -350
    )
    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, "partial", roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug
    )
    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)

    ############## VIS Analysis ################
    outfile = False
    if args.writeimg == True:
        outfile = args.outdir + "/" + filename
    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, obj, mask, device, args.debug, outfile
    )
    # Determine color properties: Histograms, Color Slices and Pseudocolored
    # Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        img, args.image, mask, 256, device, args.debug, None, "v", "img", 300, outfile
    )
    # Output shape and color data
    with open(args.result, "a") as result:
        def writeln(values):
            # One tab-delimited row (identical bytes to the original write pattern).
            result.write("\t".join(map(str, values)))
            result.write("\n")
        writeln(shape_header)
        writeln(shape_data)
        for row in shape_img:
            writeln(row)
        writeln(color_header)
        writeln(color_data)
        for row in color_img:
            writeln(row)
def process_tv_images_core(vis_id, vis_img, nir_id, nir_rgb, nir_cv2, brass_mask, traits, debug=None):
    """Process a top-view VIS image and its matching NIR image.

    Segments the plant in the VIS image, measures shape and color traits,
    then reuses the VIS plant mask (flipped, resized, repositioned) to
    segment and analyze the NIR image.

    Inputs:
    vis_id     = identifier passed through to the VIS analysis functions
    vis_img    = VIS (RGB) image array
    nir_id     = identifier passed through to the NIR analysis functions
    nir_rgb    = NIR image converted to 3-channel (used for masking/contours)
    nir_cv2    = NIR image as read by OpenCV (used for intensity analysis)
    brass_mask = image of the brass fixture used to mask it out of the VIS image
    traits     = traits table (dict); 'tv_area' is written into it
    debug      = None, 'print', or 'plot' debug mode for PlantCV steps

    :return: [vis_traits, nir_traits] — two dicts of measured traits
    """
    # Pipeline step counter threaded through every PlantCV call
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(vis_img, 's', device, debug)
    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 75, 255, 'light', device, debug)
    # Median Filter (same call twice: one result is used as the fill mask below)
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)
    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(vis_img, 'b', device, debug)
    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, debug)
    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, debug)
    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(vis_img, bs, 'white', device, debug)

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, debug)
    device, brass_inv = pcv.invert(brass_thresh, device, debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device, debug)

    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, debug)
    device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, 'dark', device, debug)
    device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, 'light', device, debug)
    device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, debug)
    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, 'dark', device, debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, 'light', device, debug)
    # Join the thresholded saturation and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)
    # Fill small objects
    device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 300, device, debug)
    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, debug)
    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(vis_img, 'rectangle', device, None, 'default', debug, True, 600, 450, -600, -350)
    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(vis_img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, debug)
    # Object combine kept objects
    device, obj, mask = pcv.object_composition(vis_img, roi_objects, hierarchy3, device, debug)

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(vis_img, vis_id, obj, mask, device, debug)
    # Determine color properties
    device, color_header, color_data, color_img = pcv.analyze_color(vis_img, vis_id, mask, 256, device, debug, None, 'v', 'img', 300)

    # Output shape and color data
    # NOTE: starts at index 1 (shape) / 2 (color), skipping header label columns
    vis_traits = {}
    for i in range(1, len(shape_header)):
        vis_traits[shape_header[i]] = shape_data[i]
    for i in range(2, len(color_header)):
        vis_traits[color_header[i]] = serialize_color_data(color_data[i])

    # ######################## Use VIS image mask for NIR image ########################
    # Flip mask
    device, f_mask = pcv.flip(mask, "horizontal", device, debug)
    # Resize mask (scale factor matches the VIS-to-NIR resolution ratio — TODO confirm)
    device, nmask = pcv.resize(f_mask, 0.116148, 0.116148, device, debug)
    # Position and crop mask
    device, newmask = pcv.crop_position_mask(nir_rgb, nmask, device, 15, 5, "top", "right", debug)
    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir_rgb, newmask, device, debug)
    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir_rgb, nir_objects, nir_hierarchy, device, debug)

    # ###################################### Analysis ######################################
    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir_cv2, nir_id, nir_combinedmask, 256, device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir_cv2, nir_id, nir_combined, nir_combinedmask, device, debug)

    nir_traits = {}
    for i in range(1, len(nshape_header)):
        nir_traits[nshape_header[i]] = nshape_data[i]
    for i in range(2, len(nhist_header)):
        nir_traits[nhist_header[i]] = serialize_color_data(nhist_data[i])

    # Add data to traits table
    traits['tv_area'] = vis_traits['area']

    return [vis_traits, nir_traits]
def process_tv_images(session, url, vis_id, nir_id, traits, debug=False):
    """Process top-view images fetched from Clowder.

    Downloads the VIS image, a fixed brass-fixture mask, and the NIR image
    from Clowder, segments the plant in the VIS image, measures shape and
    color traits, reuses the VIS mask to analyze the NIR image, uploads both
    trait sets back as metadata, and records 'tv_area' in the traits table.

    Inputs:
    session = requests session object
    url     = Clowder URL
    vis_id  = The Clowder ID of an RGB image
    nir_id  = The Clowder ID of an NIR grayscale image
    traits  = traits table (dictionary)
    debug   = None, print, or plot. Print = save to file, Plot = print to screen.

    :param session: requests session object
    :param url: str
    :param vis_id: str
    :param nir_id: str
    :param traits: dict
    :param debug: str
    :return traits: dict
    """
    # Read VIS image from Clowder
    vis_r = session.get(posixpath.join(url, "api/files", vis_id), stream=True)
    img_array = np.asarray(bytearray(vis_r.content), dtype="uint8")
    img = cv2.imdecode(img_array, -1)
    # Read the VIS top-view image mask for zoom = 1 from Clowder
    # NOTE: hard-coded Clowder file ID for the brass-fixture mask
    mask_r = session.get(posixpath.join(url, "api/files/57451b28e4b0efbe2dc3d4d5"), stream=True)
    mask_array = np.asarray(bytearray(mask_r.content), dtype="uint8")
    brass_mask = cv2.imdecode(mask_array, -1)

    # Pipeline step counter threaded through every PlantCV call
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, debug)
    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 75, 255, 'light', device, debug)
    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)
    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, debug)
    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, debug)
    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, debug)
    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, debug)

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, debug)
    device, brass_inv = pcv.invert(brass_thresh, device, debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device, debug)

    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, debug)
    device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, 'dark', device, debug)
    device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, 'light', device, debug)
    device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, debug)
    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, 'dark', device, debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, 'light', device, debug)
    # Join the thresholded saturation and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)
    # Fill small objects
    device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 300, device, debug)
    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, debug)
    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(img, 'rectangle', device, None, 'default', debug, True, 600, 450, -600, -350)
    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, debug)
    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, debug)

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(img, vis_id, obj, mask, device, debug)
    # Determine color properties
    device, color_header, color_data, color_img = pcv.analyze_color(img, vis_id, mask, 256, device, debug, None, 'v', 'img', 300)

    # Output shape and color data
    # NOTE: starts at index 1 (shape) / 2 (color), skipping header label columns
    vis_traits = {}
    for i in range(1, len(shape_header)):
        vis_traits[shape_header[i]] = shape_data[i]
    for i in range(2, len(color_header)):
        vis_traits[color_header[i]] = serialize_color_data(color_data[i])
    # Attach VIS traits to the Clowder file as metadata
    add_plantcv_metadata(session, url, vis_id, vis_traits)

    # ######################## Use VIS image mask for NIR image ########################
    # Read NIR image from Clowder
    nir_r = session.get(posixpath.join(url, "api/files", nir_id), stream=True)
    nir_array = np.asarray(bytearray(nir_r.content), dtype="uint8")
    nir = cv2.imdecode(nir_array, -1)
    nir_rgb = cv2.cvtColor(nir, cv2.COLOR_GRAY2BGR)

    # Flip mask
    device, f_mask = pcv.flip(mask, "horizontal", device, debug)
    # Resize mask (scale factor matches the VIS-to-NIR resolution ratio — TODO confirm)
    device, nmask = pcv.resize(f_mask, 0.116148, 0.116148, device, debug)
    # Position and crop mask
    device, newmask = pcv.crop_position_mask(nir_rgb, nmask, device, 15, 5, "top", "right", debug)
    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir_rgb, newmask, device, debug)
    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir_rgb, nir_objects, nir_hierarchy, device, debug)

    # ###################################### Analysis ######################################
    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir, nir_id, nir_combinedmask, 256, device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir, nir_id, nir_combined, nir_combinedmask, device, debug)

    nir_traits = {}
    for i in range(1, len(nshape_header)):
        nir_traits[nshape_header[i]] = nshape_data[i]
    for i in range(2, len(nhist_header)):
        nir_traits[nhist_header[i]] = serialize_color_data(nhist_data[i])
    # Attach NIR traits to the Clowder file as metadata
    add_plantcv_metadata(session, url, nir_id, nir_traits)

    # Add data to traits table
    traits['tv_area'] = vis_traits['area']
    return traits
def process_tv_images(vis_img, nir_img, debug=False):
    """Process top-view images read from local files.

    Segments the plant in the VIS image, measures shape and color traits
    (printed to stdout), then reuses the VIS mask to segment and analyze
    the NIR image.

    Inputs:
    vis_img = An RGB image.
    nir_img = An NIR grayscale image.
    debug   = None, print, or plot. Print = save to file, Plot = print to screen.

    :param vis_img: str
    :param nir_img: str
    :param debug: str
    :return:
    """
    # Read image
    img, path, filename = pcv.readimage(vis_img)
    # NOTE: brass-fixture mask path is hard-coded relative to the working directory
    brass_mask = cv2.imread('mask_brass_tv_z1_L1.png')

    # Pipeline step counter threaded through every PlantCV call
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, debug)
    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 75, 255, 'light', device, debug)
    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)
    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, debug)
    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, debug)
    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, debug)
    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, debug)

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, debug)
    device, brass_inv = pcv.invert(brass_thresh, device, debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device, debug)

    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, debug)
    device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, 'dark', device, debug)
    device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, 'light', device, debug)
    device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, debug)
    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, 'dark', device, debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, 'light', device, debug)
    # Join the thresholded saturation and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)
    # Fill small objects
    device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 300, device, debug)
    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, debug)
    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(img, 'rectangle', device, None, 'default', debug, True, 600, 450, -600, -350)
    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, debug)
    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, debug)

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(img, vis_img, obj, mask, device, debug)
    # Determine color properties
    device, color_header, color_data, color_img = pcv.analyze_color(img, vis_img, mask, 256, device, debug, None, 'v', 'img', 300)

    # Print shape and color results to stdout as tab-separated rows
    print('\t'.join(map(str, shape_header)) + '\n')
    print('\t'.join(map(str, shape_data)) + '\n')
    for row in shape_img:
        print('\t'.join(map(str, row)) + '\n')
    print('\t'.join(map(str, color_header)) + '\n')
    print('\t'.join(map(str, color_data)) + '\n')
    for row in color_img:
        print('\t'.join(map(str, row)) + '\n')

    # ######################## Use VIS image mask for NIR image ########################
    # Read NIR image (both PlantCV and raw OpenCV forms)
    nir, path1, filename1 = pcv.readimage(nir_img)
    nir2 = cv2.imread(nir_img, -1)

    # Flip mask
    device, f_mask = pcv.flip(mask, "horizontal", device, debug)
    # Resize mask (scale factor matches the VIS-to-NIR resolution ratio — TODO confirm)
    device, nmask = pcv.resize(f_mask, 0.116148, 0.116148, device, debug)
    # Position and crop mask
    device, newmask = pcv.crop_position_mask(nir, nmask, device, 15, 5, "top", "right", debug)
    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir, newmask, device, debug)
    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir, nir_objects, nir_hierarchy, device, debug)

    # ###################################### Analysis ######################################
    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir2, filename1, nir_combinedmask, 256, device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir2, filename1, nir_combined, nir_combinedmask, device, debug)

    # Print NIR intensity and shape results to stdout
    print('\t'.join(map(str, nhist_header)) + '\n')
    print('\t'.join(map(str, nhist_data)) + '\n')
    for row in nir_imgs:
        print('\t'.join(map(str, row)) + '\n')
    print('\t'.join(map(str, nshape_header)) + '\n')
    print('\t'.join(map(str, nshape_data)) + '\n')
    print('\t'.join(map(str, nir_shape)) + '\n')
def process_tv_images_core(vis_id, vis_img, nir_id, nir_rgb, nir_cv2, brass_mask, traits, debug=None):
    """Process a top-view VIS image and its matching NIR image.

    Segments the plant in the VIS image, measures shape and color traits,
    then reuses the VIS plant mask (flipped, resized, repositioned) to
    segment and analyze the NIR image.

    Inputs:
    vis_id     = identifier passed through to the VIS analysis functions
    vis_img    = VIS (RGB) image array
    nir_id     = identifier passed through to the NIR analysis functions
    nir_rgb    = NIR image converted to 3-channel (used for masking/contours)
    nir_cv2    = NIR image as read by OpenCV (used for intensity analysis)
    brass_mask = image of the brass fixture used to mask it out of the VIS image
    traits     = traits table (dict); 'tv_area' is written into it
    debug      = None, 'print', or 'plot' debug mode for PlantCV steps

    :return: [vis_traits, nir_traits] — two dicts of measured traits
    """
    # Pipeline step counter threaded through every PlantCV call
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(vis_img, 's', device, debug)
    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 75, 255, 'light', device, debug)
    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)
    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(vis_img, 'b', device, debug)
    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, debug)
    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, debug)
    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(vis_img, bs, 'white', device, debug)

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, debug)
    device, brass_inv = pcv.invert(brass_thresh, device, debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device, debug)

    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, debug)
    device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, 'dark', device, debug)
    device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, 'light', device, debug)
    device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, debug)
    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, 'dark', device, debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, 'light', device, debug)
    # Join the thresholded saturation and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)
    # Fill small objects
    device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 300, device, debug)
    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(
        masked2, soil_cnt, device, debug)
    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(vis_img, 'rectangle', device, None, 'default', debug, True, 600, 450, -600, -350)
    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        vis_img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, debug)
    # Object combine kept objects
    device, obj, mask = pcv.object_composition(vis_img, roi_objects, hierarchy3, device, debug)

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        vis_img, vis_id, obj, mask, device, debug)
    # Determine color properties
    device, color_header, color_data, color_img = pcv.analyze_color(
        vis_img, vis_id, mask, 256, device, debug, None, 'v', 'img', 300)

    # Output shape and color data
    # NOTE: starts at index 1 (shape) / 2 (color), skipping header label columns
    vis_traits = {}
    for i in range(1, len(shape_header)):
        vis_traits[shape_header[i]] = shape_data[i]
    for i in range(2, len(color_header)):
        vis_traits[color_header[i]] = serialize_color_data(color_data[i])

    # ######################## Use VIS image mask for NIR image ########################
    # Flip mask
    device, f_mask = pcv.flip(mask, "horizontal", device, debug)
    # Resize mask (scale factor matches the VIS-to-NIR resolution ratio — TODO confirm)
    device, nmask = pcv.resize(f_mask, 0.116148, 0.116148, device, debug)
    # Position and crop mask
    device, newmask = pcv.crop_position_mask(nir_rgb, nmask, device, 15, 5, "top", "right", debug)
    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(
        nir_rgb, newmask, device, debug)
    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(
        nir_rgb, nir_objects, nir_hierarchy, device, debug)

    # ###################################### Analysis ######################################
    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(
        nir_cv2, nir_id, nir_combinedmask, 256, device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(
        nir_cv2, nir_id, nir_combined, nir_combinedmask, device, debug)

    nir_traits = {}
    for i in range(1, len(nshape_header)):
        nir_traits[nshape_header[i]] = nshape_data[i]
    for i in range(2, len(nhist_header)):
        nir_traits[nhist_header[i]] = serialize_color_data(nhist_data[i])

    # Add data to traits table
    traits['tv_area'] = vis_traits['area']

    return [vis_traits, nir_traits]
def main():
    """NIR plant-image processing pipeline.

    Reads a grayscale NIR image and a pre-computed background-average image,
    isolates the plant by combining background subtraction with edge
    detection (Laplacian + Sobel), masks out the image borders and the
    carrier, then runs NIR intensity and shape analysis and prints results.

    Fix: the final cleanup call was ``cv2.destroyAllWdindows()`` (typo),
    which raised AttributeError at the end of every run; corrected to
    ``cv2.destroyAllWindows()``.
    """
    # Get options
    args = options()
    if args.debug:
        print("Debug mode turned on...")

    # Read the image; flags=0 requests a grayscale image
    img = cv2.imread(args.image, flags=0)
    # Get directory path and image name from command line arguments
    path, img_name = os.path.split(args.image)
    # Read in image which is the pixelwise average of background images
    img_bkgrd = cv2.imread("background_average.jpg", flags=0)

    # Pipeline step counter threaded through every PlantCV call
    device = 0

    # Subtract the background image from the image with the plant
    device, bkg_sub_img = pcv.image_subtract(img, img_bkgrd, device, args.debug)
    # Threshold the image of interest using the two-sided cv2.inRange function
    # (keep what is between 50-190)
    bkg_sub_thres_img = cv2.inRange(bkg_sub_img, 50, 190)
    if args.debug:
        cv2.imwrite('bkgrd_sub_thres.png', bkg_sub_thres_img)

    # Laplace filtering (identify edges based on the 2nd derivative)
    device, lp_img = pcv.laplace_filter(img, 1, 1, device, args.debug)
    if args.debug:
        pcv.plot_hist(lp_img, 'histograma_lp')
    # Laplacian image sharpening: enhances the darkness of the detected edges
    device, lp_shrp_img = pcv.image_subtract(img, lp_img, device, args.debug)
    if args.debug:
        pcv.plot_hist(lp_shrp_img, 'histograma_lp_shrp')

    # Sobel filtering: 1st derivative along the horizontal axis (kernel = 1)
    device, sbx_img = pcv.sobel_filter(img, 1, 0, 1, device, args.debug)
    if args.debug:
        pcv.plot_hist(sbx_img, 'histograma_sbx')
    # 1st derivative Sobel filtering along the vertical axis (kernel = 1)
    device, sby_img = pcv.sobel_filter(img, 0, 1, 1, device, args.debug)
    if args.debug:
        pcv.plot_hist(sby_img, 'histograma_sby')
    # Combine the effects of both x and y filters through matrix addition.
    # This captures the edges identified within each plane and emphasizes
    # edges found in both images.
    device, sb_img = pcv.image_add(sbx_img, sby_img, device, args.debug)
    if args.debug:
        pcv.plot_hist(sb_img, 'histograma_sb_comb_img')

    # Use a low-pass (median blur) filter to smooth the Sobel image
    device, mblur_img = pcv.median_blur(sb_img, 1, device, args.debug)
    device, mblur_invert_img = pcv.invert(mblur_img, device, args.debug)
    # Combine the smoothed Sobel image with the sharpened Laplacian image:
    # combines the best features of both methods as described in
    # "Digital Image Processing" by Gonzalez and Woods, pg. 169
    device, edge_shrp_img = pcv.image_add(mblur_invert_img, lp_shrp_img, device, args.debug)
    if args.debug:
        pcv.plot_hist(edge_shrp_img, 'hist_edge_shrp_img')
    # Perform thresholding to generate a binary image
    device, tr_es_img = pcv.binary_threshold(edge_shrp_img, 125, 255, 'dark', device, args.debug)

    # Prepare a few small kernels for morphological filtering
    # NOTE(review): these kernels are built but never passed to pcv.erode
    # below (each erode call uses kernel size 1) — confirm intent.
    kern = np.zeros((3, 3), dtype=np.uint8)
    kern1 = np.copy(kern)
    kern1[1, 1:3] = 1
    kern2 = np.copy(kern)
    kern2[1, 0:2] = 1
    kern3 = np.copy(kern)
    kern3[0:2, 1] = 1
    kern4 = np.copy(kern)
    kern4[1:3, 1] = 1
    # Prepare a larger kernel for dilation
    kern[1, 0:3] = 1
    kern[0:3, 1] = 1

    # Perform erosion with 4 small kernels
    device, e1_img = pcv.erode(tr_es_img, 1, 1, device, args.debug)
    device, e2_img = pcv.erode(tr_es_img, 1, 1, device, args.debug)
    device, e3_img = pcv.erode(tr_es_img, 1, 1, device, args.debug)
    device, e4_img = pcv.erode(tr_es_img, 1, 1, device, args.debug)
    # Combine eroded images
    device, c12_img = pcv.logical_or(e1_img, e2_img, device, args.debug)
    device, c123_img = pcv.logical_or(c12_img, e3_img, device, args.debug)
    device, c1234_img = pcv.logical_or(c123_img, e4_img, device, args.debug)

    # Bring the two object identification approaches together:
    # using a logical OR, combine the object identified by background
    # subtraction and the object identified by derivative filtering.
    device, comb_img = pcv.logical_or(c1234_img, bkg_sub_thres_img, device, args.debug)
    # Get masked image: identify pixels corresponding to plant and keep those.
    device, masked_erd = pcv.apply_mask(img, comb_img, 'black', device, args.debug)

    # Remove the edges of the image by generating a set of rectangles to
    # mask the edges. img is (254 x 320).
    # Mask for the bottom of the image
    device, im2, box1_img, rect_contour1, hierarchy1 = pcv.rectangle_mask(
        img, (120, 184), (215, 252), device, args.debug, color='white')
    # Mask for the left side of the image
    device, im3, box2_img, rect_contour2, hierarchy2 = pcv.rectangle_mask(
        img, (1, 1), (85, 252), device, args.debug, color='white')
    # Mask for the right side of the image
    device, im4, box3_img, rect_contour3, hierarchy3 = pcv.rectangle_mask(
        img, (240, 1), (318, 252), device, args.debug, color='white')
    # Mask the edges
    device, im5, box4_img, rect_contour4, hierarchy4 = pcv.rectangle_mask(
        img, (1, 1), (318, 252), device, args.debug)
    # Combine boxes to filter the edges and car out of the photo
    device, bx12_img = pcv.logical_or(box1_img, box2_img, device, args.debug)
    device, bx123_img = pcv.logical_or(bx12_img, box3_img, device, args.debug)
    device, bx1234_img = pcv.logical_or(bx123_img, box4_img, device, args.debug)
    # Invert this mask and then apply it to the masked image
    device, inv_bx1234_img = pcv.invert(bx1234_img, device, args.debug)
    device, edge_masked_img = pcv.apply_mask(masked_erd, inv_bx1234_img, 'black', device, args.debug)

    # Assign the coordinates of an area of interest (rectangle around the
    # area you expect the plant to be in)
    device, im6, roi_img, roi_contour, roi_hierarchy = pcv.rectangle_mask(
        img, (120, 75), (200, 184), device, args.debug)
    # Get the coordinates of the plant from the masked object
    plant_objects, plant_hierarchy = cv2.findContours(edge_masked_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # Obtain the coordinates of the plant object which are partially
    # within the area of interest
    device, roi_objects, hierarchy5, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi_contour, roi_hierarchy, plant_objects, plant_hierarchy, device, args.debug)
    # Apply the box mask to the image to ensure no background
    device, masked_img = pcv.apply_mask(kept_mask, inv_bx1234_img, 'black', device, args.debug)

    # Obtain a 3-dimensional representation of this grayscale image
    # (for pseudocoloring)
    rgb = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    # Generate a binary mask to send to the analysis functions
    device, mask = pcv.binary_threshold(masked_img, 1, 255, 'light', device, args.debug)
    # Make a copy of this mask for pseudocoloring
    mask3d = np.copy(mask)
    # Extract coordinates of the plant for pseudocoloring
    plant_objects_2, plant_hierarchy_2 = cv2.findContours(
        mask3d, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    device, o, m = pcv.object_composition(rgb, roi_objects, hierarchy5, device, args.debug)
    # Get final masked image
    device, masked_img = pcv.apply_mask(kept_mask, inv_bx1234_img, 'black', device, args.debug)

    # ############################ Analysis ############################
    # Perform signal analysis.
    # NOTE(review): analyze_NIR_intensity is called twice and the first
    # result is overwritten; kept both calls to preserve existing behavior
    # (each call may emit debug output) — confirm whether the first call
    # can be dropped.
    device, hist_header, hist_data, h_norm = pcv.analyze_NIR_intensity(
        img, args.image, mask, 256, device, args.debug, args.outdir + '/' + img_name)
    device, hist_header, hist_data, h_norm = pcv.analyze_NIR_intensity(
        img, rgb, mask, 256, device, debug=args.debug,
        filename=args.outdir + '/' + img_name)
    # Perform shape analysis
    device, shape_header, shape_data, ori_img = pcv.analyze_object(
        rgb, args.image, o, m, device, debug=args.debug,
        filename=args.outdir + '/' + img_name)

    # Print the results to STDOUT
    pcv.print_results(args.image, hist_header, hist_data)
    pcv.print_results(args.image, shape_header, shape_data)

    cv2.waitKey()
    # Bug fix: was cv2.destroyAllWdindows() (AttributeError)
    cv2.destroyAllWindows()
def main():
    """VIS image pipeline: segment the plant, analyze shape and color, append results.

    Reads the RGB image named by ``args.image`` and a brass-piece ROI image from
    ``args.roi``, builds a plant mask via HSV/LAB thresholding, and appends
    tab-separated shape/color results to ``args.result``.
    """
    # Get options
    args = options()

    # Read image and the brass-piece mask image
    img, path, filename = pcv.readimage(args.image)
    brass_mask = cv2.imread(args.roi)

    # Pipeline step counter (incremented by every pcv call; numbers debug images)
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 49, 255, 'light', device, args.debug)

    # Median filter (run twice: one copy for the mask, one for contour finding)
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, args.debug)

    # Convert RGB to LAB and extract the Blue-Yellow channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)

    # Threshold the blue image (duplicate call supplies the contour copy for fill)
    device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 150, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug)

    # Apply mask (for VIS images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, args.debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, args.debug)
    device, brass_inv = pcv.invert(brass_thresh, device, args.debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device, args.debug)

    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, args.debug)
    device, soil_car = pcv.binary_threshold(masked_a, 128, 255, 'dark', device, args.debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white', device, args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, args.debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, args.debug)

    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 118, 255, 'dark', device, args.debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 155, 255, 'light', device, args.debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)

    # Fill small objects
    device, soil_fill = pcv.fill(soil_ab, soil_ab_cnt, 200, device, args.debug)

    # Median filter
    device, soil_mblur = pcv.median_blur(soil_fill, 5, device, args.debug)
    device, soil_cnt = pcv.median_blur(soil_fill, 5, device, args.debug)

    # Apply mask (for VIS images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device, args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(
        masked2, soil_cnt, device, args.debug)

    # Define a circular region of interest
    device, roi1, roi_hierarchy = pcv.define_roi(img, 'circle', device, None,
                                                 'default', args.debug, True,
                                                 0, 0, -50, -50)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device,
        args.debug)

    # Combine kept objects into one
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3,
                                               device, args.debug)

    # ############# Analysis ################

    # Output mask
    device, maskpath, mask_images = pcv.output_mask(device, img, mask, filename,
                                                    args.outdir, True, args.debug)

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, obj, mask, device, args.debug)

    # Determine color properties: histograms, color slices and pseudocolored images,
    # output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        img, args.image, mask, 256, device, args.debug, None, 'v', 'img', 300)

    # Append tab-separated results. Using a context manager guarantees the file
    # is closed even if a write fails (the original leaked the handle on error).
    with open(args.result, "a") as result:
        result.write('\t'.join(map(str, shape_header)) + "\n")
        result.write('\t'.join(map(str, shape_data)) + "\n")
        for row in mask_images:
            result.write('\t'.join(map(str, row)) + "\n")
        result.write('\t'.join(map(str, color_header)) + "\n")
        result.write('\t'.join(map(str, color_data)) + "\n")
def main():
    """NIR image pipeline: edge-based plant segmentation, then intensity and shape analysis.

    Reads a grayscale NIR image (``args.image``) plus a fixed background average,
    isolates the plant by combining background subtraction with Laplacian/Sobel
    edge sharpening, and prints NIR-intensity and shape results to STDOUT.
    """
    # Get options
    args = options()
    if args.debug:
        print("Analyzing your image dude...")

    # Read image as grayscale (flags=0); device numbers every pipeline step's
    # debug output, so statement order must not change
    device = 0
    img = cv2.imread(args.image, flags=0)
    path, img_name = os.path.split(args.image)

    # Read in image which is average of average of backgrounds.
    # NOTE(review): background filename is hard-coded and loaded from the
    # current working directory — the script must be run from the right place.
    img_bkgrd = cv2.imread("bkgrd_ave_z2500.png", flags=0)

    # NIR images for burnin2 are up-side down. This may be fixed in later experiments
    img = ndimage.rotate(img, 180)
    img_bkgrd = ndimage.rotate(img_bkgrd, 180)

    # Subtract the background from the image to make the plant more prominent
    device, bkg_sub_img = pcv.image_subtract(img, img_bkgrd, device, args.debug)
    if args.debug:
        pcv.plot_hist(bkg_sub_img, "bkg_sub_img")

    # NOTE(review): the binary_threshold result below is immediately overwritten
    # by cv2.inRange; the call is kept (it still advances the device counter and
    # emits a debug image), but only the inRange output is used downstream.
    device, bkg_sub_thres_img = pcv.binary_threshold(bkg_sub_img, 145, 255,
                                                     "dark", device, args.debug)
    bkg_sub_thres_img = cv2.inRange(bkg_sub_img, 30, 220)
    if args.debug:
        cv2.imwrite("bkgrd_sub_thres.png", bkg_sub_thres_img)

    # If a region of interest is specified read it in.
    # NOTE(review): `roi` is never used after this point.
    roi = cv2.imread(args.roi)

    # Start by examining the distribution of pixel intensity values
    if args.debug:
        pcv.plot_hist(img, "hist_img")

    # Will intensity transformation enhance the ability to isolate the object
    # of interest by thresholding? (he_img is only inspected via its histogram)
    device, he_img = pcv.HistEqualization(img, device, args.debug)
    if args.debug:
        pcv.plot_hist(he_img, "hist_img_he")

    # Laplace filtering (identify edges based on 2nd derivative)
    device, lp_img = pcv.laplace_filter(img, 1, 1, device, args.debug)
    if args.debug:
        pcv.plot_hist(lp_img, "hist_lp")

    # Laplacian image sharpening; this step enhances the darkness of detected edges
    device, lp_shrp_img = pcv.image_subtract(img, lp_img, device, args.debug)
    if args.debug:
        pcv.plot_hist(lp_shrp_img, "hist_lp_shrp")

    # Sobel filtering: 1st derivative along the horizontal axis (kernel=1, unscaled)
    device, sbx_img = pcv.sobel_filter(img, 1, 0, 1, 1, device, args.debug)
    if args.debug:
        pcv.plot_hist(sbx_img, "hist_sbx")

    # 1st derivative sobel filtering along vertical axis (kernel=1, unscaled)
    device, sby_img = pcv.sobel_filter(img, 0, 1, 1, 1, device, args.debug)
    if args.debug:
        pcv.plot_hist(sby_img, "hist_sby")

    # Combine the effects of both x and y filters through matrix addition.
    # This captures edges identified within each plane and emphasizes edges
    # found in both images.
    device, sb_img = pcv.image_add(sbx_img, sby_img, device, args.debug)
    if args.debug:
        pcv.plot_hist(sb_img, "hist_sb_comb_img")

    # Use a lowpass (blurring) filter to smooth the sobel image
    device, mblur_img = pcv.median_blur(sb_img, 1, device, args.debug)
    device, mblur_invert_img = pcv.invert(mblur_img, device, args.debug)

    # Combine the smoothed sobel image with the laplacian-sharpened image.
    # Combines the best features of both methods as described in
    # "Digital Image Processing" by Gonzalez and Woods pg. 169
    device, edge_shrp_img = pcv.image_add(mblur_invert_img, lp_shrp_img,
                                          device, args.debug)
    if args.debug:
        pcv.plot_hist(edge_shrp_img, "hist_edge_shrp_img")

    # Perform thresholding to generate a binary image
    device, tr_es_img = pcv.binary_threshold(edge_shrp_img, 145, 255, "dark",
                                             device, args.debug)

    # Prepare four small directional kernels for morphological erosion:
    # right, left, up, and down half-crosses centered at (1,1)
    kern = np.zeros((3, 3), dtype=np.uint8)
    kern1 = np.copy(kern)
    kern1[1, 1:3] = 1
    kern2 = np.copy(kern)
    kern2[1, 0:2] = 1
    kern3 = np.copy(kern)
    kern3[0:2, 1] = 1
    kern4 = np.copy(kern)
    kern4[1:3, 1] = 1

    # Prepare a larger plus-shaped kernel for dilation.
    # NOTE(review): the dilation step was disabled, so `kern` is currently unused.
    kern[1, 0:3] = 1
    kern[0:3, 1] = 1

    # Perform erosion with the 4 small kernels
    device, e1_img = pcv.erode(tr_es_img, kern1, 1, device, args.debug)
    device, e2_img = pcv.erode(tr_es_img, kern2, 1, device, args.debug)
    device, e3_img = pcv.erode(tr_es_img, kern3, 1, device, args.debug)
    device, e4_img = pcv.erode(tr_es_img, kern4, 1, device, args.debug)

    # Combine eroded images
    device, c12_img = pcv.logical_or(e1_img, e2_img, device, args.debug)
    device, c123_img = pcv.logical_or(c12_img, e3_img, device, args.debug)
    device, c1234_img = pcv.logical_or(c123_img, e4_img, device, args.debug)

    # Merge the edge-derived mask with the background-subtraction mask
    device, comb_img = pcv.logical_or(c1234_img, bkg_sub_thres_img, device,
                                      args.debug)

    # Get masked image; the combined mask may contain some pixels which are not plant
    device, masked_erd = pcv.apply_mask(img, comb_img, "black", device, args.debug)

    # Remove the edges of the image by generating a set of rectangles to mask
    # the edges; img is (254 x 320).
    # Mask for the bottom of the image
    device, box1_img, rect_contour1, hierarchy1 = pcv.rectangle_mask(
        img, (120, 184), (215, 252), device, args.debug)
    # Mask for the left side of the image
    device, box2_img, rect_contour2, hierarchy2 = pcv.rectangle_mask(
        img, (1, 1), (85, 252), device, args.debug)
    # Mask for the right side of the image
    device, box3_img, rect_contour3, hierarchy3 = pcv.rectangle_mask(
        img, (240, 1), (318, 252), device, args.debug)
    # Mask the edges
    device, box4_img, rect_contour4, hierarchy4 = pcv.border_mask(
        img, (1, 1), (318, 252), device, args.debug)

    # Combine boxes to filter the edges and car out of the photo
    device, bx12_img = pcv.logical_or(box1_img, box2_img, device, args.debug)
    device, bx123_img = pcv.logical_or(bx12_img, box3_img, device, args.debug)
    device, bx1234_img = pcv.logical_or(bx123_img, box4_img, device, args.debug)
    device, inv_bx1234_img = pcv.invert(bx1234_img, device, args.debug)

    # Apply the border mask, then make a ROI around the plant (include connected objects)
    device, edge_masked_img = pcv.apply_mask(masked_erd, inv_bx1234_img, "black",
                                             device, args.debug)
    device, roi_img, roi_contour, roi_hierarchy = pcv.rectangle_mask(
        img, (120, 75), (200, 184), device, args.debug)
    plant_objects, plant_hierarchy = cv2.findContours(
        edge_masked_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    device, roi_objects, hierarchy5, kept_mask, obj_area = pcv.roi_objects(
        img, "partial", roi_contour, roi_hierarchy, plant_objects,
        plant_hierarchy, device, args.debug
    )

    # Apply the box mask to the kept-object mask
    device, masked_img = pcv.apply_mask(kept_mask, inv_bx1234_img, "black",
                                        device, args.debug)

    # RGB version of the grayscale image, used for pseudocoloring and shape output
    rgb = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)

    # Generate a binary mask to send to the analysis functions
    device, mask = pcv.binary_threshold(masked_img, 1, 255, "light", device,
                                        args.debug)
    # Copy of the mask for pseudocoloring.
    # NOTE(review): plant_objects_2 / plant_hierarchy_2 are computed but unused.
    mask3d = np.copy(mask)
    plant_objects_2, plant_hierarchy_2 = cv2.findContours(
        mask3d, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    device, o, m = pcv.object_composition(rgb, roi_objects, hierarchy5, device,
                                          args.debug)

    # ### Analysis ###
    # NIR signal analysis
    device, hist_header, hist_data, h_norm = pcv.analyze_NIR_intensity(
        img, args.image, mask, 256, device, args.debug,
        args.outdir + "/" + img_name
    )
    # Shape analysis
    device, shape_header, shape_data, ori_img = pcv.analyze_object(
        rgb, args.image, o, m, device, args.debug,
        args.outdir + "/" + img_name
    )

    # Print the results to STDOUT
    pcv.print_results(args.image, hist_header, hist_data)
    pcv.print_results(args.image, shape_header, shape_data)
def main():
    """VIS+NIR pipeline (keyword-argument API): segment plant, analyze, co-analyze NIR.

    Segments the plant from a VIS image via an inverted green-magenta threshold
    with targeted removal of cabinet hardware (brass stopper, screw hole),
    appends shape/color results to ``args.result``, then reuses the plant mask
    to analyze the matching NIR image and appends those results to
    ``args.coresult``.
    """
    # Initialize device (step counter; incremented by every pcv call and
    # manually bumped around the two hand-rolled region-splice steps)
    device = 0

    # Parse command-line options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(filename=args.image, debug=args.debug)

    # Convert RGB to LAB and extract the Green-Magenta channel
    device, green_channel = pcv.rgb2gray_lab(img=img, channel="a", device=device,
                                             debug=args.debug)

    # Invert the Green-Magenta image because the plant is dark green
    device, green_inv = pcv.invert(img=green_channel, device=device,
                                   debug=args.debug)

    # Threshold the inverted Green-Magenta image to mostly isolate green pixels
    device, green_thresh = pcv.binary_threshold(img=green_inv, threshold=134,
                                                maxValue=255, object_type="light",
                                                device=device, debug=args.debug)

    # Extract core plant region from the image to preserve delicate plant
    # features during filtering (coordinates are fixed for this camera setup)
    device += 1
    plant_region = green_thresh[100:2000, 250:2250]
    if args.debug is not None:
        pcv.print_image(filename=str(device) + "_extract_plant_region.png",
                        img=plant_region)

    # Use a Gaussian blur to disrupt the strong edge features in the cabinet
    device, blur_gaussian = pcv.gaussian_blur(device=device, img=green_thresh,
                                              ksize=(7, 7), sigmax=0, sigmay=None,
                                              debug=args.debug)

    # Threshold the blurred image to remove features that were blurred
    device, blur_thresholded = pcv.binary_threshold(img=blur_gaussian,
                                                    threshold=250, maxValue=255,
                                                    object_type="light",
                                                    device=device,
                                                    debug=args.debug)

    # Add the plant region back in to the filtered image
    device += 1
    blur_thresholded[100:2000, 250:2250] = plant_region
    if args.debug is not None:
        pcv.print_image(filename=str(device) + "_replace_plant_region.png",
                        img=blur_thresholded)

    # Use a median blur to break up the horizontal and vertical lines caused by
    # shadows from the track edges
    device, med_blur = pcv.median_blur(img=blur_thresholded, ksize=5,
                                       device=device, debug=args.debug)

    # Fill in small contours
    device, green_fill_50 = pcv.fill(img=np.copy(med_blur), mask=np.copy(med_blur),
                                     size=50, device=device, debug=args.debug)

    # Define an ROI for the brass stopper
    device, stopper_roi, stopper_hierarchy = pcv.define_roi(
        img=img, shape="rectangle", device=device, roi=None, roi_input="default",
        debug=args.debug, adjust=True, x_adj=1420, y_adj=890, w_adj=-920,
        h_adj=-1040)

    # Identify all remaining contours in the binary image
    device, contours, hierarchy = pcv.find_objects(img=img,
                                                   mask=np.copy(green_fill_50),
                                                   device=device,
                                                   debug=args.debug)

    # Remove contours completely contained within the stopper region of interest.
    # NOTE(review): remove_countors_roi (sic) is a project helper defined
    # elsewhere in this file; the misspelling matches its definition.
    device, remove_stopper_mask = remove_countors_roi(mask=green_fill_50,
                                                      contours=contours,
                                                      hierarchy=hierarchy,
                                                      roi=stopper_roi,
                                                      device=device,
                                                      debug=args.debug)

    # Define an ROI for a screw hole
    device, screw_roi, screw_hierarchy = pcv.define_roi(
        img=img, shape="rectangle", device=device, roi=None, roi_input="default",
        debug=args.debug, adjust=True, x_adj=1870, y_adj=1010, w_adj=-485,
        h_adj=-960)

    # Remove contours completely contained within the screw region of interest
    device, remove_screw_mask = remove_countors_roi(mask=remove_stopper_mask,
                                                    contours=contours,
                                                    hierarchy=hierarchy,
                                                    roi=screw_roi,
                                                    device=device,
                                                    debug=args.debug)

    # Identify objects
    device, contours, contour_hierarchy = pcv.find_objects(
        img=img, mask=remove_screw_mask, device=device, debug=args.debug)

    # Define ROI
    device, roi, roi_hierarchy = pcv.define_roi(
        img=img, shape="rectangle", device=device, roi=None, roi_input="default",
        debug=args.debug, adjust=True, x_adj=565, y_adj=200, w_adj=-490,
        h_adj=-250)

    # Decide which objects to keep
    device, roi_contours, roi_contour_hierarchy, _, _ = pcv.roi_objects(
        img=img, roi_type="partial", roi_contour=roi, roi_hierarchy=roi_hierarchy,
        object_contour=contours, obj_hierarchy=contour_hierarchy, device=device,
        debug=args.debug)

    # If there are no contours left we cannot measure anything
    if len(roi_contours) > 0:
        # Object combine kept objects
        device, plant_contour, plant_mask = pcv.object_composition(
            img=img, contours=roi_contours, hierarchy=roi_contour_hierarchy,
            device=device, debug=args.debug)

        # outfile stays False (no image written) unless --writeimg was given
        outfile = False
        if args.writeimg:
            outfile = args.outdir + "/" + filename

        # Find shape properties, output shape image (optional)
        device, shape_header, shape_data, shape_img = pcv.analyze_object(
            img=img, imgname=args.image, obj=plant_contour, mask=plant_mask,
            device=device, debug=args.debug, filename=outfile)

        # Determine color properties: Histograms, Color Slices and Pseudocolored
        # Images, output color analyzed images (optional)
        device, color_header, color_data, color_img = pcv.analyze_color(
            img=img, imgname=args.image, mask=plant_mask, bins=256, device=device,
            debug=args.debug, hist_plot_type=None, pseudo_channel="v",
            pseudo_bkg="img", resolution=300, filename=outfile)

        # Output shape and color data as appended tab-separated lines
        result = open(args.result, "a")
        result.write('\t'.join(map(str, shape_header)) + "\n")
        result.write('\t'.join(map(str, shape_data)) + "\n")
        for row in shape_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.write('\t'.join(map(str, color_header)) + "\n")
        result.write('\t'.join(map(str, color_data)) + "\n")
        for row in color_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.close()

        # Find matching NIR image
        device, nirpath = pcv.get_nir(path=path, filename=filename, device=device,
                                      debug=args.debug)
        nir_rgb, nir_path, nir_filename = pcv.readimage(nirpath)
        nir_img = cv2.imread(nirpath, 0)

        # Make mask glove-like in proportions via dilation
        device, d_mask = pcv.dilate(plant_mask, kernel=1, i=0, device=device,
                                    debug=args.debug)

        # Resize mask to NIR scale; conv_ratio is a project helper
        # (returns height/width scale factors — note the swapped unpack order)
        prop2, prop1 = conv_ratio()
        device, nmask = pcv.resize(img=d_mask, resize_x=prop1, resize_y=prop2,
                                   device=device, debug=args.debug)

        # Convert the resized mask to a binary mask
        device, bmask = pcv.binary_threshold(img=nmask, threshold=0, maxValue=255,
                                             object_type="light", device=device,
                                             debug=args.debug)

        # Crop, position, and align the mask onto the NIR frame
        device, crop_img = crop_sides_equally(mask=bmask, nir=nir_img,
                                              device=device, debug=args.debug)
        device, newmask = pcv.crop_position_mask(img=nir_img, mask=crop_img,
                                                 device=device, x=0, y=1,
                                                 v_pos="bottom", h_pos="right",
                                                 debug=args.debug)

        # Identify objects
        device, nir_objects, nir_hierarchy = pcv.find_objects(img=nir_rgb,
                                                              mask=newmask,
                                                              device=device,
                                                              debug=args.debug)

        # Object combine kept objects
        device, nir_combined, nir_combinedmask = pcv.object_composition(
            img=nir_rgb, contours=nir_objects, hierarchy=nir_hierarchy,
            device=device, debug=args.debug)

        if args.writeimg:
            outfile = args.outdir + "/" + nir_filename

        # Analyze NIR signal data
        device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(
            img=nir_img, rgbimg=nir_rgb, mask=nir_combinedmask, bins=256,
            device=device, histplot=False, debug=args.debug, filename=outfile)

        # Analyze the shape of the plant contour from the NIR image
        device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(
            img=nir_img, imgname=nir_filename, obj=nir_combined,
            mask=nir_combinedmask, device=device, debug=args.debug,
            filename=outfile)

        # Write NIR data to co-results file
        coresult = open(args.coresult, "a")
        coresult.write('\t'.join(map(str, nhist_header)) + "\n")
        coresult.write('\t'.join(map(str, nhist_data)) + "\n")
        for row in nir_imgs:
            coresult.write('\t'.join(map(str, row)) + "\n")
        coresult.write('\t'.join(map(str, nshape_header)) + "\n")
        coresult.write('\t'.join(map(str, nshape_data)) + "\n")
        coresult.write('\t'.join(map(str, nir_shape)) + "\n")
        coresult.close()
def main():
    """VIS pipeline: segment the plant, then print shape and color results.

    Reads the RGB image from ``args.image`` and a brass-piece image from
    ``args.roi``, isolates the plant with HSV/LAB thresholding, and writes the
    shape and color measurements to STDOUT via ``pcv.print_results``.
    """
    args = options()

    # Input images: the target photo plus the brass-piece reference
    img, path, filename = pcv.readimage(args.image)
    brass_mask = cv2.imread(args.roi)

    # Step counter threaded through every pcv call (numbers debug output)
    device = 0

    # --- Saturation-based plant mask ---
    device, sat_channel = pcv.rgb2gray_hsv(img, 's', device, args.debug)
    device, sat_bin = pcv.binary_threshold(sat_channel, 49, 255, 'light',
                                           device, args.debug)
    # Two median-blur copies: one becomes the mask, one the contour image
    device, sat_blur = pcv.median_blur(sat_bin, 5, device, args.debug)
    device, sat_blur_cnt = pcv.median_blur(sat_bin, 5, device, args.debug)
    device, sat_filled = pcv.fill(sat_blur, sat_blur_cnt, 150, device,
                                  args.debug)

    # --- Blue-yellow-based plant mask ---
    device, blue_channel = pcv.rgb2gray_lab(img, 'b', device, args.debug)
    device, blue_bin = pcv.binary_threshold(blue_channel, 138, 255, 'light',
                                            device, args.debug)
    device, blue_bin_cnt = pcv.binary_threshold(blue_channel, 138, 255, 'light',
                                                device, args.debug)
    device, blue_filled = pcv.fill(blue_bin, blue_bin_cnt, 100, device,
                                   args.debug)

    # Intersect the two masks and apply to the photo (white background)
    device, sat_and_blue = pcv.logical_and(sat_filled, blue_filled, device,
                                           args.debug)
    device, white_masked = pcv.apply_mask(img, sat_and_blue, 'white', device,
                                          args.debug)

    # --- Remove the brass piece using its reference image ---
    device, brass_val = pcv.rgb2gray_hsv(brass_mask, 'v', device, args.debug)
    device, brass_bin = pcv.binary_threshold(brass_val, 0, 255, 'light',
                                             device, args.debug)
    device, brass_bin_inv = pcv.invert(brass_bin, device, args.debug)
    device, no_brass = pcv.apply_mask(white_masked, brass_bin_inv, 'white',
                                      device, args.debug)

    # --- Remove soil and car: both sides of the green-magenta midpoint ---
    device, green_mag = pcv.rgb2gray_lab(no_brass, 'a', device, args.debug)
    device, dark_px = pcv.binary_threshold(green_mag, 128, 255, 'dark',
                                           device, args.debug)
    device, light_px = pcv.binary_threshold(green_mag, 128, 255, 'light',
                                            device, args.debug)
    device, bg_mask = pcv.logical_or(dark_px, light_px, device, args.debug)
    device, no_soil = pcv.apply_mask(no_brass, bg_mask, 'white', device,
                                     args.debug)

    # --- Refine with green-magenta and blue-yellow thresholds on the cleaned image ---
    device, gm_channel = pcv.rgb2gray_lab(no_soil, 'a', device, args.debug)
    device, by_channel = pcv.rgb2gray_lab(no_soil, 'b', device, args.debug)
    device, gm_bin = pcv.binary_threshold(gm_channel, 124, 255, 'dark',
                                          device, args.debug)
    device, by_bin = pcv.binary_threshold(by_channel, 148, 255, 'light',
                                          device, args.debug)
    # Union (OR) of the two channels, duplicated for the fill's contour copy
    device, plant_union = pcv.logical_or(gm_bin, by_bin, device, args.debug)
    device, plant_union_cnt = pcv.logical_or(gm_bin, by_bin, device, args.debug)
    device, plant_filled = pcv.fill(plant_union, plant_union_cnt, 200, device,
                                    args.debug)

    # (Median-filter pass intentionally disabled in this variant of the pipeline)

    # Apply the final mask (white background for VIS images)
    device, plant_only = pcv.apply_mask(no_soil, plant_filled, 'white', device,
                                        args.debug)

    # --- Object detection within a rectangular region of interest ---
    device, found_objs, found_hier = pcv.find_objects(plant_only, plant_filled,
                                                      device, args.debug)
    device, region, region_hier = pcv.define_roi(img, 'rectangle', device, None,
                                                 'default', args.debug, True,
                                                 600, 450, -600, -350)
    device, kept_objs, kept_hier, kept_bin, kept_area = pcv.roi_objects(
        img, 'partial', region, region_hier, found_objs, found_hier, device,
        args.debug)
    device, composed_obj, composed_mask = pcv.object_composition(
        img, kept_objs, kept_hier, device, args.debug)

    # --- Analysis: shape, then color histograms/slices/pseudocolor ---
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, composed_obj, composed_mask, device, args.debug,
        args.outdir + '/' + filename)
    device, color_header, color_data, norm_slice = pcv.analyze_color(
        img, args.image, kept_bin, 256, device, args.debug, 'all', 'rgb', 'v',
        'img', 300, args.outdir + '/' + filename)

    # Emit results to STDOUT
    pcv.print_results(args.image, shape_header, shape_data)
    pcv.print_results(args.image, color_header, color_data)
def main(): # Get options args = options() # Read image img, path, filename = pcv.readimage(args.image) brass_mask = cv2.imread(args.roi) # Pipeline step device = 0 # Convert RGB to HSV and extract the Saturation channel device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug) # Threshold the Saturation image device, s_thresh = pcv.binary_threshold(s, 49, 255, 'light', device, args.debug) # Median Filter device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug) device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug) # Fill small objects device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, args.debug) # Convert RGB to LAB and extract the Blue channel device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug) # Threshold the blue image device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug) device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug) # Fill small objects device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, args.debug) # Join the thresholded saturation and blue-yellow images device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug) # Apply Mask (for vis images, mask_color=white) device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug) # Mask pesky brass piece device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, args.debug) device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, args.debug) device, brass_inv = pcv.invert(brass_thresh, device, args.debug) device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device, args.debug) # Further mask soil and car device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, args.debug) device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, 'dark', device, args.debug) device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, 'light', device, args.debug) device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, args.debug) device, soil_masked = 
pcv.apply_mask(brass_masked, soil_car, 'white', device, args.debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, args.debug) device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, args.debug) # Threshold the green-magenta and blue images device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, 'dark', device, args.debug) device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, 'light', device, args.debug) # Join the thresholded saturation and blue-yellow images (OR) device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug) device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug) # Fill small objects device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 150, device, args.debug) # Median Filter #device, soil_mblur = pcv.median_blur(soil_fill, 5, device, args.debug) #device, soil_cnt = pcv.median_blur(soil_fill, 5, device, args.debug) # Apply mask (for vis images, mask_color=white) device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device, args.debug) # Identify objects device, id_objects, obj_hierarchy = pcv.find_objects( masked2, soil_cnt, device, args.debug) # Define ROI device, roi1, roi_hierarchy = pcv.define_roi(img, 'rectangle', device, None, 'default', args.debug, True, 600, 450, -600, -350) # Decide which objects to keep device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects( img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug) # Object combine kept objects device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug) ############## VIS Analysis ################ outfile = False if args.writeimg == True: outfile = args.outdir + "/" + filename # Find shape properties, output shape image (optional) device, shape_header, shape_data, shape_img = pcv.analyze_object( img, args.image, obj, mask, device, args.debug, outfile) # 
Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional) device, color_header, color_data, color_img = pcv.analyze_color( img, args.image, mask, 256, device, args.debug, None, 'v', 'img', 300, outfile) # Output shape and color data result = open(args.result, "a") result.write('\t'.join(map(str, shape_header))) result.write("\n") result.write('\t'.join(map(str, shape_data))) result.write("\n") for row in shape_img: result.write('\t'.join(map(str, row))) result.write("\n") result.write('\t'.join(map(str, color_header))) result.write("\n") result.write('\t'.join(map(str, color_data))) result.write("\n") for row in color_img: result.write('\t'.join(map(str, row))) result.write("\n") result.close() ############################# Use VIS image mask for NIR image######################### # Find matching NIR image device, nirpath = pcv.get_nir(path, filename, device, args.debug) nir, path1, filename1 = pcv.readimage(nirpath) nir2 = cv2.imread(nirpath, -1) # Flip mask device, f_mask = pcv.flip(mask, "horizontal", device, args.debug) # Reize mask device, nmask = pcv.resize(f_mask, 0.116148, 0.116148, device, args.debug) # position, and crop mask device, newmask = pcv.crop_position_mask(nir, nmask, device, 15, 5, "top", "right", args.debug) # Identify objects device, nir_objects, nir_hierarchy = pcv.find_objects( nir, newmask, device, args.debug) # Object combine kept objects device, nir_combined, nir_combinedmask = pcv.object_composition( nir, nir_objects, nir_hierarchy, device, args.debug) ####################################### Analysis ############################################# outfile1 = False if args.writeimg == True: outfile1 = args.outdir + "/" + filename1 device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity( nir2, filename1, nir_combinedmask, 256, device, False, args.debug, outfile1) device, nshape_header, nshape_data, nir_shape = pcv.analyze_object( nir2, filename1, nir_combined, 
nir_combinedmask, device, args.debug, outfile1) coresult = open(args.coresult, "a") coresult.write('\t'.join(map(str, nhist_header))) coresult.write("\n") coresult.write('\t'.join(map(str, nhist_data))) coresult.write("\n") for row in nir_imgs: coresult.write('\t'.join(map(str, row))) coresult.write("\n") coresult.write('\t'.join(map(str, nshape_header))) coresult.write("\n") coresult.write('\t'.join(map(str, nshape_data))) coresult.write("\n") coresult.write('\t'.join(map(str, nir_shape))) coresult.write("\n") coresult.close()