def psIImask(img, mode='thresh'):
    # pcv.plot_image(img)
    if mode == 'thresh':
        # this entropy-based technique seems to work well when algae is present
        algaethresh = filters.threshold_yen(image=img)
        threshy = pcv.threshold.binary(img, algaethresh, 255, 'light')
        # mask = pcv.dilate(threshy, 2, 1)
        mask = pcv.fill(threshy, 250)
        mask = pcv.erode(mask, 2, 1)
        mask = pcv.fill(mask, 100)
        final_mask = mask  # pcv.fill(mask, 270)
    elif isinstance(mode, pd.DataFrame):
        mode = curvedf  # NOTE: relies on a module-level DataFrame named `curvedf`
        rownum = mode.imageid.values.argmax()
        imgdf = mode.iloc[[1, rownum]]
        fm = cv2.imread(imgdf.filename[0])
        fmp = cv2.imread(imgdf.filename[1])
        npq = np.float32(np.divide(fm, fmp, where=fmp != 0) - 1)
        npq = np.ma.array(fmp, mask=fmp < 200)
        plt.imshow(npq)
        # pcv.plot_image(npq)
        final_mask = np.zeros_like(img)
    else:
        pcv.fatal_error(
            'mode must be "thresh" (default) or an object of class pd.DataFrame')
    return final_mask
def psIImask(img, mode='thresh'):
    '''
    Input:
    img = greyscale image
    mode = type of thresholding to perform. Currently only 'thresh' is available
    '''
    # pcv.plot_image(img)
    if mode == 'thresh':
        # this entropy-based technique seems to work well when algae is present
        algaethresh = filters.threshold_yen(image=img)
        threshy = pcv.threshold.binary(img, algaethresh, 255, 'light')
        # mask = pcv.dilate(threshy, 2, 1)
        mask = pcv.fill(threshy, 150)
        mask = pcv.erode(mask, 2, 1)
        mask = pcv.fill(mask, 45)
        # mask = pcv.dilate(mask, 2, 1)
        final_mask = mask  # pcv.fill(mask, 270)
    else:
        pcv.fatal_error(
            'mode must be "thresh" (default) or an object of class pd.DataFrame')
    return final_mask
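# A minimal usage sketch for psIImask above, assuming skimage.filters, plantcv (pcv),
# numpy, and pandas are imported as in the function body; the filename is hypothetical.
import cv2

fm = cv2.imread('psII_fmax_frame.png', cv2.IMREAD_GRAYSCALE)  # hypothetical path
plant_mask = psIImask(fm, mode='thresh')
# keep only the fluorescing plant pixels for downstream analysis
plant_only = cv2.bitwise_and(fm, fm, mask=plant_mask)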
def remove_green(self, imagearray):
    """Expects a dot-thresholded image."""
    hsv = cv2.cvtColor(imagearray, cv2.COLOR_BGR2HSV)
    green_lower = np.array([20, 0, 0])
    green_upper = np.array([90, 255, 255])
    mask = cv2.inRange(hsv, green_lower, green_upper)
    mask = cv2.bitwise_not(mask)
    device, mask = pcv.fill(mask, mask, 500, 0)
    device, mask = pcv.fill(mask, mask, 500, 0)
    res = cv2.bitwise_and(imagearray, imagearray, mask=mask)
    return res
def threshold_dots_withcenter3(imgarray):
    img = imgarray
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    outrangemask = cv2.inRange(hsv, np.array([30, 0, 0]), np.array([60, 255, 255]))
    inrangemask = cv2.bitwise_not(outrangemask)
    inrangemask = ndimage.filters.minimum_filter(inrangemask, (2, 2))
    dev, inrangemask = pcv.fill(inrangemask, inrangemask, 300, 0)
    hsv = cv2.bitwise_and(hsv, hsv, mask=inrangemask)
    inrangemask2 = cv2.inRange(hsv, np.array([0, 0, 50]), np.array([255, 255, 255]))
    hsv = cv2.bitwise_and(hsv, hsv, mask=inrangemask2)
    img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    img = cv2.bilateralFilter(img, 5, 200, 200)
    img = cv2.medianBlur(img, 5)
    middle_sectionx = int(img.shape[0] / 2)
    middle_sectiony = int(img.shape[1] / 2)
    img = img[(middle_sectionx - 200):(middle_sectionx + 200),
              (middle_sectiony - 500):(middle_sectiony + 500)]
    # Dilation with a 5x5 kernel
    # kernel = np.ones((3, 3), np.uint8)
    # img = cv2.dilate(img, kernel, iterations=1)
    # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
    # img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
    img = cvtcolor_bgr_rgb(img)
    # plt.imshow(img)
    # plt.show()
    return img, (middle_sectionx - 200, middle_sectiony - 500)
def psIImask(img, mode='thresh'):
    # pcv.plot_image(img)
    if mode == 'thresh':
        try:
            masko = pcv.threshold.otsu(img, 255, 'light')
            mask = pcv.fill(masko, 100)
            # this entropy-based technique seems to work well when algae is present
            # algaethresh = filters.threshold_yen(image=img)
            # threshy = pcv.threshold.binary(img, algaethresh, 255, 'light')
            # mask = pcv.dilate(threshy, 2, 1)
            # mask = pcv.fill(mask, 250)
            # mask = pcv.erode(mask, 2, 2)
            final_mask = mask  # pcv.fill(mask, 270)
        except RuntimeError as e:
            print('No fluorescence in this Fm image. Resulting mask', e)
            return np.zeros_like(img)
    elif isinstance(mode, pd.DataFrame):
        mode = curvedf  # NOTE: relies on a module-level DataFrame named `curvedf`
        rownum = mode.imageid.values.argmax()
        imgdf = mode.iloc[[1, rownum]]
        fm = cv2.imread(imgdf.filename[0])
        fmp = cv2.imread(imgdf.filename[1])
        npq = np.float32(np.divide(fm, fmp, where=fmp != 0) - 1)
        npq = np.ma.array(fmp, mask=fmp < 200)
        plt.imshow(npq)
        # pcv.plot_image(npq)
        final_mask = np.zeros(np.shape(img))
    else:
        pcv.fatal_error('mode must be "thresh" (default) or "npq"')
    return final_mask
def main(path, imagename):
    args = {'names': 'names.txt', 'outdir': './output-images'}

    # Read image
    img1, path, filename = pcv.readimage(path + imagename, "native")
    # pcv.params.debug = args['debug']
    # img1 = pcv.white_balance(img, roi=(400, 800, 200, 200))
    # img1 = cv2.resize(img1, (4000, 2000))
    shift1 = pcv.shift_img(img1, 10, 'top')
    img1 = shift1
    a = pcv.rgb2gray_lab(img1, 'a')
    img_binary = pcv.threshold.binary(a, 120, 255, 'dark')
    fill_image = pcv.fill(img_binary, 10)
    dilated = pcv.dilate(fill_image, 1, 1)
    id_objects, obj_hierarchy = pcv.find_objects(img1, dilated)
    roi_contour, roi_hierarchy = pcv.roi.rectangle(4000, 2000, -2000, -4000, img1)
    # print(roi_contour)
    roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img1, 'partial', roi_contour, roi_hierarchy, id_objects, obj_hierarchy)
    clusters_i, contours, hierarchies = pcv.cluster_contours(
        img1, roi_objects, roi_obj_hierarchy, 1, 4)
    # pcv.params.debug = "print"
    out = args['outdir']
    names = args['names']
    output_path = pcv.cluster_contour_splitimg(img1, clusters_i, contours, hierarchies,
                                               out, file=filename, filenames=names)
def threshold_green(self, image):
    # image = cv2.convertScaleAbs(image, image, 1.25, 0)
    # cla = cv2.createCLAHE()
    # sharpen_kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
    # image = cv2.filter2D(image, -1, sharpen_kernel)
    # image = cla.apply(image)
    img_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    device = 0
    avg = np.mean(img_hsv[:, :, 2])
    img_hsv[:, :, 2] = cv2.add((90 - avg), img_hsv[:, :, 2])
    # print("hi")
    green_lower = np.array([30, 100, 60])  # lower bound found by experimentation
    green_upper = np.array([90, 253, 255])  # upper bound
    mask = cv2.inRange(img_hsv, green_lower, green_upper)
    device, dilated = pcv.dilate(mask, 1, 1, device)
    device, mask = pcv.fill(dilated, dilated, 30, device)
    # device, dilated = pcv.fill(dilated, dilated, 50, device)
    res = cv2.bitwise_and(img_hsv, img_hsv, mask=dilated)
    # plt.imshow(res)
    # plt.show()
    # cv2.imshow("hi", res)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    return dilated, res
def process_pot(self, pot_image):
    device = 0
    debug = None  # was commented out, but `debug` is used by the calls below
    updated_pot_image = self.threshold_green(pot_image)
    # plt.imshow(updated_pot_image)
    # plt.show()
    device, a = pcv.rgb2gray_lab(updated_pot_image, 'a', device)
    device, img_binary = pcv.binary_threshold(a, 127, 255, 'dark', device, None)
    # plt.imshow(img_binary)
    # plt.show()
    mask = np.copy(img_binary)
    device, fill_image = pcv.fill(img_binary, mask, 50, device)
    device, dilated = pcv.dilate(fill_image, 1, 1, device)
    device, id_objects, obj_hierarchy = pcv.find_objects(
        updated_pot_image, updated_pot_image, device)
    device, roi1, roi_hierarchy = pcv.define_roi(updated_pot_image, 'rectangle',
                                                 device, None, 'default', debug, False)
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        updated_pot_image, 'partial', roi1, roi_hierarchy, id_objects,
        obj_hierarchy, device, debug)
    device, obj, mask = pcv.object_composition(updated_pot_image, roi_objects,
                                               hierarchy3, device, debug)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        updated_pot_image, "Example1", obj, mask, device, debug, False)
    print(shape_data[1])
def vismask(img):
    a_img = pcv.rgb2gray_lab(img, channel='a')
    thresh_a = pcv.threshold.binary(a_img, 124, 255, 'dark')
    b_img = pcv.rgb2gray_lab(img, channel='b')
    thresh_b = pcv.threshold.binary(b_img, 127, 255, 'light')
    mask = pcv.logical_and(thresh_a, thresh_b)
    mask = pcv.fill(mask, 800)
    final_mask = pcv.dilate(mask, 2, 1)
    return final_mask
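# A minimal usage sketch for vismask() above, assuming a top-view RGB image read
# with pcv.readimage; the filename is hypothetical.
from plantcv import plantcv as pcv

rgb, _, _ = pcv.readimage('vis_tray_image.png')  # hypothetical path
mask = vismask(rgb)
plant_only = pcv.apply_mask(rgb, mask, 'black')  # black out everything but the plant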
def main():
    # Get options
    args = options()
    debug = args.debug

    # Read image
    img, path, filename = pcv.readimage(args.image)

    # Pipeline step
    device = 0
    device, corrected_img = pcv.white_balance(device, img, debug, (500, 1000, 500, 500))
    img = corrected_img
    device, img_gray_sat = pcv.rgb2gray_lab(img, 'a', device, debug)
    device, img_binary = pcv.binary_threshold(img_gray_sat, 120, 255, 'dark', device, debug)
    mask = np.copy(img_binary)
    device, fill_image = pcv.fill(img_binary, mask, 300, device, debug)
    device, id_objects, obj_hierarchy = pcv.find_objects(img, fill_image, device, debug)
    device, roi, roi_hierarchy = pcv.define_roi(img, 'rectangle', device, None, 'default',
                                                debug, True, 1800, 1600, -1500, -500)
    device, roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi, roi_hierarchy, id_objects, obj_hierarchy, device, debug)
    device, obj, mask = pcv.object_composition(img, roi_objects, roi_obj_hierarchy,
                                               device, debug)
    outfile = os.path.join(args.outdir, filename)
    device, color_header, color_data, color_img = pcv.analyze_color(
        img, img, mask, 256, device, debug, None, 'v', 'img', 300, outfile)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, "img", obj, mask, device, debug, outfile)
    shapepath = outfile[:-4] + '_shapes.jpg'
    shapepic = cv2.imread(shapepath)
    plantsize = "The plant is " + str(np.sum(mask)) + " pixels large"
    cv2.putText(shapepic, plantsize, (500, 500), cv2.FONT_HERSHEY_SIMPLEX, 5,
                (0, 255, 0), 10)
    pcv.print_image(shapepic, outfile[:-4] + '-out_shapes.jpg')
def main():
    # Capture the IP camera image with OpenCV
    # (the capture code already exists, but how to hook it up to GCloud is not clear yet)
    # Take the image obtained from OpenCV so it can be processed in PlantCV
    path = 'Image test\\capture (1).jpg'
    gmbTumbuhanRaw, path, filename = pcv.readimage(path, mode='native')

    # Straighten a tilted image
    koreksiRot = pcv.rotate(gmbTumbuhanRaw, 2, True)
    gmbKoreksi = koreksiRot
    pcv.print_image(gmbKoreksi, 'Image test\\Hasil\\gambar_koreksi.jpg')

    # Adjust the white balance of the image
    # Try to keep the lighting even (no shadows from anywhere!)
    # CHANGE the region of interest (roi) values based on the image size!!
    koreksiWhiteBal = pcv.white_balance(gmbTumbuhanRaw, roi=(2, 100, 1104, 1200))
    pcv.print_image(koreksiWhiteBal, 'Image test\\Hasil\\koreksi_white_bal.jpg')

    # Adjust the image contrast so the plant stands out from the background colour
    # Tip: the background should not be as green as the plant
    kontrasBG = pcv.rgb2gray_lab(koreksiWhiteBal, channel='a')
    pcv.print_image(kontrasBG, 'Image test\\Hasil\\koreksi_kontras.jpg')

    # Binary threshold the image
    # Adjust the threshold as needed
    binthres = pcv.threshold.binary(gray_img=kontrasBG, threshold=115, max_value=255,
                                    object_type='dark')

    # Remove noise with fill
    resiksitik = pcv.fill(binthres, size=10)
    pcv.print_image(resiksitik, 'Image test\\Hasil\\noiseFill.jpg')

    # Smooth with dilate
    dilasi = pcv.dilate(resiksitik, ksize=12, i=1)

    # Find the objects and set the ROI size
    id_objek, hirarki_objek = pcv.find_objects(gmbTumbuhanRaw, mask=dilasi)
    roi_contour, roi_hierarchy = pcv.roi.rectangle(img=gmbKoreksi, x=20, y=96,
                                                   h=1100, w=680)

    # Write out the image (for debugging only)
    roicontour = cv2.drawContours(gmbKoreksi, roi_contour, -1, (0, 0, 255), 3)
    pcv.print_image(roicontour, 'Image test\\Hasil\\roicontour.jpg')
    """
def plant_cv(img):
    counter = 0
    debug = None
    counter, s = pcv.rgb2gray_hsv(img, 's', counter, debug)
    counter, s_thresh = pcv.binary_threshold(s, 145, 255, 'light', counter, debug)
    counter, s_mblur = pcv.median_blur(s_thresh, 5, counter, debug)

    # Convert RGB to LAB and extract the Blue channel
    counter, b = pcv.rgb2gray_lab(img, 'b', counter, debug)

    # Threshold the blue image
    counter, b_thresh = pcv.binary_threshold(b, 145, 255, 'light', counter, debug)
    counter, b_cnt = pcv.binary_threshold(b, 145, 255, 'light', counter, debug)

    # Join the thresholded saturation and blue-yellow images
    counter, bs = pcv.logical_or(s_mblur, b_cnt, counter, debug)
    counter, masked = pcv.apply_mask(img, bs, 'white', counter, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    counter, masked_a = pcv.rgb2gray_lab(masked, 'a', counter, debug)
    counter, masked_b = pcv.rgb2gray_lab(masked, 'b', counter, debug)

    # Threshold the green-magenta and blue images
    counter, maskeda_thresh = pcv.binary_threshold(masked_a, 115, 255, 'dark', counter, debug)
    counter, maskeda_thresh1 = pcv.binary_threshold(masked_a, 135, 255, 'light', counter, debug)
    counter, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', counter, debug)

    # Join the thresholded saturation and blue-yellow images (OR)
    counter, ab1 = pcv.logical_or(maskeda_thresh, maskedb_thresh, counter, debug)
    counter, ab = pcv.logical_or(maskeda_thresh1, ab1, counter, debug)
    counter, ab_cnt = pcv.logical_or(maskeda_thresh1, ab1, counter, debug)

    # Fill small objects
    counter, ab_fill = pcv.fill(ab, ab_cnt, 200, counter, debug)

    # Apply mask (for vis images, mask_color=white)
    counter, masked2 = pcv.apply_mask(masked, ab_fill, 'white', counter, debug)
    zeros = np.zeros(masked2.shape[:2], dtype="uint8")
    merged = cv2.merge([zeros, ab_fill, zeros])
    return merged, masked2
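# A minimal usage sketch for plant_cv() above, assuming the legacy device-counter
# PlantCV API used in that function; the file paths are hypothetical.
import cv2

vis = cv2.imread('vis_top_view.png')  # hypothetical path
mask_overlay, masked_plant = plant_cv(vis)
cv2.imwrite('plant_mask_overlay.png', mask_overlay)  # green overlay of the fill-cleaned mask
cv2.imwrite('plant_masked.png', masked_plant)        # background whited out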
def threshold_dots_withcenter(imgarray):
    img = imgarray
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    outrangemask = cv2.inRange(hsv, np.array([10, 0, 0]), np.array([80, 255, 255]))
    inrangemask = cv2.bitwise_not(outrangemask)
    dev, inrangemask = pcv.fill(inrangemask, inrangemask, 300, 0)
    hsv = cv2.bitwise_and(hsv, hsv, mask=inrangemask)
    inrangemask2 = cv2.inRange(hsv, np.array([0, 0, 100]), np.array([255, 255, 255]))
    hsv = cv2.bitwise_and(hsv, hsv, mask=inrangemask2)
    img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    img = cv2.bilateralFilter(img, 5, 200, 200)
    img = cv2.medianBlur(img, 5)
    middle_sectionx = int(img.shape[0] / 2)
    middle_sectiony = int(img.shape[1] / 2)
    img = img[(middle_sectionx - 100):(middle_sectionx + 100),
              (middle_sectiony - 500):(middle_sectiony + 500)]
    img = cvtcolor_bgr_rgb(img)
    return [img, (middle_sectionx - 100, middle_sectiony - 500)]
def threshold_dots4(imgarray):
    img = imgarray
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    outrangemask = cv2.inRange(hsv, np.array([20, 0, 0]), np.array([70, 255, 255]))
    inrangemask = cv2.bitwise_not(outrangemask)
    inrangemask = ndimage.filters.minimum_filter(inrangemask, (2, 2))
    dev, inrangemask = pcv.fill(inrangemask, inrangemask, 300, 0)
    hsv = cv2.bitwise_and(hsv, hsv, mask=inrangemask)
    inrangemask2 = cv2.inRange(hsv, np.array([0, 0, 135]), np.array([255, 255, 255]))
    hsv = cv2.bitwise_and(hsv, hsv, mask=inrangemask2)
    img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    img = cv2.bilateralFilter(img, 5, 200, 200)
    img = cv2.medianBlur(img, 5)
    middle_sectionx = int(img.shape[0] / 2)
    middle_sectiony = int(img.shape[1] / 2)
    img = img[(middle_sectionx - 200):(middle_sectionx + 200),
              (middle_sectiony - 500):(middle_sectiony + 500)]
    img = cvtcolor_bgr_rgb(img)
    return img
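# The threshold_dots* helpers above call cvtcolor_bgr_rgb, which is not defined in
# these snippets. A minimal sketch of what such a helper presumably does, assuming
# it only swaps OpenCV's BGR channel order to RGB for matplotlib-style display:
import cv2

def cvtcolor_bgr_rgb(bgr_img):
    # convert an OpenCV BGR array to RGB
    return cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)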
def main():
    # Get options
    args = options()

    # Set variables
    device = 0
    pcv.params.debug = args.debug
    img_file = args.image

    # Read image
    img, path, filename = pcv.readimage(filename=img_file, mode='rgb')

    # Process saturation channel from HSV colour space
    s = pcv.rgb2gray_hsv(rgb_img=img, channel='s')
    lp_s = pcv.laplace_filter(s, 1, 1)
    shrp_s = pcv.image_subtract(s, lp_s)
    s_eq = pcv.hist_equalization(shrp_s)
    s_thresh = pcv.threshold.binary(gray_img=s_eq, threshold=215, max_value=255,
                                    object_type='light')
    s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=5)

    # Process green-magenta channel from LAB colour space
    b = pcv.rgb2gray_lab(rgb_img=img, channel='a')
    b_lp = pcv.laplace_filter(b, 1, 1)
    b_shrp = pcv.image_subtract(b, b_lp)
    b_thresh = pcv.threshold.otsu(b_shrp, 255, object_type='dark')

    # Create and apply mask
    bs = pcv.logical_or(bin_img1=s_mblur, bin_img2=b_thresh)
    filled = pcv.fill_holes(bs)
    masked = pcv.apply_mask(img=img, mask=filled, mask_color='white')

    # Extract colour channels from masked image
    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')

    # Threshold the green-magenta and blue images
    maskeda_thresh = pcv.threshold.binary(gray_img=masked_a, threshold=115,
                                          max_value=255, object_type='dark')
    maskeda_thresh1 = pcv.threshold.binary(gray_img=masked_a, threshold=140,
                                           max_value=255, object_type='light')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b, threshold=128,
                                          max_value=255, object_type='light')

    # Join the thresholded saturation and blue-yellow images (OR)
    ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
    ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)

    # Produce and apply a mask
    opened_ab = pcv.opening(gray_img=ab)
    ab_fill = pcv.fill(bin_img=ab, size=200)
    closed_ab = pcv.closing(gray_img=ab_fill)
    masked2 = pcv.apply_mask(img=masked, mask=bs, mask_color='white')

    # Identify objects
    id_objects, obj_hierarchy = pcv.find_objects(img=masked2, mask=ab_fill)

    # Define region of interest (ROI)
    roi1, roi_hierarchy = pcv.roi.rectangle(img=masked2, x=250, y=100, h=200, w=200)

    # Decide what objects to keep
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img=img, roi_contour=roi1, roi_hierarchy=roi_hierarchy,
        object_contour=id_objects, obj_hierarchy=obj_hierarchy, roi_type='partial')

    # Object combine kept objects
    obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3)

    ############### Analysis ################
    outfile = False
    if args.writeimg == True:
        outfile = args.outdir + "/" + filename

    # Analyze the plant
    analysis_image = pcv.analyze_object(img=img, obj=obj, mask=mask)
    color_histogram = pcv.analyze_color(rgb_img=img, mask=kept_mask, hist_plot_type='all')
    top_x, bottom_x, center_v_x = pcv.x_axis_pseudolandmarks(img=img, obj=obj, mask=mask)
    top_y, bottom_y, center_v_y = pcv.y_axis_pseudolandmarks(img=img, obj=obj, mask=mask)

    # Print results of the analysis
    pcv.print_results(filename=args.result)
    pcv.output_mask(img, kept_mask, filename, outdir=args.outdir, mask_only=True)
def main():
    # Set variables
    args = options()
    pcv.params.debug = args.debug

    # Read and rotate image
    img, path, filename = pcv.readimage(filename=args.image)
    img = pcv.rotate(img, -90, False)

    # Create mask from LAB b channel
    l = pcv.rgb2gray_lab(rgb_img=img, channel='b')
    l_thresh = pcv.threshold.binary(gray_img=l, threshold=115, max_value=255,
                                    object_type='dark')
    l_mblur = pcv.median_blur(gray_img=l_thresh, ksize=5)

    # Apply mask to image
    masked = pcv.apply_mask(img=img, mask=l_mblur, mask_color='white')
    ab_fill = pcv.fill(bin_img=l_mblur, size=50)

    # Extract plant object from image
    id_objects, obj_hierarchy = pcv.find_objects(img=img, mask=ab_fill)
    roi1, roi_hierarchy = pcv.roi.rectangle(img=masked, x=150, y=270, h=100, w=100)
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img=img, roi_contour=roi1, roi_hierarchy=roi_hierarchy,
        object_contour=id_objects, obj_hierarchy=obj_hierarchy, roi_type='partial')
    obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3)

    ############### Analysis ################
    # Analyze shape properties
    analysis_image = pcv.analyze_object(img=img, obj=obj, mask=mask)
    boundary_image2 = pcv.analyze_bound_horizontal(img=img, obj=obj, mask=mask,
                                                   line_position=370)

    # Analyze colour properties
    color_histogram = pcv.analyze_color(rgb_img=img, mask=kept_mask, hist_plot_type='all')

    # Analyze shape independent of size
    top_x, bottom_x, center_v_x = pcv.x_axis_pseudolandmarks(img=img, obj=obj, mask=mask)
    top_y, bottom_y, center_v_y = pcv.y_axis_pseudolandmarks(img=img, obj=obj, mask=mask)

    # Print results
    pcv.print_results(filename='{}'.format(args.result))
    pcv.print_image(img=color_histogram, filename='{}_color_hist.jpg'.format(args.outdir))
    pcv.print_image(img=kept_mask, filename='{}_mask.jpg'.format(args.outdir))
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)
    pcv.params.debug = args.debug  # set debug mode

    # STEP 1: Check if this is a night image. Some images in these datasets were
    # captured at night, even if nothing is visible. To make sure that images were
    # not taken at night we check that the image isn't mostly dark (0=black,
    # 255=white). If it is a night image, throw a fatal error and stop the workflow.
    if np.average(img) < 50:
        pcv.fatal_error("Night Image")
    else:
        pass

    # STEP 2: Normalize the white color so you can later compare color between images.
    # Inputs:
    #   img = image object, RGB colorspace
    #   roi = region for white reference; if None uses the whole image,
    #         otherwise (x position, y position, box width, box height)
    # white balance image based on white toughspot
    # img1 = pcv.white_balance(img=img, roi=(400, 800, 200, 200))
    img1 = pcv.white_balance(img=img, mode='hist', roi=None)

    # STEP 3: Rotate the image
    # Inputs:
    #   img = image object, RGB color space
    #   rotation_deg = rotation angle in degrees; can be negative, positive values
    #                  will move counter-clockwise
    #   crop = if True the image will be cropped to the original image dimensions,
    #          if False the image size will be adjusted to accommodate the new dimensions
    rotate_img = pcv.rotate(img=img1, rotation_deg=-1, crop=False)

    # STEP 4: Shift image. This step is important for clustering later on.
    # For this image it also allows you to push the green raspberry pi camera
    # out of the image. This step might not be necessary for all images.
    # The resulting image is the same size as the original.
    # Inputs:
    #   img = image object
    #   number = integer, number of pixels to move image
    #   side = direction to move from: "top", "bottom", "right", "left"
    shift1 = pcv.shift_img(img=img1, number=300, side='top')
    img1 = shift1

    # STEP 5: Convert image from RGB colorspace to LAB colorspace
    # Keep only the green-magenta channel (grayscale)
    # Inputs:
    #   img = image object, RGB colorspace
    #   channel = color subchannel ('l' = lightness, 'a' = green-magenta, 'b' = blue-yellow)
    # a = pcv.rgb2gray_lab(img=img1, channel='a')
    a = pcv.rgb2gray_lab(rgb_img=img1, channel='a')

    # STEP 6: Set a binary threshold on the saturation channel image
    # Inputs:
    #   img = img object, grayscale
    #   threshold = threshold value (0-255) <- adjust this value
    #   max_value = value to apply above threshold (usually 255 = white)
    #   object_type = light or dark
    #     - If object is light then standard thresholding is done
    #     - If object is dark then inverse thresholding is done
    img_binary = pcv.threshold.binary(gray_img=a, threshold=120, max_value=255,
                                      object_type='dark')

    # STEP 7: Fill in small objects (speckles)
    # Inputs:
    #   bin_img = image object, binary; img will be returned after filling
    #   size = minimum object area size in pixels (integer) <- adjust this value
    fill_image = pcv.fill(bin_img=img_binary, size=10)

    # STEP 8: Dilate so that you don't lose leaves (just in case)
    # Inputs:
    #   img = input image
    #   ksize = kernel size
    #   i = iterations, i.e. number of consecutive filtering passes
    # dilated = pcv.dilate(img=fill_image, ksize=1, i=1)
    dilated = pcv.dilate(gray_img=fill_image, ksize=2, i=1)

    # STEP 9: Find objects (contours: black-white boundaries)
    # Inputs:
    #   img = image that the objects will be overlaid on
    #   mask = what is used for object detection
    id_objects, obj_hierarchy = pcv.find_objects(img=img1, mask=dilated)
    # id_objects, obj_hierarchy = pcv.find_objects(gray_img, mask)

    # STEP 10: Define region of interest (ROI)
    # Inputs:
    #   img = img to overlay roi
    #   x_adj = adjust center along x axis
    #   y_adj = adjust center along y axis
    #   h_adj = adjust height
    #   w_adj = adjust width
    # roi_contour, roi_hierarchy = pcv.roi.rectangle(img1, 10, 500, -10, -100)
    #                              <- adjust these four values
    roi_contour, roi_hierarchy = pcv.roi.rectangle(img=img1, x=200, y=190, h=2000, w=3000)

    # STEP 11: Keep objects that overlap with the ROI
    # Inputs:
    #   img = img to display kept objects
    #   roi_contour = contour of roi, output from any ROI function
    #   roi_hierarchy = hierarchy of roi, output from any ROI function
    #   object_contour = contours of objects, output from "Identifying Objects" function
    #   obj_hierarchy = hierarchy of objects, output from "Identifying Objects" function
    #   roi_type = 'partial' (default, for partially inside), 'cutto', or
    #              'largest' (keep only the largest contour)
    roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img=img1, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy,
        object_contour=id_objects, obj_hierarchy=obj_hierarchy, roi_type='partial')

    # STEP 12: This function takes an image with multiple contours and clusters
    # them based on user input of rows and columns
    # Inputs:
    #   img = an RGB image
    #   roi_objects = object contours in an image that need to be clustered
    #   roi_obj_hierarchy = object hierarchy
    #   nrow = number of rows to cluster (this should be the approximate number of desired
    #          rows in the entire image even if there isn't a literal row of plants)
    #   ncol = number of columns to cluster (this should be the approximate number of desired
    #          columns in the entire image even if there isn't a literal column of plants)
    #   show_grid = if True then a grid gets displayed in debug mode (default show_grid=False)
    clusters_i, contours, hierarchies = pcv.cluster_contours(
        img=img1, roi_objects=roi_objects, roi_obj_hierarchy=roi_obj_hierarchy,
        nrow=2, ncol=3)

    # STEP 13: This function takes clustered contours and splits them into multiple
    # images. It also checks that the number of input filenames matches the number of
    # clustered contours. If no filenames are given then the objects are just numbered.
    # Inputs:
    #   img = ideally a masked RGB image
    #   grouped_contour_indexes = output of cluster_contours, indexes of clusters of contours
    #   contours = contours to cluster, output of cluster_contours
    #   hierarchy = object hierarchy
    #   outdir = directory for output images
    #   file = the name of the input image to use as a base name, output of filename
    #          from read_image function
    #   filenames = input txt file with list of filenames in order from top to bottom,
    #               left to right (likely a list of genotypes)

    # Set global debug behavior to None (default), "print" (to file), or
    # "plot" (Jupyter Notebooks or X11)
    pcv.params.debug = "print"
    out = args.outdir
    names = args.names
    output_path, imgs, masks = pcv.cluster_contour_splitimg(
        rgb_img=img1, grouped_contour_indexes=clusters_i, contours=contours,
        hierarchy=hierarchies, outdir=out, file=filename, filenames=names)
# exclusive or (pcv.logical_xor) function.
# Inputs:
#   bin_img1 - Binary image data to be compared to bin_img2
#   bin_img2 - Binary image data to be compared to bin_img1
xor_img = pcv.logical_xor(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
pcv.print_image(img=xor_img, filename="upload/output_imgs/root_img.jpg")


# In[16]:


# Fill small objects (reduce image noise)
# Inputs:
#   bin_img - Binary image data
#   size - Minimum object area size in pixels (must be an integer); smaller objects
#          will be filled
ab_fill = pcv.fill(bin_img=ab, size=200)
pcv.print_image(img=ab_fill, filename="upload/output_imgs/NoiseRe_img.jpg")


# In[17]:


# Closing filters out dark noise from an image.
# Inputs:
#   gray_img - Grayscale or binary image data
#   kernel - Optional neighborhood, expressed as an array of 1's and 0's. If None
#            (default), uses a cross-shaped structuring element.
closed_ab = pcv.closing(gray_img=ab_fill)
pcv.print_image(img=closed_ab, filename="upload/output_imgs/DarkNoise_img.jpg")


# In[18]:
                             threshold=160, max_value=255, object_type='light')


# In[17]:


# Same as the line above.
b_cnt = pcv.threshold.binary(gray_img=b, threshold=160, max_value=255,
                             object_type='light')


# In[18]:


# Optional step in the VIS workflow that I tried. Fills small objects; not very useful here.
b_fill = pcv.fill(b_thresh, 10)


# In[22]:


# Join the s_mblur image with the b_cnt image.
bs = pcv.logical_or(bin_img1=s_mblur, bin_img2=b_cnt)


# In[24]:


# The image above is now used as a "mask" over the original image to wipe the background.
masked = pcv.apply_mask(img=img, mask=bs, mask_color='white')


# In[25]:


# Extract the Green-Magenta channel.
masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
def image_avg(fundf):
    # don't understand why the imports suddenly need to be inside the function
    # import cv2 as cv2
    # import numpy as np
    # import pandas as pd
    # import os
    # from matplotlib import pyplot as plt
    # from skimage import filters
    # from skimage import morphology
    # from skimage import segmentation

    # Predefine some variables
    global c, h, roi_c, roi_h, ilegend, mask_Fm, fn_Fm

    # Get the filenames for the minimum and maximum fluorescence frames
    fn_min = fundf.query('frame == "Fo" or frame == "Fp"').filename.values[0]
    fn_max = fundf.query('frame == "Fm" or frame == "Fmp"').filename.values[0]

    # Get the parameter name that links these 2 frames
    param_name = fundf['parameter'].iloc[0]

    # Create a new output filename that combines the existing filename with the parameter
    outfn = os.path.splitext(os.path.basename(fn_max))[0]
    outfn_split = outfn.split('-')
    # outfn_split[2] = datetime.strptime(fundf.jobdate.values[0], '%Y-%m-%d').strftime('%Y%m%d')
    outfn_split[2] = fundf.jobdate.dt.strftime('%Y%m%d').values[0]
    basefn = "-".join(outfn_split[0:-1])
    outfn_split[-1] = param_name
    outfn = "-".join(outfn_split)
    print(outfn)

    # Make some directories based on sample id to keep output organized
    plantbarcode = outfn_split[0]
    fmaxdir = os.path.join(fluordir, plantbarcode)
    os.makedirs(fmaxdir, exist_ok=True)

    # If debug mode is 'print', create a specific debug dir for each pim file
    if pcv.params.debug == 'print':
        debug_outdir = os.path.join(debugdir, outfn)
        os.makedirs(debug_outdir, exist_ok=True)
        pcv.params.debug_outdir = debug_outdir

    # Read images and create a mask from max fluorescence.
    # Read image as is; there are only gray values in PSII images.
    imgmin, _, _ = pcv.readimage(fn_min)
    img, _, _ = pcv.readimage(fn_max)
    fdark = np.zeros_like(img)
    out_flt = fdark.astype('float32')  # <- needs to be float32 for imwrite

    if param_name == 'FvFm':
        # save max fluorescence filename
        fn_Fm = fn_max

        # create mask
        # # create black mask over lower half of image to threshold upper plant only
        # img_half, _, _, _ = pcv.rectangle_mask(img, p1=(0, 321), p2=(480, 640))
        # # mask1 = pcv.threshold.otsu(img_half, 255)
        # algaethresh = filters.threshold_otsu(image=img_half)
        # mask0 = pcv.threshold.binary(img_half, algaethresh, 255, 'light')
        # # create black mask over upper half of image to threshold lower plant only
        # img_half, _, _, _ = pcv.rectangle_mask(img, p1=(0, 0), p2=(480, 319), color='black')
        # # mask0 = pcv.threshold.otsu(img_half, 255)
        # algaethresh = filters.threshold_otsu(image=img_half)
        # mask1 = pcv.threshold.binary(img_half, algaethresh, 255, 'light')
        # mask = pcv.logical_xor(mask0, mask1)
        # # mask = pcv.dilate(mask, 2, 1)
        # mask = pcv.fill(mask, 350)
        # mask = pcv.erode(mask, 2, 2)
        # mask = pcv.erode(mask, 2, 1)
        # mask = pcv.fill(mask, 100)

        # otsuT = filters.threshold_otsu(img)
        # # sigma=(k-1)/6. This is because the length for the 99th percentile of a gaussian pdf is 6 sigma.
        # k = int(2 * np.ceil(3 * otsuT) + 1)
        # gb = pcv.gaussian_blur(img, ksize=(k, k), sigma_x=otsuT)
        # mask = img >= gb + 10
        # pcv.plot_image(mask)

        # local_otsu = filters.rank.otsu(img, pcv.get_kernel((9, 9), 'rectangle'))  # morphology.disk(2)
        # thresh_image = img >= local_otsu

        # ------> watershed segmentation of the fluorescence image
        elevation_map = filters.sobel(img)
        # pcv.plot_image(elevation_map)
        thresh = filters.threshold_otsu(image=img)
        # thresh = 50
        markers = np.zeros_like(img, dtype='uint8')
        markers[img > thresh + 8] = 2
        markers[img <= thresh + 8] = 1
        # pcv.plot_image(markers, cmap=plt.cm.nipy_spectral)
        mask = segmentation.watershed(elevation_map, markers)
        mask = mask.astype(np.uint8)
        # pcv.plot_image(mask)
        mask[mask == 1] = 0
        mask[mask == 2] = 1
        # pcv.plot_image(mask, cmap=plt.cm.nipy_spectral)
        # mask = pcv.erode(mask, 2, 1)
        mask = pcv.fill(mask, 100)
        # pcv.plot_image(mask, cmap=plt.cm.nipy_spectral)
        # <-----------

        roi_c, roi_h = pcv.roi.multi(img, coord=(250, 200), radius=70,
                                     spacing=(0, 220), ncols=1, nrows=2)

        if len(np.unique(mask)) == 1:
            c = []
            YII = mask
            NPQ = mask
            newmask = mask
        else:
            # find objects and set up roi
            c, h = pcv.find_objects(img, mask)
            # set up individual roi plant masks
            newmask = np.zeros_like(mask)
            # compute fv/fm and save to file
            YII, hist_fvfm = pcv.photosynthesis.analyze_fvfm(
                fdark=fdark, fmin=imgmin, fmax=img, mask=mask, bins=128)
            # YII = np.divide(Fv,
            #                 img,
            #                 out=out_flt.copy(),
            #                 where=np.logical_and(mask > 0, img > 0))

            # NPQ is 0
            NPQ = np.zeros_like(YII)

            # cv2.imwrite(os.path.join(fmaxdir, outfn + '-fvfm.tif'), YII)
            # print Fm - will need this later
            # cv2.imwrite(os.path.join(fmaxdir, outfn + '-fmax.tif'), img)
            # NPQ will always be an array of 0s

    else:  # compute YII and NPQ if the parameter is other than FvFm
        newmask = mask_Fm
        # use cv2 to read the image because pcv.readimage would save it as
        # input_image.png, overwriting img
        # newmask = cv2.imread(os.path.join(maskdir, basefn + '-FvFm-mask.png'), -1)
        if len(np.unique(newmask)) == 1:
            YII = np.zeros_like(newmask)
            NPQ = np.zeros_like(newmask)
        else:
            # compute YII
            YII, hist_yii = pcv.photosynthesis.analyze_fvfm(
                fdark, fmin=imgmin, fmax=img, mask=newmask, bins=128)
            # Make sure to initialize with out=. Using where= alone leaves random
            # values at False pixels and you will get a strange result. newmask
            # comes from Fm instead of Fm', so they can be different:
            #   newmask < 0, img > 0  = FALSE: not part of plant but fluorescence detected.
            #   newmask > 0, img <= 0 = FALSE: part of plant in Fm but no fluorescence
            #     detected <- this is likely the culprit because pcv.apply_mask
            #     doesn't always solve the issue.
            # YII = np.divide(Fvp,
            #                 img,
            #                 out=out_flt.copy(),
            #                 where=np.logical_and(newmask > 0, img > 0))

            # compute NPQ
            # Fm = cv2.imread(os.path.join(fmaxdir, basefn + '-FvFm-fmax.tif'), -1)
            Fm = cv2.imread(fn_Fm, -1)
            NPQ = np.divide(Fm, img, out=out_flt.copy(),
                            where=np.logical_and(newmask > 0, img > 0))
            NPQ = np.subtract(NPQ, 1, out=out_flt.copy(),
                              where=np.logical_and(NPQ >= 1, newmask > 0))

            # cv2.imwrite(os.path.join(fmaxdir, outfn + '-yii.tif'), YII)
            # cv2.imwrite(os.path.join(fmaxdir, outfn + '-npq.tif'), NPQ)

    # end if-else Fv/Fm

    # Make as many copies of the incoming dataframe as there are ROIs so all
    # results can be saved
    outdf = fundf.copy()
    for i in range(0, len(roi_c) - 1):
        outdf = outdf.append(fundf)
    outdf.frameid = outdf.frameid.astype('uint8')

    # Initialize lists to store variables for each ROI and iterate through each plant
    frame_avg = []
    yii_avg = []
    yii_std = []
    npq_avg = []
    npq_std = []
    plantarea = []
    ithroi = []
    inbounds = []

    if len(c) == 0:
        for i, rc in enumerate(roi_c):
            # each variable needs to be stored 2 x #roi
            frame_avg.append(np.nan)
            frame_avg.append(np.nan)
            yii_avg.append(np.nan)
            yii_avg.append(np.nan)
            yii_std.append(np.nan)
            yii_std.append(np.nan)
            npq_avg.append(np.nan)
            npq_avg.append(np.nan)
            npq_std.append(np.nan)
            npq_std.append(np.nan)
            inbounds.append(False)
            inbounds.append(False)
            plantarea.append(0)
            plantarea.append(0)
            # Store iteration number even if there are no objects in the image
            ithroi.append(int(i))
            ithroi.append(int(i))  # append twice so each image has a value
    else:
        i = 1
        rc = roi_c[i]
        for i, rc in enumerate(roi_c):
            # Store iteration number
            ithroi.append(int(i))
            ithroi.append(int(i))  # append twice so each image has a value
            # extract ith hierarchy
            rh = roi_h[i]

            # Filter objects based on being in the defined ROI
            roi_obj, hierarchy_obj, submask, obj_area = pcv.roi_objects(
                img, roi_contour=rc, roi_hierarchy=rh, object_contour=c,
                obj_hierarchy=h, roi_type='partial')

            if obj_area == 0:
                print('!!! No plant detected in ROI ', str(i))
                frame_avg.append(np.nan)
                frame_avg.append(np.nan)
                yii_avg.append(np.nan)
                yii_avg.append(np.nan)
                yii_std.append(np.nan)
                yii_std.append(np.nan)
                npq_avg.append(np.nan)
                npq_avg.append(np.nan)
                npq_std.append(np.nan)
                npq_std.append(np.nan)
                inbounds.append(False)
                inbounds.append(False)
                plantarea.append(0)
                plantarea.append(0)
            else:
                # Combine multiple plant objects within an roi together
                plant_contour, plant_mask = pcv.object_composition(
                    img=img, contours=roi_obj, hierarchy=hierarchy_obj)

                # combine plant masks after roi filter
                if param_name == 'FvFm':
                    newmask = pcv.image_add(newmask, plant_mask)

                # Calc mean and std dev of fluorescence, YII, and NPQ and save to lists
                frame_avg.append(cppc.utils.mean(imgmin, plant_mask))
                frame_avg.append(cppc.utils.mean(img, plant_mask))
                # need double because there are two images per loop
                yii_avg.append(cppc.utils.mean(YII, plant_mask))
                yii_avg.append(cppc.utils.mean(YII, plant_mask))
                yii_std.append(cppc.utils.std(YII, plant_mask))
                yii_std.append(cppc.utils.std(YII, plant_mask))
                npq_avg.append(cppc.utils.mean(NPQ, plant_mask))
                npq_avg.append(cppc.utils.mean(NPQ, plant_mask))
                npq_std.append(cppc.utils.std(NPQ, plant_mask))
                npq_std.append(cppc.utils.std(NPQ, plant_mask))
                plantarea.append(obj_area * cppc.pixelresolution**2)
                plantarea.append(obj_area * cppc.pixelresolution**2)

                # Check if the plant is completely within the frame of the image
                inbounds.append(pcv.within_frame(plant_mask))
                inbounds.append(pcv.within_frame(plant_mask))

                # Output a pseudocolor of NPQ and YII for each induction period for each image
                imgdir = os.path.join(outdir, 'pseudocolor_images')
                outfn_roi = outfn + '-roi' + str(i)
                os.makedirs(imgdir, exist_ok=True)

                npq_img = pcv.visualize.pseudocolor(NPQ, obj=None, mask=plant_mask,
                                                    cmap='inferno', axes=False,
                                                    min_value=0, max_value=2.5,
                                                    background='black', obj_padding=0)
                npq_img = cppc.viz.add_scalebar(npq_img,
                                                pixelresolution=cppc.pixelresolution,
                                                barwidth=10, barlabel='1 cm',
                                                barlocation='lower left')
                # If you change the output size and resolution you will need to
                # adjust the timelapse video script
                npq_img.set_size_inches(6, 6, forward=False)
                npq_img.savefig(os.path.join(imgdir, outfn_roi + '-NPQ.png'),
                                bbox_inches='tight',
                                dpi=100)  # 100 is the default for matplotlib/plantcv
                if ilegend == 1:  # only need to print the legend once
                    npq_img.savefig(os.path.join(imgdir, 'npq_legend.pdf'),
                                    bbox_inches='tight')
                npq_img.clf()

                yii_img = pcv.visualize.pseudocolor(YII, obj=None, mask=plant_mask,
                                                    cmap='gist_rainbow',
                                                    # custom_colormaps.get_cmap('imagingwin')
                                                    axes=False, min_value=0, max_value=1,
                                                    background='black', obj_padding=0)
                yii_img = cppc.viz.add_scalebar(yii_img,
                                                pixelresolution=cppc.pixelresolution,
                                                barwidth=10, barlabel='1 cm',
                                                barlocation='lower left')
                yii_img.set_size_inches(6, 6, forward=False)
                yii_img.savefig(os.path.join(imgdir, outfn_roi + '-YII.png'),
                                bbox_inches='tight', dpi=100)
                if ilegend == 1:
                    # print the legend once and increment ilegend to stop in future iterations
                    yii_img.savefig(os.path.join(imgdir, 'yii_legend.pdf'),
                                    bbox_inches='tight')
                    ilegend = ilegend + 1
                yii_img.clf()
            # end try-except-else
        # end roi loop
    # end if there are objects from roi filter

    # save mask of all plants to file after roi filter
    if param_name == 'FvFm':
        mask_Fm = newmask.copy()
        # pcv.print_image(newmask, os.path.join(maskdir, outfn + '-mask.png'))

    # Check YII values for uniqueness between all ROI. Non-unique ROI suggests the
    # plants grew into each other and can no longer be reliably separated in image
    # processing. A single value isn't always robust.
    # I think that is because there are small independent objects that fall in one
    # roi but not the other, which changes the object within the roi slightly.
    # Also note: I originally designed this for trays of 2 pots. It will not detect
    # if, e.g., 2 out of 9 plants grow into each other.
    rounded_avg = [round(n, 3) for n in yii_avg]
    rounded_std = [round(n, 3) for n in yii_std]
    if len(roi_c) > 1:
        isunique = not (rounded_avg.count(rounded_avg[0]) == len(yii_avg) and
                        rounded_std.count(rounded_std[0]) == len(yii_std))
    else:
        isunique = True

    # save all values to the outgoing dataframe
    outdf['roi'] = ithroi
    outdf['frame_avg'] = frame_avg
    outdf['yii_avg'] = yii_avg
    outdf['npq_avg'] = npq_avg
    outdf['yii_std'] = yii_std
    outdf['npq_std'] = npq_std
    outdf['obj_in_frame'] = inbounds
    outdf['unique_roi'] = isunique

    return outdf
s_thresh_1 = pcv.threshold.binary(gray_img=s, threshold=10, max_value=255,
                                  object_type='light')
s_thresh_2 = pcv.threshold.binary(gray_img=s, threshold=245, max_value=255,
                                  object_type='dark')
s_thresh = pcv.logical_and(bin_img1=s_thresh_1, bin_img2=s_thresh_2)

# Median Blur
s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=5)

# Convert RGB to LAB and extract the Blue channel
b = pcv.rgb2gray_lab(rgb_img=img, channel='b')

# Threshold the blue image
b_cnt = pcv.threshold.binary(gray_img=b, threshold=128, max_value=255,
                             object_type='light')

# Fill small objects
b_fill = pcv.fill(b_cnt, 10)

# Join the thresholded saturation and blue-yellow images
bs = pcv.logical_or(bin_img1=s_mblur, bin_img2=b_fill)

# Apply Mask (for VIS images, mask_color=white)
masked = pcv.apply_mask(rgb_img=img, mask=bs, mask_color='white')

# Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
# Threshold the green-magenta and blue images
masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
maskeda_thresh = pcv.threshold.binary(gray_img=masked_a, threshold=127,
                                      max_value=255, object_type='dark')
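# The two binary thresholds above, combined with pcv.logical_and, keep only pixels
# whose saturation falls between the two cut-offs (roughly 10-245). A minimal NumPy
# sketch of the same band-selection idea, assuming `gray` is a uint8 array; the
# helper name is illustrative only.
import numpy as np

def band_threshold(gray, low=10, high=245):
    # white (255) where the pixel lies between the two cut-offs, black (0) elsewhere
    band = np.logical_and(gray >= low, gray <= high)
    return band.astype(np.uint8) * 255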
def main():
    # Initialize options
    args = options()
    # Set PlantCV debug mode to input debug method
    pcv.params.debug = args.debug

    # Use PlantCV to read in the input image. The function outputs an image as a
    # NumPy array, the path to the file, and the image filename
    img, path, filename = pcv.readimage(filename=args.image)

    # ## Segmentation

    # ### Saturation channel
    # Convert the RGB image to HSV colorspace and extract the saturation channel
    s = pcv.rgb2gray_hsv(rgb_img=img, channel='s')

    # Use a binary threshold to set an inflection value where all pixels in the
    # grayscale saturation image below the threshold get set to zero (pure black)
    # and all pixels at or above the threshold get set to 255 (pure white)
    s_thresh = pcv.threshold.binary(gray_img=s, threshold=80, max_value=255,
                                    object_type='light')

    # ### Blue-yellow channel
    # Convert the RGB image to LAB colorspace and extract the blue-yellow channel
    b = pcv.rgb2gray_lab(rgb_img=img, channel='b')

    # Use a binary threshold to set an inflection value where all pixels in the
    # grayscale blue-yellow image below the threshold get set to zero (pure black)
    # and all pixels at or above the threshold get set to 255 (pure white)
    b_thresh = pcv.threshold.binary(gray_img=b, threshold=134, max_value=255,
                                    object_type='light')

    # ### Green-magenta channel
    # Convert the RGB image to LAB colorspace and extract the green-magenta channel
    a = pcv.rgb2gray_lab(rgb_img=img, channel='a')

    # In the green-magenta image the plant pixels are darker than the background.
    # Setting object_type="dark" will invert the image first and then use a binary
    # threshold to set an inflection value where all pixels in the grayscale
    # green-magenta image below the threshold get set to zero (pure black) and all
    # pixels at or above the threshold get set to 255 (pure white)
    a_thresh = pcv.threshold.binary(gray_img=a, threshold=122, max_value=255,
                                    object_type='dark')

    # Combine the binary images for the saturation and blue-yellow channels. The
    # "or" operator returns a binary image that is white when a pixel was white in
    # either or both input images
    bs = pcv.logical_or(bin_img1=s_thresh, bin_img2=b_thresh)

    # Combine the binary images for the combined saturation and blue-yellow channels
    # and the green-magenta channel. The "or" operator returns a binary image that
    # is white when a pixel was white in either or both input images
    bsa = pcv.logical_or(bin_img1=bs, bin_img2=a_thresh)

    # The combined binary image labels plant pixels well but the background still
    # has pixels labeled as foreground. Small white noise (salt) in the background
    # can be removed by filtering white objects in the image by size and setting a
    # size threshold where smaller objects can be removed
    bsa_fill1 = pcv.fill(bin_img=bsa, size=15)  # Fill small noise

    # Before more stringent size filtering is done we want to connect plant parts
    # that may still be disconnected from the main plant. Use a dilation to expand
    # the boundary of white regions. Ksize is the size of a box scanned across the
    # image and i is the number of times a scan is done
    bsa_fill2 = pcv.dilate(gray_img=bsa_fill1, ksize=3, i=3)

    # Remove small objects by size again but use a higher threshold
    bsa_fill3 = pcv.fill(bin_img=bsa_fill2, size=250)

    # Use the binary image to identify objects or connected components.
    id_objects, obj_hierarchy = pcv.find_objects(img=img, mask=bsa_fill3)

    # Because the background still contains pixels labeled as foreground, the
    # object list contains background.
    # Because these images were collected in an automated system the plant is
    # always centered in the image at the same position each time. Define a region
    # of interest (ROI) to set the area where we expect to find plant pixels.
    # PlantCV can make simple ROI shapes like rectangles, circles, etc. but here we
    # use a custom ROI to fit a polygon around the plant area
    roi_custom, roi_hier_custom = pcv.roi.custom(
        img=img, vertices=[[1085, 1560], [1395, 1560], [1395, 1685], [1890, 1744],
                           [1890, 25], [600, 25], [615, 1744], [1085, 1685]])

    # Use the ROI to filter out objects found outside the ROI. When
    # `roi_type = "cutto"` objects outside the ROI are cropped out. The default
    # `roi_type` is "partial" which allows objects to overlap the ROI and be retained
    roi_objects, hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img=img, roi_contour=roi_custom, roi_hierarchy=roi_hier_custom,
        object_contour=id_objects, obj_hierarchy=obj_hierarchy, roi_type='cutto')

    # Filter remaining objects by size again to remove any remaining background objects
    filled_mask1 = pcv.fill(bin_img=kept_mask, size=350)

    # Use a closing operation to first dilate (expand) and then erode (shrink) the
    # plant to fill in any additional gaps in leaves or stems
    filled_mask2 = pcv.closing(gray_img=filled_mask1)

    # Remove holes or dark spot noise (pepper) in the plant binary image
    filled_mask3 = pcv.fill_holes(filled_mask2)

    # With the clean binary image identify the contour of the plant
    id_objects, obj_hierarchy = pcv.find_objects(img=img, mask=filled_mask3)

    # Because a plant or object of interest may be composed of multiple contours, it
    # is required to combine all remaining contours into a single contour before
    # measurements can be done
    obj, mask = pcv.object_composition(img=img, contours=id_objects,
                                       hierarchy=obj_hierarchy)

    # ## Measurements
    # PlantCV has several built-in measurement or analysis methods. Here, basic
    # measurements of size and shape are done. Additional typical modules would
    # include plant height (`pcv.analyze_bound_horizontal`) and color
    # (`pcv.analyze_color`)
    shape_img = pcv.analyze_object(img=img, obj=obj, mask=mask)

    # Save the shape image if requested
    if args.writeimg:
        outfile = os.path.join(args.outdir, filename[:-4] + "_shapes.png")
        pcv.print_image(img=shape_img, filename=outfile)

    # ## Morphology workflow

    # Update a few PlantCV parameters for plotting purposes
    pcv.params.text_size = 1.5
    pcv.params.text_thickness = 5
    pcv.params.line_thickness = 15

    # Convert the plant mask into a "skeletonized" image where each path along the
    # stem and leaves is a single pixel wide
    skel = pcv.morphology.skeletonize(mask=mask)

    # Sometimes wide parts of leaves or stems are skeletonized in the direction
    # perpendicular to the main path. These "barbs" or "spurs" can be removed by
    # pruning the skeleton to remove small paths. Pruning will also separate the
    # individual path segments (leaves and stem parts)
    pruned, segmented_img, segment_objects = pcv.morphology.prune(skel_img=skel,
                                                                  size=30, mask=mask)
    pruned, segmented_img, segment_objects = pcv.morphology.prune(skel_img=pruned,
                                                                  size=3, mask=mask)

    # Leaf and stem segments above are separated but only into individual paths.
    # We can sort the segments into stem and leaf paths by identifying primary
    # segments (stems; those that end in a branch point) and secondary segments
    # (leaves; those that begin at a branch point and end at a tip point)
    leaf_objects, other_objects = pcv.morphology.segment_sort(
        skel_img=pruned, objects=segment_objects, mask=mask)

    # Label the segment unique IDs
    segmented_img, labeled_id_img = pcv.morphology.segment_id(
        skel_img=pruned, objects=leaf_objects, mask=mask)

    # Measure leaf insertion angles. Measures the angle between a line fit through
    # the stem paths and a line fit through the first `size` points of each leaf path
    labeled_angle_img = pcv.morphology.segment_insertion_angle(
        skel_img=pruned, segmented_img=segmented_img, leaf_objects=leaf_objects,
        stem_objects=other_objects, size=22)

    # Save leaf angle image if requested
    if args.writeimg:
        outfile = os.path.join(args.outdir, filename[:-4] + "_leaf_insertion_angles.png")
        pcv.print_image(img=labeled_angle_img, filename=outfile)

    # ## Other potential morphological measurements
    # There are many other functions that extract data from within the morphology
    # sub-package of PlantCV. For our purposes, we are most interested in the
    # relative angle between each leaf and the stem, which we measure with
    # `plantcv.morphology.segment_insertion_angle`. However, the following cells
    # show some of the other traits that we are able to measure from images that
    # can be successfully sorted into primary and secondary segments.

    # Segment the plant binary mask using the leaf and stem segments. Allows for
    # the measurement of individual leaf areas
    # filled_img = pcv.morphology.fill_segments(mask=mask, objects=leaf_objects)

    # Measure the path length of each leaf (geodesic distance)
    # labeled_img2 = pcv.morphology.segment_path_length(segmented_img=segmented_img, objects=leaf_objects)

    # Measure the straight-line, branch point to tip distance (Euclidean) for each leaf
    # labeled_img3 = pcv.morphology.segment_euclidean_length(segmented_img=segmented_img, objects=leaf_objects)

    # Measure the curvature of each leaf (values closer to 1 indicate that a segment
    # is a straight line while larger values indicate the segment has more curvature)
    # labeled_img4 = pcv.morphology.segment_curvature(segmented_img=segmented_img, objects=leaf_objects)

    # Measure absolute leaf angles (angle of a linear regression line fit to each
    # leaf object). Note: negative values signify leaves to the left of the stem,
    # positive values signify leaves to the right of the stem
    # labeled_img5 = pcv.morphology.segment_angle(segmented_img=segmented_img, objects=leaf_objects)

    # Measure leaf curvature in degrees
    # labeled_img6 = pcv.morphology.segment_tangent_angle(segmented_img=segmented_img, objects=leaf_objects, size=35)

    # Measure stem characteristics like stem angle and length
    # stem_img = pcv.morphology.analyze_stem(rgb_img=img, stem_objects=other_objects)

    # Remove unneeded observations (hack)
    _ = pcv.outputs.observations.pop("tips")
    _ = pcv.outputs.observations.pop("branch_pts")
    angles = pcv.outputs.observations["segment_insertion_angle"]["value"]
    remove_indices = []
    for i, value in enumerate(angles):
        if value == "NA":
            remove_indices.append(i)
    remove_indices.sort(reverse=True)
    for i in remove_indices:
        _ = pcv.outputs.observations["segment_insertion_angle"]["value"].pop(i)

    # ## Save the results out to file for downstream analysis
    pcv.print_results(filename=args.result)
def main():
    # Get options
    args = options()

    # Set variables
    pcv.params.debug = args.debug  # Replace the hard-coded debug with the debug flag
    img_file = args.image  # Replace the hard-coded input image with the image flag

    ############### Image read-in ################
    # Read target image
    img, path, filename = pcv.readimage(filename=img_file, mode="rgb")

    ############### Find scale and crop ################
    # find the colour card in the image to be analysed
    df, start, space = pcv.transform.find_color_card(rgb_img=img)
    if int(start[0]) < 2000:
        img = imutils.rotate_bound(img, -90)
        rotated = 1
        df, start, space = pcv.transform.find_color_card(rgb_img=img)
    else:
        rotated = 0
    # if img.shape[0] > 6000:
    #     rotated = 1
    # else:
    #     rotated = 0
    img_mask = pcv.transform.create_color_card_mask(rgb_img=img, radius=10,
                                                    start_coord=start, spacing=space,
                                                    ncols=4, nrows=6)

    # write the spacing of the colour card to file as a size marker
    with open(r'size_marker.csv', 'a') as f:
        writer = csv.writer(f)
        writer.writerow([filename, space[0]])

    # define a bounding rectangle around the colour card
    x_cc, y_cc, w_cc, h_cc = cv2.boundingRect(img_mask)
    x_cc = int(round(x_cc - 0.3 * w_cc))
    y_cc = int(round(y_cc - 0.3 * h_cc))
    h_cc = int(round(h_cc * 1.6))
    w_cc = int(round(w_cc * 1.6))

    # crop out the colour card
    start_point = (x_cc, y_cc)
    end_point = (x_cc + w_cc, y_cc + h_cc)
    colour = (0, 0, 0)
    thickness = -1
    crop_img = cv2.rectangle(img, start_point, end_point, colour, thickness)

    ############### Fine segmentation ################
    # Threshold the A and B channels of the LAB colourspace and the Hue channel of
    # the HSV colourspace
    l_thresh, _ = pcv.threshold.custom_range(img=crop_img, lower_thresh=[70, 0, 0],
                                             upper_thresh=[255, 255, 255], channel='LAB')
    a_thresh, _ = pcv.threshold.custom_range(img=crop_img, lower_thresh=[0, 0, 0],
                                             upper_thresh=[255, 145, 255], channel='LAB')
    b_thresh, _ = pcv.threshold.custom_range(img=crop_img, lower_thresh=[0, 0, 123],
                                             upper_thresh=[255, 255, 255], channel='LAB')
    h_thresh_low, _ = pcv.threshold.custom_range(img=crop_img, lower_thresh=[0, 0, 0],
                                                 upper_thresh=[130, 255, 255], channel='HSV')
    h_thresh_high, _ = pcv.threshold.custom_range(img=crop_img, lower_thresh=[150, 0, 0],
                                                  upper_thresh=[255, 255, 255], channel='HSV')
    h_thresh = pcv.logical_or(h_thresh_low, h_thresh_high)

    # Join the thresholded images to keep only consensus pixels
    ab = pcv.logical_and(b_thresh, a_thresh)
    lab = pcv.logical_and(l_thresh, ab)
    labh = pcv.logical_and(lab, h_thresh)

    # Fill small objects
    labh_clean = pcv.fill(labh, 200)

    # Dilate to close broken borders
    # labh_dilated = pcv.dilate(labh_clean, 4, 1)
    labh_dilated = labh_clean

    # Apply mask (for VIS images, mask_color=white)
    masked = pcv.apply_mask(crop_img, labh_dilated, "white")

    # Identify objects
    contours, hierarchy = pcv.find_objects(crop_img, labh_dilated)

    # Define ROI
    if rotated == 1:
        roi_height = 3000
        roi_lwr_bound = y_cc + (h_cc * 0.5) - roi_height
        roi_contour, roi_hierarchy = pcv.roi.rectangle(x=1000, y=roi_lwr_bound,
                                                       h=roi_height, w=2000, img=crop_img)
    else:
        roi_height = 1500
        roi_lwr_bound = y_cc + (h_cc * 0.5) - roi_height
        roi_contour, roi_hierarchy = pcv.roi.rectangle(x=2000, y=roi_lwr_bound,
                                                       h=roi_height, w=2000, img=crop_img)

    # Decide which objects to keep
    filtered_contours, filtered_hierarchy, mask, area = pcv.roi_objects(
        img=crop_img, roi_type='partial', roi_contour=roi_contour,
        roi_hierarchy=roi_hierarchy, object_contour=contours, obj_hierarchy=hierarchy)

    # Combine kept objects
    obj, mask = pcv.object_composition(crop_img, filtered_contours, filtered_hierarchy)

    ############### Analysis ################
    outfile = False
    if args.writeimg == True:
        outfile_black = args.outdir + "/" + filename + "_black"
        outfile_white = args.outdir + "/" + filename + "_white"
        outfile_analysed = args.outdir + "/" + filename + "_analysed"

    # analyse shape
    shape_img = pcv.analyze_object(crop_img, obj, mask)
    pcv.print_image(shape_img, outfile_analysed)

    # analyse colour
    colour_img = pcv.analyze_color(crop_img, mask, 'hsv')

    # keep the segmented plant for visualisation
    picture_mask = pcv.apply_mask(crop_img, mask, "black")
    pcv.print_image(picture_mask, outfile_black)
    picture_mask = pcv.apply_mask(crop_img, mask, "white")
    pcv.print_image(picture_mask, outfile_white)

    # print out results
    pcv.outputs.save_results(filename=args.result, outformat="json")
def segmentation(imgW, imgNIR, shape):
    # VIS example from PlantCV with few modifications
    # Higher value = more strict selection
    s_threshold = 165
    b_threshold = 200

    # Read image
    img = imread(imgW)
    # img = cvtColor(img, COLOR_BGR2RGB)
    imgNIR = imread(imgNIR)
    # imgNIR = cvtColor(imgNIR, COLOR_BGR2RGB)
    # img, path, img_filename = pcv.readimage(filename=imgW, mode="native")
    # imgNIR, pathNIR, imgNIR_filename = pcv.readimage(filename=imgNIR, mode="native")

    # Convert RGB to HSV and extract the saturation channel
    s = pcv.rgb2gray_hsv(rgb_img=img, channel='s')

    # Threshold the saturation image
    s_thresh = pcv.threshold.binary(gray_img=s, threshold=s_threshold, max_value=255,
                                    object_type='light')

    # Median Blur
    s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=5)
    s_cnt = pcv.median_blur(gray_img=s_thresh, ksize=5)

    # Convert RGB to LAB and extract the Blue channel
    b = pcv.rgb2gray_lab(rgb_img=img, channel='b')

    # Threshold the blue image (ORIGINAL threshold: 160)
    b_thresh = pcv.threshold.binary(gray_img=b, threshold=b_threshold, max_value=255,
                                    object_type='light')
    b_cnt = pcv.threshold.binary(gray_img=b, threshold=b_threshold, max_value=255,
                                 object_type='light')

    # Join the thresholded saturation and blue-yellow images
    bs = pcv.logical_or(bin_img1=s_mblur, bin_img2=b_cnt)

    # Apply Mask (for VIS images, mask_color=white)
    masked = pcv.apply_mask(img=img, mask=bs, mask_color='white')

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')

    # Threshold the green-magenta and blue images (115 / 135 / 128)
    maskeda_thresh = pcv.threshold.binary(gray_img=masked_a, threshold=115,
                                          max_value=255, object_type='dark')
    maskeda_thresh1 = pcv.threshold.binary(gray_img=masked_a, threshold=135,
                                           max_value=255, object_type='light')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b, threshold=128,
                                          max_value=255, object_type='light')

    # Join the thresholded saturation and blue-yellow images (OR)
    ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
    ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)

    # Fill small objects
    ab_fill = pcv.fill(bin_img=ab, size=200)

    # Apply mask (for VIS images, mask_color=white)
    masked2 = pcv.apply_mask(img=masked, mask=ab_fill, mask_color='white')

    # Identify objects
    id_objects, obj_hierarchy = pcv.find_objects(img=masked2, mask=ab_fill)

    # Define ROI
    height = shape[0]
    width = shape[1]
    roi1, roi_hierarchy = pcv.roi.rectangle(img=masked2, x=0, y=0, h=height, w=width)

    # Decide which objects to keep
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img=img, roi_contour=roi1, roi_hierarchy=roi_hierarchy,
        object_contour=id_objects, obj_hierarchy=obj_hierarchy, roi_type='partial')

    # Object combine kept objects
    obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3)

    # Filling holes in the mask; works great for living plants, not so well for dead plants
    filled_mask = pcv.fill_holes(mask)

    final = pcv.apply_mask(img=imgNIR, mask=mask, mask_color='white')
    pcv.print_image(final, "./segment/segment-temp.png")
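# A minimal usage sketch for segmentation() above, assuming the VIS and NIR frames
# have matching dimensions and that `imread` is the reader imported by the original
# module; both file paths are hypothetical.
import cv2

vis_path = 'plant_vis.png'   # hypothetical path
nir_path = 'plant_nir.png'   # hypothetical path
vis = cv2.imread(vis_path)
# shape[0]/shape[1] are used as the ROI height/width inside the function
segmentation(vis_path, nir_path, vis.shape)  # writes ./segment/segment-temp.png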
# img.show() shows image
# crop() inputs are left, top, right, then bottom
img_crop = img.crop((1875, 730, 5680, 3260))
# img_crop.show() shows cropped image
img_crop.save("Cropped_plate.png")
img_crop = cv2.imread("Cropped_plate.png")  # reads in the saved img

filter_image = pcv.rgb2gray_lab(img_crop, 'b')  # filters out colours to grayscale the image
Threshold = cv2.adaptiveThreshold(filter_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                  cv2.THRESH_BINARY, 241, -1)  # Thresholds based on a 241 block size
cv2.imwrite("Threshold.png", Threshold)  # Saves threshold
Threshold = pcv.fill(Threshold, 400)  # removes small white spots left by threshold
cv2.imwrite("Final_threshold.png", Threshold)  # saves threshold with fill changes

# now that we have the threshold we need to crop the clusters out
# so we can get data from each cluster of 4 cells
dire = os.getcwd()
path = dire + '/photo_dump'
try:
    os.makedirs(path)  # so all the pics don't flood our main dir
except OSError:
    pass

img = Image.open("Cropped_plate.png")
sizeX, sizeY = img.size  # finds how big the image is in the x and y directions
sizeX = round(sizeX / 12)  # 12 cols
sizeY = round(sizeY / 8)  # 8 rows
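# A sketch of the grid cropping that the comments above describe: walk the
# 12 x 8 plate in steps of sizeX x sizeY (computed above) and save every cell
# into the photo_dump directory. The tile naming scheme (cell_row_col.png) is
# a hypothetical choice, not taken from the original script; because sizeX and
# sizeY are rounded, the last column/row may run a few pixels past the edge.
import os
from PIL import Image

plate = Image.open("Cropped_plate.png")
cell_w, cell_h = sizeX, sizeY  # per-cell size computed above

for row in range(8):
    for col in range(12):
        box_left = col * cell_w
        box_top = row * cell_h
        cell = plate.crop((box_left, box_top, box_left + cell_w, box_top + cell_h))
        cell.save(os.path.join(path, "cell_{}_{}.png".format(row, col)))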
def test(true_positive_file, test_parameters):
    hue_lower_tresh = test_parameters[0]
    hue_higher_tresh = test_parameters[1]
    saturation_lower_tresh = test_parameters[2]
    saturation_higher_tresh = test_parameters[3]
    value_lower_tresh = test_parameters[4]
    value_higher_tresh = test_parameters[5]
    green_lower_tresh = test_parameters[6]
    green_higher_tresh = test_parameters[7]
    red_lower_tresh = test_parameters[8]
    red_higher_thresh = test_parameters[9]
    blue_lower_tresh = test_parameters[10]
    blue_higher_tresh = test_parameters[11]
    blur_k = test_parameters[12]
    fill_k = test_parameters[13]

    class args:
        # image = "C:\\Users\\RensD\\OneDrive\\studie\\Master\\The_big_project\\top_perspective\\0214_2018-03-07 08.55 - 26_cam9.png"
        image = true_positive_file
        outdir = "C:\\Users\\RensD\\OneDrive\\studie\\Master\\The_big_project\\top_perspective\\output"
        debug = debug_setting
        result = "results.txt"

    # Get options
    pcv.params.debug = args.debug  # set debug mode
    pcv.params.debug_outdir = args.outdir  # set output directory

    # Read image (readimage mode defaults to native but if image is RGBA then specify mode='rgb')
    # Inputs:
    #   filename - Image file to be read in
    #   mode - Return mode of image; either 'native' (default), 'rgb', 'gray', or 'csv'
    img, path, filename = pcv.readimage(filename=args.image, mode='rgb')

    s = pcv.rgb2gray_hsv(rgb_img=img, channel='h')
    mask, masked_image = pcv.threshold.custom_range(rgb_img=s, lower_thresh=[hue_lower_tresh],
                                                    upper_thresh=[hue_higher_tresh], channel='gray')
    masked = pcv.apply_mask(rgb_img=img, mask=mask, mask_color='white')
    # print("filtered on hue")

    s = pcv.rgb2gray_hsv(rgb_img=masked, channel='s')
    mask, masked_image = pcv.threshold.custom_range(rgb_img=s, lower_thresh=[saturation_lower_tresh],
                                                    upper_thresh=[saturation_higher_tresh], channel='gray')
    masked = pcv.apply_mask(rgb_img=masked, mask=mask, mask_color='white')
    # print("filtered on saturation")

    s = pcv.rgb2gray_hsv(rgb_img=masked, channel='v')
    mask, masked_image = pcv.threshold.custom_range(rgb_img=s, lower_thresh=[value_lower_tresh],
                                                    upper_thresh=[value_higher_tresh], channel='gray')
    masked = pcv.apply_mask(rgb_img=masked, mask=mask, mask_color='white')
    # print("filtered on value")

    mask, masked = pcv.threshold.custom_range(rgb_img=masked, lower_thresh=[0, green_lower_tresh, 0],
                                              upper_thresh=[255, green_higher_tresh, 255], channel='RGB')
    masked = pcv.apply_mask(rgb_img=masked, mask=mask, mask_color='white')
    # print("filtered on green")

    mask, masked = pcv.threshold.custom_range(rgb_img=masked, lower_thresh=[red_lower_tresh, 0, 0],
                                              upper_thresh=[red_higher_thresh, 255, 255], channel='RGB')
    masked = pcv.apply_mask(rgb_img=masked, mask=mask, mask_color='white')
    # print("filtered on red")

    mask_old, masked_old = pcv.threshold.custom_range(rgb_img=masked, lower_thresh=[0, 0, blue_lower_tresh],
                                                      upper_thresh=[255, 255, blue_higher_tresh], channel='RGB')
    masked = pcv.apply_mask(rgb_img=masked_old, mask=mask_old, mask_color='white')
    # print("filtered on blue")

    # ____________________________________ Blur to minimize
    try:
        s_mblur = pcv.median_blur(gray_img=masked_old, ksize=blur_k)
        s = pcv.rgb2gray_hsv(rgb_img=s_mblur, channel='v')
        mask, masked_image = pcv.threshold.custom_range(rgb_img=s, lower_thresh=[0],
                                                        upper_thresh=[254], channel='gray')
    except:
        print("failed blur step")
    try:
        mask = pcv.fill(mask, fill_k)
    except:
        pass
    masked = pcv.apply_mask(rgb_img=masked, mask=mask, mask_color='white')

    # _____________________________________ Now to identify objects
    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')

    # Threshold the green-magenta and blue images
    maskeda_thresh = pcv.threshold.binary(gray_img=masked_a, threshold=115, max_value=255, object_type='dark')
    maskeda_thresh1 = pcv.threshold.binary(gray_img=masked_a, threshold=135, max_value=255, object_type='light')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b, threshold=128, max_value=255, object_type='light')

    ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
    ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)

    # Fill small objects
    # Inputs:
    #   bin_img - Binary image data
    #   size - Minimum object area size in pixels (must be an integer); smaller objects are filled
    ab_fill = pcv.fill(bin_img=ab, size=200)
    # print("filled")

    # Apply mask (for VIS images, mask_color=white)
    masked2 = pcv.apply_mask(rgb_img=masked, mask=ab_fill, mask_color='white')

    id_objects, obj_hierarchy = pcv.find_objects(masked, ab_fill)

    # Let's just take the largest
    roi1, roi_hierarchy = pcv.roi.rectangle(img=masked, x=0, y=0, h=960, w=1280)  # Currently hardcoded

    with HiddenPrints():
        roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img=img, roi_contour=roi1,
                                                                       roi_hierarchy=roi_hierarchy,
                                                                       object_contour=id_objects,
                                                                       obj_hierarchy=obj_hierarchy,
                                                                       roi_type=roi_type)

    obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3)

    if use_mask == True:
        return mask
    else:
        masked2 = pcv.apply_mask(rgb_img=masked, mask=mask, mask_color='white')
        return masked2
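# A hedged driver sketch for test() above. The threshold vector, the globals it
# relies on (debug_setting, roi_type, use_mask) and the HiddenPrints helper are
# not part of the original snippet, so everything below is illustrative only.
import os
import sys

class HiddenPrints:
    """Temporarily silence stdout (used around the noisy roi_objects call)."""
    def __enter__(self):
        self._stdout = sys.stdout
        sys.stdout = open(os.devnull, "w")
    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.stdout.close()
        sys.stdout = self._stdout

debug_setting = None      # hypothetical: no debug images
roi_type = "partial"      # hypothetical: same ROI behaviour used elsewhere in this file
use_mask = True           # hypothetical: return the binary mask rather than the masked RGB

# 14 entries: hue, saturation, value, green, red and blue low/high thresholds,
# then the median-blur kernel size and the fill size. All values are placeholders.
example_parameters = [30, 90, 60, 255, 60, 255, 80, 255, 0, 150, 0, 150, 5, 200]

mask = test("example_plant.png", example_parameters)  # hypothetical image file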
def main():
    # Create input arguments object
    args = options()

    # Set debug mode
    pcv.params.debug = args.debug

    # Open a single image
    img, imgpath, imgname = pcv.readimage(filename=args.image)

    # Visualize colorspaces
    all_cs = pcv.visualize.colorspaces(rgb_img=img)

    # Extract the Blue-Yellow ("b") channel from the LAB colorspace
    gray_img = pcv.rgb2gray_lab(rgb_img=img, channel="b")

    # Plot a histogram of pixel values for the Blue-Yellow ("b") channel.
    hist_plot = pcv.visualize.histogram(gray_img=gray_img)

    # Apply a binary threshold to the Blue-Yellow ("b") grayscale image.
    thresh_img = pcv.threshold.binary(gray_img=gray_img, threshold=140, max_value=255, object_type="light")

    # Apply a dilation with a 5x5 kernel and 3 iterations
    dil_img = pcv.dilate(gray_img=thresh_img, ksize=5, i=3)

    # Fill in small holes in the leaves
    closed_img = pcv.fill_holes(bin_img=dil_img)

    # Erode the plant pixels using a 5x5 kernel and 3 iterations
    er_img = pcv.erode(gray_img=closed_img, ksize=5, i=3)

    # Apply a Gaussian blur with a 5 x 5 kernel.
    blur_img = pcv.gaussian_blur(img=er_img, ksize=(5, 5))

    # Set pixel values less than 255 to 0
    blur_img[np.where(blur_img < 255)] = 0

    # Fill/remove objects less than 300 pixels in area
    cleaned = pcv.fill(bin_img=blur_img, size=300)

    # Create a circular ROI
    roi, roi_str = pcv.roi.circle(img=img, x=1725, y=1155, r=400)

    # Identify objects in the binary image
    cnts, cnts_str = pcv.find_objects(img=img, mask=cleaned)

    # Filter objects by region of interest
    plant_cnt, plant_str, plant_mask, plant_area = pcv.roi_objects(img=img, roi_contour=roi, roi_hierarchy=roi_str,
                                                                   object_contour=cnts, obj_hierarchy=cnts_str)

    # Combine objects into one
    plant, mask = pcv.object_composition(img=img, contours=plant_cnt, hierarchy=plant_str)

    # Measure size and shape properties
    shape_img = pcv.analyze_object(img=img, obj=plant, mask=mask)
    if args.writeimg:
        pcv.print_image(img=shape_img, filename=os.path.join(args.outdir, "shapes_" + imgname))

    # Analyze color properties
    color_img = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type="hsv")
    if args.writeimg:
        pcv.print_image(img=color_img, filename=os.path.join(args.outdir, "histogram_" + imgname))

    # Save the measurements to a file
    pcv.print_results(filename=args.result)
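# main() above assumes an options() helper exposing image, outdir, result,
# writeimg and debug attributes. The original options() is not shown in this
# snippet, so the argparse definition below is only one plausible sketch of it;
# flag names and defaults are assumptions.
import argparse

def options():
    parser = argparse.ArgumentParser(description="Single-plant PlantCV workflow.")
    parser.add_argument("-i", "--image", required=True, help="Input image file.")
    parser.add_argument("-o", "--outdir", default=".", help="Directory for output images.")
    parser.add_argument("-r", "--result", default="results.txt", help="Results file.")
    parser.add_argument("-w", "--writeimg", action="store_true", help="Write output images.")
    parser.add_argument("-D", "--debug", default=None, help="Debug mode: None, 'print', or 'plot'.")
    return parser.parse_args()

if __name__ == "__main__":
    main()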
def initcrop(imagePath):
    dire = dir  # `dir` is expected to be a module-level path string defined elsewhere (it shadows the builtin)
    path = dire + '/Classifyer_dump'
    try:
        os.makedirs(path)
    except OSError:
        pass

    image = cv2.imread(imagePath)
    blue_image = pcv.rgb2gray_lab(image, 'l')
    Gaussian_blue = cv2.adaptiveThreshold(blue_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                          cv2.THRESH_BINARY, 981, -1)  # 241 is good, 981
    cv2.imwrite(os.path.join(path, "blue_test.png"), Gaussian_blue)
    fill = pcv.fill_holes(Gaussian_blue)
    fill_again = pcv.fill(fill, 100000)
    id_objects, obj_hierarchy = pcv.find_objects(img=image, mask=fill_again)  # lazy way to findContours and draw them
    roi1, roi_hierarchy = pcv.roi.rectangle(img=image, x=3000, y=1000, h=200, w=300)
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img=image, roi_contour=roi1,
                                                                   roi_hierarchy=roi_hierarchy,
                                                                   object_contour=id_objects,
                                                                   obj_hierarchy=obj_hierarchy,
                                                                   roi_type='partial')
    cv2.imwrite(os.path.join(path, "plate_mask.png"), kept_mask)
    mask = cv2.imread(os.path.join(path, "plate_mask.png"))
    result = image * (mask.astype(image.dtype))
    result = cv2.bitwise_not(result)
    cv2.imwrite(os.path.join(path, "AutoCrop.png"), result)

    output = cv2.connectedComponentsWithStats(kept_mask, connectivity=8)
    stats = output[2]
    left = (stats[1, cv2.CC_STAT_LEFT])
    # print(stats[1, cv2.CC_STAT_TOP])
    # print(stats[1, cv2.CC_STAT_HEIGHT])
    # exit(2)

    L, a, b = cv2.split(result)
    # cv2.imwrite("gray_scale.png", L)
    plate_threshold = cv2.adaptiveThreshold(b, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                            cv2.THRESH_BINARY, 87, -1)  # 867 is good, 241
    cv2.imwrite(os.path.join(path, "plate_threshold.png"), plate_threshold)
    fill_again2 = pcv.fill(plate_threshold, 1000)
    cv2.imwrite(os.path.join(path, "fill_test.png"), fill_again2)
    # fill = pcv.fill_holes(fill_again2)
    # cv2.imwrite(os.path.join(path, "fill_test2.png"), fill)
    blur_image = pcv.median_blur(fill_again2, 10)

    nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(blur_image, connectivity=8)
    sizes = stats[1:, -1]
    nb_components = nb_components - 1
    min_size = 20000
    img2 = np.zeros((output.shape))
    for i in range(0, nb_components):
        if sizes[i] <= min_size:
            img2[output == i + 1] = 255
    cv2.imwrite(os.path.join(path, "remove_20000.png"), img2)  # this can be made better to speed it up

    thresh_image = img2.astype(np.uint8)  # maybe crop to the roi below then do it
    thresh_image = pcv.fill_holes(thresh_image)
    cv2.imwrite("NEWTEST.jpg", thresh_image)
    id_objects, obj_hierarchy = pcv.find_objects(img=image, mask=thresh_image)
    roi1, roi_hierarchy = pcv.roi.rectangle(img=image, x=(left + 380), y=750, h=175, w=100)
    try:
        where_cell = 0
        roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img=image, roi_contour=roi1,
                                                                       roi_hierarchy=roi_hierarchy,
                                                                       object_contour=id_objects,
                                                                       obj_hierarchy=obj_hierarchy,
                                                                       roi_type='partial')
        cv2.imwrite(os.path.join(path, "test_mask.png"), kept_mask)
        mask = cv2.imread(os.path.join(path, "test_mask.png"))
        result = image * (mask.astype(image.dtype))
        result = cv2.bitwise_not(result)
        cv2.imwrite(os.path.join(path, "TEST.png"), result)
        output = cv2.connectedComponentsWithStats(kept_mask, connectivity=8)
        stats = output[2]
        centroids = output[3]
        centroids_x = (int(centroids[1][0]))
        centroids_y = (int(centroids[1][1]))
    except:
        where_cell = 1
        print("did this work?")
        roi1, roi_hierarchy = pcv.roi.rectangle(img=image, x=(left + 380), y=3200, h=100, w=100)
        roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img=image, roi_contour=roi1,
                                                                       roi_hierarchy=roi_hierarchy,
                                                                       object_contour=id_objects,
                                                                       obj_hierarchy=obj_hierarchy,
                                                                       roi_type='partial')
        cv2.imwrite(os.path.join(path, "test_mask.png"), kept_mask)
        mask = cv2.imread(os.path.join(path, "test_mask.png"))
        result = image * (mask.astype(image.dtype))
        result = cv2.bitwise_not(result)
        cv2.imwrite(os.path.join(path, "TEST.png"), result)
        output = cv2.connectedComponentsWithStats(kept_mask, connectivity=8)
        stats = output[2]
        centroids = output[3]
        centroids_x = (int(centroids[1][0]))
        centroids_y = (int(centroids[1][1]))

    flag = 0
    # print(stats[1, cv2.CC_STAT_AREA])
    if ((stats[1, cv2.CC_STAT_AREA]) > 4000):
        flag = 30
    # print(centroids_x)
    # print(centroids_y)
    # print(centroids)
    if (where_cell == 0):
        left = (centroids_x - 70)
        right = (centroids_x + 3695 + flag)  # was 3715
        top = (centroids_y - 80)
        bottom = (centroids_y + 2462)
    if (where_cell == 1):
        left = (centroids_x - 70)
        right = (centroids_x + 3715 + flag)
        top = (centroids_y - 2480)
        bottom = (centroids_y + 62)
    # print(top)
    # print(bottom)

    image = Image.open(imagePath)
    img_crop = image.crop((left, top, right, bottom))
    # img_crop.show()
    img_crop.save(os.path.join(path, 'Cropped_full_yeast.png'))
    circle_me = cv2.imread(os.path.join(path, "Cropped_full_yeast.png"))
    cropped_img = cv2.imread(os.path.join(path, "Cropped_full_yeast.png"))  # changed from Yeast_Cluster.%d.png % counter

    L, a, b = cv2.split(cropped_img)  # can do l, a, or b
    Gaussian_blue = cv2.adaptiveThreshold(b, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                          cv2.THRESH_BINARY, 241, -1)  # For liz's pictures 241
    cv2.imwrite(os.path.join(path, "blue_test.png"), Gaussian_blue)
    blur_image = pcv.median_blur(Gaussian_blue, 10)
    heavy_fill_blue = pcv.fill(blur_image, 1000)  # value 400
    hole_fill = pcv.fill_holes(heavy_fill_blue)
    cv2.imwrite(os.path.join(path, "Cropped_Threshold.png"), hole_fill)
def main():
    args = options()
    os.chdir(args.outdir)

    # Read RGB image
    img, path, filename = pcv.readimage(args.image, mode="native")

    # Get metadata from file name
    geno_name = filename.split("}{")
    geno_name = geno_name[5]
    geno_name = geno_name.split("_")
    geno_name = geno_name[1]

    day = filename.split("}{")
    day = day[7]
    day = day.split("_")
    day = day[1]
    day = day.split("}")
    day = day[0]

    plot = filename.split("}{")
    plot = plot[0]
    plot = plot.split("_")
    plot = plot[1]

    exp_name = filename.split("}{")
    exp_name = exp_name[1]
    exp_name = exp_name.split("_")
    exp_name = exp_name[1]

    treat_name = filename.split("}{")
    treat_name = treat_name[6]

    # Create masks using Naive Bayes Classifier and PDFs file
    masks = pcv.naive_bayes_classifier(img, args.pdfs)

    # The following code will identify the racks in the image, find the top edge, and choose a line
    # along the edge to pick a y coordinate to trim any soil/pot pixels identified as plant material.

    # Convert RGB to HSV and extract the Value channel
    v = pcv.rgb2gray_hsv(img, 'v')

    # Threshold the Value image
    v_thresh = pcv.threshold.binary(v, 98, 255, 'light')

    # Dilate mask to fill holes
    dilate_racks = pcv.dilate(v_thresh, 2, 1)

    # Fill in small objects
    mask = np.copy(dilate_racks)
    fill_racks = pcv.fill(mask, 100000)

    # Edge detection
    edges = cv2.Canny(fill_racks, 60, 180)

    # Write all the straight lines from edge detection
    lines = cv2.HoughLinesP(edges, rho=1, theta=1 * np.pi / 180, threshold=150,
                            minLineLength=50, maxLineGap=15)
    N = lines.shape[0]
    for i in range(N):
        x1 = lines[i][0][0]
        y1 = lines[i][0][1]
        x2 = lines[i][0][2]
        y2 = lines[i][0][3]
        cv2.line(img, (x1, y1), (x2, y2), (255, 0, 0), 2)

    # Keep only horizontal lines
    N = lines.shape[0]
    tokeep = []
    for i in range(N):
        want = (abs(lines[i][0][1] - lines[i][0][3])) <= 10
        tokeep.append(want)
    lines = lines[tokeep]

    # Keep only lines in lower half of image
    N = lines.shape[0]
    tokeep = []
    for i in range(N):
        want = 3100 > lines[i][0][1] > 2300
        tokeep.append(want)
    lines = lines[tokeep]

    # Assign lines to positions around plants
    N = lines.shape[0]
    tokeep = []
    left = []
    mid = []
    right = []
    for i in range(N):
        leftones = lines[i][0][2] <= 2000
        left.append(leftones)
        midones = 3000 > lines[i][0][2] > 2000
        mid.append(midones)
        rightones = lines[i][0][0] >= 3300
        right.append(rightones)
    right = lines[right]
    left = lines[left]
    mid = lines[mid]

    # Choose y values for right, left, and mid, adding some pixels to go above the pot
    # (subtract because of orientation of axis)
    y_left = left[0][0][3] - 50
    y_mid = mid[0][0][3] - 50
    y_right = right[0][0][3] - 50

    # Reload original image to write new lines on
    img, path, filename = pcv.readimage(args.image)

    # Write horizontal lines on image
    cv2.line(img, (left[0][0][0], left[0][0][1]), (left[0][0][2], left[0][0][3]), (255, 255, 51), 2)
    cv2.line(img, (mid[0][0][0], mid[0][0][1]), (mid[0][0][2], mid[0][0][3]), (255, 255, 51), 2)
    cv2.line(img, (right[0][0][0], right[0][0][1]), (right[0][0][2], right[0][0][3]), (255, 255, 51), 2)

    # Add masks together
    added = masks["healthy"] + masks["necrosis"] + masks["stem"]

    # Dilate mask to fill holes
    dilate_img = pcv.dilate(added, 2, 1)

    # Fill in small objects
    mask = np.copy(dilate_img)
    fill_img = pcv.fill(mask, 400)

    ret, inverted = cv2.threshold(fill_img, 75, 255, cv2.THRESH_BINARY_INV)

    # Dilate mask to fill holes of plant
    dilate_inv = pcv.dilate(inverted, 2, 1)

    # Fill in small objects of plant
    mask2 = np.copy(dilate_inv)
    fill_plant = pcv.fill(mask2, 20)

    inverted_img = pcv.invert(fill_plant)

    # Identify objects
    id_objects, obj_hierarchy = pcv.find_objects(img, inverted_img)

    # Define ROIs
    roi_left, roi_hierarchy_left = pcv.roi.rectangle(280, 1280, 1275, 1200, img)
    roi_mid, roi_hierarchy_mid = pcv.roi.rectangle(1900, 1280, 1275, 1200, img)
    roi_right, roi_hierarchy_right = pcv.roi.rectangle(3600, 1280, 1275, 1200, img)

    # Decide which objects to keep
    roi_objects_left, roi_obj_hierarchy_left, kept_mask_left, obj_area_left = pcv.roi_objects(
        img, 'partial', roi_left, roi_hierarchy_left, id_objects, obj_hierarchy)
    roi_objects_mid, roi_obj_hierarchy_mid, kept_mask_mid, obj_area_mid = pcv.roi_objects(
        img, 'partial', roi_mid, roi_hierarchy_mid, id_objects, obj_hierarchy)
    roi_objects_right, roi_obj_hierarchy_right, kept_mask_right, obj_area_right = pcv.roi_objects(
        img, 'partial', roi_right, roi_hierarchy_right, id_objects, obj_hierarchy)

    # Combine objects
    obj_r, mask_r = pcv.object_composition(img, roi_objects_right, roi_obj_hierarchy_right)
    obj_m, mask_m = pcv.object_composition(img, roi_objects_mid, roi_obj_hierarchy_mid)
    obj_l, mask_l = pcv.object_composition(img, roi_objects_left, roi_obj_hierarchy_left)

    def analyze_bound_horizontal2(img, obj, mask, line_position, filename=False):
        ori_img = np.copy(img)
        # Draw horizontal line through bottom of image, adjusted to user input height
        if len(np.shape(ori_img)) == 3:
            iy, ix, iz = np.shape(ori_img)
        else:
            iy, ix = np.shape(ori_img)
        size = (iy, ix)
        size1 = (iy, ix, 3)
        background = np.zeros(size, dtype=np.uint8)
        wback = np.zeros(size1, dtype=np.uint8)
        x_coor = int(ix)
        y_coor = int(iy) - int(line_position)
        rec_corner = int(iy - 2)
        rec_point1 = (1, rec_corner)
        rec_point2 = (x_coor - 2, y_coor - 2)
        cv2.rectangle(background, rec_point1, rec_point2, (255), 1)
        below_contour, below_hierarchy = cv2.findContours(background, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
        below = []
        above = []
        mask_nonzerox, mask_nonzeroy = np.nonzero(mask)
        obj_points = np.vstack((mask_nonzeroy, mask_nonzerox))
        obj_points1 = np.transpose(obj_points)
        for i, c in enumerate(obj_points1):
            xy = tuple(c)
            pptest = cv2.pointPolygonTest(below_contour[0], xy, measureDist=False)
            if pptest == 1:
                below.append(xy)
                cv2.circle(ori_img, xy, 1, (0, 0, 255))
                cv2.circle(wback, xy, 1, (0, 0, 0))
            else:
                above.append(xy)
                cv2.circle(ori_img, xy, 1, (0, 255, 0))
                cv2.circle(wback, xy, 1, (255, 255, 255))
        return wback

    ori_img = np.copy(img)
    # Draw horizontal line through bottom of image, adjusted to user input height
    if len(np.shape(ori_img)) == 3:
        iy, ix, iz = np.shape(ori_img)
    else:
        iy, ix = np.shape(ori_img)

    if obj_r is not None:
        wback_r = analyze_bound_horizontal2(img, obj_r, mask_r, iy - y_right)
    if obj_m is not None:
        wback_m = analyze_bound_horizontal2(img, obj_m, mask_m, iy - y_mid)
    if obj_l is not None:
        wback_l = analyze_bound_horizontal2(img, obj_l, mask_l, iy - y_left)

    threshold_light = pcv.threshold.binary(img, 1, 1, 'dark')

    if obj_r is not None:
        fgmask_r = pcv.background_subtraction(wback_r, threshold_light)
    if obj_m is not None:
        fgmask_m = pcv.background_subtraction(wback_m, threshold_light)
    if obj_l is not None:
        fgmask_l = pcv.background_subtraction(wback_l, threshold_light)

    if obj_l is not None:
        id_objects_left, obj_hierarchy_left = pcv.find_objects(img, fgmask_l)
    if obj_m is not None:
        id_objects_mid, obj_hierarchy_mid = pcv.find_objects(img, fgmask_m)
    if obj_r is not None:
        id_objects_right, obj_hierarchy_right = pcv.find_objects(img, fgmask_r)

    # Combine objects
    if obj_r is not None:
        obj_r2, mask_r2 = pcv.object_composition(img, id_objects_right, obj_hierarchy_right)
    if obj_m is not None:
        obj_m2, mask_m2 = pcv.object_composition(img, id_objects_mid, obj_hierarchy_mid)
    if obj_l is not None:
        obj_l2, mask_l2 = pcv.object_composition(img, id_objects_left, obj_hierarchy_left)

    # Shape measurements
    if obj_l is not None:
        shape_header_left, shape_data_left, shape_img_left = pcv.analyze_object(
            img, obj_l2, fgmask_l,
            geno_name + '_' + plot + '_' + 'A' + '_' + day + '_' + 'shape.jpg')
    if obj_r is not None:
        shape_header_right, shape_data_right, shape_img_right = pcv.analyze_object(
            img, obj_r2, fgmask_r,
            geno_name + '_' + plot + '_' + 'C' + '_' + day + '_' + 'shape.jpg')
    if obj_m is not None:
        shape_header_mid, shape_data_mid, shape_img_mid = pcv.analyze_object(
            img, obj_m2, fgmask_m,
            geno_name + '_' + plot + '_' + 'B' + '_' + day + '_' + 'shape.jpg')

    # Color data
    if obj_r is not None:
        color_header_right, color_data_right, norm_slice_right = pcv.analyze_color(
            img, fgmask_r, 256, None, 'v', 'img',
            geno_name + '_' + plot + '_' + 'C' + '_' + day + '_' + 'color.jpg')
    if obj_m is not None:
        color_header_mid, color_data_mid, norm_slice_mid = pcv.analyze_color(
            img, fgmask_m, 256, None, 'v', 'img',
            geno_name + '_' + plot + '_' + 'B' + '_' + day + '_' + 'color.jpg')
    if obj_l is not None:
        color_header_left, color_data_left, norm_slice_left = pcv.analyze_color(
            img, fgmask_l, 256, None, 'v', 'img',
            geno_name + '_' + plot + '_' + 'A' + '_' + day + '_' + 'color.jpg')

    new_header = [
        'experiment', 'day', 'genotype', 'treatment', 'plot', 'plant',
        'percent.necrosis', 'area', 'hull-area', 'solidity', 'perimeter',
        'width', 'height', 'longest_axis', 'center-of-mass-x',
        'center-of-mass-y', 'hull_vertices', 'in_bounds', 'ellipse_center_x',
        'ellipse_center_y', 'ellipse_major_axis', 'ellipse_minor_axis',
        'ellipse_angle', 'ellipse_eccentricity', 'bin-number', 'bin-values',
        'blue', 'green', 'red', 'lightness', 'green-magenta', 'blue-yellow',
        'hue', 'saturation', 'value'
    ]
    table = []
    table.append(new_header)

    added2 = masks["healthy"] + masks["stem"]

    # Object combine kept objects
    if obj_l is not None:
        masked_image_healthy_left = pcv.apply_mask(added2, fgmask_l, 'black')
        masked_image_necrosis_left = pcv.apply_mask(masks["necrosis"], fgmask_l, 'black')
        added_obj_left = masked_image_healthy_left + masked_image_necrosis_left
        sample = "A"
        # Calculations
        necrosis_left = np.sum(masked_image_necrosis_left)
        necrosis_percent_left = float(necrosis_left) / np.sum(added_obj_left)
        table.append([
            exp_name, day, geno_name, treat_name, plot, sample,
            round(necrosis_percent_left, 5), shape_data_left[1],
            shape_data_left[2], shape_data_left[3], shape_data_left[4],
            shape_data_left[5], shape_data_left[6], shape_data_left[7],
            shape_data_left[8], shape_data_left[9], shape_data_left[10],
            shape_data_left[11], shape_data_left[12], shape_data_left[13],
            shape_data_left[14], shape_data_left[15], shape_data_left[16],
            shape_data_left[17],
            '"{}"'.format(color_data_left[1]), '"{}"'.format(color_data_left[2]),
            '"{}"'.format(color_data_left[3]), '"{}"'.format(color_data_left[4]),
            '"{}"'.format(color_data_left[5]), '"{}"'.format(color_data_left[6]),
            '"{}"'.format(color_data_left[7]), '"{}"'.format(color_data_left[8]),
            '"{}"'.format(color_data_left[9]), '"{}"'.format(color_data_left[10]),
            '"{}"'.format(color_data_left[11])
        ])

    # Object combine kept objects
    if obj_m is not None:
        masked_image_healthy_mid = pcv.apply_mask(added2, fgmask_m, 'black')
        masked_image_necrosis_mid = pcv.apply_mask(masks["necrosis"], fgmask_m, 'black')
        added_obj_mid = masked_image_healthy_mid + masked_image_necrosis_mid
        sample = "B"
        # Calculations
        necrosis_mid = np.sum(masked_image_necrosis_mid)
        necrosis_percent_mid = float(necrosis_mid) / np.sum(added_obj_mid)
        table.append([
            exp_name, day, geno_name, treat_name, plot, sample,
            round(necrosis_percent_mid, 5), shape_data_mid[1],
            shape_data_mid[2], shape_data_mid[3], shape_data_mid[4],
            shape_data_mid[5], shape_data_mid[6], shape_data_mid[7],
            shape_data_mid[8], shape_data_mid[9], shape_data_mid[10],
            shape_data_mid[11], shape_data_mid[12], shape_data_mid[13],
            shape_data_mid[14], shape_data_mid[15], shape_data_mid[16],
            shape_data_mid[17],
            '"{}"'.format(color_data_mid[1]), '"{}"'.format(color_data_mid[2]),
            '"{}"'.format(color_data_mid[3]), '"{}"'.format(color_data_mid[4]),
            '"{}"'.format(color_data_mid[5]), '"{}"'.format(color_data_mid[6]),
            '"{}"'.format(color_data_mid[7]), '"{}"'.format(color_data_mid[8]),
            '"{}"'.format(color_data_mid[9]), '"{}"'.format(color_data_mid[10]),
            '"{}"'.format(color_data_mid[11])
        ])

    # Object combine kept objects
    if obj_r is not None:
        masked_image_healthy_right = pcv.apply_mask(added2, fgmask_r, 'black')
        masked_image_necrosis_right = pcv.apply_mask(masks["necrosis"], fgmask_r, 'black')
        added_obj_right = masked_image_healthy_right + masked_image_necrosis_right
        sample = "C"
        # Calculations
        necrosis_right = np.sum(masked_image_necrosis_right)
        necrosis_percent_right = float(necrosis_right) / np.sum(added_obj_right)
        table.append([
            exp_name, day, geno_name, treat_name, plot, sample,
            round(necrosis_percent_right, 5), shape_data_right[1],
            shape_data_right[2], shape_data_right[3], shape_data_right[4],
            shape_data_right[5], shape_data_right[6], shape_data_right[7],
            shape_data_right[8], shape_data_right[9], shape_data_right[10],
            shape_data_right[11], shape_data_right[12], shape_data_right[13],
            shape_data_right[14], shape_data_right[15], shape_data_right[16],
            shape_data_right[17],
            '"{}"'.format(color_data_right[1]), '"{}"'.format(color_data_right[2]),
            '"{}"'.format(color_data_right[3]), '"{}"'.format(color_data_right[4]),
            '"{}"'.format(color_data_right[5]), '"{}"'.format(color_data_right[6]),
            '"{}"'.format(color_data_right[7]), '"{}"'.format(color_data_right[8]),
            '"{}"'.format(color_data_right[9]), '"{}"'.format(color_data_right[10]),
            '"{}"'.format(color_data_right[11])
        ])

    if obj_l is not None:
        merged2 = cv2.merge([
            masked_image_healthy_left,
            np.zeros(np.shape(masks["healthy"]), dtype=np.uint8),
            masked_image_necrosis_left
        ])  # blue, green, red
        pcv.print_image(merged2, geno_name + '_' + plot + '_' + 'A' + '_' + day + '_' + 'merged.jpg')
    if obj_m is not None:
        merged3 = cv2.merge([
            masked_image_healthy_mid,
            np.zeros(np.shape(masks["healthy"]), dtype=np.uint8),
            masked_image_necrosis_mid
        ])  # blue, green, red
        pcv.print_image(merged3, geno_name + '_' + plot + '_' + 'B' + '_' + day + '_' + 'merged.jpg')
    if obj_r is not None:
        merged4 = cv2.merge([
            masked_image_healthy_right,
            np.zeros(np.shape(masks["healthy"]), dtype=np.uint8),
            masked_image_necrosis_right
        ])  # blue, green, red
        pcv.print_image(merged4, geno_name + '_' + plot + '_' + 'C' + '_' + day + '_' + 'merged.jpg')

    # Save area results to file (individual csv files for one image...)
    file_name = filename.split("}{")
    file_name = file_name[0] + "}{" + file_name[5] + "}{" + file_name[7]
    outfile = str(file_name[:-4]) + 'csv'
    with open(outfile, 'w') as f:
        for row in table:
            f.write(','.join(map(str, row)) + '\n')

    print(filename)
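# The per-sample necrosis calculation in main() above repeats the same
# arithmetic for the left, mid and right plants. A small helper expressing the
# same computation is shown here only for clarity; it is not used by the
# original code.
import numpy as np

def necrosis_fraction(healthy_masked, necrosis_masked):
    """Fraction of classified plant pixels labelled necrotic.

    Both inputs are images already masked to a single plant, as produced by
    pcv.apply_mask(added2, fgmask, 'black') and
    pcv.apply_mask(masks["necrosis"], fgmask, 'black') in main().
    """
    added_obj = healthy_masked + necrosis_masked  # same element-wise addition as in main()
    total = np.sum(added_obj)
    if total == 0:
        return 0.0  # guard added here; the original divides unconditionally
    return float(np.sum(necrosis_masked)) / total

# e.g. round(necrosis_fraction(masked_image_healthy_left, masked_image_necrosis_left), 5)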
def main():
    # Get options
    args = options()
    debug = args.debug

    # Read image
    img, path, filename = pcv.readimage(args.image)

    # Pipeline step
    device = 0
    device, img1 = pcv.white_balance(device, img, debug, (100, 100, 1000, 1000))
    img = img1

    # seedmask, path1, filename1 = pcv.readimage(args.mask)
    # device, seedmask = pcv.rgb2gray(seedmask, device, debug)
    # device, inverted = pcv.invert(seedmask, device, debug)
    # device, masked_img = pcv.apply_mask(img, inverted, 'white', device, debug)

    device, img_gray_sat = pcv.rgb2gray_hsv(img1, 's', device, debug)
    device, img_binary = pcv.binary_threshold(img_gray_sat, 70, 255, 'light', device, debug)

    img_binary1 = np.copy(img_binary)
    device, fill_image = pcv.fill(img_binary1, img_binary, 300, device, debug)

    device, seed_objects, seed_hierarchy = pcv.find_objects(img, fill_image, device, debug)

    device, roi1, roi_hierarchy1 = pcv.define_roi(img, 'rectangle', device, None, 'default', debug,
                                                  True, 1500, 1000, -1000, -500)
    device, roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi1, roi_hierarchy1, seed_objects, seed_hierarchy, device, debug)

    img_copy = np.copy(img)
    for i in range(0, len(roi_objects)):
        rand_color = pcv.color_palette(1)
        cv2.drawContours(img_copy, roi_objects, i, rand_color[0], -1, lineType=8, hierarchy=roi_obj_hierarchy)
    pcv.print_image(img_copy, os.path.join(args.outdir, filename[:-4]) + "-seed-confetti.jpg")

    shape_header = []  # Store the table header
    table = []  # Store the PlantCV measurements for each seed in a table
    for i in range(0, len(roi_objects)):
        if roi_obj_hierarchy[0][i][3] == -1:  # Only continue if the object is an outermost contour
            # Object combine kept objects
            # Inputs:
            #   contours = object list
            #   device = device number. Used to count steps in the pipeline
            #   debug = None, print, or plot. Print = save to file, Plot = print to screen.
            device, obj, mask = pcv.object_composition(img, [roi_objects[i]],
                                                       np.array([[roi_obj_hierarchy[0][i]]]), device, None)
            if obj is not None:
                # Measure the area and other shape properties of each seed
                # Inputs:
                #   img = image object (most likely the original), color (RGB)
                #   imgname = name of image
                #   obj = single or grouped contour object
                #   device = device number. Used to count steps in the pipeline
                #   debug = None, print, or plot. Print = save to file, Plot = print to screen.
                #   filename = False or image name. If defined print image
                device, shape_header, shape_data, shape_img = pcv.analyze_object(img, "img", obj, mask, device, None)
                if shape_data is not None:
                    table.append(shape_data[1])

    data_array = np.array(table)
    maxval = np.argmax(data_array)
    maxseed = np.copy(img)
    cv2.drawContours(maxseed, roi_objects, maxval, (0, 255, 0), 10)

    imgtext = "This image has " + str(len(data_array)) + " seeds"
    sizeseed = "The largest seed is in green and is " + str(data_array[maxval]) + " pixels"
    cv2.putText(maxseed, imgtext, (500, 300), cv2.FONT_HERSHEY_SIMPLEX, 5, (0, 0, 0), 10)
    cv2.putText(maxseed, sizeseed, (500, 600), cv2.FONT_HERSHEY_SIMPLEX, 5, (0, 0, 0), 10)

    pcv.print_image(maxseed, os.path.join(args.outdir, filename[:-4]) + "-maxseed.jpg")
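# The loop in main() above only keeps each seed's area in `table` and stamps a
# summary onto the image. A small follow-on sketch that also writes the
# per-seed areas to a CSV next to the annotated image; the output file name is
# a hypothetical choice and this helper is not part of the original pipeline.
import csv
import os

def write_seed_areas(areas, outdir, imgname):
    """Write one row per detected seed: its index and area in pixels."""
    out_path = os.path.join(outdir, imgname[:-4] + "-seed-areas.csv")
    with open(out_path, "w", newline="") as fh:
        writer = csv.writer(fh)
        writer.writerow(["seed_index", "area_px"])
        for idx, area in enumerate(areas):
            writer.writerow([idx, area])
    return out_path

# e.g. write_seed_areas(table, args.outdir, filename) after the measurement loop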