Example #1
def test_plantcv_erode():
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    device, erode_img = pcv.erode(img=img, kernel=5, i=1, device=0, debug=None)
    # Assert that the output image has the dimensions of the input image
    if all([i == j for i, j in zip(np.shape(erode_img), TEST_BINARY_DIM)]):
        # Assert that the image is binary
        if all([i == j for i, j in zip(np.unique(erode_img), [0, 255])]):
            assert 1
        else:
            assert 0
    else:
        assert 0
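A shorter sketch of the same checks, assuming the same TEST_DATA, TEST_INPUT_BINARY, and TEST_BINARY_DIM fixtures from the surrounding test module and the PlantCV 2.x device-based API, is to assert each condition directly so a failing run reports which check broke:

def test_plantcv_erode_direct():
    # Hypothetical variant of the test above; fixtures are assumed to come from the test module
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    device, erode_img = pcv.erode(img=img, kernel=5, i=1, device=0, debug=None)
    # The output must keep the dimensions of the input image
    assert tuple(np.shape(erode_img)) == tuple(TEST_BINARY_DIM)
    # The output must still be a binary (0/255) image
    assert list(np.unique(erode_img)) == [0, 255]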
Example #3
def main():

    # Get options
    args = options()
    if args.debug:
        print("Analyzing your image dude...")
    # Read image
    device = 0
    img = cv2.imread(args.image, flags=0)
    path, img_name = os.path.split(args.image)
    # Read in the image that is the average of the averaged background images
    img_bkgrd = cv2.imread("bkgrd_ave_z2500.png", flags=0)

    # NIR images for burnin2 are upside down. This may be fixed in later experiments
    img = ndimage.rotate(img, 180)
    img_bkgrd = ndimage.rotate(img_bkgrd, 180)

    # Subtract the background image from the plant image to make the plant more prominent
    device, bkg_sub_img = pcv.image_subtract(img, img_bkgrd, device, args.debug)
    if args.debug:
        pcv.plot_hist(bkg_sub_img, "bkg_sub_img")
    device, bkg_sub_thres_img = pcv.binary_threshold(bkg_sub_img, 145, 255, "dark", device, args.debug)
    bkg_sub_thres_img = cv2.inRange(bkg_sub_img, 30, 220)
    if args.debug:
        cv2.imwrite("bkgrd_sub_thres.png", bkg_sub_thres_img)

    # device, bkg_sub_thres_img = pcv.binary_threshold_2_sided(img_bkgrd, 50, 190, device, args.debug)

    # if a region of interest is specified read it in
    roi = cv2.imread(args.roi)

    # Start by examining the distribution of pixel intensity values
    if args.debug:
        pcv.plot_hist(img, "hist_img")

    # Will an intensity transformation enhance your ability to isolate the object of interest by thresholding?
    device, he_img = pcv.HistEqualization(img, device, args.debug)
    if args.debug:
        pcv.plot_hist(he_img, "hist_img_he")

    # Laplace filtering (identify edges based on 2nd derivative)
    device, lp_img = pcv.laplace_filter(img, 1, 1, device, args.debug)
    if args.debug:
        pcv.plot_hist(lp_img, "hist_lp")

    # Laplacian image sharpening; this step will enhance the darkness of the edges detected
    device, lp_shrp_img = pcv.image_subtract(img, lp_img, device, args.debug)
    if args.debug:
        pcv.plot_hist(lp_shrp_img, "hist_lp_shrp")

    # Sobel filtering
    # 1st derivative Sobel filtering along the horizontal axis (kernel = 1, unscaled)
    device, sbx_img = pcv.sobel_filter(img, 1, 0, 1, 1, device, args.debug)
    if args.debug:
        pcv.plot_hist(sbx_img, "hist_sbx")

    # 1st derivative Sobel filtering along the vertical axis (kernel = 1, unscaled)
    device, sby_img = pcv.sobel_filter(img, 0, 1, 1, 1, device, args.debug)
    if args.debug:
        pcv.plot_hist(sby_img, "hist_sby")

    # Combine the effects of both the x and y filters through matrix addition
    # This will capture edges identified within each plane and emphasize edges found in both images
    device, sb_img = pcv.image_add(sbx_img, sby_img, device, args.debug)
    if args.debug:
        pcv.plot_hist(sb_img, "hist_sb_comb_img")

    # Use a lowpass (blurring) filter to smooth sobel image
    device, mblur_img = pcv.median_blur(sb_img, 1, device, args.debug)
    device, mblur_invert_img = pcv.invert(mblur_img, device, args.debug)

    # Combine the smoothed Sobel image with the Laplacian-sharpened image
    # This combines the best features of both methods, as described in "Digital Image Processing" by Gonzalez and Woods, pg. 169
    device, edge_shrp_img = pcv.image_add(mblur_invert_img, lp_shrp_img, device, args.debug)
    if args.debug:
        pcv.plot_hist(edge_shrp_img, "hist_edge_shrp_img")

    # Perform thresholding to generate a binary image
    device, tr_es_img = pcv.binary_threshold(edge_shrp_img, 145, 255, "dark", device, args.debug)

    # Prepare a few small kernels for morphological filtering
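    # (kern1-kern4 below are 2-pixel structuring elements pointing right, left, up, and down
    # from the center of a 3x3 array; kern itself becomes a 3x3 cross used for dilation)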
    kern = np.zeros((3, 3), dtype=np.uint8)
    kern1 = np.copy(kern)
    kern1[1, 1:3] = 1
    kern2 = np.copy(kern)
    kern2[1, 0:2] = 1
    kern3 = np.copy(kern)
    kern3[0:2, 1] = 1
    kern4 = np.copy(kern)
    kern4[1:3, 1] = 1

    # Prepare a larger kernel for dilation
    kern[1, 0:3] = 1
    kern[0:3, 1] = 1

    # Perform erosion with 4 small kernels
    device, e1_img = pcv.erode(tr_es_img, kern1, 1, device, args.debug)
    device, e2_img = pcv.erode(tr_es_img, kern2, 1, device, args.debug)
    device, e3_img = pcv.erode(tr_es_img, kern3, 1, device, args.debug)
    device, e4_img = pcv.erode(tr_es_img, kern4, 1, device, args.debug)

    # Combine eroded images
    device, c12_img = pcv.logical_or(e1_img, e2_img, device, args.debug)
    device, c123_img = pcv.logical_or(c12_img, e3_img, device, args.debug)
    device, c1234_img = pcv.logical_or(c123_img, e4_img, device, args.debug)

    # Perform dilation
    # device, dil_img = pcv.dilate(c1234_img, kern, 1, device, args.debug)
    device, comb_img = pcv.logical_or(c1234_img, bkg_sub_thres_img, device, args.debug)

    # Get masked image
    # The dilated image may contain some pixels which are not plant
    device, masked_erd = pcv.apply_mask(img, comb_img, "black", device, args.debug)
    # device, masked_erd_dil = pcv.apply_mask(img, dil_img, 'black', device, args.debug)

    # Need to remove the edges of the image; we do that by generating a set of rectangles to mask the edges
    # img is (254 X 320)
    # mask for the bottom of the image
    device, box1_img, rect_contour1, hierarchy1 = pcv.rectangle_mask(img, (120, 184), (215, 252), device, args.debug)
    # mask for the left side of the image
    device, box2_img, rect_contour2, hierarchy2 = pcv.rectangle_mask(img, (1, 1), (85, 252), device, args.debug)
    # mask for the right side of the image
    device, box3_img, rect_contour3, hierarchy3 = pcv.rectangle_mask(img, (240, 1), (318, 252), device, args.debug)
    # mask the edges
    device, box4_img, rect_contour4, hierarchy4 = pcv.border_mask(img, (1, 1), (318, 252), device, args.debug)

    # Combine the boxes to filter the edges and the car out of the photo
    device, bx12_img = pcv.logical_or(box1_img, box2_img, device, args.debug)
    device, bx123_img = pcv.logical_or(bx12_img, box3_img, device, args.debug)
    device, bx1234_img = pcv.logical_or(bx123_img, box4_img, device, args.debug)
    device, inv_bx1234_img = pcv.invert(bx1234_img, device, args.debug)

    # Make a ROI around the plant, include connected objects
    # Apply the box mask to the image
    # device, masked_img = pcv.apply_mask(masked_erd_dil, inv_bx1234_img, 'black', device, args.debug)

    device, edge_masked_img = pcv.apply_mask(masked_erd, inv_bx1234_img, "black", device, args.debug)

    device, roi_img, roi_contour, roi_hierarchy = pcv.rectangle_mask(img, (120, 75), (200, 184), device, args.debug)

    plant_objects, plant_hierarchy = cv2.findContours(edge_masked_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    device, roi_objects, hierarchy5, kept_mask, obj_area = pcv.roi_objects(
        img, "partial", roi_contour, roi_hierarchy, plant_objects, plant_hierarchy, device, args.debug
    )

    # Apply the box mask to the image
    # device, masked_img = pcv.apply_mask(masked_erd_dil, inv_bx1234_img, 'black', device, args.debug)
    device, masked_img = pcv.apply_mask(kept_mask, inv_bx1234_img, "black", device, args.debug)
    rgb = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    # Generate a binary to send to the analysis function
    device, mask = pcv.binary_threshold(masked_img, 1, 255, "light", device, args.debug)
    mask3d = np.copy(mask)
    plant_objects_2, plant_hierarchy_2 = cv2.findContours(mask3d, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    device, o, m = pcv.object_composition(rgb, roi_objects, hierarchy5, device, args.debug)

    ### Analysis ###
    device, hist_header, hist_data, h_norm = pcv.analyze_NIR_intensity(
        img, args.image, mask, 256, device, args.debug, args.outdir + "/" + img_name
    )
    device, shape_header, shape_data, ori_img = pcv.analyze_object(
        rgb, args.image, o, m, device, args.debug, args.outdir + "/" + img_name
    )

    pcv.print_results(args.image, hist_header, hist_data)
    pcv.print_results(args.image, shape_header, shape_data)
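The pipelines above call an options() helper that is not shown in these listings. A minimal sketch of what it might look like, assuming argparse and only the attributes this script actually reads (image, roi, outdir, debug); the real helper in the source project may differ:

def options():
    # Hypothetical argument parser for the NIR pipeline scripts above
    import argparse
    parser = argparse.ArgumentParser(description="PlantCV NIR image processing pipeline.")
    parser.add_argument("-i", "--image", required=True, help="Input NIR image file.")
    parser.add_argument("-r", "--roi", help="Optional region-of-interest image.")
    parser.add_argument("-o", "--outdir", default=".", help="Directory for output images.")
    parser.add_argument("-D", "--debug", action="store_true", help="Turn on debug output.")
    return parser.parse_args()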
Example #4
def main():
   
    # Get options
    args = options()
    if args.debug:
      print("Analyzing your image dude...")
    
    # Read image
    device = 0
    img = cv2.imread(args.image, flags=0)
    path, img_name = os.path.split(args.image)
    # Read in the image that is the average of the averaged background images
    img_bkgrd = cv2.imread("bkgrd_ave_z3500.png", flags=0)

    # NIR images for burnin2 are upside down. This may be fixed in later experiments
    img = ndimage.rotate(img, 180)
    img_bkgrd = ndimage.rotate(img_bkgrd, 180)

    # Subtract the background image from the plant image to make the plant more prominent
    device, bkg_sub_img = pcv.image_subtract(img, img_bkgrd, device, args.debug)
    if args.debug:
        pcv.plot_hist(bkg_sub_img, 'bkg_sub_img')
    device, bkg_sub_thres_img = pcv.binary_threshold(bkg_sub_img, 145, 255, 'dark', device, args.debug)
    bkg_sub_thres_img = cv2.inRange(bkg_sub_img, 30, 220)
    if args.debug:
        cv2.imwrite('bkgrd_sub_thres.png', bkg_sub_thres_img)

    #device, bkg_sub_thres_img = pcv.binary_threshold_2_sided(img_bkgrd, 50, 190, device, args.debug)

    # if a region of interest is specified read it in
    roi = cv2.imread(args.roi)

    # Start by examining the distribution of pixel intensity values
    if args.debug:
      pcv.plot_hist(img, 'hist_img')
      
    # Will an intensity transformation enhance your ability to isolate the object of interest by thresholding?
    device, he_img = pcv.HistEqualization(img, device, args.debug)
    if args.debug:
      pcv.plot_hist(he_img, 'hist_img_he')
    
    # Laplace filtering (identify edges based on 2nd derivative)
    device, lp_img = pcv.laplace_filter(img, 1, 1, device, args.debug)
    if args.debug:
      pcv.plot_hist(lp_img, 'hist_lp')
    
    # Laplacian image sharpening; this step will enhance the darkness of the edges detected
    device, lp_shrp_img = pcv.image_subtract(img, lp_img, device, args.debug)
    if args.debug:
      pcv.plot_hist(lp_shrp_img, 'hist_lp_shrp')
      
    # Sobel filtering
    # 1st derivative Sobel filtering along the horizontal axis (kernel = 1, unscaled)
    device, sbx_img = pcv.sobel_filter(img, 1, 0, 1, 1, device, args.debug)
    if args.debug:
      pcv.plot_hist(sbx_img, 'hist_sbx')
      
    # 1st derivative Sobel filtering along the vertical axis (kernel = 1, unscaled)
    device, sby_img = pcv.sobel_filter(img, 0, 1, 1, 1, device, args.debug)
    if args.debug:
      pcv.plot_hist(sby_img, 'hist_sby')
      
    # Combine the effects of both the x and y filters through matrix addition
    # This will capture edges identified within each plane and emphasize edges found in both images
    device, sb_img = pcv.image_add(sbx_img, sby_img, device, args.debug)
    if args.debug:
      pcv.plot_hist(sb_img, 'hist_sb_comb_img')
    
    # Use a lowpass (blurring) filter to smooth sobel image
    device, mblur_img = pcv.median_blur(sb_img, 1, device, args.debug)
    device, mblur_invert_img = pcv.invert(mblur_img, device, args.debug)
    
    # Combine the smoothed Sobel image with the Laplacian-sharpened image
    # This combines the best features of both methods, as described in "Digital Image Processing" by Gonzalez and Woods, pg. 169
    device, edge_shrp_img = pcv.image_add(mblur_invert_img, lp_shrp_img, device, args.debug)
    if args.debug:
      pcv.plot_hist(edge_shrp_img, 'hist_edge_shrp_img')
      
    # Perform thresholding to generate a binary image
    device, tr_es_img = pcv.binary_threshold(edge_shrp_img, 145, 255, 'dark', device, args.debug)
    
    # Prepare a few small kernels for morphological filtering
    kern = np.zeros((3,3), dtype=np.uint8)
    kern1 = np.copy(kern)
    kern1[1,1:3]=1
    kern2 = np.copy(kern)
    kern2[1,0:2]=1
    kern3 = np.copy(kern)
    kern3[0:2,1]=1
    kern4 = np.copy(kern)
    kern4[1:3,1]=1
    
    # Prepare a larger kernel for dilation
    kern[1,0:3]=1
    kern[0:3,1]=1
    
    
    # Perform erosion with 4 small kernels
    device, e1_img = pcv.erode(tr_es_img, kern1, 1, device, args.debug)
    device, e2_img = pcv.erode(tr_es_img, kern2, 1, device, args.debug)
    device, e3_img = pcv.erode(tr_es_img, kern3, 1, device, args.debug)
    device, e4_img = pcv.erode(tr_es_img, kern4, 1, device, args.debug)
    
    # Combine eroded images
    device, c12_img = pcv.logical_or(e1_img, e2_img, device, args.debug)
    device, c123_img = pcv.logical_or(c12_img, e3_img, device, args.debug)
    device, c1234_img = pcv.logical_or(c123_img, e4_img, device, args.debug)
    
    # Perform dilation
    # device, dil_img = pcv.dilate(c1234_img, kern, 1, device, args.debug)
    device, comb_img = pcv.logical_or(c1234_img, bkg_sub_thres_img, device, args.debug)
    
    # Get masked image
    # The dilated image may contain some pixels which are not plant
    device, masked_erd = pcv.apply_mask(img, comb_img, 'black', device, args.debug)
    # device, masked_erd_dil = pcv.apply_mask(img, dil_img, 'black', device, args.debug)
    
    # Need to remove the edges of the image; we do that by generating a set of rectangles to mask the edges
    # img is (254 X 320)

    # mask for the bottom of the image
    device, box1_img, rect_contour1, hierarchy1 = pcv.rectangle_mask(img, (100,210), (230,252), device, args.debug)
    # mask for the left side of the image
    device, box2_img, rect_contour2, hierarchy2 = pcv.rectangle_mask(img, (1,1), (85,252), device, args.debug)
    # mask for the right side of the image
    device, box3_img, rect_contour3, hierarchy3 = pcv.rectangle_mask(img, (240,1), (318,252), device, args.debug)
    # mask the edges
    device, box4_img, rect_contour4, hierarchy4 = pcv.border_mask(img, (1,1), (318,252), device, args.debug)
    
    # Combine the boxes to filter the edges and the car out of the photo
    device, bx12_img = pcv.logical_or(box1_img, box2_img, device, args.debug)
    device, bx123_img = pcv.logical_or(bx12_img, box3_img, device, args.debug)
    device, bx1234_img = pcv.logical_or(bx123_img, box4_img, device, args.debug)
    device, inv_bx1234_img = pcv.invert(bx1234_img, device, args.debug)
    

    # Make a ROI around the plant, include connected objects
    # Apply the box mask to the image
    # device, masked_img = pcv.apply_mask(masked_erd_dil, inv_bx1234_img, 'black', device, args.debug)
    device, edge_masked_img = pcv.apply_mask(masked_erd, inv_bx1234_img, 'black', device, args.debug)
    device, roi_img, roi_contour, roi_hierarchy = pcv.rectangle_mask(img, (100,75), (220,208), device, args.debug)
    plant_objects, plant_hierarchy = cv2.findContours(edge_masked_img,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    
    device, roi_objects, hierarchy5, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi_contour, roi_hierarchy, plant_objects, plant_hierarchy, device, args.debug)
    
      
    # Apply the box mask to the image
    # device, masked_img = pcv.apply_mask(masked_erd_dil, inv_bx1234_img, 'black', device, args.debug)
    device, masked_img = pcv.apply_mask(kept_mask, inv_bx1234_img, 'black', device, args.debug)
    rgb = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)

    # Generate a binary to send to the analysis function
    device, mask = pcv.binary_threshold(masked_img, 1, 255, 'light', device, args.debug)
    mask3d = np.copy(mask)
    plant_objects_2, plant_hierarchy_2 = cv2.findContours(mask3d,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    device, o, m = pcv.object_composition(rgb, roi_objects, hierarchy5, device, args.debug)
    
    ### Analysis ###
    device, hist_header, hist_data, h_norm = pcv.analyze_NIR_intensity(img, args.image, mask, 256, device, args.debug, args.outdir + '/' + img_name)
    device, shape_header, shape_data, ori_img = pcv.analyze_object(rgb, args.image, o, m, device, args.debug, args.outdir + '/' + img_name)
    
    pcv.print_results(args.image, hist_header, hist_data)
    pcv.print_results(args.image, shape_header, shape_data)
def get_feature(img):
    print("step one")
    """
    Step one: Background/foreground subtraction
    """
    # Get options
    args = options()
    debug = args.debug
    # Read image
    filename = args.result
    # img, path, filename = pcv.readimage(args.image)
    # Pipeline step
    device = 0
    device, resize_img = pcv.resize(img, 0.4, 0.4, device, debug)
    # Classify the pixels as plant or background
    device, mask_img = pcv.naive_bayes_classifier(
        resize_img,
        pdf_file=
        "/home/matthijs/PycharmProjects/SMR1/src/vision/ML_background/Trained_models/model_3/naive_bayes_pdfs.txt",
        device=0,
        debug='print')

    # Median Filter
    device, blur = pcv.median_blur(mask_img.get('plant'), 5, device, debug)
    print("step two")
    """
    Step two: Identify the objects, then extract and filter them
    """

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(resize_img,
                                                         blur,
                                                         device,
                                                         debug=None)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(resize_img,
                                                 'rectangle',
                                                 device,
                                                 roi=True,
                                                 roi_input='default',
                                                 debug=True,
                                                 adjust=True,
                                                 x_adj=50,
                                                 y_adj=10,
                                                 w_adj=-100,
                                                 h_adj=0)
    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        resize_img, 'cutto', roi1, roi_hierarchy, id_objects, obj_hierarchy,
        device, debug)
    # print(roi_objects[0])
    # cv2.drawContours(resize_img, [roi_objects[0]], 0, (0, 255, 0), 3)
    # cv2.imshow("img",resize_img)
    # cv2.waitKey(0)
    area_oud = 0
    i = 0
    index = 0
    object_list = []
    # a = np.array([[hierarchy3[0][0]]])
    hierarchy = []
    for cnt in roi_objects:
        area = cv2.contourArea(cnt)
        M = cv2.moments(cnt)
        if M["m10"] or M["m01"]:
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            # check if the location of the contour is within the constraints
            if cX > 200 and cX < 500 and cY > 25 and cY < 400:
                # cv2.circle(resize_img, (cX, cY), 5, (255, 0, 255), thickness=1, lineType=1, shift=0)
                # check if the area of the contour is bigger than 450
                if area > 450:
                    obj = np.vstack(roi_objects)
                    object_list.append(roi_objects[i])
                    hierarchy.append(hierarchy3[0][i])
                    print(i)
        i = i + 1
    a = np.array([hierarchy])
    # a = [[[-1,-1,-1,-1][-1,-1,-1,-1][-1,-1,-1,-1]]]
    # Object combine kept objects
    # device, obj, mask_2 = pcv.object_composition(resize_img, object_list, a, device, debug)

    mask_contours = np.zeros(resize_img.shape, np.uint8)
    cv2.drawContours(mask_contours, object_list, -1, (255, 255, 255), -1)
    gray_image = cv2.cvtColor(mask_contours, cv2.COLOR_BGR2GRAY)
    ret, mask_contours = cv2.threshold(gray_image, 127, 255, cv2.THRESH_BINARY)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(resize_img,
                                                         mask_contours,
                                                         device,
                                                         debug=None)
    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        resize_img,
        'cutto',
        roi1,
        roi_hierarchy,
        id_objects,
        obj_hierarchy,
        device,
        debug=None)
    # Object combine kept objects
    device, obj, mask = pcv.object_composition(resize_img,
                                               roi_objects,
                                               hierarchy3,
                                               device,
                                               debug=None)
    ############### Analysis ################
    masked = mask.copy()

    outfile = False
    if args.writeimg == True:
        outfile = args.outdir + "/" + filename

    print("step three")
    """
    Step three: Calculate all the features
    """
    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        resize_img, args.image, obj, mask, device, debug, filename="/file")
    print(shape_img)
    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(
        resize_img, args.image, obj, mask, 1680, device)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        resize_img, args.image, kept_mask, 256, device, debug, 'all', 'v',
        'img', 300)
    mask_watershed = mask.copy()
    kernel = np.zeros((5, 5), dtype=np.uint8)
    device, mask_watershed = pcv.erode(mask_watershed, 5, 1, device, debug)

    device, watershed_header, watershed_data, analysis_images = pcv.watershed_segmentation(
        device, resize_img, mask, 50, './examples', debug)
    device, list_of_acute_points = pcv.acute_vertex(obj, 30, 60, 10,
                                                    resize_img, device, debug)

    device, top, bottom, center_v = pcv.x_axis_pseudolandmarks(
        obj, mask, resize_img, device, debug)

    device, left, right, center_h = pcv.y_axis_pseudolandmarks(
        obj, mask, resize_img, device, debug)

    device, points_rescaled, centroid_rescaled, bottomline_rescaled = pcv.scale_features(
        obj, mask, list_of_acute_points, 225, device, debug)

    # Identify acute vertices (tip points) of an object
    # Results in set of point values that may indicate tip points
    device, vert_ave_c, hori_ave_c, euc_ave_c, ang_ave_c, vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b = pcv.landmark_reference_pt_dist(
        points_rescaled, centroid_rescaled, bottomline_rescaled, device, debug)

    landmark_header = [
        'HEADER_LANDMARK', 'tip_points', 'tip_points_r', 'centroid_r',
        'baseline_r', 'tip_number', 'vert_ave_c', 'hori_ave_c', 'euc_ave_c',
        'ang_ave_c', 'vert_ave_b', 'hori_ave_b', 'euc_ave_b', 'ang_ave_b',
        'left_lmk', 'right_lmk', 'center_h_lmk', 'left_lmk_r', 'right_lmk_r',
        'center_h_lmk_r', 'top_lmk', 'bottom_lmk', 'center_v_lmk', 'top_lmk_r',
        'bottom_lmk_r', 'center_v_lmk_r'
    ]
    landmark_data = [
        'LANDMARK_DATA', 0, 0, 0, 0,
        len(list_of_acute_points), vert_ave_c, hori_ave_c, euc_ave_c,
        ang_ave_c, vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0
    ]
    shape_data_train = list(shape_data)
    shape_data_train.pop(0)
    shape_data_train.pop(10)
    watershed_data_train = list(watershed_data)
    watershed_data_train.pop(0)
    landmark_data_train = [
        len(list_of_acute_points), vert_ave_c, hori_ave_c, euc_ave_c,
        ang_ave_c, vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b
    ]
    X = shape_data_train + watershed_data_train + landmark_data_train
    print("len X", len(X))
    print(X)
    # Write shape and color data to results file
    result = open(args.result, "a")
    result.write('\t'.join(map(str, shape_header)))
    result.write("\n")
    result.write('\t'.join(map(str, shape_data)))
    result.write("\n")
    result.write('\t'.join(map(str, watershed_header)))
    result.write("\n")
    result.write('\t'.join(map(str, watershed_data)))
    result.write("\n")
    result.write('\t'.join(map(str, landmark_header)))
    result.write("\n")
    result.write('\t'.join(map(str, landmark_data)))
    result.write("\n")
    for row in shape_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.write('\t'.join(map(str, color_header)))
    result.write("\n")
    result.write('\t'.join(map(str, color_data)))
    result.write("\n")
    for row in color_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.close()
    print("done")
    print(shape_img)
    return X, shape_img, masked
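A sketch of how get_feature might be driven, assuming the image path below is a placeholder and that the options() flags it reads (result, outdir, writeimg, debug) are supplied on the command line:

if __name__ == "__main__":
    # Hypothetical driver for the feature-extraction step above
    input_img = cv2.imread("example_plant.png")  # placeholder path
    features, shape_img, plant_mask = get_feature(input_img)
    print("feature vector length:", len(features))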
Example #6
args = options()
debug = args.debug
device = 0
img = cv2.imread("/home/matthijs/Downloads/tomaat_3.jpg")
img_2 = cv2.imread("/home/matthijs/Downloads/tomaat_3.jpg", 0)

cimg = cv2.cvtColor(img_2, cv2.COLOR_BAYER_BG2BGR)
device, s = pcv.rgb2gray_hsv(img, 's', device)

device, a = pcv.rgb2gray_lab(img, 'a', device, debug)  # looks most promising

device, a_thresh = pcv.binary_threshold(a, 135, 255, 'light', device, debug)

device, a_mblur = pcv.median_blur(a_thresh, 5, device, debug)
kernel = np.zeros((3, 3), dtype=np.uint8)
device, mask_watershed = pcv.erode(a_mblur, 5, 1, device, debug)

circles = cv2.HoughCircles(img_2,
                           cv2.cv.CV_HOUGH_GRADIENT,
                           1,
                           30,
                           param1=50,
                           param2=20,
                           minRadius=130,
                           maxRadius=220)
print("watt?")

circles = np.uint16(np.around(circles))

for i in circles[0, :]:
    cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
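The constant cv2.cv.CV_HOUGH_GRADIENT only exists in the OpenCV 2.x bindings. A sketch of the equivalent call for OpenCV 3+/4, assuming the rest of the script is unchanged:

# Same circle detection, written against the OpenCV 3+/4 constant name
circles = cv2.HoughCircles(img_2,
                           cv2.HOUGH_GRADIENT,
                           1,
                           30,
                           param1=50,
                           param2=20,
                           minRadius=130,
                           maxRadius=220)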
Example #7
def main():
    # Get image options
    args = options()
    # LINE 22
    if args.debug:
        print("Debug mode turned on...")
    # Read the image; flags=0 indicates a grayscale image is expected
    img = cv2.imread(args.image, flags=0)
    # cv2.imshow("imagen original",img)
    # Get directory path and image name from command line arguments
    path, img_name = os.path.split(args.image)

    # LINE 30
    # Read in image which is the pixelwise average of background images
    img_bkgrd = cv2.imread("background_average.jpg", flags=0)
    #cv2.imshow("ventana del fondo",img_bkgrd)
    # Image processing step
    device = 0
    ###### all good up to here
    # line 37
    # Subtract the background image from the image with the plant
    device, bkg_sub_img = pcv.image_subtract(img, img_bkgrd, device,
                                             args.debug)
    #cv2.imshow("imagen resta",bkg_sub_img)
    # Threshold the image of interest using the two-sided cv2.inRange function (keep what is between 50-190)
    bkg_sub_thres_img = cv2.inRange(bkg_sub_img, 50, 190)
    if args.debug:
        cv2.imwrite('bkgrd_sub_thres.png', bkg_sub_thres_img)
# all good up to here
# line 46
# Laplace filtering (identify edges based on the 2nd derivative)
    device, lp_img = pcv.laplace_filter(img, 1, 1, device, args.debug)
    #cv2.imshow("imagen de filtrado",lp_img)
    if args.debug:
        pcv.plot_hist(lp_img, 'histograma_lp')

    # Laplacian image sharpening; this step will enhance the darkness of the edges detected
    device, lp_shrp_img = pcv.image_subtract(img, lp_img, device, args.debug)
    #cv2.imshow("imagen de borde lapacian",lp_shrp_img)
    if args.debug:
        pcv.plot_hist(lp_shrp_img, 'histograma_lp_shrp')
# all good up to here, line 58
# Sobel filtering
# 1st derivative Sobel filtering along the horizontal axis (kernel = 1, unscaled)
    """    According to this it's wrong -- there are seven args; drop scale and keep the aperture k; check sobel_filter in the docs
    device, sbx_img = pcv.sobel_filter(img, 1, 0, 1, 1, device, args.debug)
    """
    device, sbx_img = pcv.sobel_filter(img, 1, 0, 1, device, args.debug)
    #cv2.imshow("imagen sobel-eje horizontal",sbx_img)
    if args.debug:
        pcv.plot_hist(sbx_img, 'histograma_sbx')

    # 1st derivative Sobel filtering along the vertical axis (kernel = 1, unscaled)
    device, sby_img = pcv.sobel_filter(img, 0, 1, 1, device, args.debug)
    #cv2.imshow("imagen sobel-ejevertical",sby_img)
    if args.debug:
        pcv.plot_hist(sby_img, 'histograma_sby')

    # Combine the effects of both the x and y filters through matrix addition
    # This captures the edges identified within each plane and emphasizes edges found in both images
    device, sb_img = pcv.image_add(sbx_img, sby_img, device, args.debug)
    #cv2.imshow("imagen suma de sobel",sb_img)
    if args.debug:
        pcv.plot_hist(sb_img, 'histograma_sb_comb_img')
# all good up to here, line 82
# Use a lowpass (blur) filter to smooth the Sobel image
    device, mblur_img = pcv.median_blur(sb_img, 1, device, args.debug)
    #cv2.imshow("imagen blur",mblur_img)
    device, mblur_invert_img = pcv.invert(mblur_img, device, args.debug)
    #cv2.imshow("imagen blur-invertido",mblur_invert_img)
    # Combine the smoothed Sobel image with the Laplacian-sharpened image
    # combines the best features of both methods as described in "Digital Image Processing" by Gonzalez and Woods pg. 169
    device, edge_shrp_img = pcv.image_add(mblur_invert_img, lp_shrp_img,
                                          device, args.debug)
    #cv2.imshow("imagen-combinacion-sobel-laplacian",mblur_img)
    if args.debug:
        pcv.plot_hist(edge_shrp_img, 'hist_edge_shrp_img')

    # Perform thresholding to generate a binary image
    device, tr_es_img = pcv.binary_threshold(edge_shrp_img, 125, 255, 'dark',
                                             device, args.debug)
    #cv2.imshow("imagen binaria de combinacion",tr_es_img)
    # all good up to here, line 99
    # Prepare a few small kernels for morphological filtering
    kern = np.zeros((3, 3), dtype=np.uint8)
    kern1 = np.copy(kern)
    kern1[1, 1:3] = 1
    kern2 = np.copy(kern)
    kern2[1, 0:2] = 1
    kern3 = np.copy(kern)
    kern3[0:2, 1] = 1
    kern4 = np.copy(kern)
    kern4[1:3, 1] = 1

    # Prepare a larger kernel for dilation
    kern[1, 0:3] = 1
    kern[0:3, 1] = 1
    # Perform erosion with 4 small kernels
    device, e1_img = pcv.erode(tr_es_img, 1, 1, device, args.debug)
    #cv2.imshow("erosion 1",e1_img)
    device, e2_img = pcv.erode(tr_es_img, 1, 1, device, args.debug)
    #cv2.imshow("erosion 2",e2_img)
    device, e3_img = pcv.erode(tr_es_img, 1, 1, device, args.debug)
    #cv2.imshow("erosion 3",e3_img)
    device, e4_img = pcv.erode(tr_es_img, 1, 1, device, args.debug)
    #cv2.imshow("erosion 4",e4_img)

    # Combine eroded images
    device, c12_img = pcv.logical_or(e1_img, e2_img, device, args.debug)
    #cv2.imshow("c12",c12_img)
    device, c123_img = pcv.logical_or(c12_img, e3_img, device, args.debug)
    #cv2.imshow("c123",c123_img)
    device, c1234_img = pcv.logical_or(c123_img, e4_img, device, args.debug)
    #cv2.imshow("c1234",c1234_img)

    # Bring the two object identification approaches together.
    # Using a logical OR, combine the object identified by background subtraction with the object identified by the derivative filter.
    device, comb_img = pcv.logical_or(c1234_img, bkg_sub_thres_img, device,
                                      args.debug)
    #cv2.imshow("comb_img",comb_img)
    # Get masked image. Essentially, identify pixels corresponding to the plant and keep those.
    device, masked_erd = pcv.apply_mask(img, comb_img, 'black', device,
                                        args.debug)
    #cv2.imshow("masked_erd",masked_erd)
    #cv2.imshow("imagen original chkar",img)
    # Need to remove the edges of the image; we do that by generating a set of rectangles to mask the edges
    # img is (254 X 320)
    # mask for the bottom of the image
    device, im2, box1_img, rect_contour1, hierarchy1 = pcv.rectangle_mask(
        img, (120, 184), (215, 252), device, args.debug, color='white')
    #cv2.imshow("im2",box1_img)
    # mask for the left side of the image
    device, im3, box2_img, rect_contour2, hierarchy2 = pcv.rectangle_mask(
        img, (1, 1), (85, 252), device, args.debug, color='white')
    #cv2.imshow("im3",box2_img)
    # mask for the right side of the image
    device, im4, box3_img, rect_contour3, hierarchy3 = pcv.rectangle_mask(
        img, (240, 1), (318, 252), device, args.debug, color='white')
    #cv2.imshow("im4",box3_img)
    # mask the edges
    device, im5, box4_img, rect_contour4, hierarchy4 = pcv.rectangle_mask(
        img, (1, 1), (318, 252), device, args.debug)
    #cv2.imshow("im5",box4_img)

    # Combine the boxes to filter the edges and the car out of the photo
    device, bx12_img = pcv.logical_or(box1_img, box2_img, device, args.debug)
    device, bx123_img = pcv.logical_or(bx12_img, box3_img, device, args.debug)
    device, bx1234_img = pcv.logical_or(bx123_img, box4_img, device,
                                        args.debug)
    #cv2.imshow("combinacion logica or",bx1234_img)

    # Invert this mask and then apply it to the masked image.
    device, inv_bx1234_img = pcv.invert(bx1234_img, device, args.debug)
    # cv2.imshow("combinacion logica or invertida",inv_bx1234_img)
    device, edge_masked_img = pcv.apply_mask(masked_erd, inv_bx1234_img,
                                             'black', device, args.debug)
    # cv2.imshow("edge_masked_img",edge_masked_img)

    # assign the coordinates of an area of interest (rectangle around the area you expect the plant to be in)
    device, im6, roi_img, roi_contour, roi_hierarchy = pcv.rectangle_mask(
        img, (120, 75), (200, 184), device, args.debug)
    #cv2.imshow("im6",roi_img)
    # get the coordinates of the plant from the masked object
    plant_objects, plant_hierarchy = cv2.findContours(edge_masked_img,
                                                      cv2.RETR_TREE,
                                                      cv2.CHAIN_APPROX_NONE)

    # Obtain the coordinates of the plant object which are partially within the area of interest
    device, roi_objects, hierarchy5, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi_contour, roi_hierarchy, plant_objects,
        plant_hierarchy, device, args.debug)

    # Apply the box mask to the image to ensure no background
    device, masked_img = pcv.apply_mask(kept_mask, inv_bx1234_img, 'black',
                                        device, args.debug)
    #cv2.imshow("mascara final",masked_img)
    #/////////////////////////////////////////////////////////////
    #device, masked_img = pcv.apply_mask(kept_mask, inv_bx1234_img, 'black', device, args.debug)
    rgb = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    #cv2.imshow("rgb",rgb)
    # Generate a binary to send to the analysis function
    device, mask = pcv.binary_threshold(masked_img, 1, 255, 'light', device,
                                        args.debug)
    #cv2.imshow("mask",mask)
    mask3d = np.copy(mask)
    plant_objects_2, plant_hierarchy_2 = cv2.findContours(
        mask3d, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    device, o, m = pcv.object_composition(rgb, roi_objects, hierarchy5, device,
                                          args.debug)

    # Get final masked image
    device, masked_img = pcv.apply_mask(kept_mask, inv_bx1234_img, 'black',
                                        device, args.debug)
    #cv2.imshow("maskara final2",masked_img)
    ################### copy of the above -- the tutorial is wrong here
    # Obtain a 3 dimensional representation of this grayscale image (for pseudocoloring)
    #rgb = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)

    # Generate a binary to send to the analysis function
    #device, mask = pcv.binary_threshold(masked_img, 1, 255, 'light', device, args.debug)

    # Make a copy of this mask for pseudocoloring
    #mask3d = np.copy(mask)

    # Extract coordinates of plant for pseudocoloring of plant
    #plant_objects_2, plant_hierarchy_2 = cv2.findContours(mask3d,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    #device, o, m = pcv.object_composition(rgb, roi_objects, hierarchy5, device, args.debug)

    # Extract coordinates of plant for pseudocoloring of plant
    #plant_objects_2, plant_hierarchy_2 = cv2.findContours(mask3d,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    #device, o, m = pcv.object_composition(rgb, roi_objects, hierarchy5, device, args.debug)
    ####################################
    #######################
    ### Analysis ###
    # Perform signal analysis
    ################# tests showing that the tutorial is wrong
    #ols=type(args.image)
    #print ols
    ############## tests showing this didn't work:     device, hist_header, hist_data, h_norm = pcv.analyze_NIR_intensity(img, args.image, mask, 256, device, args.debug, args.outdir + '/' + img_name)

    #print(args.outdir+'/'+img_name)
    #print(args.debug)
    # in the end it did work; what is there now (debug= and filename=) was added
    ##################################################### debug reports True, so I added extra prints
    #device, hist_header, hist_data, h_norm = pcv.analyze_NIR_intensity(img, rgb, mask, 256, device, debug='print', filename=False)
    device, hist_header, hist_data, h_norm = pcv.analyze_NIR_intensity(
        img,
        rgb,
        mask,
        256,
        device,
        debug=args.debug,
        filename=args.outdir + '/' + img_name)

    # Perform shape analysis
    device, shape_header, shape_data, ori_img = pcv.analyze_object(
        rgb,
        args.image,
        o,
        m,
        device,
        debug=args.debug,
        filename=args.outdir + '/' + img_name)

    # Print the results to STDOUT
    pcv.print_results(args.image, hist_header, hist_data)
    pcv.print_results(args.image, shape_header, shape_data)

    cv2.waitKey()
    cv2.destroyAllWindows()
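Like the other pipeline scripts in this listing, this main() is presumably invoked from the command line; a minimal entry-point sketch (not shown in the original) would be:

if __name__ == "__main__":
    # Hypothetical entry point; assumes options() parses the command-line flags main() reads
    main()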
Example #8
def main():
   
    # Get options
    args = options()
    if args.debug:
      print("Analyzing your image dude...")
    # Read image
    img = cv2.imread(args.image, flags=0)
    # if a region of interest is specified read it in
    roi = cv2.imread(args.roi)
    # Pipeline step
    device = 0
    
    # Start by examining the distribution of pixel intensity values
    if args.debug:
      pcv.plot_hist(img, 'hist_img')
      
    # Will an intensity transformation enhance your ability to isolate the object of interest by thresholding?
    device, he_img = pcv.HistEqualization(img, device, args.debug)
    if args.debug:
      pcv.plot_hist(he_img, 'hist_img_he')
    
    # Laplace filtering (identify edges based on 2nd derivative)
    device, lp_img = pcv.laplace_filter(img, 1, 1, device, args.debug)
    if args.debug:
      pcv.plot_hist(lp_img, 'hist_lp')
    
    # Laplacian image sharpening; this step will enhance the darkness of the edges detected
    device, lp_shrp_img = pcv.image_subtract(img, lp_img, device, args.debug)
    if args.debug:
      pcv.plot_hist(lp_shrp_img, 'hist_lp_shrp')
      
    # Sobel filtering
    # 1st derivative Sobel filtering along the horizontal axis (kernel = 1, unscaled)
    device, sbx_img = pcv.sobel_filter(img, 1, 0, 1, 1, device, args.debug)
    if args.debug:
      pcv.plot_hist(sbx_img, 'hist_sbx')
      
    # 1st derivative Sobel filtering along the vertical axis (kernel = 1, unscaled)
    device, sby_img = pcv.sobel_filter(img, 0, 1, 1, 1, device, args.debug)
    if args.debug:
      pcv.plot_hist(sby_img, 'hist_sby')
      
    # Combine the effects of both the x and y filters through matrix addition
    # This will capture edges identified within each plane and emphasize edges found in both images
    device, sb_img = pcv.image_add(sbx_img, sby_img, device, args.debug)
    if args.debug:
      pcv.plot_hist(sb_img, 'hist_sb_comb_img')
    
    # Use a lowpass (blurring) filter to smooth sobel image
    device, mblur_img = pcv.median_blur(sb_img, 1, device, args.debug)
    device, mblur_invert_img = pcv.invert(mblur_img, device, args.debug)
    
    # Combine the smoothed Sobel image with the Laplacian-sharpened image
    # This combines the best features of both methods, as described in "Digital Image Processing" by Gonzalez and Woods, pg. 169
    device, edge_shrp_img = pcv.image_add(mblur_invert_img, lp_shrp_img, device, args.debug)
    if args.debug:
      pcv.plot_hist(edge_shrp_img, 'hist_edge_shrp_img')
      
    # Perform thresholding to generate a binary image
    device, tr_es_img = pcv.binary_threshold(edge_shrp_img, 145, 255, 'dark', device, args.debug)
    
    # Prepare a few small kernels for morphological filtering
    kern = np.zeros((3,3), dtype=np.uint8)
    kern1 = np.copy(kern)
    kern1[1,1:3]=1
    kern2 = np.copy(kern)
    kern2[1,0:2]=1
    kern3 = np.copy(kern)
    kern3[0:2,1]=1
    kern4 = np.copy(kern)
    kern4[1:3,1]=1
    
    # Prepare a larger kernel for dilation
    kern[1,0:3]=1
    kern[0:3,1]=1
    
    # Perform erosion with 4 small kernels
    device, e1_img = pcv.erode(tr_es_img, kern1, 1, device, args.debug)
    device, e2_img = pcv.erode(tr_es_img, kern2, 1, device, args.debug)
    device, e3_img = pcv.erode(tr_es_img, kern3, 1, device, args.debug)
    device, e4_img = pcv.erode(tr_es_img, kern4, 1, device, args.debug)
    
    # Combine eroded images
    device, c12_img = pcv.logical_or(e1_img, e2_img, device, args.debug)
    device, c123_img = pcv.logical_or(c12_img, e3_img, device, args.debug)
    device, c1234_img = pcv.logical_or(c123_img, e4_img, device, args.debug)
    
    # Perform dilation
    device, dil_img = pcv.dilate(c1234_img, kern, 1, device, args.debug)
    
    # Get masked image
    # The dilated image may contain some pixels which are not plant
    device, masked_erd = pcv.apply_mask(img, c1234_img, 'black', device, args.debug)
    device, masked_erd_dil = pcv.apply_mask(img, dil_img, 'black', device, args.debug)
    
    # Need to remove the edges of the image; we do that by generating a set of rectangles to mask the edges
    # img is (254 X 320)
    device, box1_img, rect_contour1, hierarchy1 = pcv.rectangle_mask(img, (1,1), (64,252), device, args.debug)
    device, box2_img, rect_contour2, hierarchy2 = pcv.rectangle_mask(img, (256,1), (318,252), device, args.debug)
    device, box3_img, rect_contour3, hierarchy3 = pcv.rectangle_mask(img, (1,184), (318,252), device, args.debug)
    device, box4_img, rect_contour4, hierarchy4 = pcv.border_mask(img, (1,1), (318,252), device, args.debug)
    
    # Combine the boxes to filter the edges and the car out of the photo
    device, bx12_img = pcv.logical_or(box1_img, box2_img, device, args.debug)
    device, bx123_img = pcv.logical_or(bx12_img, box3_img, device, args.debug)
    device, bx1234_img = pcv.logical_or(bx123_img, box4_img, device, args.debug)
    device, inv_bx1234_img = pcv.invert(bx1234_img, device, args.debug)
    
    # Apply the box mask to the image
    device, masked_img = pcv.apply_mask(masked_erd_dil, inv_bx1234_img, 'black', device, args.debug)
    
    # Generate a binary to send to the analysis function
    device, mask = pcv.binary_threshold(masked_img, 1, 255, 'light', device, args.debug)
    pcv.analyze_NIR_intensity(img, args.image, mask, 256, device, args.debug, 'example')
def main():
    # Parse command-line options
    args = options()

    device = 0

    # Open output file
    out = open(args.outfile, "w")

    # Open the image file
    img, path, fname = pcv.readimage(filename=args.image, debug=args.debug)
    # Classify healthy and unhealthy plant pixels
    device, masks = pcv.naive_bayes_classifier(img=img,
                                               pdf_file=args.pdfs,
                                               device=device)

    # Use the identified blue mesh area to build a mask for the pot area
    # First erode the blue mesh region to remove background
    device, mesh_errode = pcv.erode(img=masks["Background_Blue"],
                                    kernel=9,
                                    i=3,
                                    device=device,
                                    debug=args.debug)
    # Define a region of interest for blue mesh contours
    device, pot_roi, pot_hierarchy = pcv.define_roi(img=img,
                                                    shape='rectangle',
                                                    device=device,
                                                    roi=None,
                                                    roi_input='default',
                                                    debug=args.debug,
                                                    adjust=True,
                                                    x_adj=0,
                                                    y_adj=500,
                                                    w_adj=0,
                                                    h_adj=-650)
    # Find blue mesh contours
    device, mesh_objects, mesh_hierarchy = pcv.find_objects(img=img,
                                                            mask=mesh_errode,
                                                            device=device,
                                                            debug=args.debug)
    # Keep blue mesh contours in the region of interest
    device, kept_mesh_objs, kept_mesh_hierarchy, kept_mask_mesh, _ = pcv.roi_objects(
        img=img,
        roi_type='partial',
        roi_contour=pot_roi,
        roi_hierarchy=pot_hierarchy,
        object_contour=mesh_objects,
        obj_hierarchy=mesh_hierarchy,
        device=device,
        debug=args.debug)
    # Flatten the blue mesh contours into a single object
    device, mesh_flattened, mesh_mask = pcv.object_composition(
        img=img,
        contours=kept_mesh_objs,
        hierarchy=kept_mesh_hierarchy,
        device=device,
        debug=args.debug)
    # Initialize a pot mask
    pot_mask = np.zeros(np.shape(masks["Background_Blue"]), dtype=np.uint8)
    # Find the minimum bounding rectangle for the blue mesh region
    rect = cv2.minAreaRect(mesh_flattened)
    # Create a contour for the minimum bounding box
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    # Create a mask from the bounding box contour
    cv2.drawContours(pot_mask, [box], 0, (255), -1)
    # If the bounding box area is too small then the plant has likely occluded too much of the pot for us to use this
    # as a marker for the pot area
    if np.sum(pot_mask) / 255 < 2900000:
        print(np.sum(pot_mask) / 255)
        # Create a new pot mask
        pot_mask = np.zeros(np.shape(masks["Background_Blue"]), dtype=np.uint8)
        # Set the mask area to the ROI area
        box = np.array([[0, 500], [0, 2806], [2304, 2806], [2304, 500]])
        cv2.drawContours(pot_mask, [box], 0, (255), -1)
    # Dilate the blue mesh area to include the ridge of the pot
    device, pot_mask_dilated = pcv.dilate(img=pot_mask,
                                          kernel=3,
                                          i=60,
                                          device=device,
                                          debug=args.debug)
    # Mask the healthy mask
    device, healthy_masked = pcv.apply_mask(img=cv2.merge(
        [masks["Healthy"], masks["Healthy"], masks["Healthy"]]),
                                            mask=pot_mask_dilated,
                                            mask_color="black",
                                            device=device,
                                            debug=args.debug)
    # Mask the unhealthy mask
    device, unhealthy_masked = pcv.apply_mask(img=cv2.merge(
        [masks["Unhealthy"], masks["Unhealthy"], masks["Unhealthy"]]),
                                              mask=pot_mask_dilated,
                                              mask_color="black",
                                              device=device,
                                              debug=args.debug)
    # Convert the masks back to binary
    healthy_masked, _, _ = cv2.split(healthy_masked)
    unhealthy_masked, _, _ = cv2.split(unhealthy_masked)

    # Fill small objects
    device, fill_image_healthy = pcv.fill(img=np.copy(healthy_masked),
                                          mask=np.copy(healthy_masked),
                                          size=300,
                                          device=device,
                                          debug=args.debug)
    device, fill_image_unhealthy = pcv.fill(img=np.copy(unhealthy_masked),
                                            mask=np.copy(unhealthy_masked),
                                            size=1000,
                                            device=device,
                                            debug=args.debug)
    # Define a region of interest
    device, roi1, roi_hierarchy = pcv.define_roi(img=img,
                                                 shape='rectangle',
                                                 device=device,
                                                 roi=None,
                                                 roi_input='default',
                                                 debug=args.debug,
                                                 adjust=True,
                                                 x_adj=450,
                                                 y_adj=1000,
                                                 w_adj=-400,
                                                 h_adj=-1000)
    # Filter objects that overlap the ROI
    device, id_objects, obj_hierarchy_healthy = pcv.find_objects(
        img=img, mask=fill_image_healthy, device=device, debug=args.debug)
    device, _, _, kept_mask_healthy, _ = pcv.roi_objects(
        img=img,
        roi_type='partial',
        roi_contour=roi1,
        roi_hierarchy=roi_hierarchy,
        object_contour=id_objects,
        obj_hierarchy=obj_hierarchy_healthy,
        device=device,
        debug=args.debug)
    device, id_objects, obj_hierarchy_unhealthy = pcv.find_objects(
        img=img, mask=fill_image_unhealthy, device=device, debug=args.debug)
    device, _, _, kept_mask_unhealthy, _ = pcv.roi_objects(
        img=img,
        roi_type='partial',
        roi_contour=roi1,
        roi_hierarchy=roi_hierarchy,
        object_contour=id_objects,
        obj_hierarchy=obj_hierarchy_unhealthy,
        device=device,
        debug=args.debug)
    # Combine the healthy and unhealthy mask
    device, mask = pcv.logical_or(img1=kept_mask_healthy,
                                  img2=kept_mask_unhealthy,
                                  device=device,
                                  debug=args.debug)

    # Output a healthy/unhealthy image
    classified_img = cv2.merge([
        np.zeros(np.shape(mask), dtype=np.uint8), kept_mask_healthy,
        kept_mask_unhealthy
    ])
    pcv.print_image(img=classified_img,
                    filename=os.path.join(
                        args.outdir,
                        os.path.basename(args.image)[:-4] + ".classified.png"))

    # Output a healthy/unhealthy image overlaid on the original image
    overlayed = cv2.addWeighted(src1=np.copy(classified_img),
                                alpha=0.5,
                                src2=np.copy(img),
                                beta=0.5,
                                gamma=0)
    pcv.print_image(img=overlayed,
                    filename=os.path.join(
                        args.outdir,
                        os.path.basename(args.image)[:-4] + ".overlaid.png"))

    # Extract hue values from the image
    device, h = pcv.rgb2gray_hsv(img=img,
                                 channel="h",
                                 device=device,
                                 debug=args.debug)

    # Extract the plant hue values
    plant_hues = h[np.where(mask == 255)]

    # Initialize hue histogram
    hue_hist = {}
    for i in range(0, 180):
        hue_hist[i] = 0

    # Store all hue values
    hue_values = []

    # Populate histogram
    total_px = len(plant_hues)
    for hue in plant_hues:
        hue_hist[hue] += 1
        hue_values.append(hue)

    # Parse the filename
    genotype, treatment, replicate, timepoint = os.path.basename(
        args.image)[:-4].split("_")
    replicate = replicate.replace("#", "")
    if timepoint[-3:] == "dbi":
        timepoint = -1
    else:
        timepoint = timepoint.replace("dpi", "")

    # Output results
    for i in range(0, 180):
        out.write("\t".join(
            map(str, [
                genotype, treatment, timepoint, replicate, total_px, i,
                hue_hist[i]
            ])) + "\n")
    out.close()

    # Calculate basic statistics
    healthy_sum = int(np.sum(kept_mask_healthy))
    unhealthy_sum = int(np.sum(kept_mask_unhealthy))
    healthy_total_ratio = healthy_sum / float(healthy_sum + unhealthy_sum)
    unhealthy_total_ratio = unhealthy_sum / float(healthy_sum + unhealthy_sum)
    stats = open(args.outfile[:-4] + ".stats.txt", "w")
    stats.write("%s, %f, %f, %f, %f" %
                (os.path.basename(args.image), healthy_sum, unhealthy_sum,
                 healthy_total_ratio, unhealthy_total_ratio) + '\n')
    stats.close()

    # Fit a 3-component Gaussian Mixture Model
    gmm = mixture.GaussianMixture(n_components=3,
                                  covariance_type="full",
                                  tol=0.001)
    gmm.fit(np.expand_dims(hue_values, 1))
    gmm3 = open(args.outfile[:-4] + ".gmm3.txt", "w")
    gmm3.write("%s, %f, %f, %f, %f, %f, %f, %f, %f, %f" %
               (os.path.basename(args.image), gmm.means_.ravel()[0],
                gmm.means_.ravel()[1], gmm.means_.ravel()[2],
                np.sqrt(gmm.covariances_.ravel()[0]),
                np.sqrt(gmm.covariances_.ravel()[1]),
                np.sqrt(gmm.covariances_.ravel()[2]), gmm.weights_.ravel()[0],
                gmm.weights_.ravel()[1], gmm.weights_.ravel()[2]) + '\n')
    gmm3.close()

    # Fit a 2-component Gaussian Mixture Model
    gmm = mixture.GaussianMixture(n_components=2,
                                  covariance_type="full",
                                  tol=0.001)
    gmm.fit(np.expand_dims(hue_values, 1))
    gmm2 = open(args.outfile[:-4] + ".gmm2.txt", "w")
    gmm2.write("%s, %f, %f, %f, %f, %f, %f" %
               (os.path.basename(args.image), gmm.means_.ravel()[0],
                gmm.means_.ravel()[1], np.sqrt(gmm.covariances_.ravel()[0]),
                np.sqrt(gmm.covariances_.ravel()[1]), gmm.weights_.ravel()[0],
                gmm.weights_.ravel()[1]) + '\n')
    gmm2.close()

    # Fit a 1-component Gaussian Mixture Model
    gmm = mixture.GaussianMixture(n_components=1,
                                  covariance_type="full",
                                  tol=0.001)
    gmm.fit(np.expand_dims(hue_values, 1))
    gmm1 = open(args.outfile[:-4] + ".gmm1.txt", "w")
    gmm1.write(
        "%s, %f, %f, %f" %
        (os.path.basename(args.image), gmm.means_.ravel()[0],
         np.sqrt(gmm.covariances_.ravel()[0]), gmm.weights_.ravel()[0]) + '\n')
    gmm1.close()
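The script above derives genotype, treatment, replicate, and timepoint from the image filename. A small sketch of the naming convention it appears to assume, using a made-up filename:

import os

# Hypothetical filename following the genotype_treatment_replicate_timepoint pattern split on "_"
fname = "B73_drought_#12_7dpi.png"
genotype, treatment, replicate, timepoint = os.path.basename(fname)[:-4].split("_")
replicate = replicate.replace("#", "")  # -> "12"
# "dbi" timepoints are coded as -1; otherwise the "dpi" suffix is stripped
timepoint = -1 if timepoint[-3:] == "dbi" else timepoint.replace("dpi", "")  # -> "7"
print(genotype, treatment, replicate, timepoint)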