def main():
    # Initialize device
    device = 0

    # Parse command-line options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(filename=args.image, debug=args.debug)

    # Convert RGB to LAB and extract the Blue-Yellow channel
    device, blue_channel = pcv.rgb2gray_lab(img=img,
                                            channel="b",
                                            device=device,
                                            debug=args.debug)

    # Threshold the blue image using the triangle autothreshold method
    device, blue_tri = pcv.triangle_auto_threshold(device=device,
                                                   img=blue_channel,
                                                   maxvalue=255,
                                                   object_type="light",
                                                   xstep=1,
                                                   debug=args.debug)

    # Extract core plant region from the image to preserve delicate plant features during filtering
    device += 1
    plant_region = blue_tri[0:1750, 600:2080]
    if args.debug is not None:
        pcv.print_image(filename=str(device) + "_extract_plant_region.png",
                        img=plant_region)

    # Use a Gaussian blur to disrupt the strong edge features in the cabinet
    device, blur_gaussian = pcv.gaussian_blur(device=device,
                                              img=blue_tri,
                                              ksize=(3, 3),
                                              sigmax=0,
                                              sigmay=None,
                                              debug=args.debug)

    # Threshold the blurred image to remove features that were blurred
    device, blur_thresholded = pcv.binary_threshold(img=blur_gaussian,
                                                    threshold=250,
                                                    maxValue=255,
                                                    object_type="light",
                                                    device=device,
                                                    debug=args.debug)

    # Add the plant region back in to the filtered image
    device += 1
    blur_thresholded[0:1750, 600:2080] = plant_region
    if args.debug is not None:
        pcv.print_image(filename=str(device) + "_replace_plant_region.png",
                        img=blur_thresholded)

    # Fill small noise
    device, blue_fill_50 = pcv.fill(img=np.copy(blur_thresholded),
                                    mask=np.copy(blur_thresholded),
                                    size=50,
                                    device=device,
                                    debug=args.debug)

    # Identify objects
    device, contours, contour_hierarchy = pcv.find_objects(img=img,
                                                           mask=blue_fill_50,
                                                           device=device,
                                                           debug=args.debug)

    # Define ROI
    device, roi, roi_hierarchy = pcv.define_roi(img=img,
                                                shape="rectangle",
                                                device=device,
                                                roi=None,
                                                roi_input="default",
                                                debug=args.debug,
                                                adjust=True,
                                                x_adj=565,
                                                y_adj=0,
                                                w_adj=-490,
                                                h_adj=-250)

    # Decide which objects to keep
    device, roi_contours, roi_contour_hierarchy, _, _ = pcv.roi_objects(
        img=img,
        roi_type="partial",
        roi_contour=roi,
        roi_hierarchy=roi_hierarchy,
        object_contour=contours,
        obj_hierarchy=contour_hierarchy,
        device=device,
        debug=args.debug)

    # If there are no contours left, we cannot measure anything
    if len(roi_contours) > 0:
        # Object combine kept objects
        device, plant_contour, plant_mask = pcv.object_composition(
            img=img,
            contours=roi_contours,
            hierarchy=roi_contour_hierarchy,
            device=device,
            debug=args.debug)

        outfile = False
        if args.writeimg:
            outfile = args.outdir + "/" + filename

        # Find shape properties, output shape image (optional)
        device, shape_header, shape_data, shape_img = pcv.analyze_object(
            img=img,
            imgname=args.image,
            obj=plant_contour,
            mask=plant_mask,
            device=device,
            debug=args.debug,
            filename=outfile)

        # Shape properties relative to user boundary line (optional)
        device, boundary_header, boundary_data, boundary_img = pcv.analyze_bound(
            img=img,
            imgname=args.image,
            obj=plant_contour,
            mask=plant_mask,
            line_position=440,
            device=device,
            debug=args.debug,
            filename=outfile)

        # Determine color properties: Histograms, Color Slices and Pseudocolored Images,
        # output color analyzed images (optional)
        device, color_header, color_data, color_img = pcv.analyze_color(
            img=img,
            imgname=args.image,
            mask=plant_mask,
            bins=256,
            device=device,
            debug=args.debug,
            hist_plot_type=None,
            pseudo_channel="v",
            pseudo_bkg="img",
            resolution=300,
            filename=outfile)

        # Output shape and color data
        result = open(args.result, "a")
        result.write('\t'.join(map(str, shape_header)) + "\n")
        result.write('\t'.join(map(str, shape_data)) + "\n")
        for row in shape_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.write('\t'.join(map(str, color_header)) + "\n")
        result.write('\t'.join(map(str, color_data)) + "\n")
        result.write('\t'.join(map(str, boundary_header)) + "\n")
        result.write('\t'.join(map(str, boundary_data)) + "\n")
        result.write('\t'.join(map(str, boundary_img)) + "\n")
        for row in color_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.close()

        # Find matching NIR image
        device, nirpath = pcv.get_nir(path=path,
                                      filename=filename,
                                      device=device,
                                      debug=args.debug)
        nir_rgb, nir_path, nir_filename = pcv.readimage(nirpath)
        nir_img = cv2.imread(nirpath, 0)

        # Make mask glovelike in proportions via dilation
        device, d_mask = pcv.dilate(plant_mask,
                                    kernel=1,
                                    i=0,
                                    device=device,
                                    debug=args.debug)

        # Resize mask
        prop2, prop1 = conv_ratio()
        device, nmask = pcv.resize(img=d_mask,
                                   resize_x=prop1,
                                   resize_y=prop2,
                                   device=device,
                                   debug=args.debug)

        # Convert the resized mask to a binary mask
        device, bmask = pcv.binary_threshold(img=nmask,
                                             threshold=0,
                                             maxValue=255,
                                             object_type="light",
                                             device=device,
                                             debug=args.debug)

        device, crop_img = crop_sides_equally(mask=bmask,
                                              nir=nir_img,
                                              device=device,
                                              debug=args.debug)

        # Position and crop the mask
        device, newmask = pcv.crop_position_mask(img=nir_img,
                                                 mask=crop_img,
                                                 device=device,
                                                 x=34,
                                                 y=9,
                                                 v_pos="top",
                                                 h_pos="right",
                                                 debug=args.debug)

        # Identify objects
        device, nir_objects, nir_hierarchy = pcv.find_objects(img=nir_rgb,
                                                              mask=newmask,
                                                              device=device,
                                                              debug=args.debug)

        # Object combine kept objects
        device, nir_combined, nir_combinedmask = pcv.object_composition(
            img=nir_rgb,
            contours=nir_objects,
            hierarchy=nir_hierarchy,
            device=device,
            debug=args.debug)

        if args.writeimg:
            outfile = args.outdir + "/" + nir_filename

        # Analyze NIR signal data
        device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(
            img=nir_img,
            rgbimg=nir_rgb,
            mask=nir_combinedmask,
            bins=256,
            device=device,
            histplot=False,
            debug=args.debug,
            filename=outfile)

        # Analyze the shape of the plant contour from the NIR image
        device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(
            img=nir_img,
            imgname=nir_filename,
            obj=nir_combined,
            mask=nir_combinedmask,
            device=device,
            debug=args.debug,
            filename=outfile)

        # Write NIR data to co-results file
        coresult = open(args.coresult, "a")
        coresult.write('\t'.join(map(str, nhist_header)) + "\n")
        coresult.write('\t'.join(map(str, nhist_data)) + "\n")
        for row in nir_imgs:
            coresult.write('\t'.join(map(str, row)) + "\n")
        coresult.write('\t'.join(map(str, nshape_header)) + "\n")
        coresult.write('\t'.join(map(str, nshape_data)) + "\n")
        coresult.write('\t'.join(map(str, nir_shape)) + "\n")
        coresult.close()
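# The pipelines in this listing assume an options() helper that parses the command
# line. Its definition is not included here; the sketch below is a hedged
# reconstruction based only on the attributes used above (args.image, args.debug,
# args.result, args.coresult, args.outdir, args.writeimg) and may differ from the
# original script. conv_ratio() and crop_sides_equally() used in main() are
# likewise external helpers that are not reproduced here.
import argparse

def options():
    parser = argparse.ArgumentParser(description="PlantCV VIS/NIR processing pipeline.")
    parser.add_argument("-i", "--image", help="Input image file.", required=True)
    parser.add_argument("-o", "--outdir", help="Output directory for debug/analysis images.")
    parser.add_argument("-r", "--result", help="Result file for VIS data.", required=True)
    parser.add_argument("-r2", "--coresult", help="Result file for NIR (co-processed) data.")
    parser.add_argument("-w", "--writeimg", help="Write out analyzed images.", action="store_true")
    parser.add_argument("-D", "--debug", help="Debug mode: None, 'print', or 'plot'.", default=None)
    return parser.parse_args()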
def get_feature(img):
    """
    Step one: Background/foreground subtraction
    """
    print("step one")
    # Get options
    args = options()
    debug = args.debug
    # Read image
    filename = args.result
    # img, path, filename = pcv.readimage(args.image)
    # Pipeline step
    device = 0
    device, resize_img = pcv.resize(img, 0.4, 0.4, device, debug)
    # Classify the pixels as plant or background
    device, mask_img = pcv.naive_bayes_classifier(
        resize_img,
        pdf_file="/home/matthijs/PycharmProjects/SMR1/src/vision/ML_background/Trained_models/model_3/naive_bayes_pdfs.txt",
        device=0,
        debug='print')

    # Median Filter
    device, blur = pcv.median_blur(mask_img.get('plant'), 5, device, debug)
    print("step two")
    """
    Step two: Identify the objects, extract and filter them
    """

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(resize_img,
                                                         blur,
                                                         device,
                                                         debug=None)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(resize_img,
                                                 'rectangle',
                                                 device,
                                                 roi=True,
                                                 roi_input='default',
                                                 debug=True,
                                                 adjust=True,
                                                 x_adj=50,
                                                 y_adj=10,
                                                 w_adj=-100,
                                                 h_adj=0)
    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        resize_img, 'cutto', roi1, roi_hierarchy, id_objects, obj_hierarchy,
        device, debug)
    # print(roi_objects[0])
    # cv2.drawContours(resize_img, [roi_objects[0]], 0, (0, 255, 0), 3)
    # cv2.imshow("img",resize_img)
    # cv2.waitKey(0)
    area_oud = 0
    i = 0
    index = 0
    object_list = []
    # a = np.array([[hierarchy3[0][0]]])
    hierarchy = []
    for cnt in roi_objects:
        area = cv2.contourArea(cnt)
        M = cv2.moments(cnt)
        if M["m10"] or M["m01"]:
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            # check if the location of the contour is within the constraints
            if cX > 200 and cX < 500 and cY > 25 and cY < 400:
                # cv2.circle(resize_img, (cX, cY), 5, (255, 0, 255), thickness=1, lineType=1, shift=0)
                # check if the size of the contour is bigger than 450
                if area > 450:
                    obj = np.vstack(roi_objects)
                    object_list.append(roi_objects[i])
                    hierarchy.append(hierarchy3[0][i])
                    print(i)
        i = i + 1
    a = np.array([hierarchy])
    # a = [[[-1,-1,-1,-1][-1,-1,-1,-1][-1,-1,-1,-1]]]
    # Object combine kept objects
    # device, obj, mask_2 = pcv.object_composition(resize_img, object_list, a, device, debug)

    mask_contours = np.zeros(resize_img.shape, np.uint8)
    cv2.drawContours(mask_contours, object_list, -1, (255, 255, 255), -1)
    gray_image = cv2.cvtColor(mask_contours, cv2.COLOR_BGR2GRAY)
    ret, mask_contours = cv2.threshold(gray_image, 127, 255, cv2.THRESH_BINARY)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(resize_img,
                                                         mask_contours,
                                                         device,
                                                         debug=None)
    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        resize_img,
        'cutto',
        roi1,
        roi_hierarchy,
        id_objects,
        obj_hierarchy,
        device,
        debug=None)
    # Object combine kept objects
    device, obj, mask = pcv.object_composition(resize_img,
                                               roi_objects,
                                               hierarchy3,
                                               device,
                                               debug=None)
    ############### Analysis ################
    masked = mask.copy()

    outfile = False
    if args.writeimg == True:
        outfile = args.outdir + "/" + filename

    print("step three")
    """
    Step three: Calculate all the features
    """
    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        resize_img, args.image, obj, mask, device, debug, filename="/file")
    print(shape_img)
    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(
        resize_img, args.image, obj, mask, 1680, device)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        resize_img, args.image, kept_mask, 256, device, debug, 'all', 'v',
        'img', 300)
    maks_watershed = mask.copy()
    kernel = np.zeros((5, 5), dtype=np.uint8)
    device, mask_watershed = pcv.erode(maks_watershed, 5, 1, device, debug)

    device, watershed_header, watershed_data, analysis_images = pcv.watershed_segmentation(
        device, resize_img, mask, 50, './examples', debug)
    device, list_of_acute_points = pcv.acute_vertex(obj, 30, 60, 10,
                                                    resize_img, device, debug)

    device, top, bottom, center_v = pcv.x_axis_pseudolandmarks(
        obj, mask, resize_img, device, debug)

    device, left, right, center_h = pcv.y_axis_pseudolandmarks(
        obj, mask, resize_img, device, debug)

    device, points_rescaled, centroid_rescaled, bottomline_rescaled = pcv.scale_features(
        obj, mask, list_of_acute_points, 225, device, debug)

    # Identify acute vertices (tip points) of an object
    # Results in set of point values that may indicate tip points
    device, vert_ave_c, hori_ave_c, euc_ave_c, ang_ave_c, vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b = pcv.landmark_reference_pt_dist(
        points_rescaled, centroid_rescaled, bottomline_rescaled, device, debug)

    landmark_header = [
        'HEADER_LANDMARK', 'tip_points', 'tip_points_r', 'centroid_r',
        'baseline_r', 'tip_number', 'vert_ave_c', 'hori_ave_c', 'euc_ave_c',
        'ang_ave_c', 'vert_ave_b', 'hori_ave_b', 'euc_ave_b', 'ang_ave_b',
        'left_lmk', 'right_lmk', 'center_h_lmk', 'left_lmk_r', 'right_lmk_r',
        'center_h_lmk_r', 'top_lmk', 'bottom_lmk', 'center_v_lmk', 'top_lmk_r',
        'bottom_lmk_r', 'center_v_lmk_r'
    ]
    landmark_data = [
        'LANDMARK_DATA', 0, 0, 0, 0,
        len(list_of_acute_points), vert_ave_c, hori_ave_c, euc_ave_c,
        ang_ave_c, vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0
    ]
    shape_data_train = list(shape_data)
    shape_data_train.pop(0)
    shape_data_train.pop(10)
    watershed_data_train = list(watershed_data)
    watershed_data_train.pop(0)
    landmark_data_train = [
        len(list_of_acute_points), vert_ave_c, hori_ave_c, euc_ave_c,
        ang_ave_c, vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b
    ]
    X = shape_data_train + watershed_data_train + landmark_data_train
    print("len X", len(X))
    print(X)
    # Write shape and color data to the results file
    result = open(args.result, "a")
    result.write('\t'.join(map(str, shape_header)))
    result.write("\n")
    result.write('\t'.join(map(str, shape_data)))
    result.write("\n")
    result.write('\t'.join(map(str, watershed_header)))
    result.write("\n")
    result.write('\t'.join(map(str, watershed_data)))
    result.write("\n")
    result.write('\t'.join(map(str, landmark_header)))
    result.write("\n")
    result.write('\t'.join(map(str, landmark_data)))
    result.write("\n")
    for row in shape_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.write('\t'.join(map(str, color_header)))
    result.write("\n")
    result.write('\t'.join(map(str, color_data)))
    result.write("\n")
    for row in color_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.close()
    print("done")
    print(shape_img)
    return X, shape_img, masked
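# Hedged usage sketch for get_feature(): load an image with OpenCV and unpack the
# returned feature vector, shape images, and mask copy. The image path below is a
# placeholder, and the function still expects the command-line options parsed by
# options() (e.g. --result, --outdir) to be available.
example_img = cv2.imread("example_plant.png")  # hypothetical input image
features, shape_images, plant_mask = get_feature(example_img)
print("Number of extracted features:", len(features))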
Example #3
def test_plantcv_resize():
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    device, resized_img = pcv.resize(img, 0.5, 0.5, device=0, debug=None)
    ix, iy, iz = np.shape(img)
    rx, ry, rz = np.shape(resized_img)
    assert ix > rx
Example #4
def get_height(img):
    # print("step one")
    """
    Step one: Background/foreground subtraction
    """
    # Get options
    args = options()
    debug = args.debug
    # Read image
    filename = args.result
    # img, path, filename = pcv.readimage(args.image)
    # Pipeline step
    device = 0
    device, resize_img = pcv.resize(img, 0.4, 0.4, device, debug)
    # Classify the pixels as plant or background
    device, mask_img = pcv.naive_bayes_classifier(
        resize_img,
        pdf_file="./../ML_background/Trained_models/model_6/naive_bayes_pdfs.txt",
        device=0,
        debug='print')

    # Median Filter
    device, blur = pcv.median_blur(mask_img.get('plant'), 5, device, debug)
    # print("step two")
    """
    Step two: Identify the objects, extract and filter them
    """

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(resize_img,
                                                         blur,
                                                         device,
                                                         debug=None)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(resize_img,
                                                 'rectangle',
                                                 device,
                                                 roi=True,
                                                 roi_input='default',
                                                 debug=None,
                                                 adjust=True,
                                                 x_adj=50,
                                                 y_adj=10,
                                                 w_adj=0,
                                                 h_adj=0)
    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        resize_img, 'cutto', roi1, roi_hierarchy, id_objects, obj_hierarchy,
        device, debug)
    # print(roi_objects[0])
    # cv2.drawContours(resize_img, [roi_objects[0]], 0, (0, 255, 0), 3)
    # cv2.imshow("img",resize_img)
    # cv2.waitKey(0)
    area_oud = 0
    i = 0
    index = 0
    object_list = []
    # a = np.array([[hierarchy3[0][0]]])
    hierarchy = []
    for cnt in roi_objects:
        area = cv2.contourArea(cnt)
        M = cv2.moments(cnt)
        if M["m10"] or M["m01"]:
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            # check if the location of the contour is within the constraints
            # cv2.circle(resize_img, (cX, cY), 5, (255, 0, 255), thickness=1, lineType=1, shift=0)
            # check if the size of the contour is bigger than 200
            if area > 200:
                obj = np.vstack(roi_objects)
                object_list.append(roi_objects[i])
                hierarchy.append(hierarchy3[0][i])
                # print(i)
        i = i + 1
    a = np.array([hierarchy])
    # a = [[[-1,-1,-1,-1][-1,-1,-1,-1][-1,-1,-1,-1]]]
    # Object combine kept objects
    # device, obj, mask_2 = pcv.object_composition(resize_img, object_list, a, device, debug)

    mask_contours = np.zeros(resize_img.shape, np.uint8)
    cv2.drawContours(mask_contours, object_list, -1, (255, 255, 255), -1)
    gray_image = cv2.cvtColor(mask_contours, cv2.COLOR_BGR2GRAY)
    ret, mask_contours = cv2.threshold(gray_image, 127, 255, cv2.THRESH_BINARY)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(resize_img,
                                                         mask_contours,
                                                         device,
                                                         debug=None)
    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        resize_img,
        'cutto',
        roi1,
        roi_hierarchy,
        id_objects,
        obj_hierarchy,
        device,
        debug=None)
    # Object combine kept objects
    device, obj, mask = pcv.object_composition(resize_img,
                                               roi_objects,
                                               hierarchy3,
                                               device,
                                               debug=None)
    ############### Analysis ################
    try:
        if len(obj) > 0:
            # Find shape properties, output shape image (optional)
            device, shape_header, shape_data, shape_img = pcv.analyze_object(
                resize_img, args.image, obj, mask, device, debug)
            # cv2.waitKey(10000)
            return shape_data[6]
        else:
            return -1
    except:
        return -1
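# Hedged usage sketch for get_height(): shape_data[6] in this PlantCV version
# appears to correspond to the object's height in pixels, so the function returns
# that value, or -1 when no usable contour is found. The image path is a
# placeholder for illustration.
example_img = cv2.imread("example_plant.png")  # hypothetical input image
plant_height = get_height(example_img)
if plant_height < 0:
    print("No plant detected")
else:
    print("Plant height (pixels):", plant_height)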
Example #5
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)
    brass_mask = cv2.imread(args.roi)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 49, 255, 'light', device,
                                            args.debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device,
                                            args.debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device,
                                         args.debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, args.debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light',
                                                device, args.debug)
    device, brass_inv = pcv.invert(brass_thresh, device, args.debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device,
                                          args.debug)

    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, args.debug)
    device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, 'dark',
                                             device, args.debug)
    device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, 'light',
                                             device, args.debug)
    device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, args.debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white',
                                         device, args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, args.debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, args.debug)

    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, 'dark',
                                                device, args.debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, 'light',
                                                device, args.debug)

    # Join the thresholded saturation and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device,
                                     args.debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device,
                                         args.debug)

    # Fill small objects
    device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 150, device, args.debug)

    # Median Filter
    #device, soil_mblur = pcv.median_blur(soil_fill, 5, device, args.debug)
    #device, soil_cnt = pcv.median_blur(soil_fill, 5, device, args.debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device,
                                     args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(
        masked2, soil_cnt, device, args.debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(img, 'rectangle', device,
                                                 None, 'default', args.debug,
                                                 True, 600, 450, -600, -350)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device,
        args.debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3,
                                               device, args.debug)

    ############## VIS Analysis ################

    outfile = False
    if args.writeimg == True:
        outfile = args.outdir + "/" + filename

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, obj, mask, device, args.debug, outfile)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        img, args.image, mask, 256, device, args.debug, None, 'v', 'img', 300,
        outfile)

    # Output shape and color data

    result = open(args.result, "a")
    result.write('\t'.join(map(str, shape_header)))
    result.write("\n")
    result.write('\t'.join(map(str, shape_data)))
    result.write("\n")
    for row in shape_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.write('\t'.join(map(str, color_header)))
    result.write("\n")
    result.write('\t'.join(map(str, color_data)))
    result.write("\n")
    for row in color_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.close()

    ############################# Use VIS image mask for NIR image#########################
    # Find matching NIR image
    device, nirpath = pcv.get_nir(path, filename, device, args.debug)
    nir, path1, filename1 = pcv.readimage(nirpath)
    nir2 = cv2.imread(nirpath, -1)

    # Flip mask
    device, f_mask = pcv.flip(mask, "horizontal", device, args.debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.116148, 0.116148, device, args.debug)

    # Position and crop the mask
    device, newmask = pcv.crop_position_mask(nir, nmask, device, 15, 5, "top",
                                             "right", args.debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(
        nir, newmask, device, args.debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(
        nir, nir_objects, nir_hierarchy, device, args.debug)

    ####################################### Analysis #############################################
    outfile1 = False
    if args.writeimg == True:
        outfile1 = args.outdir + "/" + filename1

    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(
        nir2, filename1, nir_combinedmask, 256, device, False, args.debug,
        outfile1)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(
        nir2, filename1, nir_combined, nir_combinedmask, device, args.debug,
        outfile1)

    coresult = open(args.coresult, "a")
    coresult.write('\t'.join(map(str, nhist_header)))
    coresult.write("\n")
    coresult.write('\t'.join(map(str, nhist_data)))
    coresult.write("\n")
    for row in nir_imgs:
        coresult.write('\t'.join(map(str, row)))
        coresult.write("\n")

    coresult.write('\t'.join(map(str, nshape_header)))
    coresult.write("\n")
    coresult.write('\t'.join(map(str, nshape_data)))
    coresult.write("\n")
    coresult.write('\t'.join(map(str, nir_shape)))
    coresult.write("\n")
    coresult.close()
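# A typical entry point for the pipeline script above (not shown in this listing)
# simply calls main() when the file is executed directly:
if __name__ == "__main__":
    main()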
Example #6
def process_sv_images_core(vis_id,
                           vis_img,
                           nir_id,
                           nir_rgb,
                           nir_cv2,
                           traits,
                           debug=None):
    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(vis_img, 's', device, debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)

    # Fill small objects
    # device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(vis_img, 'b', device, debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device,
                                            debug)
    device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device, debug)

    # Fill small objects
    # device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_mblur, b_cnt, device, debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(vis_img, bs, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark',
                                                  device, debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light',
                                                  device, debug)

    # Join the thresholded saturation and blue-yellow images (OR)
    device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device,
                                    debug)

    # Fill small noise
    device, ab_fill1 = pcv.fill(ab, ab_cnt, 200, device, debug)

    # Dilate to join small objects with larger ones
    device, ab_cnt1 = pcv.dilate(ab_fill1, 3, 2, device, debug)
    device, ab_cnt2 = pcv.dilate(ab_fill1, 3, 2, device, debug)

    # Fill dilated image mask
    device, ab_cnt3 = pcv.fill(ab_cnt2, ab_cnt1, 150, device, debug)
    device, masked2 = pcv.apply_mask(masked, ab_cnt3, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, debug)
    device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, 'dark',
                                                   device, debug)
    device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255,
                                                   'light', device, debug)

    device, masked2a_thresh_blur = pcv.median_blur(masked2a_thresh, 5, device,
                                                   debug)
    device, masked2b_thresh_blur = pcv.median_blur(masked2b_thresh, 13, device,
                                                   debug)

    device, ab_fill = pcv.logical_or(masked2a_thresh_blur,
                                     masked2b_thresh_blur, device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(
        masked2, ab_fill, device, debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device,
                                                 None, 'default', debug, True,
                                                 700, 0, -600, -300)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        vis_img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy,
        device, debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(vis_img, roi_objects,
                                               hierarchy3, device, debug)

    ############## VIS Analysis ################
    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        vis_img, vis_id, obj, mask, device, debug)

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(
        vis_img, vis_id, obj, mask, 384, device, debug)

    # Determine color properties: Histograms, Color Slices and
    # Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        vis_img, vis_id, mask, 256, device, debug, None, 'v', 'img', 300)

    # Output shape and color data
    vis_traits = {}
    for i in range(1, len(shape_header)):
        vis_traits[shape_header[i]] = shape_data[i]
    for i in range(1, len(boundary_header)):
        vis_traits[boundary_header[i]] = boundary_data[i]
    for i in range(2, len(color_header)):
        vis_traits[color_header[i]] = serialize_color_data(color_data[i])

    ############################# Use VIS image mask for NIR image#########################
    # Flip mask
    device, f_mask = pcv.flip(mask, "vertical", device, debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.1154905775, 0.1154905775, device,
                               debug)

    # Position and crop the mask
    device, newmask = pcv.crop_position_mask(nir_rgb, nmask, device, 30, 4,
                                             "top", "right", debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(
        nir_rgb, newmask, device, debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(
        nir_rgb, nir_objects, nir_hierarchy, device, debug)

    ####################################### Analysis #############################################
    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(
        nir_cv2, nir_id, nir_combinedmask, 256, device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(
        nir_cv2, nir_id, nir_combined, nir_combinedmask, device, debug)

    nir_traits = {}
    for i in range(1, len(nshape_header)):
        nir_traits[nshape_header[i]] = nshape_data[i]
    for i in range(2, len(nhist_header)):
        nir_traits[nhist_header[i]] = serialize_color_data(nhist_data[i])

    # Add data to traits table
    traits['sv_area'].append(vis_traits['area'])
    traits['hull_area'].append(vis_traits['hull-area'])
    traits['solidity'].append(vis_traits['solidity'])
    traits['height'].append(vis_traits['height_above_bound'])
    traits['perimeter'].append(vis_traits['perimeter'])

    return [vis_traits, nir_traits]
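# serialize_color_data() is referenced above but not defined in this listing. The
# calls pass per-channel histogram lists, so a minimal stand-in only needs to turn
# such a list into a single flat value for the traits dictionary; this is an
# assumption, not the original implementation.
def serialize_color_data(values):
    # Flatten a list of histogram counts into one comma-separated string.
    return ",".join(str(v) for v in values)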
Example #7
import glob

import cv2
import plantcv as pcv

cv_img = []
for img in glob.glob("Data_set/Foto's/*.jpg"):
    n = cv2.imread(img)
    device = 0
    device, n = pcv.resize(n, 0.2, 0.2, device)
    # Classify the pixels as plant or background
    device, mask = pcv.naive_bayes_classifier(n, pdf_file="Trained_models/model_2/naive_bayes_pdfs.txt", device=0,
                                              debug=None)
    cv2.imshow("mask", mask.get('plant'))
    cv2.waitKey(2000)
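# Hedged variation on the loop above for headless runs (no display available): the
# plant masks can be written to disk instead of shown with cv2.imshow. The output
# directory name is an assumption for illustration.
import os

os.makedirs("masks_out", exist_ok=True)
for idx, img_path in enumerate(glob.glob("Data_set/Foto's/*.jpg")):
    n = cv2.imread(img_path)
    device = 0
    device, n = pcv.resize(n, 0.2, 0.2, device)
    device, mask = pcv.naive_bayes_classifier(n, pdf_file="Trained_models/model_2/naive_bayes_pdfs.txt", device=0,
                                              debug=None)
    cv2.imwrite(os.path.join("masks_out", "mask_%03d.png" % idx), mask.get('plant'))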
def process_sv_images_core(vis_id, vis_img, nir_id, nir_rgb, nir_cv2, traits, debug=None):
    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(vis_img, 's', device, debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)

    # Fill small objects
    # device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(vis_img, 'b', device, debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device, debug)

    # Fill small objects
    # device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_mblur, b_cnt, device, debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(vis_img, bs, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark', device, debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, debug)

    # Join the thresholded saturation and blue-yellow images (OR)
    device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)

    # Fill small noise
    device, ab_fill1 = pcv.fill(ab, ab_cnt, 200, device, debug)

    # Dilate to join small objects with larger ones
    device, ab_cnt1 = pcv.dilate(ab_fill1, 3, 2, device, debug)
    device, ab_cnt2 = pcv.dilate(ab_fill1, 3, 2, device, debug)

    # Fill dilated image mask
    device, ab_cnt3 = pcv.fill(ab_cnt2, ab_cnt1, 150, device, debug)
    device, masked2 = pcv.apply_mask(masked, ab_cnt3, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, debug)
    device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, 'dark', device, debug)
    device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255, 'light', device, debug)

    device, masked2a_thresh_blur = pcv.median_blur(masked2a_thresh, 5, device, debug)
    device, masked2b_thresh_blur = pcv.median_blur(masked2b_thresh, 13, device, debug)

    device, ab_fill = pcv.logical_or(masked2a_thresh_blur, masked2b_thresh_blur, device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device, None, 'default', debug, True, 700,
                                                 0, -600, -300)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(vis_img, 'partial', roi1, roi_hierarchy,
                                                                           id_objects, obj_hierarchy, device,
                                                                           debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(vis_img, roi_objects, hierarchy3, device, debug)

    ############## VIS Analysis ################
    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(vis_img, vis_id, obj, mask, device, debug)

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(vis_img, vis_id, obj, mask, 384, device,
                                                                              debug)

    # Determine color properties: Histograms, Color Slices and
    # Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(vis_img, vis_id, mask, 256, device, debug,
                                                                    None, 'v', 'img', 300)

    # Output shape and color data
    vis_traits = {}
    for i in range(1, len(shape_header)):
        vis_traits[shape_header[i]] = shape_data[i]
    for i in range(1, len(boundary_header)):
        vis_traits[boundary_header[i]] = boundary_data[i]
    for i in range(2, len(color_header)):
        vis_traits[color_header[i]] = serialize_color_data(color_data[i])


    ############################# Use VIS image mask for NIR image#########################
    # Flip mask
    device, f_mask = pcv.flip(mask, "vertical", device, debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.1154905775, 0.1154905775, device, debug)

    # Position and crop the mask
    device, newmask = pcv.crop_position_mask(nir_rgb, nmask, device, 30, 4, "top", "right", debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir_rgb, newmask, device, debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir_rgb, nir_objects, nir_hierarchy, device, debug)

    ####################################### Analysis #############################################
    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir_cv2, nir_id, nir_combinedmask, 256,
                                                                           device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir_cv2, nir_id, nir_combined, nir_combinedmask,
                                                                       device, debug)

    nir_traits = {}
    for i in range(1, len(nshape_header)):
        nir_traits[nshape_header[i]] = nshape_data[i]
    for i in range(2, len(nhist_header)):
        nir_traits[nhist_header[i]] = serialize_color_data(nhist_data[i])

    # Add data to traits table
    traits['sv_area'].append(vis_traits['area'])
    traits['hull_area'].append(vis_traits['hull-area'])
    traits['solidity'].append(vis_traits['solidity'])
    traits['height'].append(vis_traits['height_above_bound'])
    traits['perimeter'].append(vis_traits['perimeter'])

    return [vis_traits, nir_traits]
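# The traits dictionary passed into process_sv_images_core() and
# process_tv_images_core() is created by the caller. Based only on the keys used in
# this listing, a minimal initialization would look like the sketch below; the real
# extractor may track additional metadata fields.
traits = {
    "sv_area": [],      # one entry per side-view image
    "hull_area": [],
    "solidity": [],
    "height": [],
    "perimeter": [],
    "tv_area": -1,      # single top-view measurement
}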
def process_tv_images_core(vis_id, vis_img, nir_id, nir_rgb, nir_cv2, brass_mask, traits, debug=None):
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(vis_img, 's', device, debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 75, 255, 'light', device, debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(vis_img, 'b', device, debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(vis_img, bs, 'white', device, debug)

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, debug)
    device, brass_inv = pcv.invert(brass_thresh, device, debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device, debug)

    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, debug)
    device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, 'dark', device, debug)
    device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, 'light', device, debug)
    device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, 'dark', device, debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, 'light', device, debug)

    # Join the thresholded saturation and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)

    # Fill small objects
    device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 300, device, debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(vis_img, 'rectangle', device, None, 'default', debug, True, 600, 450, -600,
                                                 -350)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(vis_img, 'partial', roi1, roi_hierarchy,
                                                                           id_objects, obj_hierarchy, device, debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(vis_img, roi_objects, hierarchy3, device, debug)

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(vis_img, vis_id, obj, mask, device, debug)

    # Determine color properties
    device, color_header, color_data, color_img = pcv.analyze_color(vis_img, vis_id, mask, 256, device, debug, None,
                                                                    'v', 'img', 300)

    # Output shape and color data
    vis_traits = {}
    for i in range(1, len(shape_header)):
        vis_traits[shape_header[i]] = shape_data[i]
    for i in range(2, len(color_header)):
        vis_traits[color_header[i]] = serialize_color_data(color_data[i])

    ############################# Use VIS image mask for NIR image#########################


    # Flip mask
    device, f_mask = pcv.flip(mask, "horizontal", device, debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.116148, 0.116148, device, debug)

    # Position and crop the mask
    device, newmask = pcv.crop_position_mask(nir_rgb, nmask, device, 15, 5, "top", "right", debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir_rgb, newmask, device, debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir_rgb, nir_objects, nir_hierarchy, device, debug)

    ####################################### Analysis #############################################

    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir_cv2, nir_id, nir_combinedmask, 256,
                                                                           device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir_cv2, nir_id, nir_combined, nir_combinedmask,
                                                                       device, debug)

    nir_traits = {}
    for i in range(1, len(nshape_header)):
        nir_traits[nshape_header[i]] = nshape_data[i]
    for i in range(2, len(nhist_header)):
        nir_traits[nhist_header[i]] = serialize_color_data(nhist_data[i])

    # Add data to traits table
    traits['tv_area'] = vis_traits['area']

    return [vis_traits, nir_traits]
def process_sv_images(session, url, vis_id, nir_id, traits, debug=None):
    """Process side-view images from Clowder.

    Inputs:
    session = requests session object
    url     = Clowder URL
    vis_id  = The Clowder ID of an RGB image
    nir_img = The Clowder ID of an NIR grayscale image
    traits  = traits table (dictionary)
    debug   = None, print, or plot. Print = save to file, Plot = print to screen

    :param session: requests session object
    :param url: str
    :param vis_id: str
    :param nir_id: str
    :param traits: dict
    :param debug: str
    :return traits: dict
    """
    # Read VIS image from Clowder
    vis_r = session.get(posixpath.join(url, "api/files", vis_id), stream=True)
    img_array = np.asarray(bytearray(vis_r.content), dtype="uint8")
    img = cv2.imdecode(img_array, -1)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)

    # Fill small objects
    # device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device, debug)

    # Fill small objects
    # device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_mblur, b_cnt, device, debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark', device, debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, debug)

    # Join the thresholded saturation and blue-yellow images (OR)
    device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)

    # Fill small noise
    device, ab_fill1 = pcv.fill(ab, ab_cnt, 200, device, debug)

    # Dilate to join small objects with larger ones
    device, ab_cnt1 = pcv.dilate(ab_fill1, 3, 2, device, debug)
    device, ab_cnt2 = pcv.dilate(ab_fill1, 3, 2, device, debug)

    # Fill dilated image mask
    device, ab_cnt3 = pcv.fill(ab_cnt2, ab_cnt1, 150, device, debug)
    device, masked2 = pcv.apply_mask(masked, ab_cnt3, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, debug)
    device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, 'dark', device, debug)
    device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255, 'light', device, debug)

    device, masked2a_thresh_blur = pcv.median_blur(masked2a_thresh, 5, device, debug)
    device, masked2b_thresh_blur = pcv.median_blur(masked2b_thresh, 13, device, debug)

    device, ab_fill = pcv.logical_or(masked2a_thresh_blur, masked2b_thresh_blur, device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device, None, 'default', debug, True, 700,
                                                 0, -600, -300)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy,
                                                                           id_objects, obj_hierarchy, device,
                                                                           debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, debug)

    ############## VIS Analysis ################
    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(img, vis_id, obj, mask, device, debug)

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(img, vis_id, obj, mask, 384, device,
                                                                              debug)

    # Determine color properties: Histograms, Color Slices and
    # Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(img, vis_id, mask, 256, device, debug,
                                                                    None, 'v', 'img', 300)

    # Output shape and color data
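    # Each analyze_* call returns a header tuple aligned one-to-one with its data
    # tuple; index 0 (and index 1 for the color output) are presumably label or
    # bookkeeping fields, so they are skipped below. serialize_color_data() is
    # assumed to be a helper defined elsewhere in this module that flattens the
    # per-bin histogram lists into a storable value.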
    vis_traits = {}
    for i in range(1, len(shape_header)):
        vis_traits[shape_header[i]] = shape_data[i]
    for i in range(1, len(boundary_header)):
        vis_traits[boundary_header[i]] = boundary_data[i]
    for i in range(2, len(color_header)):
        vis_traits[color_header[i]] = serialize_color_data(color_data[i])
    #print(vis_traits)
    add_plantcv_metadata(session, url, vis_id, vis_traits)

    ############################# Use VIS image mask for NIR image#########################
    # Read NIR image from Clowder
    nir_r = session.get(posixpath.join(url, "api/files", nir_id), stream=True)
    nir_array = np.asarray(bytearray(nir_r.content), dtype="uint8")
    nir = cv2.imdecode(nir_array, -1)
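    # Expand the single-channel NIR capture to 3-channel BGR so the PlantCV
    # contour/mask steps below can treat it like the VIS images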
    nir_rgb = cv2.cvtColor(nir, cv2.COLOR_GRAY2BGR)

    # Flip mask
    device, f_mask = pcv.flip(mask, "vertical", device, debug)

    # Resize mask
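    # The scale factor maps the VIS-resolution mask down to the NIR camera's
    # resolution; the value is presumably calibrated for this imaging setup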
    device, nmask = pcv.resize(f_mask, 0.1154905775, 0.1154905775, device, debug)

    # Position and crop mask
    device, newmask = pcv.crop_position_mask(nir_rgb, nmask, device, 30, 4, "top", "right", debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir_rgb, newmask, device, debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir_rgb, nir_objects, nir_hierarchy, device, debug)

    ####################################### Analysis #############################################
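    # The positional False passed to analyze_NIR_intensity below is presumably the
    # histplot flag, so no histogram figure is produced here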
    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir, nir_id, nir_combinedmask, 256,
                                                                           device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir, nir_id, nir_combined, nir_combinedmask,
                                                                       device, debug)

    nir_traits = {}
    for i in range(1, len(nshape_header)):
        nir_traits[nshape_header[i]] = nshape_data[i]
    for i in range(2, len(nhist_header)):
        nir_traits[nhist_header[i]] = serialize_color_data(nhist_data[i])
    #print(nir_traits)
    add_plantcv_metadata(session, url, nir_id, nir_traits)

    # Add data to traits table
    traits['sv_area'].append(vis_traits['area'])
    traits['hull_area'].append(vis_traits['hull-area'])
    traits['solidity'].append(vis_traits['solidity'])
    traits['height'].append(vis_traits['height_above_bound'])
    traits['perimeter'].append(vis_traits['perimeter'])

    return traits
def process_tv_images(session, url, vis_id, nir_id, traits, debug=False):
    """Process top-view images.

    Inputs:
    session = requests session object
    url     = Clowder URL
    vis_id  = The Clowder ID of an RGB image
    nir_id  = The Clowder ID of an NIR grayscale image
    traits  = traits table (dictionary)
    debug   = None, print, or plot. Print = save to file, Plot = print to screen.

    :param session: requests session object
    :param url: str
    :param vis_id: str
    :param nir_id: str
    :param traits: dict
    :param debug: str
    :return traits: dict
    """
    # Read VIS image from Clowder
    vis_r = session.get(posixpath.join(url, "api/files", vis_id), stream=True)
    img_array = np.asarray(bytearray(vis_r.content), dtype="uint8")
    img = cv2.imdecode(img_array, -1)

    # Read the VIS top-view image mask for zoom = 1 from Clowder
    mask_r = session.get(posixpath.join(url, "api/files/57451b28e4b0efbe2dc3d4d5"), stream=True)
    mask_array = np.asarray(bytearray(mask_r.content), dtype="uint8")
    brass_mask = cv2.imdecode(mask_array, -1)

    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 75, 255, 'light', device, debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, debug)

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, debug)
    device, brass_inv = pcv.invert(brass_thresh, device, debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device, debug)

    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, debug)
    device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, 'dark', device, debug)
    device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, 'light', device, debug)
    device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, 'dark', device, debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, 'light', device, debug)

    # Join the thresholded saturation and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)

    # Fill small objects
    device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 300, device, debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(img, 'rectangle', device, None, 'default', debug, True, 600, 450, -600,
                                                 -350)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy,
                                                                           id_objects, obj_hierarchy, device, debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, debug)

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(img, vis_id, obj, mask, device, debug)

    # Determine color properties
    device, color_header, color_data, color_img = pcv.analyze_color(img, vis_id, mask, 256, device, debug, None,
                                                                    'v', 'img', 300)

    # Output shape and color data
    vis_traits = {}
    for i in range(1, len(shape_header)):
        vis_traits[shape_header[i]] = shape_data[i]
    for i in range(2, len(color_header)):
        vis_traits[color_header[i]] = serialize_color_data(color_data[i])
    #print(vis_traits)
    add_plantcv_metadata(session, url, vis_id, vis_traits)

    ############################# Use VIS image mask for NIR image#########################
    # Read NIR image from Clowder
    nir_r = session.get(posixpath.join(url, "api/files", nir_id), stream=True)
    nir_array = np.asarray(bytearray(nir_r.content), dtype="uint8")
    nir = cv2.imdecode(nir_array, -1)
    nir_rgb = cv2.cvtColor(nir, cv2.COLOR_GRAY2BGR)

    # Flip mask
    device, f_mask = pcv.flip(mask, "horizontal", device, debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.116148, 0.116148, device, debug)

    # Position and crop mask
    device, newmask = pcv.crop_position_mask(nir_rgb, nmask, device, 15, 5, "top", "right", debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir_rgb, newmask, device, debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir_rgb, nir_objects, nir_hierarchy, device, debug)

    ####################################### Analysis #############################################

    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir, nir_id, nir_combinedmask, 256,
                                                                           device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir, nir_id, nir_combined, nir_combinedmask,
                                                                       device, debug)

    nir_traits = {}
    for i in range(1, len(nshape_header)):
        nir_traits[nshape_header[i]] = nshape_data[i]
    for i in range(2, len(nhist_header)):
        nir_traits[nhist_header[i]] = serialize_color_data(nhist_data[i])
    #print(nir_traits)
    add_plantcv_metadata(session, url, nir_id, nir_traits)

    # Add data to traits table
    traits['tv_area'] = vis_traits['area']

    return traits
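
# Minimal usage sketch for the two Clowder-based helpers above. The URL and file
# IDs are hypothetical placeholders, any authentication the Clowder instance
# requires is assumed to be configured on the session, and the traits table is
# pre-seeded with the keys that process_sv_images() appends to and
# process_tv_images() assigns.
import requests

session = requests.Session()
clowder_url = "https://clowder.example.org/clowder/"  # hypothetical instance
traits = {"sv_area": [], "hull_area": [], "solidity": [], "height": [],
          "perimeter": [], "tv_area": None}
traits = process_sv_images(session, clowder_url, "vis-file-id", "nir-file-id",
                           traits, debug=None)
traits = process_tv_images(session, clowder_url, "vis-file-id", "nir-file-id",
                           traits, debug=None)
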
def process_sv_images(vis_img, nir_img, debug=None):
    """Process side-view images.

    Inputs:
    vis_img = An RGB image.
    nir_img = An NIR grayscale image.
    debug   = None, print, or plot. Print = save to file, Plot = print to screen.

    :param vis_img: str
    :param nir_img: str
    :param debug: str
    :return:
    """
    # Read VIS image
    img, path, filename = pcv.readimage(vis_img)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)

    # Fill small objects
    # device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device, debug)

    # Fill small objects
    # device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_mblur, b_cnt, device, debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark', device, debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, debug)

    # Join the thresholded saturation and blue-yellow images (OR)
    device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)

    # Fill small noise
    device, ab_fill1 = pcv.fill(ab, ab_cnt, 200, device, debug)

    # Dilate to join small objects with larger ones
    device, ab_cnt1 = pcv.dilate(ab_fill1, 3, 2, device, debug)
    device, ab_cnt2 = pcv.dilate(ab_fill1, 3, 2, device, debug)

    # Fill dilated image mask
    device, ab_cnt3 = pcv.fill(ab_cnt2, ab_cnt1, 150, device, debug)
    device, masked2 = pcv.apply_mask(masked, ab_cnt3, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, debug)
    device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, 'dark', device, debug)
    device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255, 'light', device, debug)

    device, masked2a_thresh_blur = pcv.median_blur(masked2a_thresh, 5, device, debug)
    device, masked2b_thresh_blur = pcv.median_blur(masked2b_thresh, 13, device, debug)

    device, ab_fill = pcv.logical_or(masked2a_thresh_blur, masked2b_thresh_blur, device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device, None, 'default', debug, True, 700,
                                                 0, -600, -300)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy,
                                                                           id_objects, obj_hierarchy, device,
                                                                           debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, debug)

    ############## VIS Analysis ################
    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(img, vis_img, obj, mask, device, debug)

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(img, vis_img, obj, mask, 384, device,
                                                                              debug)

    # Determine color properties: Histograms, Color Slices and
    # Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(img, vis_img, mask, 256, device, debug,
                                                                    None, 'v', 'img', 300)

    # Output shape and color data
    print('\t'.join(map(str, shape_header)) + '\n')
    print('\t'.join(map(str, shape_data)) + '\n')
    for row in shape_img:
        print('\t'.join(map(str, row)) + '\n')
    print('\t'.join(map(str, color_header)) + '\n')
    print('\t'.join(map(str, color_data)) + '\n')
    print('\t'.join(map(str, boundary_header)) + '\n')
    print('\t'.join(map(str, boundary_data)) + '\n')
    print('\t'.join(map(str, boundary_img1)) + '\n')
    for row in color_img:
        print('\t'.join(map(str, row)) + '\n')

    ############################# Use VIS image mask for NIR image#########################
    # Read NIR image
    nir, path1, filename1 = pcv.readimage(nir_img)
    nir2 = cv2.imread(nir_img, -1)

    # Flip mask
    device, f_mask = pcv.flip(mask, "vertical", device, debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.1154905775, 0.1154905775, device, debug)

    # Position and crop mask
    device, newmask = pcv.crop_position_mask(nir, nmask, device, 30, 4, "top", "right", debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir, newmask, device, debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir, nir_objects, nir_hierarchy, device, debug)

    ####################################### Analysis #############################################
    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir2, filename1, nir_combinedmask, 256,
                                                                           device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir2, filename1, nir_combined, nir_combinedmask,
                                                                       device, debug)

    print('\t'.join(map(str, nhist_header)) + '\n')
    print('\t'.join(map(str, nhist_data)) + '\n')
    for row in nir_imgs:
        print('\t'.join(map(str, row)) + '\n')
    print('\t'.join(map(str, nshape_header)) + '\n')
    print('\t'.join(map(str, nshape_data)) + '\n')
    print('\t'.join(map(str, nir_shape)) + '\n')
def process_tv_images(vis_img, nir_img, debug=False):
    """Process top-view images.

    Inputs:
    vis_img = An RGB image.
    nir_img = An NIR grayscale image.
    debug   = None, print, or plot. Print = save to file, Plot = print to screen.

    :param vis_img: str
    :param nir_img: str
    :param debug: str
    :return:
    """
    # Read image
    img, path, filename = pcv.readimage(vis_img)
    brass_mask = cv2.imread('mask_brass_tv_z1_L1.png')

    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 75, 255, 'light', device, debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, debug)

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, debug)
    device, brass_inv = pcv.invert(brass_thresh, device, debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device, debug)

    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, debug)
    device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, 'dark', device, debug)
    device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, 'light', device, debug)
    device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, 'dark', device, debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, 'light', device, debug)

    # Join the thresholded saturation and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)

    # Fill small objects
    device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 300, device, debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(img, 'rectangle', device, None, 'default', debug, True, 600, 450, -600,
                                                 -350)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy,
                                                                           id_objects, obj_hierarchy, device, debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, debug)

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(img, vis_img, obj, mask, device, debug)

    # Determine color properties
    device, color_header, color_data, color_img = pcv.analyze_color(img, vis_img, mask, 256, device, debug, None,
                                                                    'v', 'img', 300)

    print('\t'.join(map(str, shape_header)) + '\n')
    print('\t'.join(map(str, shape_data)) + '\n')
    for row in shape_img:
        print('\t'.join(map(str, row)) + '\n')
    print('\t'.join(map(str, color_header)) + '\n')
    print('\t'.join(map(str, color_data)) + '\n')
    for row in color_img:
        print('\t'.join(map(str, row)) + '\n')

    ############################# Use VIS image mask for NIR image#########################
    # Read NIR image
    nir, path1, filename1 = pcv.readimage(nir_img)
    nir2 = cv2.imread(nir_img, -1)

    # Flip mask
    device, f_mask = pcv.flip(mask, "horizontal", device, debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.116148, 0.116148, device, debug)

    # Position and crop mask
    device, newmask = pcv.crop_position_mask(nir, nmask, device, 15, 5, "top", "right", debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir, newmask, device, debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir, nir_objects, nir_hierarchy, device, debug)

    ####################################### Analysis #############################################

    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir2, filename1, nir_combinedmask, 256,
                                                                           device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir2, filename1, nir_combined, nir_combinedmask,
                                                                       device, debug)

    print('\t'.join(map(str, nhist_header)) + '\n')
    print('\t'.join(map(str, nhist_data)) + '\n')
    for row in nir_imgs:
        print('\t'.join(map(str, row)) + '\n')
    print('\t'.join(map(str, nshape_header)) + '\n')
    print('\t'.join(map(str, nshape_data)) + '\n')
    print('\t'.join(map(str, nir_shape)) + '\n')
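
# Minimal usage sketch for the file-based variants above. The image paths are
# hypothetical; process_tv_images() additionally expects the brass-piece mask
# 'mask_brass_tv_z1_L1.png' to be present in the working directory.
process_sv_images("plant_sv0_vis.png", "plant_sv0_nir.png", debug=None)
process_tv_images("plant_tv_vis.png", "plant_tv_nir.png", debug=None)
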
Example #14
def process_tv_images_core(vis_id,
                           vis_img,
                           nir_id,
                           nir_rgb,
                           nir_cv2,
                           brass_mask,
                           traits,
                           debug=None):
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(vis_img, 's', device, debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 75, 255, 'light', device, debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(vis_img, 'b', device, debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device,
                                            debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(vis_img, bs, 'white', device, debug)

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light',
                                                device, debug)
    device, brass_inv = pcv.invert(brass_thresh, device, debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device,
                                          debug)

    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, debug)
    device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, 'dark',
                                             device, debug)
    device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, 'light',
                                             device, debug)
    device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white',
                                         device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, 'dark',
                                                device, debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, 'light',
                                                device, debug)

    # Join the thresholded saturation and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device,
                                         debug)

    # Fill small objects
    device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 300, device, debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device,
                                     debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(
        masked2, soil_cnt, device, debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(vis_img, 'rectangle', device,
                                                 None, 'default', debug, True,
                                                 600, 450, -600, -350)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        vis_img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy,
        device, debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(vis_img, roi_objects,
                                               hierarchy3, device, debug)

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        vis_img, vis_id, obj, mask, device, debug)

    # Determine color properties
    device, color_header, color_data, color_img = pcv.analyze_color(
        vis_img, vis_id, mask, 256, device, debug, None, 'v', 'img', 300)

    # Output shape and color data
    vis_traits = {}
    for i in range(1, len(shape_header)):
        vis_traits[shape_header[i]] = shape_data[i]
    for i in range(2, len(color_header)):
        vis_traits[color_header[i]] = serialize_color_data(color_data[i])

    ############################# Use VIS image mask for NIR image#########################

    # Flip mask
    device, f_mask = pcv.flip(mask, "horizontal", device, debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.116148, 0.116148, device, debug)

    # Position and crop mask
    device, newmask = pcv.crop_position_mask(nir_rgb, nmask, device, 15, 5,
                                             "top", "right", debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(
        nir_rgb, newmask, device, debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(
        nir_rgb, nir_objects, nir_hierarchy, device, debug)

    ####################################### Analysis #############################################

    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(
        nir_cv2, nir_id, nir_combinedmask, 256, device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(
        nir_cv2, nir_id, nir_combined, nir_combinedmask, device, debug)

    nir_traits = {}
    for i in range(1, len(nshape_header)):
        nir_traits[nshape_header[i]] = nshape_data[i]
    for i in range(2, len(nhist_header)):
        nir_traits[nhist_header[i]] = serialize_color_data(nhist_data[i])

    # Add data to traits table
    traits['tv_area'] = vis_traits['area']

    return [vis_traits, nir_traits]
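
# Minimal caller sketch for process_tv_images_core(). Unlike the Clowder-backed
# helpers, the core function takes pre-loaded numpy arrays; the paths and file
# IDs below are hypothetical placeholders, and cv2 is assumed to be imported at
# the top of the module as in the functions above.
vis_img = cv2.imread("plant_tv_vis.png")
nir_gray = cv2.imread("plant_tv_nir.png", -1)
nir_rgb = cv2.cvtColor(nir_gray, cv2.COLOR_GRAY2BGR)
brass_mask = cv2.imread("mask_brass_tv_z1_L1.png")
traits = {"tv_area": None}
vis_traits, nir_traits = process_tv_images_core("vis-file-id", vis_img,
                                                "nir-file-id", nir_rgb, nir_gray,
                                                brass_mask, traits, debug=None)
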
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)
    brass_mask = cv2.imread(args.roi)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, "s", device, args.debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 49, 255, "light", device, args.debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, "b", device, args.debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, "light", device, args.debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, "light", device, args.debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 150, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, "white", device, args.debug)

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, "v", device, args.debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, "light", device, args.debug)
    device, brass_inv = pcv.invert(brass_thresh, device, args.debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, "white", device, args.debug)

    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, "a", device, args.debug)
    device, soil_car = pcv.binary_threshold(masked_a, 128, 255, "dark", device, args.debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, "white", device, args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, "a", device, args.debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, "b", device, args.debug)

    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 118, 255, "dark", device, args.debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 150, 255, "light", device, args.debug)

    # Join the thresholded saturation and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)

    # Fill small objects
    device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 75, device, args.debug)

    # Median Filter
    # device, soil_mblur = pcv.median_blur(soil_fill, 5, device, args.debug)
    # device, soil_cnt = pcv.median_blur(soil_fill, 5, device, args.debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, "white", device, args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, args.debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(
        img, "circle", device, None, "default", args.debug, True, 0, 0, -200, -200
    )

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, "partial", roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug
    )

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)

    ############## VIS Analysis ################

    outfile = False
    if args.writeimg == True:
        outfile = args.outdir + "/" + filename

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, obj, mask, device, args.debug, outfile
    )

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        img, args.image, mask, 256, device, args.debug, None, "v", "img", 300, outfile
    )

    # Output shape and color data

    result = open(args.result, "a")
    result.write("\t".join(map(str, shape_header)))
    result.write("\n")
    result.write("\t".join(map(str, shape_data)))
    result.write("\n")
    for row in shape_img:
        result.write("\t".join(map(str, row)))
        result.write("\n")
    result.write("\t".join(map(str, color_header)))
    result.write("\n")
    result.write("\t".join(map(str, color_data)))
    result.write("\n")
    for row in color_img:
        result.write("\t".join(map(str, row)))
        result.write("\n")
    result.close()

    ############################# Use VIS image mask for NIR image#########################
    # Find matching NIR image
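    # pcv.get_nir presumably resolves the path of the NIR capture taken alongside
    # this VIS image within the same snapshot directory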
    device, nirpath = pcv.get_nir(path, filename, device, args.debug)
    nir, path1, filename1 = pcv.readimage(nirpath)
    nir2 = cv2.imread(nirpath, -1)

    # Flip mask
    device, f_mask = pcv.flip(mask, "horizontal", device, args.debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.1304, 0.1304, device, args.debug)

    # Position and crop mask
    device, newmask = pcv.crop_position_mask(nir, nmask, device, 9, 12, "top", "left", args.debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir, newmask, device, args.debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir, nir_objects, nir_hierarchy, device, args.debug)

    ####################################### Analysis #############################################
    outfile1 = False
    if args.writeimg == True:
        outfile1 = args.outdir + "/" + filename1

    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(
        nir2, filename1, nir_combinedmask, 256, device, False, args.debug, outfile1
    )
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(
        nir2, filename1, nir_combined, nir_combinedmask, device, args.debug, outfile1
    )

    coresult = open(args.coresult, "a")
    coresult.write("\t".join(map(str, nhist_header)))
    coresult.write("\n")
    coresult.write("\t".join(map(str, nhist_data)))
    coresult.write("\n")
    for row in nir_imgs:
        coresult.write("\t".join(map(str, row)))
        coresult.write("\n")

    coresult.write("\t".join(map(str, nshape_header)))
    coresult.write("\n")
    coresult.write("\t".join(map(str, nshape_data)))
    coresult.write("\n")
    coresult.write("\t".join(map(str, nir_shape)))
    coresult.write("\n")
    coresult.close()
Example #16
def main():
    # Initialize device
    device = 0

    # Parse command-line options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(filename=args.image, debug=args.debug)

    # Convert RGB to LAB and extract the Green-Magenta channel
    device, green_channel = pcv.rgb2gray_lab(img=img,
                                             channel="a",
                                             device=device,
                                             debug=args.debug)

    # Invert the Green-Magenta image because the plant is dark green
    device, green_inv = pcv.invert(img=green_channel,
                                   device=device,
                                   debug=args.debug)

    # Threshold the inverted Green-Magenta image to mostly isolate green pixels
    device, green_thresh = pcv.binary_threshold(img=green_inv,
                                                threshold=134,
                                                maxValue=255,
                                                object_type="light",
                                                device=device,
                                                debug=args.debug)

    # Extract core plant region from the image to preserve delicate plant features during filtering
    device += 1
    plant_region = green_thresh[100:2000, 250:2250]
    if args.debug is not None:
        pcv.print_image(filename=str(device) + "_extract_plant_region.png",
                        img=plant_region)

    # Use a Gaussian blur to disrupt the strong edge features in the cabinet
    device, blur_gaussian = pcv.gaussian_blur(device=device,
                                              img=green_thresh,
                                              ksize=(7, 7),
                                              sigmax=0,
                                              sigmay=None,
                                              debug=args.debug)

    # Threshold the blurred image to remove features that were blurred
    device, blur_thresholded = pcv.binary_threshold(img=blur_gaussian,
                                                    threshold=250,
                                                    maxValue=255,
                                                    object_type="light",
                                                    device=device,
                                                    debug=args.debug)

    # Add the plant region back in to the filtered image
    device += 1
    blur_thresholded[100:2000, 250:2250] = plant_region
    if args.debug is not None:
        pcv.print_image(filename=str(device) + "_replace_plant_region.png",
                        img=blur_thresholded)

    # Use a median blur to break up the horizontal and vertical lines caused by shadows from the track edges
    device, med_blur = pcv.median_blur(img=blur_thresholded,
                                       ksize=5,
                                       device=device,
                                       debug=args.debug)

    # Fill in small contours
    device, green_fill_50 = pcv.fill(img=np.copy(med_blur),
                                     mask=np.copy(med_blur),
                                     size=50,
                                     device=device,
                                     debug=args.debug)

    # Define an ROI for the brass stopper
    device, stopper_roi, stopper_hierarchy = pcv.define_roi(
        img=img,
        shape="rectangle",
        device=device,
        roi=None,
        roi_input="default",
        debug=args.debug,
        adjust=True,
        x_adj=1420,
        y_adj=890,
        w_adj=-920,
        h_adj=-1040)

    # Identify all remaining contours in the binary image
    device, contours, hierarchy = pcv.find_objects(img=img,
                                                   mask=np.copy(green_fill_50),
                                                   device=device,
                                                   debug=args.debug)

    # Remove contours completely contained within the stopper region of interest
    device, remove_stopper_mask = remove_countors_roi(mask=green_fill_50,
                                                      contours=contours,
                                                      hierarchy=hierarchy,
                                                      roi=stopper_roi,
                                                      device=device,
                                                      debug=args.debug)

    # Define an ROI for a screw hole
    device, screw_roi, screw_hierarchy = pcv.define_roi(img=img,
                                                        shape="rectangle",
                                                        device=device,
                                                        roi=None,
                                                        roi_input="default",
                                                        debug=args.debug,
                                                        adjust=True,
                                                        x_adj=1870,
                                                        y_adj=1010,
                                                        w_adj=-485,
                                                        h_adj=-960)

    # Remove contours completely contained within the screw region of interest
    device, remove_screw_mask = remove_countors_roi(mask=remove_stopper_mask,
                                                    contours=contours,
                                                    hierarchy=hierarchy,
                                                    roi=screw_roi,
                                                    device=device,
                                                    debug=args.debug)

    # Identify objects
    device, contours, contour_hierarchy = pcv.find_objects(
        img=img, mask=remove_screw_mask, device=device, debug=args.debug)

    # Define ROI
    device, roi, roi_hierarchy = pcv.define_roi(img=img,
                                                shape="rectangle",
                                                device=device,
                                                roi=None,
                                                roi_input="default",
                                                debug=args.debug,
                                                adjust=True,
                                                x_adj=565,
                                                y_adj=200,
                                                w_adj=-490,
                                                h_adj=-250)

    # Decide which objects to keep
    device, roi_contours, roi_contour_hierarchy, _, _ = pcv.roi_objects(
        img=img,
        roi_type="partial",
        roi_contour=roi,
        roi_hierarchy=roi_hierarchy,
        object_contour=contours,
        obj_hierarchy=contour_hierarchy,
        device=device,
        debug=args.debug)

    # If there are no contours left we cannot measure anything
    if len(roi_contours) > 0:
        # Object combine kept objects
        device, plant_contour, plant_mask = pcv.object_composition(
            img=img,
            contours=roi_contours,
            hierarchy=roi_contour_hierarchy,
            device=device,
            debug=args.debug)

        outfile = False
        if args.writeimg:
            outfile = args.outdir + "/" + filename

        # Find shape properties, output shape image (optional)
        device, shape_header, shape_data, shape_img = pcv.analyze_object(
            img=img,
            imgname=args.image,
            obj=plant_contour,
            mask=plant_mask,
            device=device,
            debug=args.debug,
            filename=outfile)

        # Determine color properties: Histograms, Color Slices and Pseudocolored Images,
        # output color analyzed images (optional)
        device, color_header, color_data, color_img = pcv.analyze_color(
            img=img,
            imgname=args.image,
            mask=plant_mask,
            bins=256,
            device=device,
            debug=args.debug,
            hist_plot_type=None,
            pseudo_channel="v",
            pseudo_bkg="img",
            resolution=300,
            filename=outfile)

        # Output shape and color data
        result = open(args.result, "a")
        result.write('\t'.join(map(str, shape_header)) + "\n")
        result.write('\t'.join(map(str, shape_data)) + "\n")
        for row in shape_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.write('\t'.join(map(str, color_header)) + "\n")
        result.write('\t'.join(map(str, color_data)) + "\n")
        for row in color_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.close()

        # Find matching NIR image
        device, nirpath = pcv.get_nir(path=path,
                                      filename=filename,
                                      device=device,
                                      debug=args.debug)
        nir_rgb, nir_path, nir_filename = pcv.readimage(nirpath)
        nir_img = cv2.imread(nirpath, 0)

        # Make mask glovelike in proportions via dilation
        device, d_mask = pcv.dilate(plant_mask,
                                    kernel=1,
                                    i=0,
                                    device=device,
                                    debug=args.debug)

        # Resize mask
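        # conv_ratio() is assumed to be defined elsewhere in this script; it
        # presumably returns the y/x scale factors that map the VIS-resolution
        # mask to the NIR camera's resolution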
        prop2, prop1 = conv_ratio()
        device, nmask = pcv.resize(img=d_mask,
                                   resize_x=prop1,
                                   resize_y=prop2,
                                   device=device,
                                   debug=args.debug)

        # Convert the resized mask to a binary mask
        device, bmask = pcv.binary_threshold(img=nmask,
                                             threshold=0,
                                             maxValue=255,
                                             object_type="light",
                                             device=device,
                                             debug=args.debug)

        device, crop_img = crop_sides_equally(mask=bmask,
                                              nir=nir_img,
                                              device=device,
                                              debug=args.debug)

        # Position and crop mask
        device, newmask = pcv.crop_position_mask(img=nir_img,
                                                 mask=crop_img,
                                                 device=device,
                                                 x=0,
                                                 y=1,
                                                 v_pos="bottom",
                                                 h_pos="right",
                                                 debug=args.debug)

        # Identify objects
        device, nir_objects, nir_hierarchy = pcv.find_objects(img=nir_rgb,
                                                              mask=newmask,
                                                              device=device,
                                                              debug=args.debug)

        # Object combine kept objects
        device, nir_combined, nir_combinedmask = pcv.object_composition(
            img=nir_rgb,
            contours=nir_objects,
            hierarchy=nir_hierarchy,
            device=device,
            debug=args.debug)

        if args.writeimg:
            outfile = args.outdir + "/" + nir_filename

        # Analyze NIR signal data
        device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(
            img=nir_img,
            rgbimg=nir_rgb,
            mask=nir_combinedmask,
            bins=256,
            device=device,
            histplot=False,
            debug=args.debug,
            filename=outfile)

        # Analyze the shape of the plant contour from the NIR image
        device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(
            img=nir_img,
            imgname=nir_filename,
            obj=nir_combined,
            mask=nir_combinedmask,
            device=device,
            debug=args.debug,
            filename=outfile)

        # Write NIR data to co-results file
        coresult = open(args.coresult, "a")
        coresult.write('\t'.join(map(str, nhist_header)) + "\n")
        coresult.write('\t'.join(map(str, nhist_data)) + "\n")
        for row in nir_imgs:
            coresult.write('\t'.join(map(str, row)) + "\n")
        coresult.write('\t'.join(map(str, nshape_header)) + "\n")
        coresult.write('\t'.join(map(str, nshape_data)) + "\n")
        coresult.write('\t'.join(map(str, nir_shape)) + "\n")
        coresult.close()
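
# These pipeline mains are normally launched from the shell. A hypothetical
# invocation, assuming options() defines argparse flags matching the attributes
# used above (args.image, args.outdir, args.result, args.coresult, args.writeimg,
# args.debug):
#   python pipeline.py --image vis.png --outdir ./out --result vis_results.txt \
#       --coresult nir_results.txt --writeimg --debug print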
def main():
  # Get options
  args = options()
  
  # Read image
  img, path, filename = pcv.readimage(args.image)
    
  # Pipeline step
  device = 0

  # Convert RGB to HSV and extract the Saturation channel
  device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)
  
  # Threshold the Saturation image
  device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, args.debug)
  
  # Median Filter
  device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
  device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)
  
  # Fill small objects
  #device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)
  
  # Convert RGB to LAB and extract the Blue channel
  device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)
  
  # Threshold the blue image
  device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device, args.debug)
  device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device, args.debug)
  
  # Fill small objects
  #device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug)
  
  # Join the thresholded saturation and blue-yellow images
  device, bs = pcv.logical_and(s_mblur, b_cnt, device, args.debug)
  
  # Apply Mask (for vis images, mask_color=white)
  device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)
  
  # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
  device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug)
  device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, args.debug)
  
  # Threshold the green-magenta and blue images
  device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark', device, args.debug)
  device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, args.debug)
  
  # Join the thresholded saturation and blue-yellow images (OR)
  device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)
  device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)
  
  # Fill small noise
  device, ab_fill1 = pcv.fill(ab, ab_cnt, 2, device, args.debug)
  
  # Dilate to join small objects with larger ones
  device, ab_cnt1 = pcv.dilate(ab_fill1, 3, 2, device, args.debug)
  device, ab_cnt2 = pcv.dilate(ab_fill1, 3, 2, device, args.debug)
  
  # Fill dilated image mask
  device, ab_cnt3 = pcv.fill(ab_cnt2, ab_cnt1, 150, device, args.debug)
  device, masked2 = pcv.apply_mask(masked, ab_cnt3, 'white', device, args.debug)
  
  # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
  device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, args.debug)
  device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, args.debug)
  
  # Threshold the green-magenta and blue images
  device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, 'dark', device, args.debug)
  device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255, 'light', device, args.debug)
  device, ab_fill = pcv.logical_or(masked2a_thresh, masked2b_thresh, device, args.debug)
  
  # Identify objects
  device, id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, args.debug)
  
  # Define ROI
  device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device, None, 'default', args.debug, True, 550, 0, -600, -907)
  
  # Decide which objects to keep
  device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug)
  
  # Object combine kept objects
  device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)
  
  ############## VIS Analysis ################
  
  outfile = False
  if args.writeimg == True:
    outfile = args.outdir + "/" + filename
  
  # Find shape properties, output shape image (optional)
  device, shape_header, shape_data, shape_img = pcv.analyze_object(img, args.image, obj, mask, device, args.debug, outfile)

  # Shape properties relative to user boundary line (optional)
  device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(img, args.image, obj, mask, 935, device, args.debug, outfile)

  # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
  device, color_header, color_data, color_img = pcv.analyze_color(img, args.image, mask, 256, device, args.debug, None, 'v', 'img', 300, outfile)
  
  # Output shape and color data
  result = open(args.result, "a")
  result.write('\t'.join(map(str, shape_header)))
  result.write("\n")
  result.write('\t'.join(map(str, shape_data)))
  result.write("\n")
  for row in shape_img:
    result.write('\t'.join(map(str, row)))
    result.write("\n")
  result.write('\t'.join(map(str, color_header)))
  result.write("\n")
  result.write('\t'.join(map(str, color_data)))
  result.write("\n")
  result.write('\t'.join(map(str, boundary_header)))
  result.write("\n")
  result.write('\t'.join(map(str, boundary_data)))
  result.write("\n")
  result.write('\t'.join(map(str, boundary_img1)))
  result.write("\n")
  for row in color_img:
    result.write('\t'.join(map(str, row)))
    result.write("\n")
  result.close()
    
  ############################# Use VIS image mask for NIR image #########################
  # Find matching NIR image
  device, nirpath = pcv.get_nir(path, filename, device, args.debug)
  nir, path1, filename1 = pcv.readimage(nirpath)
  nir2 = cv2.imread(nirpath, -1)

  # Flip mask
  device, f_mask = pcv.flip(mask, "vertical", device, args.debug)

  # Resize mask
  device, nmask = pcv.resize(f_mask, 0.118069, 0.118069, device, args.debug)

  # Position and crop mask
  device, newmask = pcv.crop_position_mask(nir, nmask, device, 40, 3, "top", "right", args.debug)
  
  # Identify objects
  device, nir_objects,nir_hierarchy = pcv.find_objects(nir, newmask, device, args.debug)
  
  # Object combine kept objects
  device, nir_combined, nir_combinedmask = pcv.object_composition(nir, nir_objects, nir_hierarchy, device, args.debug)

  ####################################### Analysis #############################################
  outfile1 = False
  if args.writeimg == True:
    outfile1 = args.outdir + "/" + filename1

  device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir2, filename1, nir_combinedmask, 256, device, False, args.debug, outfile1)
  device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir2, filename1, nir_combined, nir_combinedmask, device, args.debug, outfile1)
  
  coresult = open(args.coresult, "a")
  coresult.write('\t'.join(map(str, nhist_header)))
  coresult.write("\n")
  coresult.write('\t'.join(map(str, nhist_data)))
  coresult.write("\n")
  for row in nir_imgs:
    coresult.write('\t'.join(map(str, row)))
    coresult.write("\n")

  coresult.write('\t'.join(map(str, nshape_header)))
  coresult.write("\n")
  coresult.write('\t'.join(map(str, nshape_data)))
  coresult.write("\n")
  coresult.write('\t'.join(map(str, nir_shape)))
  coresult.write("\n")
  coresult.close()
Example #18
def test_plantcv_resize():
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    device, resized_img = pcv.resize(img, 0.5, 0.5, device=0, debug=None)
    ix, iy, iz = np.shape(img)
    rx, ry, rz = np.shape(resized_img)
    assert ix > rx