Example 1
def main():
  # Get options
  args = options()
  
  # Read image (converting fmax and track to 8 bit just to create a mask, use 16-bit for all the math)
  mask, path, filename = pcv.readimage(args.fmax)
  #mask = cv2.imread(args.fmax)
  track = cv2.imread(args.track)
  
  mask1, mask2, mask3= cv2.split(mask)
  
  # Pipeline step
  device = 0
  
  # Mask pesky track autofluor
  device, track1= pcv.rgb2gray_hsv(track, 'v', device, args.debug)
  device, track_thresh = pcv.binary_threshold(track1, 0, 255, 'light', device, args.debug)
  device, track_inv=pcv.invert(track_thresh, device, args.debug)
  device, track_masked = pcv.apply_mask(mask1, track_inv, 'black', device, args.debug)
  
  # Threshold the track-masked Fmax image
  device, fmax_thresh = pcv.binary_threshold(track_masked, 20, 255, 'light', device, args.debug)
  
  # Median Filter (cv2.medianBlur requires an odd kernel size greater than 1)
  device, s_mblur = pcv.median_blur(fmax_thresh, 5, device, args.debug)
  device, s_cnt = pcv.median_blur(fmax_thresh, 5, device, args.debug)
  
  # Fill small objects
  device, s_fill = pcv.fill(s_mblur, s_cnt, 5, device, args.debug)
  device, sfill_cnt = pcv.fill(s_mblur, s_cnt, 5, device, args.debug)
  
  # Identify objects
  device, id_objects,obj_hierarchy = pcv.find_objects(mask, sfill_cnt, device, args.debug)
  
  # Define ROI
  device, roi1, roi_hierarchy = pcv.define_roi(mask, 'circle', device, None, 'default', args.debug, True, 0, 0, -100, -100)
  
  # Decide which objects to keep
  device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(mask, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug)
  
  # Object combine kept objects
  device, obj, masked = pcv.object_composition(mask, roi_objects, hierarchy3, device, args.debug)
  
################ Analysis ################  
  
  # Find shape properties, output shape image (optional)
  device, shape_header, shape_data, shape_img = pcv.analyze_object(mask, args.fmax, obj, masked, device, args.debug, args.outdir+'/'+filename)
  
  # Fluorescence Measurement (read in 16-bit images)
  fdark=cv2.imread(args.fdark, -1)
  fmin=cv2.imread(args.fmin, -1)
  fmax=cv2.imread(args.fmax, -1)
  
  device, fvfm_header, fvfm_data = pcv.fluor_fvfm(fdark, fmin, fmax, kept_mask, device, args.outdir+'/'+filename, 1000, args.debug)

  # Output shape and color data
  pcv.print_results(args.fmax, shape_header, shape_data)
  pcv.print_results(args.fmax, fvfm_header, fvfm_data)
Example 2
def test_plantcv_roi_objects():
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    roi_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI))
    roi_contour = roi_npz['arr_0']
    roi_hierarchy = roi_npz['arr_1']
    contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS))
    object_contours = contours_npz['arr_0']
    object_hierarchy = contours_npz['arr_1']
    device, kept_contours, kept_hierarchy, mask, area = pcv.roi_objects(img=img, roi_type="partial",
                                                                        roi_contour=roi_contour,
                                                                        roi_hierarchy=roi_hierarchy,
                                                                        object_contour=object_contours,
                                                                        obj_hierarchy=object_hierarchy,
                                                                        device=0, debug=None)
    # Assert that the contours were filtered as expected
    assert len(kept_contours) == 1046
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device,
                                            args.debug)

    # Median Filter (cv2.medianBlur requires an odd kernel size greater than 1)
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)

    # Fill small objects
    #device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device,
                                            args.debug)
    device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device,
                                         args.debug)

    # Fill small objects
    #device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_mblur, b_cnt, device, args.debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, args.debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark',
                                                  device, args.debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light',
                                                  device, args.debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device,
                                args.debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device,
                                    args.debug)

    # Fill small noise
    device, ab_fill1 = pcv.fill(ab, ab_cnt, 2, device, args.debug)

    # Dilate to join small objects with larger ones
    device, ab_cnt1 = pcv.dilate(ab_fill1, 3, 2, device, args.debug)
    device, ab_cnt2 = pcv.dilate(ab_fill1, 3, 2, device, args.debug)

    # Fill dilated image mask
    device, ab_cnt3 = pcv.fill(ab_cnt2, ab_cnt1, 150, device, args.debug)
    img2 = np.copy(img)
    device, masked2 = pcv.apply_mask(img2, ab_cnt3, 'white', device,
                                     args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, args.debug)
    device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, args.debug)

    # Threshold the green-magenta and blue images
    device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, 'dark',
                                                   device, args.debug)
    device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255,
                                                   'light', device, args.debug)
    device, ab_fill = pcv.logical_or(masked2a_thresh, masked2b_thresh, device,
                                     args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(
        masked2, ab_fill, device, args.debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device,
                                                 None, 'default', args.debug,
                                                 True, 550, 10, -590, -940)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device,
        args.debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3,
                                               device, args.debug)

    ############## Landmarks    ################

    # Use acute fxn to estimate tips
    device, points = pcv.acute_vertex(obj, 20, 50, 20, img, device, args.debug)
    boundary_line = 900
    # Re-scale the tip points relative to the boundary line
    device, points_r, centroid_r, bline_r = pcv.scale_features(
        obj, mask, points, boundary_line, device, args.debug)
    # Get number of points
    tips = len(points_r)
    # Use turgor_proxy fxn to get distances
    device, vert_ave_c, hori_ave_c, euc_ave_c, ang_ave_c, vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b = pcv.turgor_proxy(
        points_r, centroid_r, bline_r, device, args.debug)
    # Get pseudomarkers along the y-axis
    device, left, right, center_h = pcv.y_axis_pseudolandmarks(
        obj, mask, img, device, args.debug)
    # Re-scale the points
    device, left_r, left_cr, left_br = pcv.scale_features(
        obj, mask, left, boundary_line, device, args.debug)
    device, right_r, right_cr, right_br = pcv.scale_features(
        obj, mask, right, boundary_line, device, args.debug)
    device, center_hr, center_hcr, center_hbr = pcv.scale_features(
        obj, mask, center_h, boundary_line, device, args.debug)

    # Get pseudomarkers along the x-axis
    device, top, bottom, center_v = pcv.x_axis_pseudolandmarks(
        obj, mask, img, device, args.debug)

    # Re-scale the points
    device, top_r, top_cr, top_br = pcv.scale_features(obj, mask, top,
                                                       boundary_line, device,
                                                       args.debug)
    device, bottom_r, bottom_cr, bottom_br = pcv.scale_features(
        obj, mask, bottom, boundary_line, device, args.debug)
    device, center_vr, center_vcr, center_vbr = pcv.scale_features(
        obj, mask, center_v, boundary_line, device, args.debug)

    ## Need to convert the points into a list of tuples format to match the scaled points
    points = points.reshape(len(points), 2)
    points = points.tolist()
    temp_out = []
    for p in points:
        p = tuple(p)
        temp_out.append(p)
    points = temp_out
    left = left.reshape(20, 2)
    left = left.tolist()
    temp_out = []
    for l in left:
        l = tuple(l)
        temp_out.append(l)
    left = temp_out
    right = right.reshape(20, 2)
    right = right.tolist()
    temp_out = []
    for r in right:
        r = tuple(r)
        temp_out.append(r)
    right = temp_out
    center_h = center_h.reshape(20, 2)
    center_h = center_h.tolist()
    temp_out = []
    for ch in center_h:
        ch = tuple(ch)
        temp_out.append(ch)
    center_h = temp_out
    ## Need to convert the points into a list of tuples format to match the scaled points
    top = top.reshape(20, 2)
    top = top.tolist()
    temp_out = []
    for t in top:
        t = tuple(t)
        temp_out.append(t)
    top = temp_out
    bottom = bottom.reshape(20, 2)
    bottom = bottom.tolist()
    temp_out = []
    for b in bottom:
        b = tuple(b)
        temp_out.append(b)
    bottom = temp_out
    center_v = center_v.reshape(20, 2)
    center_v = center_v.tolist()
    temp_out = []
    for cvr in center_v:
        cvr = tuple(cvr)
        temp_out.append(cvr)
    center_v = temp_out
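    # Note: the repeated reshape/tolist/tuple blocks above could be collapsed with a
    # small helper. A minimal sketch, assuming each array is an (n, 1, 2) or (n, 2)
    # point array; the helper name is illustrative and not part of the original pipeline:
    #
    #     def to_tuple_list(arr):
    #         return [tuple(p) for p in arr.reshape(-1, 2).tolist()]
    #
    #     points, left, right, center_h = map(to_tuple_list, (points, left, right, center_h))
    #     top, bottom, center_v = map(to_tuple_list, (top, bottom, center_v))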

    #Store Landmark Data
    landmark_header = ('HEADER_LANDMARK', 'tip_points', 'tip_points_r',
                       'centroid_r', 'baseline_r', 'tip_number', 'vert_ave_c',
                       'hori_ave_c', 'euc_ave_c', 'ang_ave_c', 'vert_ave_b',
                       'hori_ave_b', 'euc_ave_b', 'ang_ave_b', 'left_lmk',
                       'right_lmk', 'center_h_lmk', 'left_lmk_r',
                       'right_lmk_r', 'center_h_lmk_r', 'top_lmk',
                       'bottom_lmk', 'center_v_lmk', 'top_lmk_r',
                       'bottom_lmk_r', 'center_v_lmk_r')

    landmark_data = ('LANDMARK_DATA', points, points_r, centroid_r, bline_r,
                     tips, vert_ave_c, hori_ave_c, euc_ave_c, ang_ave_c,
                     vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b, left, right,
                     center_h, left_r, right_r, center_hr, top, bottom,
                     center_v, top_r, bottom_r, center_vr)

    ############## VIS Analysis ################

    outfile = False
    #if args.writeimg==True:
    #outfile=args.outdir+"/"+filename

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, obj, mask, device, args.debug, outfile)

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(
        img, args.image, obj, mask, 900, device, args.debug, outfile)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        img, args.image, mask, 256, device, args.debug, None, 'v', 'img', 300,
        outfile)

    # Output shape and color data

    result = open(args.result, "a")
    result.write('\t'.join(map(str, shape_header)))
    result.write("\n")
    result.write('\t'.join(map(str, shape_data)))
    result.write("\n")
    for row in shape_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.write('\t'.join(map(str, color_header)))
    result.write("\n")
    result.write('\t'.join(map(str, color_data)))
    result.write("\n")
    result.write('\t'.join(map(str, boundary_header)))
    result.write("\n")
    result.write('\t'.join(map(str, boundary_data)))
    result.write("\n")
    result.write('\t'.join(map(str, boundary_img1)))
    result.write("\n")
    for row in color_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.write('\t'.join(map(str, landmark_header)))
    result.write("\n")
    result.write('\t'.join(map(str, landmark_data)))
    result.write("\n")
    result.close()
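    # The repeated join/newline writes above could be wrapped in a small helper,
    # e.g. (illustrative sketch, not part of the original pipeline):
    #
    #     def write_row(fh, values):
    #         fh.write('\t'.join(map(str, values)) + "\n")
    #
    #     write_row(result, shape_header)
    #     write_row(result, shape_data)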
def get_feature(img):
    print("step one")
    """
    Step one: Background/foreground subtraction
    """
    # Get options
    args = options()
    debug = args.debug
    # Read image
    filename = args.result
    # img, path, filename = pcv.readimage(args.image)
    # Pipeline step
    device = 0
    device, resize_img = pcv.resize(img, 0.4, 0.4, device, debug)
    # Classify the pixels as plant or background
    device, mask_img = pcv.naive_bayes_classifier(
        resize_img,
        pdf_file=
        "/home/matthijs/PycharmProjects/SMR1/src/vision/ML_background/Trained_models/model_3/naive_bayes_pdfs.txt",
        device=0,
        debug='print')

    # Median Filter
    device, blur = pcv.median_blur(mask_img.get('plant'), 5, device, debug)
    print("step two")
    """
    Step two: Identify the objects, extract and filter the objects
    """

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(resize_img,
                                                         blur,
                                                         device,
                                                         debug=None)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(resize_img,
                                                 'rectangle',
                                                 device,
                                                 roi=True,
                                                 roi_input='default',
                                                 debug=True,
                                                 adjust=True,
                                                 x_adj=50,
                                                 y_adj=10,
                                                 w_adj=-100,
                                                 h_adj=0)
    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        resize_img, 'cutto', roi1, roi_hierarchy, id_objects, obj_hierarchy,
        device, debug)
    # print(roi_objects[0])
    # cv2.drawContours(resize_img, [roi_objects[0]], 0, (0, 255, 0), 3)
    # cv2.imshow("img",resize_img)
    # cv2.waitKey(0)
    area_oud = 0
    i = 0
    index = 0
    object_list = []
    # a = np.array([[hierarchy3[0][0]]])
    hierarchy = []
    for cnt in roi_objects:
        area = cv2.contourArea(cnt)
        M = cv2.moments(cnt)
        if M["m10"] or M["m01"]:
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            # check if the location of the contour is between the constrains
            if cX > 200 and cX < 500 and cY > 25 and cY < 400:
                # cv2.circle(resize_img, (cX, cY), 5, (255, 0, 255), thickness=1, lineType=1, shift=0)
                # check if the size of the contour is bigger than 250
                if area > 450:
                    obj = np.vstack(roi_objects)
                    object_list.append(roi_objects[i])
                    hierarchy.append(hierarchy3[0][i])
                    print(i)
        i = i + 1
    a = np.array([hierarchy])
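    # Wrapping the kept hierarchy rows in an extra list rebuilds the (1, n, 4) layout
    # that cv2.findContours returns, so `a` could be passed to functions expecting an
    # OpenCV-style hierarchy array (e.g. the commented-out object_composition call below).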
    # a = [[[-1,-1,-1,-1][-1,-1,-1,-1][-1,-1,-1,-1]]]
    # Object combine kept objects
    # device, obj, mask_2 = pcv.object_composition(resize_img, object_list, a, device, debug)

    mask_contours = np.zeros(resize_img.shape, np.uint8)
    cv2.drawContours(mask_contours, object_list, -1, (255, 255, 255), -1)
    gray_image = cv2.cvtColor(mask_contours, cv2.COLOR_BGR2GRAY)
    ret, mask_contours = cv2.threshold(gray_image, 127, 255, cv2.THRESH_BINARY)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(resize_img,
                                                         mask_contours,
                                                         device,
                                                         debug=None)
    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        resize_img,
        'cutto',
        roi1,
        roi_hierarchy,
        id_objects,
        obj_hierarchy,
        device,
        debug=None)
    # Object combine kept objects
    device, obj, mask = pcv.object_composition(resize_img,
                                               roi_objects,
                                               hierarchy3,
                                               device,
                                               debug=None)
    ############### Analysis ################
    masked = mask.copy()

    outfile = False
    if args.writeimg == True:
        outfile = args.outdir + "/" + filename

    print("step three")
    """
    Step three: Calculate all the features
    """
    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        resize_img, args.image, obj, mask, device, debug, filename="/file")
    print(shape_img)
    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(
        resize_img, args.image, obj, mask, 1680, device)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        resize_img, args.image, kept_mask, 256, device, debug, 'all', 'v',
        'img', 300)
    mask_watershed = mask.copy()
    kernel = np.zeros((5, 5), dtype=np.uint8)
    device, mask_watershed = pcv.erode(mask_watershed, 5, 1, device, debug)

    device, watershed_header, watershed_data, analysis_images = pcv.watershed_segmentation(
        device, resize_img, mask, 50, './examples', debug)
    device, list_of_acute_points = pcv.acute_vertex(obj, 30, 60, 10,
                                                    resize_img, device, debug)

    device, top, bottom, center_v = pcv.x_axis_pseudolandmarks(
        obj, mask, resize_img, device, debug)

    device, left, right, center_h = pcv.y_axis_pseudolandmarks(
        obj, mask, resize_img, device, debug)

    device, points_rescaled, centroid_rescaled, bottomline_rescaled = pcv.scale_features(
        obj, mask, list_of_acute_points, 225, device, debug)

    # Measure the distance from the rescaled tip points to the reference points
    # (centroid and baseline)
    device, vert_ave_c, hori_ave_c, euc_ave_c, ang_ave_c, vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b = pcv.landmark_reference_pt_dist(
        points_rescaled, centroid_rescaled, bottomline_rescaled, device, debug)

    landmark_header = [
        'HEADER_LANDMARK', 'tip_points', 'tip_points_r', 'centroid_r',
        'baseline_r', 'tip_number', 'vert_ave_c', 'hori_ave_c', 'euc_ave_c',
        'ang_ave_c', 'vert_ave_b', 'hori_ave_b', 'euc_ave_b', 'ang_ave_b',
        'left_lmk', 'right_lmk', 'center_h_lmk', 'left_lmk_r', 'right_lmk_r',
        'center_h_lmk_r', 'top_lmk', 'bottom_lmk', 'center_v_lmk', 'top_lmk_r',
        'bottom_lmk_r', 'center_v_lmk_r'
    ]
    landmark_data = [
        'LANDMARK_DATA', 0, 0, 0, 0,
        len(list_of_acute_points), vert_ave_c, hori_ave_c, euc_ave_c,
        ang_ave_c, vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0
    ]
    shape_data_train = list(shape_data)
    shape_data_train.pop(0)
    shape_data_train.pop(10)
    watershed_data_train = list(watershed_data)
    watershed_data_train.pop(0)
    landmark_data_train = [
        len(list_of_acute_points), vert_ave_c, hori_ave_c, euc_ave_c,
        ang_ave_c, vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b
    ]
    X = shape_data_train + watershed_data_train + landmark_data_train
    print("len X", len(X))
    print(X)
    # Write shape and color data to results file
    result = open(args.result, "a")
    result.write('\t'.join(map(str, shape_header)))
    result.write("\n")
    result.write('\t'.join(map(str, shape_data)))
    result.write("\n")
    result.write('\t'.join(map(str, watershed_header)))
    result.write("\n")
    result.write('\t'.join(map(str, watershed_data)))
    result.write("\n")
    result.write('\t'.join(map(str, landmark_header)))
    result.write("\n")
    result.write('\t'.join(map(str, landmark_data)))
    result.write("\n")
    for row in shape_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.write('\t'.join(map(str, color_header)))
    result.write("\n")
    result.write('\t'.join(map(str, color_data)))
    result.write("\n")
    for row in color_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.close()
    print("done")
    print(shape_img)
    return X, shape_img, masked
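# Example usage of get_feature (hypothetical path; the function expects a BGR image
# as returned by cv2.imread):
#
#     X, shape_img, masked = get_feature(cv2.imread("path/to/plant_image.png"))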
Example 5
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)
    # brass_mask = cv2.imread(args.roi)

    # Pipeline step
    device = 0

    device, mask = pcv.naive_bayes_classifier(img, "naive_bayes.pdf.txt",
                                              device, args.debug)

    mask1 = np.uint8(mask)

    mask_copy = np.copy(mask1)

    # Fill small objects
    device, soil_fill = pcv.fill(mask1, mask_copy, 1500, device, args.debug)

    # Median Filter
    device, soil_mblur = pcv.median_blur(soil_fill, 11, device, args.debug)
    device, soil_cnt = pcv.median_blur(soil_fill, 11, device, args.debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(img, soil_cnt, 'white', device,
                                     args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(
        masked2, soil_cnt, device, args.debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(img, 'circle', device, None,
                                                 'default', args.debug, True,
                                                 0, 50, -1500, -1500)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device,
        args.debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3,
                                               device, args.debug)

    # ############# Analysis ################

    # output mask
    device, maskpath, mask_images = pcv.output_mask(device, img, mask,
                                                    filename, args.outdir,
                                                    True, args.debug)

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, obj, mask, device, args.debug)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images,
    # output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        img, args.image, mask, 256, device, args.debug, None, 'v', 'img', 300)

    result = open(args.result, "a")
    result.write('\t'.join(map(str, shape_header)))
    result.write("\n")
    result.write('\t'.join(map(str, shape_data)))
    result.write("\n")
    for row in mask_images:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.write('\t'.join(map(str, color_header)))
    result.write("\n")
    result.write('\t'.join(map(str, color_data)))
    result.write("\n")
    result.close()
Example 6
def main():
    # Get options
    args = options()

    # Read image (converting fmax and track to 8 bit just to create a mask, use 16-bit for all the math)
    mask, path, filename = pcv.readimage(args.fmax)
    #mask = cv2.imread(args.fmax)
    track = cv2.imread(args.track)

    mask1, mask2, mask3 = cv2.split(mask)

    # Pipeline step
    device = 0

    # Mask pesky track autofluor
    device, track1 = pcv.rgb2gray_hsv(track, 'v', device, args.debug)
    device, track_thresh = pcv.binary_threshold(track1, 0, 255, 'light',
                                                device, args.debug)
    device, track_inv = pcv.invert(track_thresh, device, args.debug)
    device, track_masked = pcv.apply_mask(mask1, track_inv, 'black', device,
                                          args.debug)

    # Threshold the track-masked Fmax image
    device, fmax_thresh = pcv.binary_threshold(track_masked, 20, 255, 'light',
                                               device, args.debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(fmax_thresh, 5, device, args.debug)
    device, s_cnt = pcv.median_blur(fmax_thresh, 5, device, args.debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 110, device, args.debug)
    device, sfill_cnt = pcv.fill(s_mblur, s_cnt, 110, device, args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(
        mask, sfill_cnt, device, args.debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(mask, 'circle', device, None,
                                                 'default', args.debug, True,
                                                 0, 0, -50, -50)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        mask, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy,
        device, args.debug)

    # Object combine kept objects
    device, obj, masked = pcv.object_composition(mask, roi_objects, hierarchy3,
                                                 device, args.debug)

    ################ Analysis ################

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        mask, args.fmax, obj, masked, device, args.debug,
        args.outdir + '/' + filename)

    # Fluorescence Measurement (read in 16-bit images)
    fdark = cv2.imread(args.fdark, -1)
    fmin = cv2.imread(args.fmin, -1)
    fmax = cv2.imread(args.fmax, -1)

    device, fvfm_header, fvfm_data = pcv.fluor_fvfm(
        fdark, fmin, fmax, kept_mask, device, 1000, args.debug,
        args.outdir + '/' + filename)

    # Output shape and color data
    pcv.print_results(args.fmax, shape_header, shape_data)
    pcv.print_results(args.fmax, fvfm_header, fvfm_data)
def main():
    # Parse command-line options
    args = options()

    device = 0

    # Open output file
    out = open(args.outfile, "w")

    # Open the image file
    img, path, fname = pcv.readimage(filename=args.image, debug=args.debug)
    # Classify healthy and unhealthy plant pixels
    device, masks = pcv.naive_bayes_classifier(img=img,
                                               pdf_file=args.pdfs,
                                               device=device)

    # Use the identified blue mesh area to build a mask for the pot area
    # First erode the blue mesh region to remove background
    device, mesh_errode = pcv.erode(img=masks["Background_Blue"],
                                    kernel=9,
                                    i=3,
                                    device=device,
                                    debug=args.debug)
    # Define a region of interest for blue mesh contours
    device, pot_roi, pot_hierarchy = pcv.define_roi(img=img,
                                                    shape='rectangle',
                                                    device=device,
                                                    roi=None,
                                                    roi_input='default',
                                                    debug=args.debug,
                                                    adjust=True,
                                                    x_adj=0,
                                                    y_adj=500,
                                                    w_adj=0,
                                                    h_adj=-650)
    # Find blue mesh contours
    device, mesh_objects, mesh_hierarchy = pcv.find_objects(img=img,
                                                            mask=mesh_errode,
                                                            device=device,
                                                            debug=args.debug)
    # Keep blue mesh contours in the region of interest
    device, kept_mesh_objs, kept_mesh_hierarchy, kept_mask_mesh, _ = pcv.roi_objects(
        img=img,
        roi_type='partial',
        roi_contour=pot_roi,
        roi_hierarchy=pot_hierarchy,
        object_contour=mesh_objects,
        obj_hierarchy=mesh_hierarchy,
        device=device,
        debug=args.debug)
    # Flatten the blue mesh contours into a single object
    device, mesh_flattened, mesh_mask = pcv.object_composition(
        img=img,
        contours=kept_mesh_objs,
        hierarchy=kept_mesh_hierarchy,
        device=device,
        debug=args.debug)
    # Initialize a pot mask
    pot_mask = np.zeros(np.shape(masks["Background_Blue"]), dtype=np.uint8)
    # Find the minimum bounding rectangle for the blue mesh region
    rect = cv2.minAreaRect(mesh_flattened)
    # Create a contour for the minimum bounding box
    box = cv2.boxPoints(rect)
    box = np.int0(box)
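    # Note: np.int0 is an alias for np.intp in older NumPy releases; where the alias
    # is unavailable, box.astype(np.intp) gives the same integer corner coordinates
    # for cv2.drawContours below.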
    # Create a mask from the bounding box contour
    cv2.drawContours(pot_mask, [box], 0, (255), -1)
    # If the bounding box area is too small then the plant has likely occluded too much of the pot for us to use this
    # as a marker for the pot area
    if np.sum(pot_mask) / 255 < 2900000:
        print(np.sum(pot_mask) / 255)
        # Create a new pot mask
        pot_mask = np.zeros(np.shape(masks["Background_Blue"]), dtype=np.uint8)
        # Set the mask area to the ROI area
        box = np.array([[0, 500], [0, 2806], [2304, 2806], [2304, 500]])
        cv2.drawContours(pot_mask, [box], 0, (255), -1)
    # Dilate the blue mesh area to include the ridge of the pot
    device, pot_mask_dilated = pcv.dilate(img=pot_mask,
                                          kernel=3,
                                          i=60,
                                          device=device,
                                          debug=args.debug)
    # Mask the healthy mask
    device, healthy_masked = pcv.apply_mask(img=cv2.merge(
        [masks["Healthy"], masks["Healthy"], masks["Healthy"]]),
                                            mask=pot_mask_dilated,
                                            mask_color="black",
                                            device=device,
                                            debug=args.debug)
    # Mask the unhealthy mask
    device, unhealthy_masked = pcv.apply_mask(img=cv2.merge(
        [masks["Unhealthy"], masks["Unhealthy"], masks["Unhealthy"]]),
                                              mask=pot_mask_dilated,
                                              mask_color="black",
                                              device=device,
                                              debug=args.debug)
    # Convert the masks back to binary
    healthy_masked, _, _ = cv2.split(healthy_masked)
    unhealthy_masked, _, _ = cv2.split(unhealthy_masked)

    # Fill small objects
    device, fill_image_healthy = pcv.fill(img=np.copy(healthy_masked),
                                          mask=np.copy(healthy_masked),
                                          size=300,
                                          device=device,
                                          debug=args.debug)
    device, fill_image_unhealthy = pcv.fill(img=np.copy(unhealthy_masked),
                                            mask=np.copy(unhealthy_masked),
                                            size=1000,
                                            device=device,
                                            debug=args.debug)
    # Define a region of interest
    device, roi1, roi_hierarchy = pcv.define_roi(img=img,
                                                 shape='rectangle',
                                                 device=device,
                                                 roi=None,
                                                 roi_input='default',
                                                 debug=args.debug,
                                                 adjust=True,
                                                 x_adj=450,
                                                 y_adj=1000,
                                                 w_adj=-400,
                                                 h_adj=-1000)
    # Filter objects that overlap the ROI
    device, id_objects, obj_hierarchy_healthy = pcv.find_objects(
        img=img, mask=fill_image_healthy, device=device, debug=args.debug)
    device, _, _, kept_mask_healthy, _ = pcv.roi_objects(
        img=img,
        roi_type='partial',
        roi_contour=roi1,
        roi_hierarchy=roi_hierarchy,
        object_contour=id_objects,
        obj_hierarchy=obj_hierarchy_healthy,
        device=device,
        debug=args.debug)
    device, id_objects, obj_hierarchy_unhealthy = pcv.find_objects(
        img=img, mask=fill_image_unhealthy, device=device, debug=args.debug)
    device, _, _, kept_mask_unhealthy, _ = pcv.roi_objects(
        img=img,
        roi_type='partial',
        roi_contour=roi1,
        roi_hierarchy=roi_hierarchy,
        object_contour=id_objects,
        obj_hierarchy=obj_hierarchy_unhealthy,
        device=device,
        debug=args.debug)
    # Combine the healthy and unhealthy mask
    device, mask = pcv.logical_or(img1=kept_mask_healthy,
                                  img2=kept_mask_unhealthy,
                                  device=device,
                                  debug=args.debug)

    # Output a healthy/unhealthy image
    classified_img = cv2.merge([
        np.zeros(np.shape(mask), dtype=np.uint8), kept_mask_healthy,
        kept_mask_unhealthy
    ])
    pcv.print_image(img=classified_img,
                    filename=os.path.join(
                        args.outdir,
                        os.path.basename(args.image)[:-4] + ".classified.png"))

    # Output a healthy/unhealthy image overlaid on the original image
    overlayed = cv2.addWeighted(src1=np.copy(classified_img),
                                alpha=0.5,
                                src2=np.copy(img),
                                beta=0.5,
                                gamma=0)
    pcv.print_image(img=overlayed,
                    filename=os.path.join(
                        args.outdir,
                        os.path.basename(args.image)[:-4] + ".overlaid.png"))

    # Extract hue values from the image
    device, h = pcv.rgb2gray_hsv(img=img,
                                 channel="h",
                                 device=device,
                                 debug=args.debug)

    # Extract the plant hue values
    plant_hues = h[np.where(mask == 255)]

    # Initialize hue histogram
    hue_hist = {}
    for i in range(0, 180):
        hue_hist[i] = 0

    # Store all hue values
    hue_values = []

    # Populate histogram
    total_px = len(plant_hues)
    for hue in plant_hues:
        hue_hist[hue] += 1
        hue_values.append(hue)
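    # An equivalent vectorized form, if preferred (plant_hues holds integer hue values
    # in the range 0-179, so np.bincount reproduces the loop above):
    #
    #     hue_counts = np.bincount(plant_hues, minlength=180)
    #     hue_hist = {i: int(hue_counts[i]) for i in range(180)}
    #     hue_values = plant_hues.tolist()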

    # Parse the filename
    genotype, treatment, replicate, timepoint = os.path.basename(
        args.image)[:-4].split("_")
    replicate = replicate.replace("#", "")
    if timepoint[-3:] == "dbi":
        timepoint = -1
    else:
        timepoint = timepoint.replace("dpi", "")

    # Output results
    for i in range(0, 180):
        out.write("\t".join(
            map(str, [
                genotype, treatment, timepoint, replicate, total_px, i,
                hue_hist[i]
            ])) + "\n")
    out.close()

    # Calculate basic statistics
    healthy_sum = int(np.sum(kept_mask_healthy))
    unhealthy_sum = int(np.sum(kept_mask_unhealthy))
    healthy_total_ratio = healthy_sum / float(healthy_sum + unhealthy_sum)
    unhealthy_total_ratio = unhealthy_sum / float(healthy_sum + unhealthy_sum)
    stats = open(args.outfile[:-4] + ".stats.txt", "w")
    stats.write("%s, %f, %f, %f, %f" %
                (os.path.basename(args.image), healthy_sum, unhealthy_sum,
                 healthy_total_ratio, unhealthy_total_ratio) + '\n')
    stats.close()

    # Fit a 3-component Gaussian Mixture Model
    gmm = mixture.GaussianMixture(n_components=3,
                                  covariance_type="full",
                                  tol=0.001)
    gmm.fit(np.expand_dims(hue_values, 1))
    gmm3 = open(args.outfile[:-4] + ".gmm3.txt", "w")
    gmm3.write("%s, %f, %f, %f, %f, %f, %f, %f, %f, %f" %
               (os.path.basename(args.image), gmm.means_.ravel()[0],
                gmm.means_.ravel()[1], gmm.means_.ravel()[2],
                np.sqrt(gmm.covariances_.ravel()[0]),
                np.sqrt(gmm.covariances_.ravel()[1]),
                np.sqrt(gmm.covariances_.ravel()[2]), gmm.weights_.ravel()[0],
                gmm.weights_.ravel()[1], gmm.weights_.ravel()[2]) + '\n')
    gmm3.close()

    # Fit a 2-component Gaussian Mixture Model
    gmm = mixture.GaussianMixture(n_components=2,
                                  covariance_type="full",
                                  tol=0.001)
    gmm.fit(np.expand_dims(hue_values, 1))
    gmm2 = open(args.outfile[:-4] + ".gmm2.txt", "w")
    gmm2.write("%s, %f, %f, %f, %f, %f, %f" %
               (os.path.basename(args.image), gmm.means_.ravel()[0],
                gmm.means_.ravel()[1], np.sqrt(gmm.covariances_.ravel()[0]),
                np.sqrt(gmm.covariances_.ravel()[1]), gmm.weights_.ravel()[0],
                gmm.weights_.ravel()[1]) + '\n')
    gmm2.close()

    # Fit a 1-component Gaussian Mixture Model
    gmm = mixture.GaussianMixture(n_components=1,
                                  covariance_type="full",
                                  tol=0.001)
    gmm.fit(np.expand_dims(hue_values, 1))
    gmm1 = open(args.outfile[:-4] + ".gmm1.txt", "w")
    gmm1.write(
        "%s, %f, %f, %f" %
        (os.path.basename(args.image), gmm.means_.ravel()[0],
         np.sqrt(gmm.covariances_.ravel()[0]), gmm.weights_.ravel()[0]) + '\n')
    gmm1.close()
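    # To compare the 1-, 2-, and 3-component fits rather than writing all three out,
    # sklearn's GaussianMixture also exposes information criteria. Illustrative sketch,
    # not part of the original pipeline:
    #
    #     X_hue = np.expand_dims(hue_values, 1)
    #     fits = [mixture.GaussianMixture(n_components=k, covariance_type="full",
    #                                     tol=0.001).fit(X_hue) for k in (1, 2, 3)]
    #     best = min(fits, key=lambda m: m.bic(X_hue))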
Example 8
def main():
  # Get options
  args = options()
  
  # Read image
  img, path, filename = pcv.readimage(args.image)
    
  # Pipeline step
  device = 0

  # Convert RGB to HSV and extract the Saturation channel
  device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)
  
  # Threshold the Saturation image
  device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, args.debug)
  
  # Median Filter (cv2.medianBlur requires an odd kernel size greater than 1)
  device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
  device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)
  
  # Fill small objects
  #device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)
  
  # Convert RGB to LAB and extract the Blue channel
  device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)
  
  # Threshold the blue image
  device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device, args.debug)
  device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device, args.debug)
  
  # Fill small objects
  #device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug)
  
  # Join the thresholded saturation and blue-yellow images
  device, bs = pcv.logical_and(s_mblur, b_cnt, device, args.debug)
  
  # Apply Mask (for vis images, mask_color=white)
  device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)
  
  # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
  device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug)
  device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, args.debug)
  
  # Threshold the green-magenta and blue images
  device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark', device, args.debug)
  device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, args.debug)
  
  # Join the thresholded green-magenta and blue-yellow images (OR)
  device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)
  device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)
  
  # Fill small noise
  device, ab_fill1 = pcv.fill(ab, ab_cnt, 2, device, args.debug)
  
  # Dilate to join small objects with larger ones
  device, ab_cnt1 = pcv.dilate(ab_fill1, 3, 2, device, args.debug)
  device, ab_cnt2 = pcv.dilate(ab_fill1, 3, 2, device, args.debug)
  
  # Fill dilated image mask
  device, ab_cnt3 = pcv.fill(ab_cnt2, ab_cnt1, 150, device, args.debug)
  device, masked2 = pcv.apply_mask(masked, ab_cnt3, 'white', device, args.debug)
  
  # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
  device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, args.debug)
  device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, args.debug)
  
  # Threshold the green-magenta and blue images
  device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, 'dark', device, args.debug)
  device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255, 'light', device, args.debug)
  device, ab_fill = pcv.logical_or(masked2a_thresh, masked2b_thresh, device, args.debug)
  
  # Identify objects
  device, id_objects,obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, args.debug)
  
  # Define ROI
  device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device, None, 'default', args.debug, True, 525, 0, -490, -150)
  
  # Decide which objects to keep
  device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug)
  
  # Object combine kept objects
  device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)
  
  ############## VIS Analysis ################
  
  outfile=False
  if args.writeimg==True:
    outfile=args.outdir+"/"+filename
  
  # Find shape properties, output shape image (optional)
  device, shape_header, shape_data, shape_img = pcv.analyze_object(img, args.image, obj, mask, device, args.debug, outfile)
  
  # Shape properties relative to user boundary line (optional)
  device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(img, args.image, obj, mask, 325, device, args.debug, outfile)
  
  # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
  device, color_header, color_data, color_img = pcv.analyze_color(img, args.image, mask, 256, device, args.debug, None, 'v', 'img', 300, outfile)
  
  # Output shape and color data

  result=open(args.result,"a")
  result.write('\t'.join(map(str,shape_header)))
  result.write("\n")
  result.write('\t'.join(map(str,shape_data)))
  result.write("\n")
  for row in shape_img:
    result.write('\t'.join(map(str,row)))
    result.write("\n")
  result.write('\t'.join(map(str,color_header)))
  result.write("\n")
  result.write('\t'.join(map(str,color_data)))
  result.write("\n")
  result.write('\t'.join(map(str,boundary_header)))
  result.write("\n")
  result.write('\t'.join(map(str,boundary_data)))
  result.write("\n")
  result.write('\t'.join(map(str,boundary_img1)))
  result.write("\n")
  for row in color_img:
    result.write('\t'.join(map(str,row)))
    result.write("\n")
  result.close()
Example 9
def main():
  # Get options
  args = options()
  
  # Read image
  img, path, filename = pcv.readimage(args.image)
  brass_mask = cv2.imread(args.roi)
  
  # Pipeline step
  device = 0

  # Convert RGB to HSV and extract the Saturation channel
  device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)
  
  # Threshold the Saturation image
  device, s_thresh = pcv.binary_threshold(s, 49, 255, 'light', device, args.debug)
  
  # Median Filter
  device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)

  # Apply Mask (for vis images, mask_color=white)
  device, masked = pcv.apply_mask(img, s_mblur, 'white', device, args.debug)

  # Convert RGB to LAB and extract the Green-Magenta channel
  device, soil_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug)

  # Threshold the green-magenta image
  device, soila_thresh = pcv.binary_threshold(soil_a, 133, 255, 'light', device, args.debug)
  device, soila_cnt = pcv.binary_threshold(soil_a, 133, 255, 'light', device, args.debug)

  # Fill small objects
  device, soil_fill = pcv.fill(soila_thresh, soila_cnt, 200, device, args.debug)

  # Median Filter
  device, soil_mblur = pcv.median_blur(soil_fill, 13, device, args.debug)
  device, soil_cnt = pcv.median_blur(soil_fill, 13, device, args.debug)

  # Apply mask (for vis images, mask_color=white)
  device, masked2 = pcv.apply_mask(soil_mblur, soil_cnt, 'white', device, args.debug)

  # Identify objects
  device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, args.debug)

  # Define ROI
  device, roi1, roi_hierarchy = pcv.define_roi(img, 'rectangle', device, None, 'default', args.debug, True, 400, 400, -400, -400)

  # Decide which objects to keep
  device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug)

  # Object combine kept objects
  device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)
############## Analysis ################  
  
  # Find shape properties, output shape image (optional)
  device, shape_header, shape_data, shape_img = pcv.analyze_object(img, args.image, obj, mask, device, args.debug, args.outdir+'/'+filename)
   
  # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
  device, color_header, color_data, norm_slice = pcv.analyze_color(img, args.image, mask, 256, device, args.debug, 'all', 'rgb', 'v', 'img', 300, args.outdir+'/'+filename)
  
  # Output shape and color data
  pcv.print_results(args.image, shape_header, shape_data)
  pcv.print_results(args.image, color_header, color_data)
Example 10
def main():
    # Get options 1
    args = options()

    # Read image 2
    img, path, filename = pcv.readimage(args.image)
   # cv2.imshow("imagen",img)
    # Pipeline step 3
    device = 0
    debug = args.debug

    # Convert RGB to HSV and extract the Saturation channel 4
    device, s = pcv.rgb2gray_hsv(img, 's', device, debug)
   # cv2.imshow("rgb a hsv y extraer saturacion 4",s)
    # Threshold the Saturation image 5
    device, s_thresh = pcv.binary_threshold(s, 85, 255, 'light', device, debug)
   # cv2.imshow("imagen binaria de hsv",s_thresh)
    # Median Filter 6
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)
   # cv2.imshow("s_mblur",s_mblur)
   # cv2.imshow("s_cnt",s_cnt)
    # Convert RGB (original image) to LAB and extract the Blue channel 7
    device, b = pcv.rgb2gray_lab(img, 'b', device, debug)
   # cv2.imshow("convertir RGB a LAB",b)
    # Threshold the blue image 8
    device, b_thresh = pcv.binary_threshold(b, 160, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 160, 255, 'light', device, debug)
   # cv2.imshow("imagen binaria de LAB",b_thresh)
   # cv2.imshow("imagen binaria",b_cnt)
    # Fill small objects
    #device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, debug)
    
    # Join the thresholded saturation and blue-yellow images 9
    device, bs = pcv.logical_or(s_mblur, b_cnt, device, debug)
   # cv2.imshow("suma logica s_mblur and b_cnt",bs)
     # Apply Mask (for vis images, mask_color=white) 10
    device, masked = pcv.apply_mask(img, bs, 'white', device, debug)
   # cv2.imshow("aplicar mascara masked",masked)
    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels 11
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, debug)
   # cv2.imshow("canal verde-magenta",masked_a)
   # cv2.imshow("canal azul-amarillo",masked_b)  
    # Threshold the green-magenta and blue images 12
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 115, 255, 'dark', device, debug)
    device, maskeda_thresh1 = pcv.binary_threshold(masked_a, 135, 255, 'light', device, debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, debug)
   # cv2.imshow("threshold de canal verde-magenta dark",maskeda_thresh)
   # cv2.imshow("threshold de canal verde-magenta light",maskeda_thresh1)
   # cv2.imshow("threshold de canal azul-amarillo",maskedb_thresh)
    # Join the thresholded green-magenta and blue-yellow images (OR) 13
    device, ab1 = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)
    device, ab = pcv.logical_or(maskeda_thresh1, ab1, device, debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh1, ab1, device, debug)
   # cv2.imshow("suma logica or 1",ab1)
   # cv2.imshow("suma logica or 2 ab",ab)
   # cv2.imshow("suma logica or 3 ab_cnt",ab_cnt)
   
    # Fill small objects 14
    device, ab_fill = pcv.fill(ab, ab_cnt, 200, device, debug)
   # cv2.imshow("ab_fill",ab_fill)
    # Apply mask (for vis images, mask_color=white) 15
    device, masked2 = pcv.apply_mask(masked, ab_fill, 'white', device, debug)
   # cv2.imshow("aplicar maskara2 white",masked2)
   
    #################### understandable up to here ######################
    # Identify objects 16 (print only). Used to identify objects (plant material) in an image.
    # Prints the image if I use "print"; with "plot" the image is not stored, but with "print" it is saved.
    # Use b_thresh and observe.
    device,id_objects,obj_hierarchy = pcv.find_objects(masked2,ab_fill, device, debug)
  
    # Define ROI 17 (print only). Encloses the detected object, but this is still manual, not yet automatic.
    device, roi1, roi_hierarchy= pcv.define_roi(masked2, 'rectangle', device, None, 'default', debug, True, 92, 80, -127, -343)
    
    # Decide which objects to keep 18
    device,roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device, debug)
    
    # Object combine kept objects 19
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, debug)


    ############### Analysis ################

    outfile=False
    if args.writeimg==True:
        outfile=args.outdir+"/"+filename

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(img, args.image, obj, mask, device, debug, args.outdir + '/' + filename)

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(img, args.image, obj, mask, 1680, device, debug, args.outdir + '/' + filename)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(img, args.image, kept_mask, 256, device, debug, 'all', 'v', 'img', 300, args.outdir + '/' + filename)

    # Write shape and color data to results file
    result=open(args.result,"a")
    result.write('\t'.join(map(str,shape_header)))
    result.write("\n")
    result.write('\t'.join(map(str,shape_data)))
    result.write("\n")
    for row in shape_img:  
        result.write('\t'.join(map(str,row)))
        result.write("\n")
    result.write('\t'.join(map(str,color_header)))
    result.write("\n")
    result.write('\t'.join(map(str,color_data)))
    result.write("\n")
    for row in color_img:
        result.write('\t'.join(map(str,row)))
        result.write("\n")
    result.close()
    cv2.waitKey()
    cv2.destroyAllWindows()
Example 11
    # Mask white-balanced image (here, based on blue channel)
    device, masked = pcv.apply_mask(corr_img, b_fill, 'white', device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked, img_fill, device, args.debug)

    # Define region of interest
    device, roi1, roi_hierarchy = pcv.define_roi(masked, 'rectangle', device, None, 'default', debug, True,
                                                 1000, 10, -1000, -300)
    # Modify the 4 numbers in the parentheses to draw a square that surrounds or overlaps the entire plant,
    # but does not surround or overlap background.

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(corr_img, 'partial', roi1, roi_hierarchy,
                                                                           id_objects, obj_hierarchy, device,
                                                                           args.debug)
    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)

    ############## Analysis ################

    outfile = False
    if args.writeimg == True:
        outfile = args.outdir + "/" + filename

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(img, args.image, obj, mask, device, args.debug,
                                                                     outfile)

    # Shape properties relative to user boundary line (optional)
Example 12
def process_tv_images_core(vis_id,
                           vis_img,
                           nir_id,
                           nir_rgb,
                           nir_cv2,
                           brass_mask,
                           traits,
                           debug=None):
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(vis_img, 's', device, debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 75, 255, 'light', device, debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(vis_img, 'b', device, debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device,
                                            debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(vis_img, bs, 'white', device, debug)

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light',
                                                device, debug)
    device, brass_inv = pcv.invert(brass_thresh, device, debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device,
                                          debug)

    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, debug)
    device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, 'dark',
                                             device, debug)
    device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, 'light',
                                             device, debug)
    device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white',
                                         device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, 'dark',
                                                device, debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, 'light',
                                                device, debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device,
                                         debug)

    # Fill small objects
    device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 300, device, debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device,
                                     debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(
        masked2, soil_cnt, device, debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(vis_img, 'rectangle', device,
                                                 None, 'default', debug, True,
                                                 600, 450, -600, -350)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        vis_img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy,
        device, debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(vis_img, roi_objects,
                                               hierarchy3, device, debug)

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        vis_img, vis_id, obj, mask, device, debug)

    # Determine color properties
    device, color_header, color_data, color_img = pcv.analyze_color(
        vis_img, vis_id, mask, 256, device, debug, None, 'v', 'img', 300)

    # Output shape and color data
    vis_traits = {}
    for i in range(1, len(shape_header)):
        vis_traits[shape_header[i]] = shape_data[i]
    for i in range(2, len(color_header)):
        vis_traits[color_header[i]] = serialize_color_data(color_data[i])

    ############################# Use VIS image mask for NIR image#########################

    # Flip mask
    device, f_mask = pcv.flip(mask, "horizontal", device, debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.116148, 0.116148, device, debug)

    # Position and crop the mask
    device, newmask = pcv.crop_position_mask(nir_rgb, nmask, device, 15, 5,
                                             "top", "right", debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(
        nir_rgb, newmask, device, debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(
        nir_rgb, nir_objects, nir_hierarchy, device, debug)

    ####################################### Analysis #############################################

    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(
        nir_cv2, nir_id, nir_combinedmask, 256, device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(
        nir_cv2, nir_id, nir_combined, nir_combinedmask, device, debug)

    nir_traits = {}
    for i in range(1, len(nshape_header)):
        nir_traits[nshape_header[i]] = nshape_data[i]
    for i in range(2, len(nhist_header)):
        nir_traits[nhist_header[i]] = serialize_color_data(nhist_data[i])

    # Add data to traits table
    traits['tv_area'] = vis_traits['area']

    return [vis_traits, nir_traits]
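# A minimal usage sketch for process_tv_images_core, assuming hypothetical file names,
# the script's existing cv2/pcv imports, and serialize_color_data defined elsewhere in
# the original module:
vis, _, _ = pcv.readimage("vis_tv_z1.png")        # hypothetical VIS image
nir_rgb, _, _ = pcv.readimage("nir_tv_z1.png")    # hypothetical NIR image (RGB read)
nir_gray = cv2.imread("nir_tv_z1.png", -1)        # same NIR image at its original bit depth
brass = cv2.imread("mask_brass_tv_z1_L1.png")     # brass-piece mask used by this pipeline
traits = {}
vis_traits, nir_traits = process_tv_images_core("vis_tv_z1.png", vis, "nir_tv_z1.png",
                                                nir_rgb, nir_gray, brass, traits)
# After the call, traits["tv_area"] holds the top-view projected plant area.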
Esempio n. 13
0
def process_sv_images_core(vis_id,
                           vis_img,
                           nir_id,
                           nir_rgb,
                           nir_cv2,
                           traits,
                           debug=None):
    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(vis_img, 's', device, debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)

    # Fill small objects
    # device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(vis_img, 'b', device, debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device,
                                            debug)
    device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device, debug)

    # Fill small objects
    # device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_mblur, b_cnt, device, debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(vis_img, bs, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark',
                                                  device, debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light',
                                                  device, debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device,
                                    debug)

    # Fill small noise
    device, ab_fill1 = pcv.fill(ab, ab_cnt, 200, device, debug)

    # Dilate to join small objects with larger ones
    device, ab_cnt1 = pcv.dilate(ab_fill1, 3, 2, device, debug)
    device, ab_cnt2 = pcv.dilate(ab_fill1, 3, 2, device, debug)

    # Fill dilated image mask
    device, ab_cnt3 = pcv.fill(ab_cnt2, ab_cnt1, 150, device, debug)
    device, masked2 = pcv.apply_mask(masked, ab_cnt3, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, debug)
    device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, 'dark',
                                                   device, debug)
    device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255,
                                                   'light', device, debug)

    device, masked2a_thresh_blur = pcv.median_blur(masked2a_thresh, 5, device,
                                                   debug)
    device, masked2b_thresh_blur = pcv.median_blur(masked2b_thresh, 13, device,
                                                   debug)

    device, ab_fill = pcv.logical_or(masked2a_thresh_blur,
                                     masked2b_thresh_blur, device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(
        masked2, ab_fill, device, debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device,
                                                 None, 'default', debug, True,
                                                 700, 0, -600, -300)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        vis_img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy,
        device, debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(vis_img, roi_objects,
                                               hierarchy3, device, debug)

    ############## VIS Analysis ################
    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        vis_img, vis_id, obj, mask, device, debug)

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(
        vis_img, vis_id, obj, mask, 384, device, debug)

    # Determine color properties: Histograms, Color Slices and
    # Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        vis_img, vis_id, mask, 256, device, debug, None, 'v', 'img', 300)

    # Output shape and color data
    vis_traits = {}
    for i in range(1, len(shape_header)):
        vis_traits[shape_header[i]] = shape_data[i]
    for i in range(1, len(boundary_header)):
        vis_traits[boundary_header[i]] = boundary_data[i]
    for i in range(2, len(color_header)):
        vis_traits[color_header[i]] = serialize_color_data(color_data[i])

    ############################# Use VIS image mask for NIR image#########################
    # Flip mask
    device, f_mask = pcv.flip(mask, "vertical", device, debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.1154905775, 0.1154905775, device,
                               debug)

    # Position and crop the mask
    device, newmask = pcv.crop_position_mask(nir_rgb, nmask, device, 30, 4,
                                             "top", "right", debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(
        nir_rgb, newmask, device, debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(
        nir_rgb, nir_objects, nir_hierarchy, device, debug)

    ####################################### Analysis #############################################
    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(
        nir_cv2, nir_id, nir_combinedmask, 256, device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(
        nir_cv2, nir_id, nir_combined, nir_combinedmask, device, debug)

    nir_traits = {}
    for i in range(1, len(nshape_header)):
        nir_traits[nshape_header[i]] = nshape_data[i]
    for i in range(2, len(nhist_header)):
        nir_traits[nhist_header[i]] = serialize_color_data(nhist_data[i])

    # Add data to traits table
    traits['sv_area'].append(vis_traits['area'])
    traits['hull_area'].append(vis_traits['hull-area'])
    traits['solidity'].append(vis_traits['solidity'])
    traits['height'].append(vis_traits['height_above_bound'])
    traits['perimeter'].append(vis_traits['perimeter'])

    return [vis_traits, nir_traits]
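# A minimal usage sketch for process_sv_images_core, assuming hypothetical file names,
# the script's existing cv2/pcv imports, and serialize_color_data defined elsewhere.
# The traits dict must already contain list entries for the keys appended above:
vis, _, _ = pcv.readimage("vis_sv_0.png")
nir_rgb, _, _ = pcv.readimage("nir_sv_0.png")
nir_gray = cv2.imread("nir_sv_0.png", -1)
traits = {"sv_area": [], "hull_area": [], "solidity": [], "height": [], "perimeter": []}
vis_traits, nir_traits = process_sv_images_core("vis_sv_0.png", vis, "nir_sv_0.png",
                                                nir_rgb, nir_gray, traits)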
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, "s", device, args.debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 36, 255, "light", device, args.debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 0, device, args.debug)
    device, s_cnt = pcv.median_blur(s_thresh, 0, device, args.debug)

    # Fill small objects
    # device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, "b", device, args.debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 137, 255, "light", device, args.debug)
    device, b_cnt = pcv.binary_threshold(b, 137, 255, "light", device, args.debug)

    # Fill small objects
    # device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_mblur, b_cnt, device, args.debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, "white", device, args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, "a", device, args.debug)
    device, masked_b = pcv.rgb2gray_lab(masked, "b", device, args.debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, "dark", device, args.debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, "light", device, args.debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)

    # Fill small noise
    device, ab_fill1 = pcv.fill(ab, ab_cnt, 2, device, args.debug)

    # Dilate to join small objects with larger ones
    device, ab_cnt1 = pcv.dilate(ab_fill1, 3, 2, device, args.debug)
    device, ab_cnt2 = pcv.dilate(ab_fill1, 3, 2, device, args.debug)

    # Fill dilated image mask
    device, ab_cnt3 = pcv.fill(ab_cnt2, ab_cnt1, 150, device, args.debug)
    device, masked2 = pcv.apply_mask(masked, ab_cnt3, "white", device, args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked2_a = pcv.rgb2gray_lab(masked2, "a", device, args.debug)
    device, masked2_b = pcv.rgb2gray_lab(masked2, "b", device, args.debug)

    # Threshold the green-magenta and blue images
    device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, "dark", device, args.debug)
    device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255, "light", device, args.debug)
    device, ab_fill = pcv.logical_or(masked2a_thresh, masked2b_thresh, device, args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, args.debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(
        masked2, "rectangle", device, None, "default", args.debug, True, 500, 0, -600, -885
    )

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, "partial", roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug
    )

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)

    ############## VIS Analysis ################

    outfile = False
    if args.writeimg == True:
        outfile = args.outdir + "/" + filename

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, obj, mask, device, args.debug, outfile
    )

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(
        img, args.image, obj, mask, 845, device, args.debug, outfile
    )

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        img, args.image, mask, 256, device, args.debug, None, "v", "img", 300, outfile
    )

    # Output shape and color data

    result = open(args.result, "a")
    result.write("\t".join(map(str, shape_header)))
    result.write("\n")
    result.write("\t".join(map(str, shape_data)))
    result.write("\n")
    for row in shape_img:
        result.write("\t".join(map(str, row)))
        result.write("\n")
    result.write("\t".join(map(str, color_header)))
    result.write("\n")
    result.write("\t".join(map(str, color_data)))
    result.write("\n")
    result.write("\t".join(map(str, boundary_header)))
    result.write("\n")
    result.write("\t".join(map(str, boundary_data)))
    result.write("\n")
    result.write("\t".join(map(str, boundary_img1)))
    result.write("\n")
    for row in color_img:
        result.write("\t".join(map(str, row)))
        result.write("\n")
    result.close()

    ############################# Use VIS image mask for NIR image#########################
    # Find matching NIR image
    device, nirpath = pcv.get_nir(path, filename, device, args.debug)
    nir, path1, filename1 = pcv.readimage(nirpath)
    nir2 = cv2.imread(nirpath, -1)

    # Flip mask
    device, f_mask = pcv.flip(mask, "vertical", device, args.debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.1304, 0.1304, device, args.debug)

    # Position and crop the mask
    device, newmask = pcv.crop_position_mask(nir, nmask, device, 65, 0, "top", "left", args.debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir, newmask, device, args.debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir, nir_objects, nir_hierarchy, device, args.debug)

    ####################################### Analysis #############################################
    outfile1 = False
    if args.writeimg == True:
        outfile1 = args.outdir + "/" + filename1

    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(
        nir2, filename1, nir_combinedmask, 256, device, False, args.debug, outfile1
    )
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(
        nir2, filename1, nir_combined, nir_combinedmask, device, args.debug, outfile1
    )

    coresult = open(args.coresult, "a")
    coresult.write("\t".join(map(str, nhist_header)))
    coresult.write("\n")
    coresult.write("\t".join(map(str, nhist_data)))
    coresult.write("\n")
    for row in nir_imgs:
        coresult.write("\t".join(map(str, row)))
        coresult.write("\n")

    coresult.write("\t".join(map(str, nshape_header)))
    coresult.write("\n")
    coresult.write("\t".join(map(str, nshape_data)))
    coresult.write("\n")
    coresult.write("\t".join(map(str, nir_shape)))
    coresult.write("\n")
    coresult.close()
Esempio n. 15
0
def main():
    # Get image options
    args = options()
    # LINE 22
    if args.debug:
        print("Debug mode turned on...")
    # Read the image; flags=0 indicates a grayscale image is expected
    img = cv2.imread(args.image, flags=0)
    # cv2.imshow("imagen original",img)
    # Get directory path and image name from command line arguments
    path, img_name = os.path.split(args.image)

    # LINE 30
    # Read in image which is the pixelwise average of background images
    img_bkgrd = cv2.imread("background_average.jpg", flags=0)
    #cv2.imshow("ventana del fondo",img_bkgrd)
    # Image processing step
    device = 0
    # All good up to here
    # line 37
    # Subtract the background image from the image with the plant.
    device, bkg_sub_img = pcv.image_subtract(img, img_bkgrd, device,
                                             args.debug)
    #cv2.imshow("imagen resta",bkg_sub_img)
    # Threshold the image of interest using the two-sided cv2.inRange function (keep what is between 50-190)
    bkg_sub_thres_img = cv2.inRange(bkg_sub_img, 50, 190)
    if args.debug:
        cv2.imwrite('bkgrd_sub_thres.png', bkg_sub_thres_img)
    # All good up to here
    # line 46
    # Laplace filtering (identify edges based on the 2nd derivative)
    device, lp_img = pcv.laplace_filter(img, 1, 1, device, args.debug)
    #cv2.imshow("imagen de filtrado",lp_img)
    if args.debug:
        pcv.plot_hist(lp_img, 'histograma_lp')

    # Laplacian image sharpening; this step will enhance the darkness of the detected edges
    device, lp_shrp_img = pcv.image_subtract(img, lp_img, device, args.debug)
    #cv2.imshow("imagen de borde lapacian",lp_shrp_img)
    if args.debug:
        pcv.plot_hist(lp_shrp_img, 'histograma_lp_shrp')
    # All good up to here (line 58)
    # Sobel filtering
    # 1st-derivative Sobel filtering along the horizontal axis, kernel = 1, no scaling
    # Note: the tutorial call below has seven arguments, which is wrong here; the scale
    # argument was dropped and the aperture k kept (check sobel_filter in the docs):
    # device, sbx_img = pcv.sobel_filter(img, 1, 0, 1, 1, device, args.debug)
    device, sbx_img = pcv.sobel_filter(img, 1, 0, 1, device, args.debug)
    #cv2.imshow("imagen sobel-eje horizontal",sbx_img)
    if args.debug:
        pcv.plot_hist(sbx_img, 'histograma_sbx')

    # 1st-derivative Sobel filtering along the vertical axis, kernel = 1, no scaling
    device, sby_img = pcv.sobel_filter(img, 0, 1, 1, device, args.debug)
    #cv2.imshow("imagen sobel-ejevertical",sby_img)
    if args.debug:
        pcv.plot_hist(sby_img, 'histograma_sby')

    # Combine the effects of both the x and y filters through matrix addition
    # This captures the edges identified within each plane and emphasizes the edges found in both images
    device, sb_img = pcv.image_add(sbx_img, sby_img, device, args.debug)
    #cv2.imshow("imagen suma de sobel",sb_img)
    if args.debug:
        pcv.plot_hist(sb_img, 'histograma_sb_comb_img')
    # All good up to here (line 82)
    # Use a median blur (low-pass) filter to smooth the Sobel image
    device, mblur_img = pcv.median_blur(sb_img, 1, device, args.debug)
    #cv2.imshow("imagen blur",mblur_img)
    device, mblur_invert_img = pcv.invert(mblur_img, device, args.debug)
    #cv2.imshow("imagen blur-invertido",mblur_invert_img)
    # Combine the smoothed Sobel image with the sharpened Laplacian image
    # This combines the best features of both methods as described in "Digital Image Processing" by Gonzalez and Woods, pg. 169
    device, edge_shrp_img = pcv.image_add(mblur_invert_img, lp_shrp_img,
                                          device, args.debug)
    #cv2.imshow("imagen-combinacion-sobel-laplacian",mblur_img)
    if args.debug:
        pcv.plot_hist(edge_shrp_img, 'hist_edge_shrp_img')

    # Apply a threshold to generate a binary image
    device, tr_es_img = pcv.binary_threshold(edge_shrp_img, 125, 255, 'dark',
                                             device, args.debug)
    #cv2.imshow("imagen binaria de combinacion",tr_es_img)
    # All good up to here (line 99)
    # Prepare a few small kernels for morphological filtering
    kern = np.zeros((3, 3), dtype=np.uint8)
    kern1 = np.copy(kern)
    kern1[1, 1:3] = 1
    kern2 = np.copy(kern)
    kern2[1, 0:2] = 1
    kern3 = np.copy(kern)
    kern3[0:2, 1] = 1
    kern4 = np.copy(kern)
    kern4[1:3, 1] = 1

    # Prepare a large kernel for dilation
    kern[1, 0:3] = 1
    kern[0:3, 1] = 1
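    # For reference, the kernels built above are (1 = active pixel):
    #   kern1 = [[0, 0, 0], [0, 1, 1], [0, 0, 0]]    kern2 = [[0, 0, 0], [1, 1, 0], [0, 0, 0]]
    #   kern3 = [[0, 1, 0], [0, 1, 0], [0, 0, 0]]    kern4 = [[0, 0, 0], [0, 1, 0], [0, 1, 0]]
    #   kern  = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]  (a 3x3 cross for dilation)
    # Note: the pcv.erode calls below appear to pass a kernel size of 1 rather than these
    # arrays, so the arrays may be left over from an earlier version of the script.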
    # Perform erosion with 4 small kernels
    device, e1_img = pcv.erode(tr_es_img, 1, 1, device, args.debug)
    #cv2.imshow("erosion 1",e1_img)
    device, e2_img = pcv.erode(tr_es_img, 1, 1, device, args.debug)
    #cv2.imshow("erosion 2",e2_img)
    device, e3_img = pcv.erode(tr_es_img, 1, 1, device, args.debug)
    #cv2.imshow("erosion 3",e3_img)
    device, e4_img = pcv.erode(tr_es_img, 1, 1, device, args.debug)
    #cv2.imshow("erosion 4",e4_img)

    # Combine eroded images
    device, c12_img = pcv.logical_or(e1_img, e2_img, device, args.debug)
    #cv2.imshow("c12",c12_img)
    device, c123_img = pcv.logical_or(c12_img, e3_img, device, args.debug)
    #cv2.imshow("c123",c123_img)
    device, c1234_img = pcv.logical_or(c123_img, e4_img, device, args.debug)
    #cv2.imshow("c1234",c1234_img)

    # Bring the two object identification approaches together.
    # Using a logical OR combine object identified by background subtraction and the object identified by derivative filter.
    device, comb_img = pcv.logical_or(c1234_img, bkg_sub_thres_img, device,
                                      args.debug)
    #cv2.imshow("comb_img",comb_img)
    # Get masked image, Essentially identify pixels corresponding to plant and keep those.
    device, masked_erd = pcv.apply_mask(img, comb_img, 'black', device,
                                        args.debug)
    #cv2.imshow("masked_erd",masked_erd)
    #cv2.imshow("imagen original chkar",img)
    # Need to remove the edges of the image, we did that by generating a set of rectangles to mask the edges
    # img is (254 X 320)
    # mask for the bottom of the image
    device, im2, box1_img, rect_contour1, hierarchy1 = pcv.rectangle_mask(
        img, (120, 184), (215, 252), device, args.debug, color='white')
    #cv2.imshow("im2",box1_img)
    # mask for the left side of the image
    device, im3, box2_img, rect_contour2, hierarchy2 = pcv.rectangle_mask(
        img, (1, 1), (85, 252), device, args.debug, color='white')
    #cv2.imshow("im3",box2_img)
    # mask for the right side of the image
    device, im4, box3_img, rect_contour3, hierarchy3 = pcv.rectangle_mask(
        img, (240, 1), (318, 252), device, args.debug, color='white')
    #cv2.imshow("im4",box3_img)
    # mask the edges
    device, im5, box4_img, rect_contour4, hierarchy4 = pcv.rectangle_mask(
        img, (1, 1), (318, 252), device, args.debug)
    #cv2.imshow("im5",box4_img)

    # combine boxes to filter the edges and car out of the photo
    device, bx12_img = pcv.logical_or(box1_img, box2_img, device, args.debug)
    device, bx123_img = pcv.logical_or(bx12_img, box3_img, device, args.debug)
    device, bx1234_img = pcv.logical_or(bx123_img, box4_img, device,
                                        args.debug)
    #cv2.imshow("combinacion logica or",bx1234_img)

    # invert this mask and then apply it the masked image.
    device, inv_bx1234_img = pcv.invert(bx1234_img, device, args.debug)
    # cv2.imshow("combinacion logica or invertida",inv_bx1234_img)
    device, edge_masked_img = pcv.apply_mask(masked_erd, inv_bx1234_img,
                                             'black', device, args.debug)
    # cv2.imshow("edge_masked_img",edge_masked_img)

    # assign the coordinates of an area of interest (rectangle around the area you expect the plant to be in)
    device, im6, roi_img, roi_contour, roi_hierarchy = pcv.rectangle_mask(
        img, (120, 75), (200, 184), device, args.debug)
    #cv2.imshow("im6",roi_img)
    # get the coordinates of the plant from the masked object
    plant_objects, plant_hierarchy = cv2.findContours(edge_masked_img,
                                                      cv2.RETR_TREE,
                                                      cv2.CHAIN_APPROX_NONE)

    # Obtain the coordinates of the plant object which are partially within the area of interest
    device, roi_objects, hierarchy5, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi_contour, roi_hierarchy, plant_objects,
        plant_hierarchy, device, args.debug)

    # Apply the box mask to the image to ensure no background
    device, masked_img = pcv.apply_mask(kept_mask, inv_bx1234_img, 'black',
                                        device, args.debug)
    #cv2.imshow("mascara final",masked_img)
    #/////////////////////////////////////////////////////////////
    #device, masked_img = pcv.apply_mask(kept_mask, inv_bx1234_img, 'black', device, args.debug)
    rgb = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    #cv2.imshow("rgb",rgb)
    # Generate a binary to send to the analysis function
    device, mask = pcv.binary_threshold(masked_img, 1, 255, 'light', device,
                                        args.debug)
    #cv2.imshow("mask",mask)
    mask3d = np.copy(mask)
    plant_objects_2, plant_hierarchy_2 = cv2.findContours(
        mask3d, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    device, o, m = pcv.object_composition(rgb, roi_objects, hierarchy5, device,
                                          args.debug)

    # Get final masked image
    device, masked_img = pcv.apply_mask(kept_mask, inv_bx1234_img, 'black',
                                        device, args.debug)
    #cv2.imshow("maskara final2",masked_img)
    # Copy of the block above; the tutorial is wrong here
    # Obtain a 3 dimensional representation of this grayscale image (for pseudocoloring)
    #rgb = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)

    # Generate a binary to send to the analysis function
    #device, mask = pcv.binary_threshold(masked_img, 1, 255, 'light', device, args.debug)

    # Make a copy of this mask for pseudocoloring
    #mask3d = np.copy(mask)

    # Extract coordinates of plant for pseudocoloring of plant
    #plant_objects_2, plant_hierarchy_2 = cv2.findContours(mask3d,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    #device, o, m = pcv.object_composition(rgb, roi_objects, hierarchy5, device, args.debug)

    # Extract coordinates of plant for pseudocoloring of plant
    #plant_objects_2, plant_hierarchy_2 = cv2.findContours(mask3d,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    #device, o, m = pcv.object_composition(rgb, roi_objects, hierarchy5, device, args.debug)
    ####################################
    #######################
    ### Analysis ###
    # Perform signal analysis
    # Tests showing the tutorial is wrong
    #ols=type(args.image)
    #print ols
    # Earlier attempt that did not work: device, hist_header, hist_data, h_norm = pcv.analyze_NIR_intensity(img, args.image, mask, 256, device, args.debug, args.outdir + '/' + img_name)

    #print(args.outdir+'/'+img_name)
    #print(args.debug)
    # In the end it worked after adding the debug= and filename= keyword arguments
    # debug was reporting True, so extra prints were added
    #device, hist_header, hist_data, h_norm = pcv.analyze_NIR_intensity(img, rgb, mask, 256, device, debug='print', filename=False)
    device, hist_header, hist_data, h_norm = pcv.analyze_NIR_intensity(
        img,
        rgb,
        mask,
        256,
        device,
        debug=args.debug,
        filename=args.outdir + '/' + img_name)

    # Perform shape analysis
    device, shape_header, shape_data, ori_img = pcv.analyze_object(
        rgb,
        args.image,
        o,
        m,
        device,
        debug=args.debug,
        filename=args.outdir + '/' + img_name)

    # Print the results to STDOUT
    pcv.print_results(args.image, hist_header, hist_data)
    pcv.print_results(args.image, shape_header, shape_data)

    cv2.waitKey()
    cv2.destroyAllWindows()
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)
    #roi = cv2.imread(args.roi)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 30, 255, 'light', device,
                                            args.debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 0, device, args.debug)
    device, s_cnt = pcv.median_blur(s_thresh, 0, device, args.debug)

    # Fill small objects
    #device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 135, 255, 'light', device,
                                            args.debug)
    device, b_cnt = pcv.binary_threshold(b, 135, 255, 'light', device,
                                         args.debug)

    # Fill small objects
    #device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_or(s_mblur, b_cnt, device, args.debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, args.debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 115, 255, 'dark',
                                                  device, args.debug)
    device, maskeda_thresh1 = pcv.binary_threshold(masked_a, 135, 255, 'light',
                                                   device, args.debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light',
                                                  device, args.debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, ab1 = pcv.logical_or(maskeda_thresh, maskedb_thresh, device,
                                 args.debug)
    device, ab = pcv.logical_or(maskeda_thresh1, ab1, device, args.debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh1, ab1, device, args.debug)

    # Fill small objects
    device, ab_fill = pcv.fill(ab, ab_cnt, 50, device, args.debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(masked, ab_fill, 'white', device,
                                     args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(
        masked2, ab_fill, device, args.debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device,
                                                 None, 'default', args.debug,
                                                 True, 550, 0, -500, -907)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device,
        args.debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3,
                                               device, args.debug)

    ############### Analysis ################

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, obj, mask, device, args.debug,
        args.outdir + '/' + filename)

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(
        img, args.image, obj, mask, 830, device, args.debug,
        args.outdir + '/' + filename)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, norm_slice = pcv.analyze_color(
        img, args.image, kept_mask, 256, device, args.debug, 'all', 'rgb', 'v',
        'img', 300, args.outdir + '/' + filename)

    # Output shape and color data
    pcv.print_results(args.image, shape_header, shape_data)
    pcv.print_results(args.image, color_header, color_data)
    pcv.print_results(args.image, boundary_header, boundary_data)
def main():
    # Sets variables from input arguments
    args = options()

    device = 0  # Workflow step counter
    debug = args.debug  # Option to display debug images to the notebook
    rgb_img = args.image  # Name of seed Image
    writeimg = args.writeimg
    outfile = str(args.result)
    outdir = str(args.outdir)

    # Reads in RGB image
    img, path, filename = pcv.readimage(rgb_img)
    if writeimg is True:
        pcv.print_image(img, outfile + "_original.jpg")

    # Converts RGB to HSV and extract the Saturation channel and inverts image
    device, img_gray_sat = pcv.rgb2gray_hsv(img, 's', device, debug)
    img_gray_sat = 255 - img_gray_sat

    # Corrects saturation image background brightness
    sat_img2 = 255 - correct_white_background(img_gray_sat)

    # Convert RGB to HSV and extract the Value channel
    device, img_gray_val = pcv.rgb2gray_hsv(img, 'v', device, debug)

    # Corrects value image background brightness
    val_img2 = 255 - correct_white_background(img_gray_val)

    # Convert RGB to HSV and extract the Hue channel
    device, img_hue = pcv.rgb2gray_hsv(img, 'h', device, debug)
    # Corrects Hue Image Based on standard
    mask = np.zeros(img.shape[:2], np.uint8)
    mask[1050: 1150, 3750: 3850] = 255
    huehist = cv2.calcHist([img_hue], [0], mask, [256], [0, 256])
    correction_factor = 155 - np.argmax(huehist)
    hue_channel = np.add(img_hue, correction_factor)
    if correction_factor > 0:
        hue_channel = np.where(hue_channel > 179, hue_channel - 180, hue_channel)
    elif correction_factor < 0:
        hue_channel = np.where(hue_channel < 0, 180 + hue_channel, hue_channel)
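    # Worked example of the intended wrap-around above: with correction_factor = 20,
    # a pixel with hue 170 becomes 190, which exceeds 179 and wraps to 190 - 180 = 10;
    # with correction_factor = -20, a hue of 5 becomes -15 and wraps to 180 + (-15) = 165.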

    # Thresholds the Saturation image
    device, sat_img_binary = pcv.binary_threshold(sat_img2, 35, 255, 'light', device, debug)

    # Threshold the Value image
    device, val_img_binary = pcv.binary_threshold(val_img2, 35, 255, 'light', device, debug)

    # Combines masks
    img_binary = np.where(sat_img_binary < 255, val_img_binary, sat_img_binary)

    # Fills in speckles smaller than 200 pixels
    mask = np.copy(img_binary)
    device, fill_image = pcv.fill(img_binary, mask, 200, device, debug)
    if writeimg is True:
        pcv.print_image(mask, outfile + "_mask.jpg")
        pcv.print_image(img_binary, outfile + "_binary.jpg")

    # Identifies objects using filled binary image as a mask
    device, id_objects, obj_hierarchy = pcv.find_objects(img, fill_image, device, debug)

    # Defines rectangular ROI
    device, roi, roi_hierarchy = \
        pcv.define_roi(img, 'rectangle', device, None, 'default', debug, True, 300, 1000, -1250, -425)

    # Keeps only objects within or partially within ROI
    device, roi_objects, roi_obj_hierarchy, kept_mask, obj_area = \
        pcv.roi_objects(img, 'partial', roi, roi_hierarchy, id_objects, obj_hierarchy, device, debug)

    # Randomly colors the individual seeds
    img_copy = np.copy(img)
    for i in range(0, len(roi_objects)):
        rand_color = color_palette(1)
        cv2.drawContours(img_copy, roi_objects, i, rand_color[0], -1, lineType=8, hierarchy=roi_obj_hierarchy)
    if writeimg is True:
        pcv.print_image(img_copy, outfile + "_coloredseeds.jpg")

    # Gets the area of each seed, saved in shape_data
    shape_header = []
    table = []
    for i in range(0, len(roi_objects)):
        if roi_obj_hierarchy[0][i][3] == -1:  # Checks if shape is a parent contour

            # Object combine kept objects
            device, obj, mask2 = pcv.object_composition(img, [roi_objects[i]], np.array([[roi_obj_hierarchy[0][i]]]),
                                                        device, debug)
            if obj is not None:
                device, shape_header, shape_data, shape_img = \
                    pcv.analyze_object(img, rgb_img, obj, mask2, device, debug)
                if shape_header is not None:
                    shape_header.append('hue')
                    shape_header.append('saturation')
                if shape_data is not None:
                    darkval = float(np.sum(np.multiply(sat_img2, mask2))) / np.sum(mask2)
                    huehist = cv2.calcHist([hue_channel], [0], mask2, [256], [0, 256])
                    hueval = np.argmax(huehist)
                    shape_data.append(hueval)
                    shape_data.append(darkval)
                    table.append(shape_data)

    # Finds the area of the size marker in pixels and saves to "marker data"
    device, marker_header, marker_data, analysis_images =\
        pcv.report_size_marker_area(img, 'rectangle', device, debug, "detect", 3525, 850, -200, -1700, "black",
                                    "light", "h", 120)
    # shape_header.append("marker_area")

    # Saves seed and marker shape data results to file
    metadata = open(posixpath.join(outdir, outfile), 'r').read()
    os.remove(posixpath.join(outdir, outfile))

    for seed, row in enumerate(table):
        prefix = posixpath.join(outdir, outfile[0:-4])
        results = open(prefix + '_' + str(seed + 1) + '.txt', 'w')
        results.write(metadata)
        results.write('\t'.join(map(str, shape_header)) + '\n')
        # row.append(marker_data[1])
        results.write('\t'.join(map(str, row)) + '\n')
        results.write('\t'.join(map(str, marker_header)) + '\n')
        results.write('\t'.join(map(str, marker_data)) + '\n')
        results.close()
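# Output naming note (derived from the loop above, with hypothetical names): if outdir is
# "./results" and the result file is "plate01.txt", each parent seed contour gets its own
# file ("./results/plate01_1.txt", "./results/plate01_2.txt", ...), each beginning with the
# original metadata block followed by the seed and size-marker rows.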
def process_tv_images(vis_img, nir_img, debug=None):
    """Process top-view images.

    Inputs:
    vis_img = An RGB image.
    nir_img = An NIR grayscale image.
    debug   = None, print, or plot. Print = save to file, Plot = print to screen.

    :param vis_img: str
    :param nir_img: str
    :param debug: str
    :return:
    """
    # Read image
    img, path, filename = pcv.readimage(vis_img)
    brass_mask = cv2.imread('mask_brass_tv_z1_L1.png')

    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 75, 255, 'light', device, debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, debug)

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, debug)
    device, brass_inv = pcv.invert(brass_thresh, device, debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device, debug)

    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, debug)
    device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, 'dark', device, debug)
    device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, 'light', device, debug)
    device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, 'dark', device, debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, 'light', device, debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)

    # Fill small objects
    device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 300, device, debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(img, 'rectangle', device, None, 'default', debug, True, 600, 450, -600,
                                                 -350)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy,
                                                                           id_objects, obj_hierarchy, device, debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, debug)

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(img, vis_img, obj, mask, device, debug)

    # Determine color properties
    device, color_header, color_data, color_img = pcv.analyze_color(img, vis_img, mask, 256, device, debug, None,
                                                                    'v', 'img', 300)

    print('\t'.join(map(str, shape_header)) + '\n')
    print('\t'.join(map(str, shape_data)) + '\n')
    for row in shape_img:
        print('\t'.join(map(str, row)) + '\n')
    print('\t'.join(map(str, color_header)) + '\n')
    print('\t'.join(map(str, color_data)) + '\n')
    for row in color_img:
        print('\t'.join(map(str, row)) + '\n')

    ############################# Use VIS image mask for NIR image#########################
    # Read NIR image
    nir, path1, filename1 = pcv.readimage(nir_img)
    nir2 = cv2.imread(nir_img, -1)

    # Flip mask
    device, f_mask = pcv.flip(mask, "horizontal", device, debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.116148, 0.116148, device, debug)

    # Position and crop the mask
    device, newmask = pcv.crop_position_mask(nir, nmask, device, 15, 5, "top", "right", debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir, newmask, device, debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir, nir_objects, nir_hierarchy, device, debug)

    ####################################### Analysis #############################################

    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir2, filename1, nir_combinedmask, 256,
                                                                           device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir2, filename1, nir_combined, nir_combinedmask,
                                                                       device, debug)

    print('\t'.join(map(str, nhist_header)) + '\n')
    print('\t'.join(map(str, nhist_data)) + '\n')
    for row in nir_imgs:
        print('\t'.join(map(str, row)) + '\n')
    print('\t'.join(map(str, nshape_header)) + '\n')
    print('\t'.join(map(str, nshape_data)) + '\n')
    print('\t'.join(map(str, nir_shape)) + '\n')
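# A minimal usage sketch, assuming hypothetical file names and that
# 'mask_brass_tv_z1_L1.png' (read inside the function) is present in the working directory:
process_tv_images("vis_tv_z1.png", "nir_tv_z1.png", debug=None)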
def main():
  # Get options
  args = options()
  
  # Read image
  img, path, filename = pcv.readimage(args.image)
  #roi = cv2.imread(args.roi)
  
  # Pipeline step
  device = 0

  # Convert RGB to HSV and extract the Saturation channel
  device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)
  
  # Threshold the Saturation image
  device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, args.debug)
  
  # Median Filter
  device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
  device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)
  
  # Fill small objects
  device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)
  
  # Convert RGB to LAB and extract the Blue channel
  device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)
  
  # Threshold the blue image
  device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug)
  device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug)
  
  # Fill small objects
  device, b_fill = pcv.fill(b_thresh, b_cnt, 150, device, args.debug)
  
  # Join the thresholded saturation and blue-yellow images
  device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug)
  
  # Apply Mask (for vis images, mask_color=white)
  device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)
  
  # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
  device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug)
  device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, args.debug)
  
  # Threshold the green-magenta and blue images
  device, maskeda_thresh = pcv.binary_threshold(masked_a, 122, 255, 'dark', device, args.debug)
  device, maskedb_thresh = pcv.binary_threshold(masked_b, 133, 255, 'light', device, args.debug)
  
  # Join the thresholded green-magenta and blue-yellow images (OR)
  device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)
  device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)
  
  # Fill small objects
  device, ab_fill = pcv.fill(ab, ab_cnt, 200, device, args.debug)
  
  # Apply mask (for vis images, mask_color=white)
  device, masked2 = pcv.apply_mask(masked, ab_fill, 'white', device, args.debug)
  
  # Select area with black bars and find overlapping plant material
  device, roi1, roi_hierarchy1= pcv.define_roi(masked2,'rectangle', device, None, 'default', args.debug,True, 0, 0,-1900,0)
  device, id_objects1,obj_hierarchy1 = pcv.find_objects(masked2, ab_fill, device, args.debug)
  device,roi_objects1, hierarchy1, kept_mask1, obj_area1 = pcv.roi_objects(masked2,'cutto',roi1,roi_hierarchy1,id_objects1,obj_hierarchy1,device, args.debug)
  device, masked3 = pcv.apply_mask(masked2, kept_mask1, 'white', device, args.debug)
  device, masked_a1 = pcv.rgb2gray_lab(masked3, 'a', device, args.debug)
  device, masked_b1 = pcv.rgb2gray_lab(masked3, 'b', device, args.debug)
  device, maskeda_thresh1 = pcv.binary_threshold(masked_a1, 122, 255, 'dark', device, args.debug)
  device, maskedb_thresh1 = pcv.binary_threshold(masked_b1, 170, 255, 'light', device, args.debug)
  device, ab1 = pcv.logical_or(maskeda_thresh1, maskedb_thresh1, device, args.debug)
  device, ab_cnt1 = pcv.logical_or(maskeda_thresh1, maskedb_thresh1, device, args.debug)
  device, ab_fill1 = pcv.fill(ab1, ab_cnt1, 300, device, args.debug)

  
  device, roi2, roi_hierarchy2= pcv.define_roi(masked2,'rectangle', device, None, 'default', args.debug,True, 1900, 0,0,0)
  device, id_objects2,obj_hierarchy2 = pcv.find_objects(masked2, ab_fill, device, args.debug)
  device,roi_objects2, hierarchy2, kept_mask2, obj_area2 = pcv.roi_objects(masked2,'cutto',roi2,roi_hierarchy2,id_objects2,obj_hierarchy2,device, args.debug)
  device, masked4 = pcv.apply_mask(masked2, kept_mask2, 'white', device, args.debug)
  device, masked_a2 = pcv.rgb2gray_lab(masked4, 'a', device, args.debug)
  device, masked_b2 = pcv.rgb2gray_lab(masked4, 'b', device, args.debug)
  device, maskeda_thresh2 = pcv.binary_threshold(masked_a2, 122, 255, 'dark', device, args.debug)
  device, maskedb_thresh2 = pcv.binary_threshold(masked_b2, 170, 255, 'light', device, args.debug)
  device, ab2 = pcv.logical_or(maskeda_thresh2, maskedb_thresh2, device, args.debug)
  device, ab_cnt2 = pcv.logical_or(maskeda_thresh2, maskedb_thresh2, device, args.debug)
  device, ab_fill2 = pcv.fill(ab2, ab_cnt2, 200, device, args.debug)
  
  device, ab_cnt3 = pcv.logical_or(ab_fill1, ab_fill2, device, args.debug)
  device, masked3 = pcv.apply_mask(masked2, ab_cnt3, 'white', device, args.debug)
  
  # Identify objects
  device, id_objects3,obj_hierarchy3 = pcv.find_objects(masked2, ab_fill, device, args.debug)

  # Define ROI
  device, roi3, roi_hierarchy3= pcv.define_roi(masked2,'rectangle', device, None, 'default', args.debug,True, 500, 0,-450,-530)
 
  # Decide which objects to keep and combine with objects overlapping with black bars
  device,roi_objects3, hierarchy3, kept_mask3, obj_area1 = pcv.roi_objects(img,'cutto',roi3,roi_hierarchy3,id_objects3,obj_hierarchy3,device, args.debug)
  device, kept_mask4_1 = pcv.logical_or(ab_cnt3, kept_mask3, device, args.debug)
  device, kept_cnt = pcv.logical_or(ab_cnt3, kept_mask3, device, args.debug)
  device, kept_mask4 = pcv.fill(kept_mask4_1, kept_cnt, 200, device, args.debug)
  device, masked5 = pcv.apply_mask(masked2, kept_mask4, 'white', device, args.debug)
  device, id_objects4,obj_hierarchy4 = pcv.find_objects(masked5, kept_mask4, device, args.debug)
  device, roi4, roi_hierarchy4= pcv.define_roi(masked2,'rectangle', device, None, 'default', args.debug,False, 0, 0,0,0)
  device,roi_objects4, hierarchy4, kept_mask4, obj_area = pcv.roi_objects(img,'partial',roi4,roi_hierarchy4,id_objects4,obj_hierarchy4,device, args.debug)

  # Object combine kept objects
  device, obj, mask = pcv.object_composition(img, roi_objects4, hierarchy4, device, args.debug)
  
############## Analysis ################  
  
  # Find shape properties, output shape image (optional)
  device, shape_header,shape_data,shape_img = pcv.analyze_object(img, args.image, obj, mask, device,args.debug,args.outdir+'/'+filename)
   
  # Shape properties relative to user boundary line (optional)
  device, boundary_header,boundary_data, boundary_img1= pcv.analyze_bound(img, args.image,obj, mask, 950, device,args.debug,args.outdir+'/'+filename)
  
  # Tiller Tool Test
  device, tillering_header, tillering_data, tillering_img= pcv.tiller_count(img, args.image,obj, mask, 965, device,args.debug,args.outdir+'/'+filename)

  
  # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
  device, color_header,color_data,norm_slice= pcv.analyze_color(img, args.image, kept_mask4, 256, device, args.debug,'all','rgb','v',args.outdir+'/'+filename)
  
  # Output shape and color data
  pcv.print_results(args.image, shape_header, shape_data)
  pcv.print_results(args.image, color_header, color_data)
  pcv.print_results(args.image, boundary_header, boundary_data)
  pcv.print_results(args.image, tillering_header,tillering_data)
def process_sv_images(vis_img, nir_img, debug=None):
    """Process side-view images.

    Inputs:
    vis_img = An RGB image.
    nir_img = An NIR grayscale image.
    debug   = None, print, or plot. Print = save to file, Plot = print to screen.

    :param vis_img: str
    :param nir_img: str
    :param debug: str
    :return:
    """
    # Read VIS image
    img, path, filename = pcv.readimage(vis_img)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)

    # Fill small objects
    # device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device, debug)

    # Fill small objects
    # device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_mblur, b_cnt, device, debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark', device, debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)

    # Fill small noise
    device, ab_fill1 = pcv.fill(ab, ab_cnt, 200, device, debug)

    # Dilate to join small objects with larger ones
    device, ab_cnt1 = pcv.dilate(ab_fill1, 3, 2, device, debug)
    device, ab_cnt2 = pcv.dilate(ab_fill1, 3, 2, device, debug)

    # Fill dilated image mask
    device, ab_cnt3 = pcv.fill(ab_cnt2, ab_cnt1, 150, device, debug)
    device, masked2 = pcv.apply_mask(masked, ab_cnt3, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, debug)
    device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, 'dark', device, debug)
    device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255, 'light', device, debug)

    device, masked2a_thresh_blur = pcv.median_blur(masked2a_thresh, 5, device, debug)
    device, masked2b_thresh_blur = pcv.median_blur(masked2b_thresh, 13, device, debug)

    device, ab_fill = pcv.logical_or(masked2a_thresh_blur, masked2b_thresh_blur, device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device, None, 'default', debug, True, 700,
                                                 0, -600, -300)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy,
                                                                           id_objects, obj_hierarchy, device,
                                                                           debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, debug)

    ############## VIS Analysis ################
    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(img, vis_img, obj, mask, device, debug)

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(img, vis_img, obj, mask, 384, device,
                                                                              debug)

    # Determine color properties: Histograms, Color Slices and
    # Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(img, vis_img, mask, 256, device, debug,
                                                                    None, 'v', 'img', 300)

    # Output shape and color data
    print('\t'.join(map(str, shape_header)) + '\n')
    print('\t'.join(map(str, shape_data)) + '\n')
    for row in shape_img:
        print('\t'.join(map(str, row)) + '\n')
    print('\t'.join(map(str, color_header)) + '\n')
    print('\t'.join(map(str, color_data)) + '\n')
    print('\t'.join(map(str, boundary_header)) + '\n')
    print('\t'.join(map(str, boundary_data)) + '\n')
    print('\t'.join(map(str, boundary_img1)) + '\n')
    for row in color_img:
        print('\t'.join(map(str, row)) + '\n')

    ############################# Use VIS image mask for NIR image#########################
    # Read NIR image
    nir, path1, filename1 = pcv.readimage(nir_img)
    nir2 = cv2.imread(nir_img, -1)

    # Flip mask
    device, f_mask = pcv.flip(mask, "vertical", device, debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.1154905775, 0.1154905775, device, debug)

    # Position and crop the mask
    device, newmask = pcv.crop_position_mask(nir, nmask, device, 30, 4, "top", "right", debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir, newmask, device, debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir, nir_objects, nir_hierarchy, device, debug)

    ####################################### Analysis #############################################
    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir2, filename1, nir_combinedmask, 256,
                                                                           device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir2, filename1, nir_combined, nir_combinedmask,
                                                                       device, debug)

    print('\t'.join(map(str, nhist_header)) + '\n')
    print('\t'.join(map(str, nhist_data)) + '\n')
    for row in nir_imgs:
        print('\t'.join(map(str, row)) + '\n')
    print('\t'.join(map(str, nshape_header)) + '\n')
    print('\t'.join(map(str, nshape_data)) + '\n')
    print('\t'.join(map(str, nir_shape)) + '\n')
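############## Usage sketch (not part of the original script) ################
# A minimal example of calling process_sv_images() defined above. The file
# paths are hypothetical placeholders; debug may be None, "print", or "plot".
if __name__ == "__main__":
    process_sv_images("plant_sv_vis.png", "plant_sv_nir.png", debug=None)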
Example no. 21
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)
    brass_mask = cv2.imread(args.roi)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, "s", device, args.debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 49, 255, "light", device, args.debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, "b", device, args.debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, "light", device, args.debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, "light", device, args.debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 150, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, "white", device, args.debug)

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, "v", device, args.debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, "light", device, args.debug)
    device, brass_inv = pcv.invert(brass_thresh, device, args.debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, "white", device, args.debug)

    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, "a", device, args.debug)
    device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, "dark", device, args.debug)
    device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, "light", device, args.debug)
    device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, args.debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, "white", device, args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, "a", device, args.debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, "b", device, args.debug)

    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, "dark", device, args.debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, "light", device, args.debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)

    # Fill small objects
    device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 250, device, args.debug)

    # Median Filter
    # device, soil_mblur = pcv.median_blur(soil_fill, 5, device, args.debug)
    # device, soil_cnt = pcv.median_blur(soil_fill, 5, device, args.debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, "white", device, args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, args.debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(
        img, "rectangle", device, None, "default", args.debug, True, 600, 450, -600, -350
    )

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, "partial", roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug
    )

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)
    ############## VIS Analysis ################

    outfile = False
    if args.writeimg:
        outfile = args.outdir + "/" + filename

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, obj, mask, device, args.debug, outfile
    )

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        img, args.image, mask, 256, device, args.debug, None, "v", "img", 300, outfile
    )

    # Output shape and color data

    result = open(args.result, "a")
    result.write("\t".join(map(str, shape_header)))
    result.write("\n")
    result.write("\t".join(map(str, shape_data)))
    result.write("\n")
    for row in shape_img:
        result.write("\t".join(map(str, row)))
        result.write("\n")
    result.write("\t".join(map(str, color_header)))
    result.write("\n")
    result.write("\t".join(map(str, color_data)))
    result.write("\n")
    for row in color_img:
        result.write("\t".join(map(str, row)))
        result.write("\n")
    result.close()
def process_sv_images(session, url, vis_id, nir_id, traits, debug=None):
    """Process side-view images from Clowder.

    Inputs:
    session = requests session object
    url     = Clowder URL
    vis_id  = The Clowder ID of an RGB image
    nir_id  = The Clowder ID of an NIR grayscale image
    traits  = traits table (dictionary)
    debug   = None, print, or plot. Print = save to file, Plot = print to screen

    :param session: requests session object
    :param url: str
    :param vis_id: str
    :param nir_id: str
    :param traits: dict
    :param debug: str
    :return traits: dict
    """
    # Read VIS image from Clowder
    vis_r = session.get(posixpath.join(url, "api/files", vis_id), stream=True)
    img_array = np.asarray(bytearray(vis_r.content), dtype="uint8")
    img = cv2.imdecode(img_array, -1)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)

    # Fill small objects
    # device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device, debug)

    # Fill small objects
    # device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_mblur, b_cnt, device, debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark', device, debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)

    # Fill small noise
    device, ab_fill1 = pcv.fill(ab, ab_cnt, 200, device, debug)

    # Dilate to join small objects with larger ones
    device, ab_cnt1 = pcv.dilate(ab_fill1, 3, 2, device, debug)
    device, ab_cnt2 = pcv.dilate(ab_fill1, 3, 2, device, debug)

    # Fill dilated image mask
    device, ab_cnt3 = pcv.fill(ab_cnt2, ab_cnt1, 150, device, debug)
    device, masked2 = pcv.apply_mask(masked, ab_cnt3, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, debug)
    device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, 'dark', device, debug)
    device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255, 'light', device, debug)

    device, masked2a_thresh_blur = pcv.median_blur(masked2a_thresh, 5, device, debug)
    device, masked2b_thresh_blur = pcv.median_blur(masked2b_thresh, 13, device, debug)

    device, ab_fill = pcv.logical_or(masked2a_thresh_blur, masked2b_thresh_blur, device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device, None, 'default', debug, True, 700,
                                                 0, -600, -300)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy,
                                                                           id_objects, obj_hierarchy, device,
                                                                           debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, debug)

    ############## VIS Analysis ################
    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(img, vis_id, obj, mask, device, debug)

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(img, vis_id, obj, mask, 384, device,
                                                                              debug)

    # Determine color properties: Histograms, Color Slices and
    # Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(img, vis_id, mask, 256, device, debug,
                                                                    None, 'v', 'img', 300)

    # Output shape and color data
    vis_traits = {}
    for i in range(1, len(shape_header)):
        vis_traits[shape_header[i]] = shape_data[i]
    for i in range(1, len(boundary_header)):
        vis_traits[boundary_header[i]] = boundary_data[i]
    for i in range(2, len(color_header)):
        vis_traits[color_header[i]] = serialize_color_data(color_data[i])
    #print(vis_traits)
    add_plantcv_metadata(session, url, vis_id, vis_traits)

    ############################# Use VIS image mask for NIR image#########################
    # Read NIR image from Clowder
    nir_r = session.get(posixpath.join(url, "api/files", nir_id), stream=True)
    nir_array = np.asarray(bytearray(nir_r.content), dtype="uint8")
    nir = cv2.imdecode(nir_array, -1)
    nir_rgb = cv2.cvtColor(nir, cv2.COLOR_GRAY2BGR)

    # Flip mask
    device, f_mask = pcv.flip(mask, "vertical", device, debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.1154905775, 0.1154905775, device, debug)

    # Position and crop the mask
    device, newmask = pcv.crop_position_mask(nir_rgb, nmask, device, 30, 4, "top", "right", debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir_rgb, newmask, device, debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir_rgb, nir_objects, nir_hierarchy, device, debug)

    ####################################### Analysis #############################################
    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir, nir_id, nir_combinedmask, 256,
                                                                           device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir, nir_id, nir_combined, nir_combinedmask,
                                                                       device, debug)

    nir_traits = {}
    for i in range(1, len(nshape_header)):
        nir_traits[nshape_header[i]] = nshape_data[i]
    for i in range(2, len(nhist_header)):
        nir_traits[nhist_header[i]] = serialize_color_data(nhist_data[i])
    #print(nir_traits)
    add_plantcv_metadata(session, url, nir_id, nir_traits)

    # Add data to traits table
    traits['sv_area'].append(vis_traits['area'])
    traits['hull_area'].append(vis_traits['hull-area'])
    traits['solidity'].append(vis_traits['solidity'])
    traits['height'].append(vis_traits['height_above_bound'])
    traits['perimeter'].append(vis_traits['perimeter'])

    return traits
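############## Usage sketch (not part of the original script) ################
# process_sv_images() above appends side-view traits into a shared traits table.
# Below is an assumed minimal initialization of that table plus a call against a
# hypothetical Clowder deployment; the URL and file IDs are placeholders, and the
# helpers serialize_color_data() and add_plantcv_metadata() are assumed to be
# defined elsewhere in the full script.
import requests

traits = {'sv_area': [], 'hull_area': [], 'solidity': [], 'height': [],
          'perimeter': [], 'tv_area': None}
session = requests.Session()
# session.auth = ("user", "password")  # add authentication if the Clowder instance requires it
traits = process_sv_images(session, "https://clowder.example.org", "VIS_FILE_ID",
                           "NIR_FILE_ID", traits, debug=None)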
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device,
                                            args.debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 0, device, args.debug)
    device, s_cnt = pcv.median_blur(s_thresh, 0, device, args.debug)

    # Fill small objects
    #device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device,
                                            args.debug)
    device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device,
                                         args.debug)

    # Fill small objects
    #device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_mblur, b_cnt, device, args.debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, args.debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark',
                                                  device, args.debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light',
                                                  device, args.debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device,
                                args.debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device,
                                    args.debug)

    # Fill small noise
    device, ab_fill1 = pcv.fill(ab, ab_cnt, 2, device, args.debug)

    # Dilate to join small objects with larger ones
    device, ab_cnt1 = pcv.dilate(ab_fill1, 3, 2, device, args.debug)
    device, ab_cnt2 = pcv.dilate(ab_fill1, 3, 2, device, args.debug)

    # Fill dilated image mask
    device, ab_cnt3 = pcv.fill(ab_cnt2, ab_cnt1, 150, device, args.debug)
    device, masked2 = pcv.apply_mask(masked, ab_cnt3, 'white', device,
                                     args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, args.debug)
    device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, args.debug)

    # Threshold the green-magenta and blue images
    device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, 'dark',
                                                   device, args.debug)
    device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255,
                                                   'light', device, args.debug)
    device, ab_fill = pcv.logical_or(masked2a_thresh, masked2b_thresh, device,
                                     args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(
        masked2, ab_fill, device, args.debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device,
                                                 None, 'default', args.debug,
                                                 True, 500, 0, -600, -885)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device,
        args.debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3,
                                               device, args.debug)

    ############## VIS Analysis ################

    outfile = False
    if args.writeimg:
        outfile = args.outdir + "/" + filename

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, obj, mask, device, args.debug, outfile)

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(
        img, args.image, obj, mask, 830, device, args.debug, outfile)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        img, args.image, mask, 256, device, args.debug, None, 'v', 'img', 300,
        outfile)

    # Output shape and color data

    result = open(args.result, "a")
    result.write('\t'.join(map(str, shape_header)))
    result.write("\n")
    result.write('\t'.join(map(str, shape_data)))
    result.write("\n")
    for row in shape_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.write('\t'.join(map(str, color_header)))
    result.write("\n")
    result.write('\t'.join(map(str, color_data)))
    result.write("\n")
    result.write('\t'.join(map(str, boundary_header)))
    result.write("\n")
    result.write('\t'.join(map(str, boundary_data)))
    result.write("\n")
    result.write('\t'.join(map(str, boundary_img1)))
    result.write("\n")
    for row in color_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.close()

    ############################# Use VIS image mask for NIR image#########################
    # Find matching NIR image
    device, nirpath = pcv.get_nir(path, filename, device, args.debug)
    nir, path1, filename1 = pcv.readimage(nirpath)
    nir2 = cv2.imread(nirpath, -1)

    # Flip mask
    device, f_mask = pcv.flip(mask, "vertical", device, args.debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.1304, 0.1304, device, args.debug)

    # Position and crop the mask
    device, newmask = pcv.crop_position_mask(nir, nmask, device, 65, 0, "top",
                                             "left", args.debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(
        nir, newmask, device, args.debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(
        nir, nir_objects, nir_hierarchy, device, args.debug)

    ####################################### Analysis #############################################
    outfile1 = False
    if args.writeimg:
        outfile1 = args.outdir + "/" + filename1

    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(
        nir2, filename1, nir_combinedmask, 256, device, False, args.debug,
        outfile1)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(
        nir2, filename1, nir_combined, nir_combinedmask, device, args.debug,
        outfile1)

    coresult = open(args.coresult, "a")
    coresult.write('\t'.join(map(str, nhist_header)))
    coresult.write("\n")
    coresult.write('\t'.join(map(str, nhist_data)))
    coresult.write("\n")
    for row in nir_imgs:
        coresult.write('\t'.join(map(str, row)))
        coresult.write("\n")

    coresult.write('\t'.join(map(str, nshape_header)))
    coresult.write("\n")
    coresult.write('\t'.join(map(str, nshape_data)))
    coresult.write("\n")
    coresult.write('\t'.join(map(str, nir_shape)))
    coresult.write("\n")
    coresult.close()
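############## Assumed helper (not shown in this excerpt) ################
# The main() pipelines above call an options() helper that parses command-line
# arguments, but its definition is not included here. The sketch below is a
# plausible minimal argparse version covering the attributes these scripts use
# (args.image, args.outdir, args.result, args.coresult, args.writeimg,
# args.debug); the exact flag names and defaults in the original may differ,
# and some examples also read a separate ROI/brass-mask image (args.roi).
import argparse

def options():
    parser = argparse.ArgumentParser(description="PlantCV VIS/NIR image processing pipeline.")
    parser.add_argument("-i", "--image", help="Input VIS image.", required=True)
    parser.add_argument("-o", "--outdir", help="Output directory for debug/analysis images.", required=False)
    parser.add_argument("-r", "--result", help="Result file for VIS trait data.", required=True)
    parser.add_argument("-r2", "--coresult", help="Result file for NIR (co-processed) data.", required=False)
    parser.add_argument("-w", "--writeimg", help="Write output images.", action="store_true")
    parser.add_argument("-D", "--debug", help="Debug mode: None, 'print', or 'plot'.", default=None)
    return parser.parse_args()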
def process_tv_images(session, url, vis_id, nir_id, traits, debug=None):
    """Process top-view images.

    Inputs:
    session = requests session object
    url     = Clowder URL
    vis_id  = The Clowder ID of an RGB image
    nir_id  = The Clowder ID of an NIR grayscale image
    traits  = traits table (dictionary)
    debug   = None, print, or plot. Print = save to file, Plot = print to screen.

    :param session: requests session object
    :param url: str
    :param vis_id: str
    :param nir_id: str
    :param traits: dict
    :param debug: str
    :return traits: dict
    """
    # Read VIS image from Clowder
    vis_r = session.get(posixpath.join(url, "api/files", vis_id), stream=True)
    img_array = np.asarray(bytearray(vis_r.content), dtype="uint8")
    img = cv2.imdecode(img_array, -1)

    # Read the VIS top-view image mask for zoom = 1 from Clowder
    mask_r = session.get(posixpath.join(url, "api/files/57451b28e4b0efbe2dc3d4d5"), stream=True)
    mask_array = np.asarray(bytearray(mask_r.content), dtype="uint8")
    brass_mask = cv2.imdecode(mask_array, -1)

    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 75, 255, 'light', device, debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, debug)

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, debug)
    device, brass_inv = pcv.invert(brass_thresh, device, debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device, debug)

    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, debug)
    device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, 'dark', device, debug)
    device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, 'light', device, debug)
    device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, 'dark', device, debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, 'light', device, debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)

    # Fill small objects
    device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 300, device, debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(img, 'rectangle', device, None, 'default', debug, True, 600, 450, -600,
                                                 -350)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi1, roi_hierarchy,
                                                                           id_objects, obj_hierarchy, device, debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, debug)

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(img, vis_id, obj, mask, device, debug)

    # Determine color properties
    device, color_header, color_data, color_img = pcv.analyze_color(img, vis_id, mask, 256, device, debug, None,
                                                                    'v', 'img', 300)

    # Output shape and color data
    vis_traits = {}
    for i in range(1, len(shape_header)):
        vis_traits[shape_header[i]] = shape_data[i]
    for i in range(2, len(color_header)):
        vis_traits[color_header[i]] = serialize_color_data(color_data[i])
    #print(vis_traits)
    add_plantcv_metadata(session, url, vis_id, vis_traits)

    ############################# Use VIS image mask for NIR image#########################
    # Read NIR image from Clowder
    nir_r = session.get(posixpath.join(url, "api/files", nir_id), stream=True)
    nir_array = np.asarray(bytearray(nir_r.content), dtype="uint8")
    nir = cv2.imdecode(nir_array, -1)
    nir_rgb = cv2.cvtColor(nir, cv2.COLOR_GRAY2BGR)

    # Flip mask
    device, f_mask = pcv.flip(mask, "horizontal", device, debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.116148, 0.116148, device, debug)

    # Position and crop the mask
    device, newmask = pcv.crop_position_mask(nir_rgb, nmask, device, 15, 5, "top", "right", debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir_rgb, newmask, device, debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir_rgb, nir_objects, nir_hierarchy, device, debug)

    ####################################### Analysis #############################################

    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir, nir_id, nir_combinedmask, 256,
                                                                           device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir, nir_id, nir_combined, nir_combinedmask,
                                                                       device, debug)

    nir_traits = {}
    for i in range(1, len(nshape_header)):
        nir_traits[nshape_header[i]] = nshape_data[i]
    for i in range(2, len(nhist_header)):
        nir_traits[nhist_header[i]] = serialize_color_data(nhist_data[i])
    #print(nir_traits)
    add_plantcv_metadata(session, url, nir_id, nir_traits)

    # Add data to traits table
    traits['tv_area'] = vis_traits['area']

    return traits
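############## Assumed helper (not shown in this excerpt) ################
# Both Clowder functions above call serialize_color_data() on per-channel
# histogram values before posting them as metadata. Its definition is not
# included here; a plausible minimal version, assuming it simply flattens the
# list of bin counts into a single comma-separated string, is:
def serialize_color_data(list_of_values):
    # Join numeric bin values into one flat string field
    # (assumption; the original helper may format the data differently).
    return ', '.join(map(str, list_of_values))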
Example no. 25
def get_height(img):
    # print("step one")
    """
    Step one: Background/foreground subtraction
    """
    # Get options
    args = options()
    debug = args.debug
    # Read image
    filename = args.result
    # img, path, filename = pcv.readimage(args.image)
    # Pipeline step
    device = 0
    device, resize_img = pcv.resize(img, 0.4, 0.4, device, debug)
    # Classify the pixels as plant or background
    device, mask_img = pcv.naive_bayes_classifier(
        resize_img,
        pdf_file=
        "./../ML_background/Trained_models/model_6/naive_bayes_pdfs.txt",
        device=0,
        debug='print')

    # Median Filter
    device, blur = pcv.median_blur(mask_img.get('plant'), 5, device, debug)
    # print("step two")
    """
    Step two: Identify, extract, and filter the objects
    """

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(resize_img,
                                                         blur,
                                                         device,
                                                         debug=None)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(resize_img,
                                                 'rectangle',
                                                 device,
                                                 roi=True,
                                                 roi_input='default',
                                                 debug=None,
                                                 adjust=True,
                                                 x_adj=50,
                                                 y_adj=10,
                                                 w_adj=0,
                                                 h_adj=0)
    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        resize_img, 'cutto', roi1, roi_hierarchy, id_objects, obj_hierarchy,
        device, debug)
    # print(roi_objects[0])
    # cv2.drawContours(resize_img, [roi_objects[0]], 0, (0, 255, 0), 3)
    # cv2.imshow("img",resize_img)
    # cv2.waitKey(0)
    area_oud = 0
    i = 0
    index = 0
    object_list = []
    # a = np.array([[hierarchy3[0][0]]])
    hierarchy = []
    for cnt in roi_objects:
        area = cv2.contourArea(cnt)
        M = cv2.moments(cnt)
        if M["m10"] or M["m01"]:
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            # check if the location of the contour is between the constrains
            # cv2.circle(resize_img, (cX, cY), 5, (255, 0, 255), thickness=1, lineType=1, shift=0)
            # check if the size of the contour is bigger than 250
            if area > 200:
                obj = np.vstack(roi_objects)
                object_list.append(roi_objects[i])
                hierarchy.append(hierarchy3[0][i])
                # print(i)
        i = i + 1
    a = np.array([hierarchy])
    # a = [[[-1,-1,-1,-1][-1,-1,-1,-1][-1,-1,-1,-1]]]
    # Object combine kept objects
    # device, obj, mask_2 = pcv.object_composition(resize_img, object_list, a, device, debug)

    mask_contours = np.zeros(resize_img.shape, np.uint8)
    cv2.drawContours(mask_contours, object_list, -1, (255, 255, 255), -1)
    gray_image = cv2.cvtColor(mask_contours, cv2.COLOR_BGR2GRAY)
    ret, mask_contours = cv2.threshold(gray_image, 127, 255, cv2.THRESH_BINARY)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(resize_img,
                                                         mask_contours,
                                                         device,
                                                         debug=None)
    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        resize_img,
        'cutto',
        roi1,
        roi_hierarchy,
        id_objects,
        obj_hierarchy,
        device,
        debug=None)
    # Object combine kept objects
    device, obj, mask = pcv.object_composition(resize_img,
                                               roi_objects,
                                               hierarchy3,
                                               device,
                                               debug=None)
    ############### Analysis ################
    try:
        if len(obj) > 0:
            # Find shape properties, output shape image (optional)
            device, shape_header, shape_data, shape_img = pcv.analyze_object(
                resize_img, args.image, obj, mask, device, debug)
            # cv2.waitKey(10000)
            return shape_data[6]
        else:
            return -1
    except Exception:
        return -1
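############## Usage sketch (not part of the original script) ################
# get_height() above expects an already-loaded BGR image and returns one entry
# of the PlantCV shape record, or -1 when no plant object survives filtering.
# The file name is a hypothetical placeholder; note that get_height() also calls
# options() internally, so command-line arguments must still be supplied.
import cv2

img = cv2.imread("top_view_plant.png")
height_metric = get_height(img)
if height_metric == -1:
    print("No plant object detected")
else:
    print("Shape metric:", height_metric)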
def process_sv_images_core(vis_id, vis_img, nir_id, nir_rgb, nir_cv2, traits, debug=None):
    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(vis_img, 's', device, debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)

    # Fill small objects
    # device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(vis_img, 'b', device, debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device, debug)

    # Fill small objects
    # device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_mblur, b_cnt, device, debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(vis_img, bs, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark', device, debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)

    # Fill small noise
    device, ab_fill1 = pcv.fill(ab, ab_cnt, 200, device, debug)

    # Dilate to join small objects with larger ones
    device, ab_cnt1 = pcv.dilate(ab_fill1, 3, 2, device, debug)
    device, ab_cnt2 = pcv.dilate(ab_fill1, 3, 2, device, debug)

    # Fill dilated image mask
    device, ab_cnt3 = pcv.fill(ab_cnt2, ab_cnt1, 150, device, debug)
    device, masked2 = pcv.apply_mask(masked, ab_cnt3, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, debug)
    device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, 'dark', device, debug)
    device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255, 'light', device, debug)

    device, masked2a_thresh_blur = pcv.median_blur(masked2a_thresh, 5, device, debug)
    device, masked2b_thresh_blur = pcv.median_blur(masked2b_thresh, 13, device, debug)

    device, ab_fill = pcv.logical_or(masked2a_thresh_blur, masked2b_thresh_blur, device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device, None, 'default', debug, True, 700,
                                                 0, -600, -300)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(vis_img, 'partial', roi1, roi_hierarchy,
                                                                           id_objects, obj_hierarchy, device,
                                                                           debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(vis_img, roi_objects, hierarchy3, device, debug)

    ############## VIS Analysis ################
    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(vis_img, vis_id, obj, mask, device, debug)

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(vis_img, vis_id, obj, mask, 384, device,
                                                                              debug)

    # Determine color properties: Histograms, Color Slices and
    # Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(vis_img, vis_id, mask, 256, device, debug,
                                                                    None, 'v', 'img', 300)

    # Output shape and color data
    vis_traits = {}
    for i in range(1, len(shape_header)):
        vis_traits[shape_header[i]] = shape_data[i]
    for i in range(1, len(boundary_header)):
        vis_traits[boundary_header[i]] = boundary_data[i]
    for i in range(2, len(color_header)):
        vis_traits[color_header[i]] = serialize_color_data(color_data[i])


    ############################# Use VIS image mask for NIR image#########################
    # Flip mask
    device, f_mask = pcv.flip(mask, "vertical", device, debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.1154905775, 0.1154905775, device, debug)

    # Position and crop the mask
    device, newmask = pcv.crop_position_mask(nir_rgb, nmask, device, 30, 4, "top", "right", debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir_rgb, newmask, device, debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir_rgb, nir_objects, nir_hierarchy, device, debug)

    ####################################### Analysis #############################################
    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir_cv2, nir_id, nir_combinedmask, 256,
                                                                           device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir_cv2, nir_id, nir_combined, nir_combinedmask,
                                                                       device, debug)

    nir_traits = {}
    for i in range(1, len(nshape_header)):
        nir_traits[nshape_header[i]] = nshape_data[i]
    for i in range(2, len(nhist_header)):
        nir_traits[nhist_header[i]] = serialize_color_data(nhist_data[i])

    # Add data to traits table
    traits['sv_area'].append(vis_traits['area'])
    traits['hull_area'].append(vis_traits['hull-area'])
    traits['solidity'].append(vis_traits['solidity'])
    traits['height'].append(vis_traits['height_above_bound'])
    traits['perimeter'].append(vis_traits['perimeter'])

    return [vis_traits, nir_traits]
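############## Usage sketch (not part of the original script) ################
# Unlike the Clowder variant shown earlier, process_sv_images_core() takes
# already-decoded images, leaving all I/O to the caller. File names are
# hypothetical placeholders, and serialize_color_data() is assumed to be
# defined elsewhere in the full script.
import cv2

vis_img = cv2.imread("plant_sv_vis.png")
nir_cv2 = cv2.imread("plant_sv_nir.png", -1)            # raw grayscale NIR frame
nir_rgb = cv2.cvtColor(nir_cv2, cv2.COLOR_GRAY2BGR)     # 3-channel copy for the contour steps
traits = {'sv_area': [], 'hull_area': [], 'solidity': [], 'height': [], 'perimeter': []}
vis_traits, nir_traits = process_sv_images_core("vis-id", vis_img, "nir-id",
                                                nir_rgb, nir_cv2, traits, debug=None)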
Example no. 27
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)
    brass_mask = cv2.imread(args.roi)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 49, 255, 'light', device,
                                            args.debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device,
                                            args.debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device,
                                         args.debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, args.debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light',
                                                device, args.debug)
    device, brass_inv = pcv.invert(brass_thresh, device, args.debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device,
                                          args.debug)

    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, args.debug)
    device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, 'dark',
                                             device, args.debug)
    device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, 'light',
                                             device, args.debug)
    device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, args.debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white',
                                         device, args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, args.debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, args.debug)

    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, 'dark',
                                                device, args.debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, 'light',
                                                device, args.debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device,
                                     args.debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device,
                                         args.debug)

    # Fill small objects
    device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 150, device, args.debug)

    # Median Filter
    #device, soil_mblur = pcv.median_blur(soil_fill, 5, device, args.debug)
    #device, soil_cnt = pcv.median_blur(soil_fill, 5, device, args.debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device,
                                     args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(
        masked2, soil_cnt, device, args.debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(img, 'rectangle', device,
                                                 None, 'default', args.debug,
                                                 True, 600, 450, -600, -350)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device,
        args.debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3,
                                               device, args.debug)

    ############## VIS Analysis ################

    outfile = False
    if args.writeimg:
        outfile = args.outdir + "/" + filename

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, obj, mask, device, args.debug, outfile)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        img, args.image, mask, 256, device, args.debug, None, 'v', 'img', 300,
        outfile)

    # Output shape and color data

    result = open(args.result, "a")
    result.write('\t'.join(map(str, shape_header)))
    result.write("\n")
    result.write('\t'.join(map(str, shape_data)))
    result.write("\n")
    for row in shape_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.write('\t'.join(map(str, color_header)))
    result.write("\n")
    result.write('\t'.join(map(str, color_data)))
    result.write("\n")
    for row in color_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.close()

    ############################# Use VIS image mask for NIR image#########################
    # Find matching NIR image
    device, nirpath = pcv.get_nir(path, filename, device, args.debug)
    nir, path1, filename1 = pcv.readimage(nirpath)
    nir2 = cv2.imread(nirpath, -1)

    # Flip mask
    device, f_mask = pcv.flip(mask, "horizontal", device, args.debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.116148, 0.116148, device, args.debug)

    # Position and crop the mask
    device, newmask = pcv.crop_position_mask(nir, nmask, device, 15, 5, "top",
                                             "right", args.debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(
        nir, newmask, device, args.debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(
        nir, nir_objects, nir_hierarchy, device, args.debug)

    ####################################### Analysis #############################################
    outfile1 = False
    if args.writeimg:
        outfile1 = args.outdir + "/" + filename1

    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(
        nir2, filename1, nir_combinedmask, 256, device, False, args.debug,
        outfile1)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(
        nir2, filename1, nir_combined, nir_combinedmask, device, args.debug,
        outfile1)

    coresult = open(args.coresult, "a")
    coresult.write('\t'.join(map(str, nhist_header)))
    coresult.write("\n")
    coresult.write('\t'.join(map(str, nhist_data)))
    coresult.write("\n")
    for row in nir_imgs:
        coresult.write('\t'.join(map(str, row)))
        coresult.write("\n")

    coresult.write('\t'.join(map(str, nshape_header)))
    coresult.write("\n")
    coresult.write('\t'.join(map(str, nshape_data)))
    coresult.write("\n")
    coresult.write('\t'.join(map(str, nir_shape)))
    coresult.write("\n")
    coresult.close()
def process_tv_images_core(vis_id, vis_img, nir_id, nir_rgb, nir_cv2, brass_mask, traits, debug=None):
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(vis_img, 's', device, debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 75, 255, 'light', device, debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(vis_img, 'b', device, debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(vis_img, bs, 'white', device, debug)

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, debug)
    device, brass_inv = pcv.invert(brass_thresh, device, debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device, debug)

    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, debug)
    device, soil_car1 = pcv.binary_threshold(masked_a, 128, 255, 'dark', device, debug)
    device, soil_car2 = pcv.binary_threshold(masked_a, 128, 255, 'light', device, debug)
    device, soil_car = pcv.logical_or(soil_car1, soil_car2, device, debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 124, 255, 'dark', device, debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 148, 255, 'light', device, debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, debug)

    # Fill small objects
    device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 300, device, debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(vis_img, 'rectangle', device, None, 'default', debug, True, 600, 450, -600,
                                                 -350)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(vis_img, 'partial', roi1, roi_hierarchy,
                                                                           id_objects, obj_hierarchy, device, debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(vis_img, roi_objects, hierarchy3, device, debug)

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(vis_img, vis_id, obj, mask, device, debug)

    # Determine color properties
    device, color_header, color_data, color_img = pcv.analyze_color(vis_img, vis_id, mask, 256, device, debug, None,
                                                                    'v', 'img', 300)

    # Output shape and color data
    vis_traits = {}
    for i in range(1, len(shape_header)):
        vis_traits[shape_header[i]] = shape_data[i]
    for i in range(2, len(color_header)):
        vis_traits[color_header[i]] = serialize_color_data(color_data[i])
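    # An equivalent, more compact form of the two loops above (illustrative only):
    #   vis_traits = dict(zip(shape_header[1:], shape_data[1:]))
    #   vis_traits.update({k: serialize_color_data(v)
    #                      for k, v in zip(color_header[2:], color_data[2:])})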

    ############################# Use VIS image mask for NIR image #############################


    # Flip mask
    device, f_mask = pcv.flip(mask, "horizontal", device, debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.116148, 0.116148, device, debug)
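    # (The 0.116148 scale factor presumably maps the VIS-resolution mask down to the
    #  NIR camera resolution; it is specific to this imaging setup.)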

    # Position and crop mask
    device, newmask = pcv.crop_position_mask(nir_rgb, nmask, device, 15, 5, "top", "right", debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir_rgb, newmask, device, debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir_rgb, nir_objects, nir_hierarchy, device, debug)

    ####################################### Analysis #############################################

    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(nir_cv2, nir_id, nir_combinedmask, 256,
                                                                           device, False, debug)
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(nir_cv2, nir_id, nir_combined, nir_combinedmask,
                                                                       device, debug)

    nir_traits = {}
    for i in range(1, len(nshape_header)):
        nir_traits[nshape_header[i]] = nshape_data[i]
    for i in range(2, len(nhist_header)):
        nir_traits[nhist_header[i]] = serialize_color_data(nhist_data[i])

    # Add data to traits table
    traits['tv_area'] = vis_traits['area']

    return [vis_traits, nir_traits]
def main():
    # Initialize device
    device = 0

    # Parse command-line options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(filename=args.image, debug=args.debug)

    # Convert RGB to LAB and extract the Blue-Yellow channel
    device, blue_channel = pcv.rgb2gray_lab(img=img,
                                            channel="b",
                                            device=device,
                                            debug=args.debug)

    # Threshold the blue image using the triangle autothreshold method
    device, blue_tri = pcv.triangle_auto_threshold(device=device,
                                                   img=blue_channel,
                                                   maxvalue=255,
                                                   object_type="light",
                                                   xstep=1,
                                                   debug=args.debug)

    # Extract core plant region from the image to preserve delicate plant features during filtering
    device += 1
    plant_region = blue_tri[0:1750, 600:2080]
    if args.debug is not None:
        pcv.print_image(filename=str(device) + "_extract_plant_region.png",
                        img=plant_region)
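    # NumPy slicing is [rows, cols], so blue_tri[0:1750, 600:2080] keeps rows 0-1749
    # (the y range) and columns 600-2079 (the x range) of the thresholded image.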

    # Use a Gaussian blur to disrupt the strong edge features in the cabinet
    device, blur_gaussian = pcv.gaussian_blur(device=device,
                                              img=blue_tri,
                                              ksize=(3, 3),
                                              sigmax=0,
                                              sigmay=None,
                                              debug=args.debug)

    # Threshold the blurred image to remove features that were blurred
    device, blur_thresholded = pcv.binary_threshold(img=blur_gaussian,
                                                    threshold=250,
                                                    maxValue=255,
                                                    object_type="light",
                                                    device=device,
                                                    debug=args.debug)

    # Add the plant region back in to the filtered image
    device += 1
    blur_thresholded[0:1750, 600:2080] = plant_region
    if args.debug is not None:
        pcv.print_image(filename=str(device) + "_replace_plant_region.png",
                        img=blur_thresholded)

    # Fill small noise
    device, blue_fill_50 = pcv.fill(img=np.copy(blur_thresholded),
                                    mask=np.copy(blur_thresholded),
                                    size=50,
                                    device=device,
                                    debug=args.debug)

    # Identify objects
    device, contours, contour_hierarchy = pcv.find_objects(img=img,
                                                           mask=blue_fill_50,
                                                           device=device,
                                                           debug=args.debug)

    # Define ROI
    device, roi, roi_hierarchy = pcv.define_roi(img=img,
                                                shape="rectangle",
                                                device=device,
                                                roi=None,
                                                roi_input="default",
                                                debug=args.debug,
                                                adjust=True,
                                                x_adj=565,
                                                y_adj=0,
                                                w_adj=-490,
                                                h_adj=-250)

    # Decide which objects to keep
    device, roi_contours, roi_contour_hierarchy, _, _ = pcv.roi_objects(
        img=img,
        roi_type="partial",
        roi_contour=roi,
        roi_hierarchy=roi_hierarchy,
        object_contour=contours,
        obj_hierarchy=contour_hierarchy,
        device=device,
        debug=args.debug)

    # If there are no contours left, we cannot measure anything
    if len(roi_contours) > 0:
        # Object combine kept objects
        device, plant_contour, plant_mask = pcv.object_composition(
            img=img,
            contours=roi_contours,
            hierarchy=roi_contour_hierarchy,
            device=device,
            debug=args.debug)

        outfile = False
        if args.writeimg:
            outfile = args.outdir + "/" + filename

        # Find shape properties, output shape image (optional)
        device, shape_header, shape_data, shape_img = pcv.analyze_object(
            img=img,
            imgname=args.image,
            obj=plant_contour,
            mask=plant_mask,
            device=device,
            debug=args.debug,
            filename=outfile)

        # Shape properties relative to user boundary line (optional)
        device, boundary_header, boundary_data, boundary_img = pcv.analyze_bound(
            img=img,
            imgname=args.image,
            obj=plant_contour,
            mask=plant_mask,
            line_position=440,
            device=device,
            debug=args.debug,
            filename=outfile)

        # Determine color properties: Histograms, Color Slices and Pseudocolored Images,
        # output color analyzed images (optional)
        device, color_header, color_data, color_img = pcv.analyze_color(
            img=img,
            imgname=args.image,
            mask=plant_mask,
            bins=256,
            device=device,
            debug=args.debug,
            hist_plot_type=None,
            pseudo_channel="v",
            pseudo_bkg="img",
            resolution=300,
            filename=outfile)

        # Output shape and color data
        result = open(args.result, "a")
        result.write('\t'.join(map(str, shape_header)) + "\n")
        result.write('\t'.join(map(str, shape_data)) + "\n")
        for row in shape_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.write('\t'.join(map(str, color_header)) + "\n")
        result.write('\t'.join(map(str, color_data)) + "\n")
        result.write('\t'.join(map(str, boundary_header)) + "\n")
        result.write('\t'.join(map(str, boundary_data)) + "\n")
        result.write('\t'.join(map(str, boundary_img)) + "\n")
        for row in color_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.close()
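        # The result file thus alternates tab-delimited header and data rows for the
        # shape, color, and boundary analyses, followed by the analysis-image records.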

        # Find matching NIR image
        device, nirpath = pcv.get_nir(path=path,
                                      filename=filename,
                                      device=device,
                                      debug=args.debug)
        nir_rgb, nir_path, nir_filename = pcv.readimage(nirpath)
        nir_img = cv2.imread(nirpath, 0)

        # Make mask glovelike in proportions via dilation
        device, d_mask = pcv.dilate(plant_mask,
                                    kernel=1,
                                    i=0,
                                    device=device,
                                    debug=args.debug)

        # Resize mask
        prop2, prop1 = conv_ratio()
        device, nmask = pcv.resize(img=d_mask,
                                   resize_x=prop1,
                                   resize_y=prop2,
                                   device=device,
                                   debug=args.debug)

        # Convert the resized mask to a binary mask
        device, bmask = pcv.binary_threshold(img=nmask,
                                             threshold=0,
                                             maxValue=255,
                                             object_type="light",
                                             device=device,
                                             debug=args.debug)
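        # With threshold=0 and object_type="light", any nonzero pixel becomes 255,
        # re-binarizing the mask after the interpolation introduced by resizing.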

        device, crop_img = crop_sides_equally(mask=bmask,
                                              nir=nir_img,
                                              device=device,
                                              debug=args.debug)

        # Position and crop mask
        device, newmask = pcv.crop_position_mask(img=nir_img,
                                                 mask=crop_img,
                                                 device=device,
                                                 x=34,
                                                 y=9,
                                                 v_pos="top",
                                                 h_pos="right",
                                                 debug=args.debug)

        # Identify objects
        device, nir_objects, nir_hierarchy = pcv.find_objects(img=nir_rgb,
                                                              mask=newmask,
                                                              device=device,
                                                              debug=args.debug)

        # Object combine kept objects
        device, nir_combined, nir_combinedmask = pcv.object_composition(
            img=nir_rgb,
            contours=nir_objects,
            hierarchy=nir_hierarchy,
            device=device,
            debug=args.debug)

        if args.writeimg:
            outfile = args.outdir + "/" + nir_filename

        # Analyze NIR signal data
        device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(
            img=nir_img,
            rgbimg=nir_rgb,
            mask=nir_combinedmask,
            bins=256,
            device=device,
            histplot=False,
            debug=args.debug,
            filename=outfile)

        # Analyze the shape of the plant contour from the NIR image
        device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(
            img=nir_img,
            imgname=nir_filename,
            obj=nir_combined,
            mask=nir_combinedmask,
            device=device,
            debug=args.debug,
            filename=outfile)

        # Write NIR data to co-results file
        coresult = open(args.coresult, "a")
        coresult.write('\t'.join(map(str, nhist_header)) + "\n")
        coresult.write('\t'.join(map(str, nhist_data)) + "\n")
        for row in nir_imgs:
            coresult.write('\t'.join(map(str, row)) + "\n")
        coresult.write('\t'.join(map(str, nshape_header)) + "\n")
        coresult.write('\t'.join(map(str, nshape_data)) + "\n")
        coresult.write('\t'.join(map(str, nir_shape)) + "\n")
        coresult.close()
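# Usage sketch (illustrative; assumes the script is saved as vis_nir_workflow.py and
# that options() exposes matching argparse flags for args.image, args.result,
# args.coresult, args.outdir, and args.writeimg):
#   python vis_nir_workflow.py --image plant_vis.png --result vis_results.txt \
#       --coresult nir_results.txt --outdir ./output --writeimg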
Esempio n. 30
0
def main():
   
    # Get options
    args = options()
    if args.debug:
        print("Analyzing your image...")
    # Read image
    device = 0
    img = cv2.imread(args.image, flags=0)
    path, img_name = os.path.split(args.image)
    # Read in an image that is the average of the averaged backgrounds
    img_bkgrd = cv2.imread("bkgrd_ave_z2500.png", flags=0)
    
    # NIR images for burnin2 are upside down. This may be fixed in later experiments
    img =  ndimage.rotate(img, 180)
    img_bkgrd =  ndimage.rotate(img_bkgrd, 180)
    
    # Subtract the background image from the image to make the plant more prominent
    device, bkg_sub_img = pcv.image_subtract(img, img_bkgrd, device, args.debug)
    if args.debug:
        pcv.plot_hist(bkg_sub_img, 'bkg_sub_img')
    device, bkg_sub_thres_img = pcv.binary_threshold(bkg_sub_img, 145, 255, 'dark', device, args.debug)
    bkg_sub_thres_img = cv2.inRange(bkg_sub_img, 30, 220)
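    # Note that this cv2.inRange call overwrites the binary_threshold result from the
    # previous step, keeping only pixels whose background-subtracted values fall in [30, 220].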
    if args.debug:
        cv2.imwrite('bkgrd_sub_thres.png', bkg_sub_thres_img)
    
    #device, bkg_sub_thres_img = pcv.binary_threshold_2_sided(img_bkgrd, 50, 190, device, args.debug)

    # If a region of interest is specified, read it in
    roi = cv2.imread(args.roi)

    
    # Start by examining the distribution of pixel intensity values
    if args.debug:
      pcv.plot_hist(img, 'hist_img')
      
    # Will an intensity transformation enhance our ability to isolate the object of interest by thresholding?
    device, he_img = pcv.HistEqualization(img, device, args.debug)
    if args.debug:
      pcv.plot_hist(he_img, 'hist_img_he')
    
    # Laplace filtering (identify edges based on 2nd derivative)
    device, lp_img = pcv.laplace_filter(img, 1, 1, device, args.debug)
    if args.debug:
      pcv.plot_hist(lp_img, 'hist_lp')
    
    # Laplacian image sharpening; this step will enhance the darkness of the detected edges
    device, lp_shrp_img = pcv.image_subtract(img, lp_img, device, args.debug)
    if args.debug:
      pcv.plot_hist(lp_shrp_img, 'hist_lp_shrp')
      
    # Sobel filtering
    # 1st derivative Sobel filtering along the horizontal axis (kernel = 1, unscaled)
    device, sbx_img = pcv.sobel_filter(img, 1, 0, 1, 1, device, args.debug)
    if args.debug:
      pcv.plot_hist(sbx_img, 'hist_sbx')
      
    # 1st derivative Sobel filtering along the vertical axis (kernel = 1, unscaled)
    device, sby_img = pcv.sobel_filter(img, 0, 1, 1, 1, device, args.debug)
    if args.debug:
      pcv.plot_hist(sby_img, 'hist_sby')
      
    # Combine the effects of both x and y filters through matrix addition
    # This will capture edges identified within each plane and emphasize edges found in both images
    device, sb_img = pcv.image_add(sbx_img, sby_img, device, args.debug)
    if args.debug:
      pcv.plot_hist(sb_img, 'hist_sb_comb_img')
    
    # Use a lowpass (blurring) filter to smooth the Sobel image
    device, mblur_img = pcv.median_blur(sb_img, 1, device, args.debug)
    device, mblur_invert_img = pcv.invert(mblur_img, device, args.debug)
    
    # Combine the smoothed Sobel image with the Laplacian-sharpened image;
    # this combines the best features of both methods, as described in "Digital Image Processing" by Gonzalez and Woods, pg. 169
    device, edge_shrp_img = pcv.image_add(mblur_invert_img, lp_shrp_img, device, args.debug)
    if args.debug:
      pcv.plot_hist(edge_shrp_img, 'hist_edge_shrp_img')
      
    # Perform thresholding to generate a binary image
    device, tr_es_img = pcv.binary_threshold(edge_shrp_img, 145, 255, 'dark', device, args.debug)
    
    # Prepare a few small kernels for morphological filtering
    kern = np.zeros((3,3), dtype=np.uint8)
    kern1 = np.copy(kern)
    kern1[1,1:3]=1
    kern2 = np.copy(kern)
    kern2[1,0:2]=1
    kern3 = np.copy(kern)
    kern3[0:2,1]=1
    kern4 = np.copy(kern)
    kern4[1:3,1]=1
    
    # Prepare a larger kernel for dilation
    kern[1,0:3]=1
    kern[0:3,1]=1
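    # The four small kernels are 3x3 directional line elements; e.g. kern1 is
    #   [[0, 0, 0],
    #    [0, 1, 1],
    #    [0, 0, 0]]
    # (a rightward horizontal segment), while kern is a 3x3 plus/cross shape.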
    
    
    # Perform erosion with 4 small kernels
    device, e1_img = pcv.erode(tr_es_img, kern1, 1, device, args.debug)
    device, e2_img = pcv.erode(tr_es_img, kern2, 1, device, args.debug)
    device, e3_img = pcv.erode(tr_es_img, kern3, 1, device, args.debug)
    device, e4_img = pcv.erode(tr_es_img, kern4, 1, device, args.debug)
    
    # Combine eroded images
    device, c12_img = pcv.logical_or(e1_img, e2_img, device, args.debug)
    device, c123_img = pcv.logical_or(c12_img, e3_img, device, args.debug)
    device, c1234_img = pcv.logical_or(c123_img, e4_img, device, args.debug)
    
    # Perform dilation
    # device, dil_img = pcv.dilate(c1234_img, kern, 1, device, args.debug)
    device, comb_img = pcv.logical_or(c1234_img, bkg_sub_thres_img, device, args.debug)

    # Get masked image
    # The dilated image may contain some pixels which are not plant
    device, masked_erd = pcv.apply_mask(img, comb_img, 'black', device, args.debug)
    # device, masked_erd_dil = pcv.apply_mask(img, dil_img, 'black', device, args.debug)
    
    # Remove the edges of the image by generating a set of rectangles that mask the edge regions
    # img is (254 X 320)
    # mask for the bottom of the image
    device, box1_img, rect_contour1, hierarchy1 = pcv.rectangle_mask(img, (120,184), (215,252), device, args.debug)
    # mask for the left side of the image
    device, box2_img, rect_contour2, hierarchy2 = pcv.rectangle_mask(img, (1,1), (85,252), device, args.debug)
    # mask for the right side of the image
    device, box3_img, rect_contour3, hierarchy3 = pcv.rectangle_mask(img, (240,1), (318,252), device, args.debug)
    # mask the edges
    device, box4_img, rect_contour4, hierarchy4 = pcv.border_mask(img, (1,1), (318,252), device, args.debug)
    
    # combine boxes to filter the edges and car out of the photo
    device, bx12_img = pcv.logical_or(box1_img, box2_img, device, args.debug)
    device, bx123_img = pcv.logical_or(bx12_img, box3_img, device, args.debug)
    device, bx1234_img = pcv.logical_or(bx123_img, box4_img, device, args.debug)
    device, inv_bx1234_img = pcv.invert(bx1234_img, device, args.debug)
    
    # Make a ROI around the plant, include connected objects
    # Apply the box mask to the image
    # device, masked_img = pcv.apply_mask(masked_erd_dil, inv_bx1234_img, 'black', device, args.debug)

    device, edge_masked_img = pcv.apply_mask(masked_erd, inv_bx1234_img, 'black', device, args.debug)

    device, roi_img, roi_contour, roi_hierarchy = pcv.rectangle_mask(img, (120,75), (200,184), device, args.debug)

    plant_objects, plant_hierarchy = cv2.findContours(edge_masked_img,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
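    # cv2.findContours returns (contours, hierarchy) in OpenCV 2.x/4.x but three values
    # in OpenCV 3.x; the two-value unpacking above assumes OpenCV 2.x or 4.x.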
    device, roi_objects, hierarchy5, kept_mask, obj_area = pcv.roi_objects(img, 'partial', roi_contour, roi_hierarchy, plant_objects, plant_hierarchy, device, args.debug)
    
      
    # Apply the box mask to the image
    # device, masked_img = pcv.apply_mask(masked_erd_dil, inv_bx1234_img, 'black', device, args.debug)
    device, masked_img = pcv.apply_mask(kept_mask, inv_bx1234_img, 'black', device, args.debug)
    rgb = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
    # Generate a binary to send to the analysis function
    device, mask = pcv.binary_threshold(masked_img, 1, 255, 'light', device, args.debug)
    mask3d = np.copy(mask)
    plant_objects_2, plant_hierarchy_2 = cv2.findContours(mask3d,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    device, o, m = pcv.object_composition(rgb, roi_objects, hierarchy5, device, args.debug)
    
    ### Analysis ###
    device, hist_header, hist_data, h_norm = pcv.analyze_NIR_intensity(img, args.image, mask, 256, device, args.debug, args.outdir + '/' + img_name)
    device, shape_header, shape_data, ori_img = pcv.analyze_object(rgb, args.image, o, m, device, args.debug, args.outdir + '/' + img_name)
    
    pcv.print_results(args.image, hist_header, hist_data)
    pcv.print_results(args.image, shape_header, shape_data)
Esempio n. 31
0
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)
    #roi = cv2.imread(args.roi)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device,
                                            args.debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device,
                                            args.debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device,
                                         args.debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 150, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, args.debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 122, 255, 'dark',
                                                  device, args.debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 133, 255, 'light',
                                                  device, args.debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device,
                                args.debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device,
                                    args.debug)

    # Fill small objects
    device, ab_fill = pcv.fill(ab, ab_cnt, 200, device, args.debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(masked, ab_fill, 'white', device,
                                     args.debug)

    # Select area with black bars and find overlapping plant material
    device, roi1, roi_hierarchy1 = pcv.define_roi(masked2, 'rectangle', device,
                                                  None, 'default', args.debug,
                                                  True, 0, 0, -1900, 0)
    device, id_objects1, obj_hierarchy1 = pcv.find_objects(
        masked2, ab_fill, device, args.debug)
    device, roi_objects1, hierarchy1, kept_mask1, obj_area1 = pcv.roi_objects(
        masked2, 'cutto', roi1, roi_hierarchy1, id_objects1, obj_hierarchy1,
        device, args.debug)
    device, masked3 = pcv.apply_mask(masked2, kept_mask1, 'white', device,
                                     args.debug)
    device, masked_a1 = pcv.rgb2gray_lab(masked3, 'a', device, args.debug)
    device, masked_b1 = pcv.rgb2gray_lab(masked3, 'b', device, args.debug)
    device, maskeda_thresh1 = pcv.binary_threshold(masked_a1, 122, 255, 'dark',
                                                   device, args.debug)
    device, maskedb_thresh1 = pcv.binary_threshold(masked_b1, 170, 255,
                                                   'light', device, args.debug)
    device, ab1 = pcv.logical_or(maskeda_thresh1, maskedb_thresh1, device,
                                 args.debug)
    device, ab_cnt1 = pcv.logical_or(maskeda_thresh1, maskedb_thresh1, device,
                                     args.debug)
    device, ab_fill1 = pcv.fill(ab1, ab_cnt1, 200, device, args.debug)

    device, roi2, roi_hierarchy2 = pcv.define_roi(masked2, 'rectangle', device,
                                                  None, 'default', args.debug,
                                                  True, 1900, 0, 0, 0)
    device, id_objects2, obj_hierarchy2 = pcv.find_objects(
        masked2, ab_fill, device, args.debug)
    device, roi_objects2, hierarchy2, kept_mask2, obj_area2 = pcv.roi_objects(
        masked2, 'cutto', roi2, roi_hierarchy2, id_objects2, obj_hierarchy2,
        device, args.debug)
    device, masked4 = pcv.apply_mask(masked2, kept_mask2, 'white', device,
                                     args.debug)
    device, masked_a2 = pcv.rgb2gray_lab(masked4, 'a', device, args.debug)
    device, masked_b2 = pcv.rgb2gray_lab(masked4, 'b', device, args.debug)
    device, maskeda_thresh2 = pcv.binary_threshold(masked_a2, 122, 255, 'dark',
                                                   device, args.debug)
    device, maskedb_thresh2 = pcv.binary_threshold(masked_b2, 170, 255,
                                                   'light', device, args.debug)
    device, ab2 = pcv.logical_or(maskeda_thresh2, maskedb_thresh2, device,
                                 args.debug)
    device, ab_cnt2 = pcv.logical_or(maskeda_thresh2, maskedb_thresh2, device,
                                     args.debug)
    device, ab_fill2 = pcv.fill(ab2, ab_cnt2, 200, device, args.debug)

    device, ab_cnt3 = pcv.logical_or(ab_fill1, ab_fill2, device, args.debug)
    device, masked3 = pcv.apply_mask(masked2, ab_cnt3, 'white', device,
                                     args.debug)

    # Identify objects
    device, id_objects3, obj_hierarchy3 = pcv.find_objects(
        masked2, ab_fill, device, args.debug)

    # Define ROI
    device, roi3, roi_hierarchy3 = pcv.define_roi(masked2, 'rectangle', device,
                                                  None, 'default', args.debug,
                                                  True, 500, 0, -450, -50)

    # Decide which objects to keep and combine with objects overlapping with black bars
    device, roi_objects3, hierarchy3, kept_mask3, obj_area1 = pcv.roi_objects(
        img, 'cutto', roi3, roi_hierarchy3, id_objects3, obj_hierarchy3,
        device, args.debug)
    device, kept_mask4_1 = pcv.logical_or(ab_cnt3, kept_mask3, device,
                                          args.debug)
    device, kept_cnt = pcv.logical_or(ab_cnt3, kept_mask3, device, args.debug)
    device, kept_mask4 = pcv.fill(kept_mask4_1, kept_cnt, 200, device,
                                  args.debug)
    device, masked5 = pcv.apply_mask(masked2, kept_mask4, 'white', device,
                                     args.debug)
    device, id_objects4, obj_hierarchy4 = pcv.find_objects(
        masked5, kept_mask4, device, args.debug)
    device, roi4, roi_hierarchy4 = pcv.define_roi(masked2, 'rectangle', device,
                                                  None, 'default', args.debug,
                                                  False, 0, 0, 0, 0)
    device, roi_objects4, hierarchy4, kept_mask4, obj_area = pcv.roi_objects(
        img, 'partial', roi4, roi_hierarchy4, id_objects4, obj_hierarchy4,
        device, args.debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects4, hierarchy4,
                                               device, args.debug)

    ############## Analysis ################

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, obj, mask, device, args.debug,
        args.outdir + '/' + filename)

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(
        img, args.image, obj, mask, 291, device, args.debug,
        args.outdir + '/' + filename)

    # Tiller Tool Test
    #device, tillering= pcv.tiller_tool(img, args.image,obj, mask, 315, device,args.debug,args.outdir+'/'+filename)
    #device, tillering= pcv.tiller_tool(img, args.image, roi_objects4, hierarchy4, device,args.debug, args.outdir+'/'+filename, x_adj=1080,y_adj=1710,w_adj=-1080,h_adj=-310)
    device, tillering = pcv.tiller_tool(img,
                                        args.image,
                                        roi_objects4,
                                        hierarchy4,
                                        device,
                                        args.debug,
                                        args.outdir + '/' + filename,
                                        x_adj=0,
                                        y_adj=0,
                                        w_adj=0,
                                        h_adj=0)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, norm_slice = pcv.analyze_color(
        img, args.image, kept_mask4, 256, device, args.debug, 'all', 'rgb',
        'v', args.outdir + '/' + filename)

    # Output shape and color data
    pcv.print_results(args.image, shape_header, shape_data)
    pcv.print_results(args.image, color_header, color_data)
    pcv.print_results(args.image, boundary_header, boundary_data)
Esempio n. 32
0
def main():
    # Sets variables from input arguments
    args = options()

    device = 0  # Workflow step counter
    debug = args.debug  # Option to display debug images to the notebook
    rgb_img = args.image  # Name of seed Image
    writeimg = args.writeimg
    outfile = str(args.result)
    outdir = str(args.outdir)

    # Reads in RGB image
    img, path, filename = pcv.readimage(rgb_img)
    if writeimg is True:
        pcv.print_image(img, outfile + "_original.jpg")

    # Converts RGB to HSV, extracts the Saturation channel, and inverts the image
    device, img_gray_sat = pcv.rgb2gray_hsv(img, 's', device, debug)
    img_gray_sat = 255 - img_gray_sat

    # Convert RGB to HSV and extract the Value channel
    device, img_gray_val = pcv.rgb2gray_hsv(img, 'v', device, debug)

    # Corrects saturation image background brightness
    sat_img2 = 255 - correct_white_background(img_gray_sat)

    # Corrects value image background brightness
    val_img2 = 255 - correct_white_background(img_gray_val)
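    # Subtracting from 255 inverts the 8-bit grayscale output of
    # correct_white_background() (a helper defined outside this excerpt), so the
    # objects of interest read as 'light' for the thresholding steps below.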

    # Thresholds the Saturation image
    device, sat_img_binary = pcv.binary_threshold(sat_img2, 35, 255, 'light',
                                                  device, debug)

    # Threshold the Value image
    device, val_img_binary = pcv.binary_threshold(val_img2, 35, 255, 'light',
                                                  device, debug)

    # Combines masks
    img_binary = np.where(sat_img_binary < 255, val_img_binary, sat_img_binary)
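    # np.where keeps 255 wherever the saturation mask is already white and falls back to
    # the value mask elsewhere, i.e. a per-pixel union (OR) of the two binary masks.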

    # Fills in speckles smaller than 200 pixels
    mask = np.copy(img_binary)
    device, fill_image = pcv.fill(img_binary, mask, 200, device, debug)
    if writeimg is True:
        pcv.print_image(mask, outfile + "_mask.jpg")
        pcv.print_image(img_binary, outfile + "_binary.jpg")

    # Identifies objects using filled binary image as a mask
    device, id_objects, obj_hierarchy = pcv.find_objects(
        img, fill_image, device, debug)

    # Defines rectangular ROI
    device, roi, roi_hierarchy = pcv.define_roi(img, 'rectangle', device, None,
                                                'default', debug, True, 300,
                                                1000, -1250, -425)

    # Keeps only objects within or partially within ROI
    device, roi_objects, roi_obj_hierarchy, kept_mask, obj_area = \
        pcv.roi_objects(img, 'partial', roi, roi_hierarchy, id_objects, obj_hierarchy, device, debug)

    # Randomly colors the individual seeds
    img_copy = np.copy(img)
    for i in range(0, len(roi_objects)):
        rand_color = color_palette(1)
        cv2.drawContours(img_copy,
                         roi_objects,
                         i,
                         rand_color[0],
                         -1,
                         lineType=8,
                         hierarchy=roi_obj_hierarchy)
    if writeimg is True:
        pcv.print_image(img_copy, outfile + "_coloredseeds.jpg")

    # Gets the area of each seed, saved in shape_data
    shape_header = []
    table = []
    for i in range(0, len(roi_objects)):
        # Keep only top-level contours (those with no parent in the hierarchy)
        if roi_obj_hierarchy[0][i][3] == -1:

            # Object combine kept objects
            device, obj, mask2 = pcv.object_composition(
                img, [roi_objects[i]], np.array([[roi_obj_hierarchy[0][i]]]),
                device, debug)
            if obj is not None:
                device, shape_header, shape_data, shape_img = pcv.analyze_object(
                    img, rgb_img, obj, mask2, device, debug)
                if shape_data is not None:
                    table.append(shape_data)

    # Finds the area of the size marker in pixels and saves to "marker data"
    device, marker_header, marker_data, analysis_images =\
        pcv.report_size_marker_area(img, 'rectangle', device, debug, "detect", 3525, 850, -200, -1700, "black",
                                    "light", "h", 120)
    # shape_header.append("marker_area")

    # Saves seed and marker shape data results to file
    metadata = open(posixpath.join(outdir, outfile), 'r').read()
    os.remove(posixpath.join(outdir, outfile))

    for seed, row in enumerate(table):
        prefix = posixpath.join(outdir, outfile[0:-4])
        results = open(prefix + '_' + str(seed + 1) + '.txt', 'w')
        results.write(metadata)
        results.write('\t'.join(map(str, shape_header)) + '\n')
        # row.append(marker_data[1])
        results.write('\t'.join(map(str, row)) + '\n')
        results.write('\t'.join(map(str, marker_header)) + '\n')
        results.write('\t'.join(map(str, marker_data)) + '\n')
        results.close()
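        # One tab-delimited file per seed is written as <output stem>_<seed number>.txt,
        # each containing the shared metadata, that seed's shape header/data, and the
        # size-marker header/data.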
Esempio n. 33
0
def main():
  # Get options
  args = options()
  
  # Read image
  img, path, filename = pcv.readimage(args.image)
  brass_mask = cv2.imread(args.roi)
  
  # Pipeline step
  device = 0

  # Convert RGB to HSV and extract the Saturation channel
  device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)
  
  # Threshold the Saturation image
  device, s_thresh = pcv.binary_threshold(s, 49, 255, 'light', device, args.debug)
  
  # Median Filter
  device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
  device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)
  
  # Fill small objects
  device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, args.debug)
  
  # Convert RGB to LAB and extract the Blue channel
  device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)
  
  # Threshold the blue image
  device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug)
  device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug)
  
  # Fill small objects
  device, b_fill = pcv.fill(b_thresh, b_cnt, 100, device, args.debug)
  
  # Join the thresholded saturation and blue-yellow images
  device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug)
  
  # Apply Mask (for vis images, mask_color=white)
  device, masked = pcv.apply_mask(img, bs,'white', device, args.debug)
    
  # Mask pesky brass piece
  device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, args.debug)
  device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, args.debug)
  device, brass_inv=pcv.invert(brass_thresh, device, args.debug)
  device, brass_masked = pcv.apply_mask(masked, brass_inv, 'white', device, args.debug)
  
  # Further mask soil and car
  device, masked_a = pcv.rgb2gray_lab(brass_masked, 'a', device, args.debug)
  device, soil_car = pcv.binary_threshold(masked_a, 128, 255, 'dark', device, args.debug)
  device, soil_masked = pcv.apply_mask(brass_masked, soil_car, 'white', device, args.debug)
  
  # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
  device, soil_a = pcv.rgb2gray_lab(soil_masked, 'a', device, args.debug)
  device, soil_b = pcv.rgb2gray_lab(soil_masked, 'b', device, args.debug)
  
  # Threshold the green-magenta and blue images
  device, soila_thresh = pcv.binary_threshold(soil_a, 118, 255, 'dark', device, args.debug)
  device, soilb_thresh = pcv.binary_threshold(soil_b, 155, 255, 'light', device, args.debug)

  # Join the thresholded green-magenta and blue-yellow images (OR)
  device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)
  device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)

  # Fill small objects
  device, soil_fill = pcv.fill(soil_ab, soil_ab_cnt, 75, device, args.debug)

  # Median Filter
  device, soil_mblur = pcv.median_blur(soil_fill, 5, device, args.debug)
  device, soil_cnt = pcv.median_blur(soil_fill, 5, device, args.debug)
  
  # Apply mask (for vis images, mask_color=white)
  device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, 'white', device, args.debug)
  
  # Identify objects
  device, id_objects,obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, args.debug)

  # Define ROI
  device, roi1, roi_hierarchy= pcv.define_roi(img,'circle', device, None, 'default', args.debug,True, 0,0,-50,-50)
  
  # Decide which objects to keep
  device,roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img,'partial',roi1,roi_hierarchy,id_objects,obj_hierarchy,device, args.debug)
  
  # Object combine kept objects
  device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)
  
############## Analysis ################  
  
  # Find shape properties, output shape image (optional)
  device, shape_header,shape_data,shape_img = pcv.analyze_object(img, args.image, obj, mask, device,args.debug,args.outdir+'/'+filename)
  
  # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
  device, color_header,color_data,color_img= pcv.analyze_color(img, args.image, kept_mask, 256, device, args.debug,'all','v','img',300,args.outdir+'/'+filename)
  
  # Output shape and color data
  pcv.print_results(args.image, shape_header, shape_data)
  pcv.print_results(args.image, color_header, color_data)
Esempio n. 34
0
def main():
  # Get options
  args = options()
  
  # Read image
  img, path, filename = pcv.readimage(args.image)
    
  # Pipeline step
  device = 0

  # Convert RGB to HSV and extract the Saturation channel
  device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)

  # Threshold the Saturation image
  device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, args.debug)

  # Median Filter
  device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
  device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)

  # Fill small objects
  # device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)

  # Convert RGB to LAB and extract the Blue channel
  device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)

  # Threshold the blue image
  device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device, args.debug)
  device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device, args.debug)

  # Fill small objects
  # device, b_fill = pcv.fill(b_thresh, b_cnt, 150, device, args.debug)

  # Join the thresholded saturation and blue-yellow images
  device, bs = pcv.logical_and(s_mblur, b_cnt, device, args.debug)

  # Apply Mask (for vis images, mask_color=white)
  device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)

  # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
  device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug)
  device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, args.debug)

  # Threshold the green-magenta and blue images
  device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark', device, args.debug)
  device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, args.debug)

  # Join the thresholded green-magenta and blue-yellow images (OR)
  device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)
  device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)

  # Fill small objects
  device, ab_fill = pcv.fill(ab, ab_cnt, 500, device, args.debug)

  # Apply mask (for vis images, mask_color=white)
  device, masked2 = pcv.apply_mask(masked, ab_fill, 'white', device, args.debug)

  # Select area with black bars and find overlapping plant material
  device, roi1, roi_hierarchy1 = pcv.define_roi(masked2, 'rectangle', device, None, 'default', args.debug, True, 0, 0,
                                                -1700, 0)
  device, id_objects1, obj_hierarchy1 = pcv.find_objects(masked2, ab_fill, device, args.debug)
  device, roi_objects1, hierarchy1, kept_mask1, obj_area1 = pcv.roi_objects(masked2, 'cutto', roi1, roi_hierarchy1,
                                                                            id_objects1, obj_hierarchy1, device,
                                                                            args.debug)
  device, masked3 = pcv.apply_mask(masked2, kept_mask1, 'white', device, args.debug)
  device, masked_a1 = pcv.rgb2gray_lab(masked3, 'a', device, args.debug)
  device, masked_b1 = pcv.rgb2gray_lab(masked3, 'b', device, args.debug)
  device, maskeda_thresh1 = pcv.binary_threshold(masked_a1, 122, 255, 'dark', device, args.debug)
  device, maskedb_thresh1 = pcv.binary_threshold(masked_b1, 170, 255, 'light', device, args.debug)
  device, ab1 = pcv.logical_or(maskeda_thresh1, maskedb_thresh1, device, args.debug)
  device, ab_cnt1 = pcv.logical_or(maskeda_thresh1, maskedb_thresh1, device, args.debug)
  device, ab_fill1 = pcv.fill(ab1, ab_cnt1, 300, device, args.debug)

  device, roi2, roi_hierarchy2 = pcv.define_roi(masked2, 'rectangle', device, None, 'default', args.debug, True, 1700,
                                                0, 0, 0)
  device, id_objects2, obj_hierarchy2 = pcv.find_objects(masked2, ab_fill, device, args.debug)
  device, roi_objects2, hierarchy2, kept_mask2, obj_area2 = pcv.roi_objects(masked2, 'cutto', roi2, roi_hierarchy2,
                                                                            id_objects2, obj_hierarchy2, device,
                                                                            args.debug)
  device, masked4 = pcv.apply_mask(masked2, kept_mask2, 'white', device, args.debug)
  device, masked_a2 = pcv.rgb2gray_lab(masked4, 'a', device, args.debug)
  device, masked_b2 = pcv.rgb2gray_lab(masked4, 'b', device, args.debug)
  device, maskeda_thresh2 = pcv.binary_threshold(masked_a2, 122, 255, 'dark', device, args.debug)
  device, maskedb_thresh2 = pcv.binary_threshold(masked_b2, 170, 255, 'light', device, args.debug)
  device, ab2 = pcv.logical_or(maskeda_thresh2, maskedb_thresh2, device, args.debug)
  device, ab_cnt2 = pcv.logical_or(maskeda_thresh2, maskedb_thresh2, device, args.debug)
  device, ab_fill2 = pcv.fill(ab2, ab_cnt2, 200, device, args.debug)

  device, ab_cnt3 = pcv.logical_or(ab_fill1, ab_fill2, device, args.debug)
  device, masked3 = pcv.apply_mask(masked2, ab_cnt3, 'white', device, args.debug)

  # Identify objects
  device, id_objects3, obj_hierarchy3 = pcv.find_objects(masked2, ab_fill, device, args.debug)

  # Define ROI
  device, roi3, roi_hierarchy3 = pcv.define_roi(masked2, 'rectangle', device, None, 'default', args.debug, True, 650, 0,
                                                -450, -250)

  # Decide which objects to keep and combine with objects overlapping with black bars
  device, roi_objects3, hierarchy3, kept_mask3, obj_area1 = pcv.roi_objects(img, 'cutto', roi3, roi_hierarchy3,
                                                                            id_objects3, obj_hierarchy3, device,
                                                                            args.debug)
  device, kept_mask4_1 = pcv.logical_or(ab_cnt3, kept_mask3, device, args.debug)
  device, kept_cnt = pcv.logical_or(ab_cnt3, kept_mask3, device, args.debug)
  device, kept_mask4 = pcv.fill(kept_mask4_1, kept_cnt, 200, device, args.debug)
  device, masked5 = pcv.apply_mask(masked2, kept_mask4, 'white', device, args.debug)
  device, id_objects4, obj_hierarchy4 = pcv.find_objects(masked5, kept_mask4, device, args.debug)
  device, roi4, roi_hierarchy4 = pcv.define_roi(masked2, 'rectangle', device, None, 'default', args.debug, False, 0, 0,
                                                0, 0)
  device, roi_objects4, hierarchy4, kept_mask4, obj_area = pcv.roi_objects(img, 'partial', roi4, roi_hierarchy4,
                                                                           id_objects4, obj_hierarchy4, device,
                                                                           args.debug)

  # Object combine kept objects
  device, obj, mask = pcv.object_composition(img, roi_objects4, hierarchy4, device, args.debug)

  ############## VIS Analysis ################
  
  outfile=False
  if args.writeimg==True:
    outfile=args.outdir+"/"+filename
  
  # Find shape properties, output shape image (optional)
  device, shape_header,shape_data,shape_img = pcv.analyze_object(img, args.image, obj, mask, device,args.debug,outfile)
  
  # Shape properties relative to user boundary line (optional)
  device, boundary_header,boundary_data, boundary_img1= pcv.analyze_bound(img, args.image,obj, mask, 384, device,args.debug,outfile)
  
  # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
  device, color_header,color_data,color_img= pcv.analyze_color(img, args.image, mask, 256, device, args.debug,None,'v','img',300,outfile)
  
  # Output shape and color data
  result=open(args.result,"a")
  result.write('\t'.join(map(str,shape_header)))
  result.write("\n")
  result.write('\t'.join(map(str,shape_data)))
  result.write("\n")
  for row in shape_img:
    result.write('\t'.join(map(str,row)))
    result.write("\n")
  result.write('\t'.join(map(str,color_header)))
  result.write("\n")
  result.write('\t'.join(map(str,color_data)))
  result.write("\n")
  result.write('\t'.join(map(str,boundary_header)))
  result.write("\n")
  result.write('\t'.join(map(str,boundary_data)))
  result.write("\n")
  result.write('\t'.join(map(str,boundary_img1)))
  result.write("\n")
  for row in color_img:
    result.write('\t'.join(map(str,row)))
    result.write("\n")
  result.close()
def main():
    # Get options
    args = options()
    path_mask = '/home/mfeldman/tester/mask/mask_brass_tv_z1000_L0.png'

    # Read image
    img, path, filename = pcv.readimage(args.image)
    brass_mask = cv2.imread(path_mask)

    # Pipeline step
    device = 0

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, args.debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light',
                                                device, args.debug)
    device, brass_inv = pcv.invert(brass_thresh, device, args.debug)
    device, masked_image = pcv.apply_mask(img, brass_inv, 'white', device,
                                          args.debug)

    # Looks like we can detect very bright soil particles with the h channel
    device, h = pcv.rgb2gray_hsv(masked_image, 'h', device, args.debug)
    h_bkg = cv2.inRange(h, 100, 255)

    # Make an image mask to cover these points
    h_bkg_inv = cv2.bitwise_not(h_bkg)

    # We can do a pretty good job of identifying the plant from the a channel
    device, a = pcv.rgb2gray_lab(masked_image, 'a', device, args.debug)
    a_thresh = cv2.inRange(a, 95, 117)

    # Let's blur the result a bit to get rid of unwanted noise
    blur = cv2.medianBlur(a_thresh, 5)

    # Now let's set up a series of filters to remove unwanted background
    plant_shape = cv2.bitwise_and(blur, h_bkg_inv)

    # Now remove all remaining small points using erosion with a 3 x 3 kernel
    kernel = np.ones((3, 3), np.uint8)
    erosion = cv2.erode(plant_shape, kernel, iterations=1)

    # Now dilate to fill in small holes
    kernel = np.ones((3, 3), np.uint8)
    dilation = cv2.dilate(erosion, kernel, iterations=1)
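    # Erosion followed by dilation with the same kernel is a morphological opening;
    # an equivalent one-liner (illustrative) would be:
    #   dilation = cv2.morphologyEx(plant_shape, cv2.MORPH_OPEN, kernel)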

    # Apply the mask to the original image
    device, masked = pcv.apply_mask(img, dilation, 'white', device, args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(
        masked, dilation, device, args.debug)

    # Get ROI contours
    device, roi, roi_hierarchy = pcv.define_roi(masked_image,
                                                'circle',
                                                device,
                                                None,
                                                'default',
                                                args.debug,
                                                True,
                                                x_adj=0,
                                                y_adj=0,
                                                w_adj=0,
                                                h_adj=-1200)

    # ROI
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        masked_image, 'partial', roi, roi_hierarchy, id_objects, obj_hierarchy,
        device, args.debug)

    # Get object contour and masked object
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3,
                                               device, args.debug)

    ############## Landmarks    ################

    # Use the acute_vertex function to estimate tips
    device, points = pcv.acute_vertex(obj, 20, 40, 40, img, device, args.debug)
    boundary_line = 'NA'
    # Re-scale the estimated tip points
    device, points_r, centroid_r, bline_r = pcv.scale_features(
        obj, mask, points, boundary_line, device, args.debug)
    # Get number of points
    tips = len(points_r)
    # Use turgor_proxy fxn to get distances
    device, vert_ave_c, hori_ave_c, euc_ave_c, ang_ave_c, vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b = pcv.turgor_proxy(
        points_r, centroid_r, bline_r, device, args.debug)
    # Get pseudomarkers along the y-axis
    device, left, right, center_h = pcv.y_axis_pseudolandmarks(
        obj, mask, img, device, args.debug)
    # Re-scale the points
    device, left_r, left_cr, left_br = pcv.scale_features(
        obj, mask, left, boundary_line, device, args.debug)
    device, right_r, right_cr, right_br = pcv.scale_features(
        obj, mask, right, boundary_line, device, args.debug)
    device, center_hr, center_hcr, center_hbr = pcv.scale_features(
        obj, mask, center_h, boundary_line, device, args.debug)

    # Get pseudomarkers along the x-axis
    device, top, bottom, center_v = pcv.x_axis_pseudolandmarks(
        obj, mask, img, device, args.debug)

    # Re-scale the points
    device, top_r, top_cr, top_br = pcv.scale_features(obj, mask, top,
                                                       boundary_line, device,
                                                       args.debug)
    device, bottom_r, bottom_cr, bottom_br = pcv.scale_features(
        obj, mask, bottom, boundary_line, device, args.debug)
    device, center_vr, center_vcr, center_vbr = pcv.scale_features(
        obj, mask, center_v, boundary_line, device, args.debug)

    # Convert the landmark arrays into lists of (x, y) tuples to match the scaled points
    points = [tuple(p) for p in points.reshape(len(points), 2).tolist()]
    left = [tuple(p) for p in left.reshape(20, 2).tolist()]
    right = [tuple(p) for p in right.reshape(20, 2).tolist()]
    center_h = [tuple(p) for p in center_h.reshape(20, 2).tolist()]
    top = [tuple(p) for p in top.reshape(20, 2).tolist()]
    bottom = [tuple(p) for p in bottom.reshape(20, 2).tolist()]
    center_v = [tuple(p) for p in center_v.reshape(20, 2).tolist()]

    # Store landmark data
    landmark_header = ('HEADER_LANDMARK', 'tip_points', 'tip_points_r',
                       'centroid_r', 'baseline_r', 'tip_number', 'vert_ave_c',
                       'hori_ave_c', 'euc_ave_c', 'ang_ave_c', 'vert_ave_b',
                       'hori_ave_b', 'euc_ave_b', 'ang_ave_b', 'left_lmk',
                       'right_lmk', 'center_h_lmk', 'left_lmk_r',
                       'right_lmk_r', 'center_h_lmk_r', 'top_lmk',
                       'bottom_lmk', 'center_v_lmk', 'top_lmk_r',
                       'bottom_lmk_r', 'center_v_lmk_r')

    landmark_data = ('LANDMARK_DATA', points, points_r, centroid_r, bline_r,
                     tips, vert_ave_c, hori_ave_c, euc_ave_c, ang_ave_c,
                     vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b, left, right,
                     center_h, left_r, right_r, center_hr, top, bottom,
                     center_v, top_r, bottom_r, center_vr)

    ############## VIS Analysis ################

    outfile = False
    #if args.writeimg==True:
    #outfile=args.outdir+"/"+filename

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, obj, mask, device, args.debug, outfile)

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(
        img, args.image, obj, mask, 330, device, args.debug, outfile)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        img, args.image, mask, 256, device, args.debug, None, 'v', 'img', 300,
        outfile)

    # Output shape and color data

    result = open(args.result, "a")
    result.write('\t'.join(map(str, shape_header)))
    result.write("\n")
    result.write('\t'.join(map(str, shape_data)))
    result.write("\n")
    for row in shape_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.write('\t'.join(map(str, color_header)))
    result.write("\n")
    result.write('\t'.join(map(str, color_data)))
    result.write("\n")
    result.write('\t'.join(map(str, boundary_header)))
    result.write("\n")
    result.write('\t'.join(map(str, boundary_data)))
    result.write("\n")
    result.write('\t'.join(map(str, boundary_img1)))
    result.write("\n")
    for row in color_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.write('\t'.join(map(str, landmark_header)))
    result.write("\n")
    result.write('\t'.join(map(str, landmark_data)))
    result.write("\n")
    result.close()
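The block above repeats the same header/data/image-row pattern for each measurement written to the results file. A small helper such as the hypothetical write_section below (a sketch, not part of the original pipeline) could express the same tab-separated output more compactly:

def write_section(fh, header, data, img_rows=None):
    # Write one measurement block as tab-separated lines: a header row, a data row,
    # and optional image rows, mirroring the pattern used in the pipelines above.
    fh.write('\t'.join(map(str, header)) + "\n")
    fh.write('\t'.join(map(str, data)) + "\n")
    for row in (img_rows or []):
        fh.write('\t'.join(map(str, row)) + "\n")

With such a helper, each header/data pair above (plus its optional image rows) becomes a single write_section call against the open result file.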
Example n. 36
0
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)

    # Pipeline step
    device = 0

    debug = args.debug

    # print('Original image')
    # pcv.plot_image(img)

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, debug)
    # print('Convert RGB to HSV and extract the Saturation channel')
    # plt.imshow(s)
    # plt.show()

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 100, 255, 'light', device,
                                            debug)
    # print('Threshold the Saturation image')
    # plt.imshow(s_thresh)
    # plt.show()
    #
    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)
    # print('Median Filter')
    # plt.imshow(s_mblur)
    # plt.show()
    #
    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, debug)
    # print('Convert RGB to LAB and extract the Blue channel')
    # plt.imshow(b)
    # plt.show()

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 160, 255, 'light', device,
                                            debug)
    device, b_cnt = pcv.binary_threshold(b, 160, 255, 'light', device, debug)
    # print('Threshold the blue image')
    # plt.imshow(b_cnt)
    # plt.show()
    # Fill small objects
    #device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, debug)
    #

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_or(s_mblur, b_cnt, device, debug)

    # print('Join the thresholded saturation and blue-yellow images')
    # plt.imshow(bs)
    # plt.show()
    #
    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, debug)
    # print('Apply Mask 1 (for vis images, mask_color=white)')
    # plt.imshow(masked)
    # plt.show()
    #
    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 115, 255, 'dark',
                                                  device, debug)
    device, maskeda_thresh1 = pcv.binary_threshold(masked_a, 135, 255, 'light',
                                                   device, debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light',
                                                  device, debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, ab1 = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)
    device, ab = pcv.logical_or(maskeda_thresh1, ab1, device, debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh1, ab1, device, debug)

    # Fill small objects
    device, ab_fill = pcv.fill(ab, ab_cnt, 200, device, debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(masked, ab_fill, 'white', device, debug)
    # print('Apply Mask 2 (for vis images, mask_color=white)')
    # plt.imshow(masked2)
    # plt.show()
    #
    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(
        masked2, ab_fill, device, debug)

    #
    # Define ROI
    # device, roi1, roi_hierarchy= pcv.define_roi(masked2, 'rectangle', device, None, 'default', debug, True, 67, 377, -125, -368)
    device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device,
                                                 None, 'default', debug, True,
                                                 1, 1, -1, -1)
    #
    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device,
        debug)
    #
    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3,
                                               device, debug)

    ############### Analysis ################

    outfile = False
    if args.writeimg == True:
        outfile = args.outdir + "/" + filename

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, obj, mask, device, debug, outfile)

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(
        img, args.image, obj, mask, 1680, device, debug, outfile)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        img, args.image, kept_mask, 256, device, debug, 'all', 'v', 'img', 300,
        outfile)

    # Write shape and color data to results file
    result = open(args.result, "a")
    result.write('\t'.join(map(str, shape_header)))
    result.write("\n")
    result.write('\t'.join(map(str, shape_data)))
    result.write("\n")
    for row in shape_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.write('\t'.join(map(str, color_header)))
    result.write("\n")
    result.write('\t'.join(map(str, color_data)))
    result.write("\n")
    for row in color_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.close()
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)
    brass_mask = cv2.imread(args.roi)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, "s", device, args.debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 49, 255, "light", device, args.debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)

    # Fill small objects
    device, s_fill = pcv.fill(s_mblur, s_cnt, 150, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, "b", device, args.debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 138, 255, "light", device, args.debug)
    device, b_cnt = pcv.binary_threshold(b, 138, 255, "light", device, args.debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 150, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, "white", device, args.debug)

    # Mask pesky brass piece
    device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, "v", device, args.debug)
    device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, "light", device, args.debug)
    device, brass_inv = pcv.invert(brass_thresh, device, args.debug)
    device, brass_masked = pcv.apply_mask(masked, brass_inv, "white", device, args.debug)

    # Further mask soil and car
    device, masked_a = pcv.rgb2gray_lab(brass_masked, "a", device, args.debug)
    device, soil_car = pcv.binary_threshold(masked_a, 128, 255, "dark", device, args.debug)
    device, soil_masked = pcv.apply_mask(brass_masked, soil_car, "white", device, args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, soil_a = pcv.rgb2gray_lab(soil_masked, "a", device, args.debug)
    device, soil_b = pcv.rgb2gray_lab(soil_masked, "b", device, args.debug)

    # Threshold the green-magenta and blue images
    device, soila_thresh = pcv.binary_threshold(soil_a, 118, 255, "dark", device, args.debug)
    device, soilb_thresh = pcv.binary_threshold(soil_b, 150, 255, "light", device, args.debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, soil_ab = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)
    device, soil_ab_cnt = pcv.logical_or(soila_thresh, soilb_thresh, device, args.debug)

    # Fill small objects
    device, soil_cnt = pcv.fill(soil_ab, soil_ab_cnt, 75, device, args.debug)

    # Median Filter
    # device, soil_mblur = pcv.median_blur(soil_fill, 5, device, args.debug)
    # device, soil_cnt = pcv.median_blur(soil_fill, 5, device, args.debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(soil_masked, soil_cnt, "white", device, args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, args.debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(
        img, "circle", device, None, "default", args.debug, True, 0, 0, -200, -200
    )

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, "partial", roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug
    )

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)

    ############## VIS Analysis ################

    outfile = False
    if args.writeimg == True:
        outfile = args.outdir + "/" + filename

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, obj, mask, device, args.debug, outfile
    )

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        img, args.image, mask, 256, device, args.debug, None, "v", "img", 300, outfile
    )

    # Output shape and color data

    result = open(args.result, "a")
    result.write("\t".join(map(str, shape_header)))
    result.write("\n")
    result.write("\t".join(map(str, shape_data)))
    result.write("\n")
    for row in shape_img:
        result.write("\t".join(map(str, row)))
        result.write("\n")
    result.write("\t".join(map(str, color_header)))
    result.write("\n")
    result.write("\t".join(map(str, color_data)))
    result.write("\n")
    for row in color_img:
        result.write("\t".join(map(str, row)))
        result.write("\n")
    result.close()

    ############################# Use VIS image mask for NIR image#########################
    # Find matching NIR image
    device, nirpath = pcv.get_nir(path, filename, device, args.debug)
    nir, path1, filename1 = pcv.readimage(nirpath)
    nir2 = cv2.imread(nirpath, -1)

    # Flip mask
    device, f_mask = pcv.flip(mask, "horizontal", device, args.debug)

    # Resize mask
    device, nmask = pcv.resize(f_mask, 0.1304, 0.1304, device, args.debug)

    # Position and crop mask
    device, newmask = pcv.crop_position_mask(nir, nmask, device, 9, 12, "top", "left", args.debug)

    # Identify objects
    device, nir_objects, nir_hierarchy = pcv.find_objects(nir, newmask, device, args.debug)

    # Object combine kept objects
    device, nir_combined, nir_combinedmask = pcv.object_composition(nir, nir_objects, nir_hierarchy, device, args.debug)

    ####################################### Analysis #############################################
    outfile1 = False
    if args.writeimg == True:
        outfile1 = args.outdir + "/" + filename1

    device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(
        nir2, filename1, nir_combinedmask, 256, device, False, args.debug, outfile1
    )
    device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(
        nir2, filename1, nir_combined, nir_combinedmask, device, args.debug, outfile1
    )

    coresult = open(args.coresult, "a")
    coresult.write("\t".join(map(str, nhist_header)))
    coresult.write("\n")
    coresult.write("\t".join(map(str, nhist_data)))
    coresult.write("\n")
    for row in nir_imgs:
        coresult.write("\t".join(map(str, row)))
        coresult.write("\n")

    coresult.write("\t".join(map(str, nshape_header)))
    coresult.write("\n")
    coresult.write("\t".join(map(str, nshape_data)))
    coresult.write("\n")
    coresult.write("\t".join(map(str, nir_shape)))
    coresult.write("\n")
    coresult.close()
Example n. 38
0
def back_for_ground_sub(img, sliders):
    args = options()
    debug = args.debug
    stop = 0
    sat_thresh = 90
    blue_thresh = 135
    green_magenta_dark_thresh = 124
    green_magenta_light_thresh = 180
    blue_yellow_thresh = 128

    def nothing(x):
        pass

    if sliders == True:
        Stop = np.zeros((100, 512, 3), np.uint8)
        cv2.namedWindow('Saturation', cv2.WINDOW_NORMAL)
        cv2.namedWindow('Blue', cv2.WINDOW_NORMAL)
        cv2.namedWindow('Green_magenta_dark', cv2.WINDOW_NORMAL)
        cv2.namedWindow('Green_magenta_light', cv2.WINDOW_NORMAL)
        cv2.namedWindow('Blue_yellow_light', cv2.WINDOW_NORMAL)
        cv2.namedWindow('Stop')
        cv2.createTrackbar('sat_thresh', 'Saturation', 85, 255, nothing)
        cv2.createTrackbar('blue_thresh', 'Blue', 135, 255, nothing)
        cv2.createTrackbar('green_magenta_dark_thresh', 'Green_magenta_dark',
                           117, 255, nothing)
        cv2.createTrackbar('green_magenta_light_thresh', 'Green_magenta_light',
                           180, 255, nothing)
        cv2.createTrackbar('blue_yellow_thresh', 'Blue_yellow_light', 128, 255,
                           nothing)
        cv2.createTrackbar('stop', 'Stop', 0, 1, nothing)
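    # With sliders enabled, the loop below re-runs the segmentation using the current
    # trackbar values until the 'stop' trackbar is set to 1 or ESC is pressed;
    # without sliders it runs exactly once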
    while (stop == 0):

        if sliders == True:
            # get current positions of five trackbars
            sat_thresh = cv2.getTrackbarPos('sat_thresh', 'Saturation')
            blue_thresh = cv2.getTrackbarPos('blue_thresh', 'Blue')
            green_magenta_dark_thresh = cv2.getTrackbarPos(
                'green_magenta_dark_thresh', 'Green_magenta_dark')
            green_magenta_light_thresh = cv2.getTrackbarPos(
                'green_magenta_light_thresh', 'Green_magenta_light')
            blue_yellow_thresh = cv2.getTrackbarPos('blue_yellow_thresh',
                                                    'Blue_yellow_light')

        # Pipeline step
        device = 0
        # Convert RGB to HSV and extract the Saturation channel
        # Extract the light and dark form the image
        device, s = pcv.rgb2gray_hsv(img, 's', device)
        device, s_thresh = pcv.binary_threshold(s, sat_thresh, 255, 'light',
                                                device)
        # device, s_thresh = pcv.otsu_auto_threshold(s, 255, 'light', device, debug)
        device, s_mblur = pcv.median_blur(s_thresh, 5, device)
        device, s_cnt = pcv.median_blur(s_thresh, 5, device)

        # Convert RGB to LAB and extract the Blue channel
        # Threshold the blue image
        # Combine the thresholded saturation and blue thresholded images with a logical OR
        device, b = pcv.rgb2gray_lab(img, 'b', device)
        device, b_thresh = pcv.otsu_auto_threshold(b, 255, 'light', device,
                                                   debug)
        device, b_cnt = pcv.otsu_auto_threshold(b, 255, 'light', device, debug)
        device, b_cnt_2 = pcv.binary_threshold(b, 100, 255, 'light', device,
                                               debug)

        device, bs = pcv.logical_or(s_mblur, b_cnt, device)
        # Mask the original image with the thresholded combination of the blue and saturation masks
        device, masked = pcv.apply_mask(img, bs, 'white', device)

        # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
        device, masked_a = pcv.rgb2gray_lab(masked, 'a', device)
        device, masked_b = pcv.rgb2gray_lab(masked, 'b', device)

        # Focus on capturing the plant from the masked image 'masked'
        # Extract plant green-magenta and blue-yellow channels
        # Channels are threshold to cap different portions of the plant
        # Threshold the green-magenta and blue images
        # Images joined together
        # device, maskeda_thresh = pcv.binary_threshold(masked_a, 115, 255, 'dark', device)
        device, maskeda_thresh = pcv.binary_threshold(
            masked_a, green_magenta_dark_thresh, 255, 'dark', device,
            debug)  # Original 115 New 125
        device, maskeda_thresh1 = pcv.binary_threshold(
            masked_a, green_magenta_light_thresh, 255, 'light', device,
            debug)  # Original 135 New 170
        device, maskedb_thresh = pcv.binary_threshold(
            masked_b, blue_yellow_thresh, 255, 'light', device,
            debug)  # Original 150, New 165
        device, maskeda_thresh2 = pcv.binary_threshold(
            masked_a, green_magenta_dark_thresh, 255, 'dark', device,
            debug)  # Original 115 New 125

        # Join the thresholded green-magenta and blue-yellow images (OR)
        device, ab1 = pcv.logical_or(maskeda_thresh, maskedb_thresh, device,
                                     debug)
        device, ab = pcv.logical_or(maskeda_thresh1, ab1, device, debug)
        device, ab_cnt = pcv.logical_or(maskeda_thresh1, ab1, device, debug)
        device, ab_cnt_2 = pcv.logical_and(b_cnt_2, maskeda_thresh2, device,
                                           debug)
        # Fill small objects
        device, ab_fill = pcv.fill(ab, ab_cnt, 200, device,
                                   debug)  # Original 200 New: 120
        # cv2.imwrite("yucca_1.jpg",s)
        device, mask_new = pcv.logical_and(maskeda_thresh2, maskedb_thresh,
                                           device, debug)

        # Apply mask (for vis images, mask_color=white)
        device, masked2 = pcv.apply_mask(masked, ab_fill, 'white', device,
                                         debug)
        device, masked3 = pcv.apply_mask(masked, ab_cnt_2, 'white', device,
                                         debug)
        # Identify objects
        device, id_objects, obj_hierarchy = pcv.find_objects(
            masked2, ab_fill, device, debug)
        # Define ROI

        # Plant extraction done -----------------------------------------------------------------------------------

        if sliders == True:
            stop = cv2.getTrackbarPos('stop', 'Stop')
            cv2.imshow('Stop', Stop)
            cv2.imshow('Saturation', s_thresh)
            cv2.imshow('Blue', b_thresh)
            cv2.imshow('Green_magenta_dark', maskeda_thresh)
            cv2.imshow('Green_magenta_light', maskeda_thresh1)
            cv2.imshow('Blue_yellow_light', maskedb_thresh)
            cv2.imshow('Mask_new', mask_new)
            cv2.imshow('Mask', masked)
            cv2.imshow('Mask2', masked2)
            cv2.imshow('Mask3', masked3)
            cv2.imshow('masked_a', masked_a)
            cv2.imshow('masked_b', masked_b)
            cv2.imshow('fill', ab_fill)
            cv2.imshow('ab_cnt', ab)
            cv2.imshow('ab1', ab1)
            cv2.imshow('ab_cnt2', ab_cnt_2)

            k = cv2.waitKey(1) & 0xFF
            if k == 27:
                break

        else:
            stop = 1

    device, roi1, roi_hierarchy = pcv.define_roi(masked2,
                                                 'rectangle',
                                                 device,
                                                 roi=None,
                                                 roi_input='default',
                                                 debug=False,
                                                 adjust=True,
                                                 x_adj=100,
                                                 y_adj=50,
                                                 w_adj=-150,
                                                 h_adj=-50)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img,
        'partial',
        roi1,
        roi_hierarchy,
        id_objects,
        obj_hierarchy,
        device,
        debug=False)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img,
                                               roi_objects,
                                               hierarchy3,
                                               device,
                                               debug=False)
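back_for_ground_sub computes obj and mask for the extracted plant but does not return them in this excerpt. A minimal usage sketch follows; the image path and the call site are assumptions, not part of the original, and cv2 plus the options() helper are assumed to be available at module level as in the other examples:

# Hypothetical driver for the interactive version of the pipeline above
img = cv2.imread("plant_vis_image.png")   # assumed input image path
back_for_ground_sub(img, sliders=True)    # tune thresholds with the OpenCV trackbars
cv2.destroyAllWindows()                   # close the preview windows when finished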
Example n. 39
0
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)
    # roi = cv2.imread(args.roi)

    # Pipeline step
    device = 0

    ## Convert RGB to HSV and extract the Saturation channel
    # device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)
    #
    ## Threshold the Saturation image
    # device, s_thresh = pcv.binary_threshold(s, 90, 255, 'dark', device, args.debug)
    #
    ## Median Filter
    # device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
    # device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)
    #
    ## Fill small objects
    ##device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)
    #
    ## Convert RGB to LAB and extract the Blue channel
    # device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)
    #
    ## Threshold the blue image
    # device, b_thresh = pcv.binary_threshold(b, 135, 255, 'light', device, args.debug)
    # device, b_cnt = pcv.binary_threshold(b, 135, 255, 'light', device, args.debug)
    #
    ##Fill small objects
    # device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug)
    #
    ## Join the thresholded saturation and blue-yellow images
    # device, bs = pcv.logical_or(s_mblur, b_cnt, device, args.debug)
    #
    ## Apply Mask (for vis images, mask_color=white)
    # device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(img, "a", device, args.debug)
    device, masked_b = pcv.rgb2gray_lab(img, "b", device, args.debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 135, 255, "dark", device, args.debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 140, 255, "light", device, args.debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)

    # Fill small objects
    device, ab_fill = pcv.fill(ab, ab_cnt, 1000, device, args.debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(img, ab_fill, "white", device, args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, args.debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(
        masked2, "rectangle", device, None, "default", args.debug, True, 550, 0, -500, -300
    )

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, "partial", roi1, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug
    )

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)

    ############### Analysis ################

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, obj, mask, device, args.debug, args.outdir + "/" + filename
    )

    # Shape properties relative to user boundary line (optional)
    # device, boundary_header,boundary_data, boundary_img1= pcv.analyze_bound(img, args.image,obj, mask, 1680, device,args.debug,args.outdir+'/'+filename)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, norm_slice = pcv.analyze_color(
        img, args.image, kept_mask, 256, device, args.debug, "all", "rgb", "v", "img", 300, args.outdir + "/" + filename
    )

    # Output shape and color data
    pcv.print_results(args.image, shape_header, shape_data)
    pcv.print_results(args.image, color_header, color_data)
Example n. 40
0
def main():
    # Initialize device
    device = 0

    # Parse command-line options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(filename=args.image, debug=args.debug)

    # Convert RGB to LAB and extract the Green-Magenta channel
    device, green_channel = pcv.rgb2gray_lab(img=img,
                                             channel="a",
                                             device=device,
                                             debug=args.debug)

    # Invert the Green-Magenta image because the plant is dark green
    device, green_inv = pcv.invert(img=green_channel,
                                   device=device,
                                   debug=args.debug)

    # Threshold the inverted Green-Magenta image to mostly isolate green pixels
    device, green_thresh = pcv.binary_threshold(img=green_inv,
                                                threshold=134,
                                                maxValue=255,
                                                object_type="light",
                                                device=device,
                                                debug=args.debug)

    # Extract core plant region from the image to preserve delicate plant features during filtering
    device += 1
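    # NumPy slicing is [row, column], so this keeps rows (y) 100-2000 and columns (x) 250-2250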
    plant_region = green_thresh[100:2000, 250:2250]
    if args.debug is not None:
        pcv.print_image(filename=str(device) + "_extract_plant_region.png",
                        img=plant_region)

    # Use a Gaussian blur to disrupt the strong edge features in the cabinet
    device, blur_gaussian = pcv.gaussian_blur(device=device,
                                              img=green_thresh,
                                              ksize=(7, 7),
                                              sigmax=0,
                                              sigmay=None,
                                              debug=args.debug)

    # Threshold the blurred image to remove features that were blurred
    device, blur_thresholded = pcv.binary_threshold(img=blur_gaussian,
                                                    threshold=250,
                                                    maxValue=255,
                                                    object_type="light",
                                                    device=device,
                                                    debug=args.debug)

    # Add the plant region back in to the filtered image
    device += 1
    blur_thresholded[100:2000, 250:2250] = plant_region
    if args.debug is not None:
        pcv.print_image(filename=str(device) + "_replace_plant_region.png",
                        img=blur_thresholded)

    # Use a median blur to breakup the horizontal and vertical lines caused by shadows from the track edges
    device, med_blur = pcv.median_blur(img=blur_thresholded,
                                       ksize=5,
                                       device=device,
                                       debug=args.debug)

    # Fill in small contours
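    # Copies are passed so that med_blur itself is left untouched by the fill step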
    device, green_fill_50 = pcv.fill(img=np.copy(med_blur),
                                     mask=np.copy(med_blur),
                                     size=50,
                                     device=device,
                                     debug=args.debug)

    # Define an ROI for the brass stopper
    device, stopper_roi, stopper_hierarchy = pcv.define_roi(
        img=img,
        shape="rectangle",
        device=device,
        roi=None,
        roi_input="default",
        debug=args.debug,
        adjust=True,
        x_adj=1420,
        y_adj=890,
        w_adj=-920,
        h_adj=-1040)

    # Identify all remaining contours in the binary image
    device, contours, hierarchy = pcv.find_objects(img=img,
                                                   mask=np.copy(green_fill_50),
                                                   device=device,
                                                   debug=args.debug)

    # Remove contours completely contained within the stopper region of interest
    device, remove_stopper_mask = remove_countors_roi(mask=green_fill_50,
                                                      contours=contours,
                                                      hierarchy=hierarchy,
                                                      roi=stopper_roi,
                                                      device=device,
                                                      debug=args.debug)

    # Define an ROI for a screw hole
    device, screw_roi, screw_hierarchy = pcv.define_roi(img=img,
                                                        shape="rectangle",
                                                        device=device,
                                                        roi=None,
                                                        roi_input="default",
                                                        debug=args.debug,
                                                        adjust=True,
                                                        x_adj=1870,
                                                        y_adj=1010,
                                                        w_adj=-485,
                                                        h_adj=-960)

    # Remove contours completely contained within the screw region of interest
    device, remove_screw_mask = remove_countors_roi(mask=remove_stopper_mask,
                                                    contours=contours,
                                                    hierarchy=hierarchy,
                                                    roi=screw_roi,
                                                    device=device,
                                                    debug=args.debug)

    # Identify objects
    device, contours, contour_hierarchy = pcv.find_objects(
        img=img, mask=remove_screw_mask, device=device, debug=args.debug)

    # Define ROI
    device, roi, roi_hierarchy = pcv.define_roi(img=img,
                                                shape="rectangle",
                                                device=device,
                                                roi=None,
                                                roi_input="default",
                                                debug=args.debug,
                                                adjust=True,
                                                x_adj=565,
                                                y_adj=200,
                                                w_adj=-490,
                                                h_adj=-250)

    # Decide which objects to keep
    device, roi_contours, roi_contour_hierarchy, _, _ = pcv.roi_objects(
        img=img,
        roi_type="partial",
        roi_contour=roi,
        roi_hierarchy=roi_hierarchy,
        object_contour=contours,
        obj_hierarchy=contour_hierarchy,
        device=device,
        debug=args.debug)

    # If there are no contours left we cannot measure anything
    if len(roi_contours) > 0:
        # Object combine kept objects
        device, plant_contour, plant_mask = pcv.object_composition(
            img=img,
            contours=roi_contours,
            hierarchy=roi_contour_hierarchy,
            device=device,
            debug=args.debug)

        outfile = False
        if args.writeimg:
            outfile = args.outdir + "/" + filename

        # Find shape properties, output shape image (optional)
        device, shape_header, shape_data, shape_img = pcv.analyze_object(
            img=img,
            imgname=args.image,
            obj=plant_contour,
            mask=plant_mask,
            device=device,
            debug=args.debug,
            filename=outfile)

        # Determine color properties: Histograms, Color Slices and Pseudocolored Images,
        # output color analyzed images (optional)
        device, color_header, color_data, color_img = pcv.analyze_color(
            img=img,
            imgname=args.image,
            mask=plant_mask,
            bins=256,
            device=device,
            debug=args.debug,
            hist_plot_type=None,
            pseudo_channel="v",
            pseudo_bkg="img",
            resolution=300,
            filename=outfile)

        # Output shape and color data
        result = open(args.result, "a")
        result.write('\t'.join(map(str, shape_header)) + "\n")
        result.write('\t'.join(map(str, shape_data)) + "\n")
        for row in shape_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.write('\t'.join(map(str, color_header)) + "\n")
        result.write('\t'.join(map(str, color_data)) + "\n")
        for row in color_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.close()

        # Find matching NIR image
        device, nirpath = pcv.get_nir(path=path,
                                      filename=filename,
                                      device=device,
                                      debug=args.debug)
        nir_rgb, nir_path, nir_filename = pcv.readimage(nirpath)
        nir_img = cv2.imread(nirpath, 0)

        # Make mask glovelike in proportions via dilation
        device, d_mask = pcv.dilate(plant_mask,
                                    kernel=1,
                                    i=0,
                                    device=device,
                                    debug=args.debug)

        # Resize mask
        prop2, prop1 = conv_ratio()
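        # conv_ratio() is a helper assumed to be defined elsewhere in this example; it
        # supplies the y and x scale factors used to shrink the VIS mask to the NIR resolution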
        device, nmask = pcv.resize(img=d_mask,
                                   resize_x=prop1,
                                   resize_y=prop2,
                                   device=device,
                                   debug=args.debug)

        # Convert the resized mask to a binary mask
        device, bmask = pcv.binary_threshold(img=nmask,
                                             threshold=0,
                                             maxValue=255,
                                             object_type="light",
                                             device=device,
                                             debug=args.debug)

        device, crop_img = crop_sides_equally(mask=bmask,
                                              nir=nir_img,
                                              device=device,
                                              debug=args.debug)

        # Position and crop mask
        device, newmask = pcv.crop_position_mask(img=nir_img,
                                                 mask=crop_img,
                                                 device=device,
                                                 x=0,
                                                 y=1,
                                                 v_pos="bottom",
                                                 h_pos="right",
                                                 debug=args.debug)

        # Identify objects
        device, nir_objects, nir_hierarchy = pcv.find_objects(img=nir_rgb,
                                                              mask=newmask,
                                                              device=device,
                                                              debug=args.debug)

        # Object combine kept objects
        device, nir_combined, nir_combinedmask = pcv.object_composition(
            img=nir_rgb,
            contours=nir_objects,
            hierarchy=nir_hierarchy,
            device=device,
            debug=args.debug)

        if args.writeimg:
            outfile = args.outdir + "/" + nir_filename

        # Analyze NIR signal data
        device, nhist_header, nhist_data, nir_imgs = pcv.analyze_NIR_intensity(
            img=nir_img,
            rgbimg=nir_rgb,
            mask=nir_combinedmask,
            bins=256,
            device=device,
            histplot=False,
            debug=args.debug,
            filename=outfile)

        # Analyze the shape of the plant contour from the NIR image
        device, nshape_header, nshape_data, nir_shape = pcv.analyze_object(
            img=nir_img,
            imgname=nir_filename,
            obj=nir_combined,
            mask=nir_combinedmask,
            device=device,
            debug=args.debug,
            filename=outfile)

        # Write NIR data to co-results file
        coresult = open(args.coresult, "a")
        coresult.write('\t'.join(map(str, nhist_header)) + "\n")
        coresult.write('\t'.join(map(str, nhist_data)) + "\n")
        for row in nir_imgs:
            coresult.write('\t'.join(map(str, row)) + "\n")
        coresult.write('\t'.join(map(str, nshape_header)) + "\n")
        coresult.write('\t'.join(map(str, nshape_data)) + "\n")
        coresult.write('\t'.join(map(str, nir_shape)) + "\n")
        coresult.close()
Example n. 41
0
def main():

    # Get options
    args = options()
    if args.debug:
        print("Analyzing your image dude...")
    # Read image
    device = 0
    img = cv2.imread(args.image, flags=0)
    path, img_name = os.path.split(args.image)
    # Read in image which is average of average of backgrounds
    img_bkgrd = cv2.imread("bkgrd_ave_z2500.png", flags=0)

    # NIR images for burnin2 are upside down. This may be fixed in later experiments
    img = ndimage.rotate(img, 180)
    img_bkgrd = ndimage.rotate(img_bkgrd, 180)

    # Subtract the image from the image background to make the plant more prominent
    device, bkg_sub_img = pcv.image_subtract(img, img_bkgrd, device, args.debug)
    if args.debug:
        pcv.plot_hist(bkg_sub_img, "bkg_sub_img")
    device, bkg_sub_thres_img = pcv.binary_threshold(bkg_sub_img, 145, 255, "dark", device, args.debug)
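    # NOTE: the two-sided cv2.inRange mask below overwrites the one-sided threshold above,
    # keeping background-subtracted intensities between 30 and 220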
    bkg_sub_thres_img = cv2.inRange(bkg_sub_img, 30, 220)
    if args.debug:
        cv2.imwrite("bkgrd_sub_thres.png", bkg_sub_thres_img)

    # device, bkg_sub_thres_img = pcv.binary_threshold_2_sided(img_bkgrd, 50, 190, device, args.debug)

    # if a region of interest is specified read it in
    roi = cv2.imread(args.roi)

    # Start by examining the distribution of pixel intensity values
    if args.debug:
        pcv.plot_hist(img, "hist_img")

    # Will an intensity transformation enhance the ability to isolate the object of interest by thresholding?
    device, he_img = pcv.HistEqualization(img, device, args.debug)
    if args.debug:
        pcv.plot_hist(he_img, "hist_img_he")

    # Laplace filtering (identify edges based on 2nd derivative)
    device, lp_img = pcv.laplace_filter(img, 1, 1, device, args.debug)
    if args.debug:
        pcv.plot_hist(lp_img, "hist_lp")

    # Laplacian image sharpening; this step will enhance the darkness of the detected edges
    device, lp_shrp_img = pcv.image_subtract(img, lp_img, device, args.debug)
    if args.debug:
        pcv.plot_hist(lp_shrp_img, "hist_lp_shrp")

    # Sobel filtering
    # 1st derivative Sobel filtering along the horizontal axis (kernel = 1, unscaled)
    device, sbx_img = pcv.sobel_filter(img, 1, 0, 1, 1, device, args.debug)
    if args.debug:
        pcv.plot_hist(sbx_img, "hist_sbx")

    # 1st derivative Sobel filtering along the vertical axis (kernel = 1, unscaled)
    device, sby_img = pcv.sobel_filter(img, 0, 1, 1, 1, device, args.debug)
    if args.debug:
        pcv.plot_hist(sby_img, "hist_sby")

    # Combine the effects of both x and y filters through matrix addition
    # This will capture edges identified within each plane and emphasize edges found in both images
    device, sb_img = pcv.image_add(sbx_img, sby_img, device, args.debug)
    if args.debug:
        pcv.plot_hist(sb_img, "hist_sb_comb_img")

    # Use a lowpass (blurring) filter to smooth sobel image
    device, mblur_img = pcv.median_blur(sb_img, 1, device, args.debug)
    device, mblur_invert_img = pcv.invert(mblur_img, device, args.debug)

    # combine the smoothed sobel image with the laplacian sharpened image
    # combines the best features of both methods as described in "Digital Image Processing" by Gonzalez and Woods pg. 169
    device, edge_shrp_img = pcv.image_add(mblur_invert_img, lp_shrp_img, device, args.debug)
    if args.debug:
        pcv.plot_hist(edge_shrp_img, "hist_edge_shrp_img")

    # Perform thresholding to generate a binary image
    device, tr_es_img = pcv.binary_threshold(edge_shrp_img, 145, 255, "dark", device, args.debug)

    # Prepare a few small kernels for morphological filtering
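    # Each small kernel is a 3x3 array with a two-pixel line segment anchored at the center:
    # kern1 points right, kern2 points left, kern3 points up, kern4 points down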
    kern = np.zeros((3, 3), dtype=np.uint8)
    kern1 = np.copy(kern)
    kern1[1, 1:3] = 1
    kern2 = np.copy(kern)
    kern2[1, 0:2] = 1
    kern3 = np.copy(kern)
    kern3[0:2, 1] = 1
    kern4 = np.copy(kern)
    kern4[1:3, 1] = 1

    # Prepare a larger kernel for dilation
    kern[1, 0:3] = 1
    kern[0:3, 1] = 1
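    # kern is now a 3x3 plus/cross-shaped structuring element (all of row 1 and column 1 set)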

    # Perform erosion with 4 small kernels
    device, e1_img = pcv.erode(tr_es_img, kern1, 1, device, args.debug)
    device, e2_img = pcv.erode(tr_es_img, kern2, 1, device, args.debug)
    device, e3_img = pcv.erode(tr_es_img, kern3, 1, device, args.debug)
    device, e4_img = pcv.erode(tr_es_img, kern4, 1, device, args.debug)

    # Combine eroded images
    device, c12_img = pcv.logical_or(e1_img, e2_img, device, args.debug)
    device, c123_img = pcv.logical_or(c12_img, e3_img, device, args.debug)
    device, c1234_img = pcv.logical_or(c123_img, e4_img, device, args.debug)

    # Perform dilation
    # device, dil_img = pcv.dilate(c1234_img, kern, 1, device, args.debug)
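    # The dilation step above is left commented out; instead the combined eroded edges are
    # OR'd with the background-subtraction mask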
    device, comb_img = pcv.logical_or(c1234_img, bkg_sub_thres_img, device, args.debug)

    # Get masked image
    # The dilated image may contain some pixels which are not plant
    device, masked_erd = pcv.apply_mask(img, comb_img, "black", device, args.debug)
    # device, masked_erd_dil = pcv.apply_mask(img, dil_img, 'black', device, args.debug)

    # Need to remove the edges of the image; we do that by generating a set of rectangles to mask the edges
    # img is (254 X 320)
    # mask for the bottom of the image
    device, box1_img, rect_contour1, hierarchy1 = pcv.rectangle_mask(img, (120, 184), (215, 252), device, args.debug)
    # mask for the left side of the image
    device, box2_img, rect_contour2, hierarchy2 = pcv.rectangle_mask(img, (1, 1), (85, 252), device, args.debug)
    # mask for the right side of the image
    device, box3_img, rect_contour3, hierarchy3 = pcv.rectangle_mask(img, (240, 1), (318, 252), device, args.debug)
    # mask the edges
    device, box4_img, rect_contour4, hierarchy4 = pcv.border_mask(img, (1, 1), (318, 252), device, args.debug)

    # combine boxes to filter the edges and car out of the photo
    device, bx12_img = pcv.logical_or(box1_img, box2_img, device, args.debug)
    device, bx123_img = pcv.logical_or(bx12_img, box3_img, device, args.debug)
    device, bx1234_img = pcv.logical_or(bx123_img, box4_img, device, args.debug)
    device, inv_bx1234_img = pcv.invert(bx1234_img, device, args.debug)

    # Make a ROI around the plant, include connected objects
    # Apply the box mask to the image
    # device, masked_img = pcv.apply_mask(masked_erd_dil, inv_bx1234_img, 'black', device, args.debug)

    device, edge_masked_img = pcv.apply_mask(masked_erd, inv_bx1234_img, "black", device, args.debug)

    device, roi_img, roi_contour, roi_hierarchy = pcv.rectangle_mask(img, (120, 75), (200, 184), device, args.debug)

    plant_objects, plant_hierarchy = cv2.findContours(edge_masked_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
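    # NOTE: the two-value unpacking above matches the OpenCV 2.4/4.x cv2.findContours signature;
    # OpenCV 3.x returns (image, contours, hierarchy) instead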
    device, roi_objects, hierarchy5, kept_mask, obj_area = pcv.roi_objects(
        img, "partial", roi_contour, roi_hierarchy, plant_objects, plant_hierarchy, device, args.debug
    )

    # Apply the box mask to the image
    # device, masked_img = pcv.apply_mask(masked_erd_dil, inv_bx1234_img, 'black', device, args.debug)
    device, masked_img = pcv.apply_mask(kept_mask, inv_bx1234_img, "black", device, args.debug)
    rgb = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    # Generate a binary to send to the analysis function
    device, mask = pcv.binary_threshold(masked_img, 1, 255, "light", device, args.debug)
    mask3d = np.copy(mask)
    plant_objects_2, plant_hierarchy_2 = cv2.findContours(mask3d, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    device, o, m = pcv.object_composition(rgb, roi_objects, hierarchy5, device, args.debug)

    ### Analysis ###
    device, hist_header, hist_data, h_norm = pcv.analyze_NIR_intensity(
        img, args.image, mask, 256, device, args.debug, args.outdir + "/" + img_name
    )
    device, shape_header, shape_data, ori_img = pcv.analyze_object(
        rgb, args.image, o, m, device, args.debug, args.outdir + "/" + img_name
    )

    pcv.print_results(args.image, hist_header, hist_data)
    pcv.print_results(args.image, shape_header, shape_data)
Example n. 42
0
def main():
  # Get options
  args = options()
  
  # Read image
  img, path, filename = pcv.readimage(args.image)
  brass_mask = cv2.imread(args.roi)
  
  # Pipeline step
  device = 0

  # Convert RGB to HSV and extract the Saturation channel
  device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)
  
  # Threshold the Saturation image
  device, s_thresh = pcv.binary_threshold(s, 49, 255, 'light', device, args.debug)
  
  # Median Filter
  device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)

  # Apply Mask (for vis images, mask_color=white)
  device, masked = pcv.apply_mask(img, s_mblur, 'white', device, args.debug)

  # Convert RGB to LAB and extract the Green-Magenta channel
  device, soil_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug)

  # Threshold the green-magenta image
  device, soila_thresh = pcv.binary_threshold(soil_a, 133, 255, 'light', device, args.debug)
  device, soila_cnt = pcv.binary_threshold(soil_a, 133, 255, 'light', device, args.debug)

  # Fill small objects
  device, soil_fill = pcv.fill(soila_thresh, soila_cnt, 200, device, args.debug)

  # Median Filter
  device, soil_mblur = pcv.median_blur(soil_fill, 13, device, args.debug)
  device, soil_cnt = pcv.median_blur(soil_fill, 13, device, args.debug)

  # Apply mask (for vis images, mask_color=white)
  device, masked2 = pcv.apply_mask(soil_mblur, soil_cnt, 'white', device, args.debug)

  # Identify objects
  device, id_objects,obj_hierarchy = pcv.find_objects(masked2, soil_cnt, device, args.debug)

  # Define ROI
  device, roi1, roi_hierarchy= pcv.define_roi(img,'rectangle', device, None, 'default', args.debug,True, 400,400,-400,-400)

  # Decide which objects to keep
  device,roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img,'partial',roi1,roi_hierarchy,id_objects,obj_hierarchy,device, args.debug)

  # Object combine kept objects
  device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)

############## Analysis ################
  
  # Find shape properties, output shape image (optional)
  device, shape_header,shape_data,shape_img = pcv.analyze_object(img, args.image, obj, mask, device,args.debug,args.outdir+'/'+filename)
   
  # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
  device, color_header,color_data,norm_slice= pcv.analyze_color(img, args.image, mask, 256, device, args.debug,'all','rgb','v','img',300,args.outdir+'/'+filename)
  
  # Output shape and color data
  pcv.print_results(args.image, shape_header, shape_data)
  pcv.print_results(args.image, color_header, color_data)
Example n. 43
0
def main():
  # Get options
  args = options()

  
  # Read image
  img = cv2.imread(args.image)
  roi = cv2.imread(args.roi)
  
  # Pipeline step
  device = 0

  # Convert RGB to HSV and extract the Saturation channel
  device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)
  
  # Threshold the Saturation image
  device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, args.debug)
  
  # Median Filter
  device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
  device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)
  
  # Fill small objects
  device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)
  
  # Convert RGB to LAB and extract the Blue channel
  device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)
  
  # Threshold the blue image
  device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug)
  device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug)
  
  # Fill small objects
  device, b_fill = pcv.fill(b_thresh, b_cnt, 150, device, args.debug)
  
  # Join the thresholded saturation and blue-yellow images
  device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug)
  
  # Apply Mask (for vis images, mask_color=white)
  device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)
  
  # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
  device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug)
  device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, args.debug)
  
  # Threshold the green-magenta and blue images
  device, maskeda_thresh = pcv.binary_threshold(masked_a, 122, 255, 'dark', device, args.debug)
  device, maskedb_thresh = pcv.binary_threshold(masked_b, 133, 255, 'light', device, args.debug)
  
  # Join the thresholded green-magenta and blue-yellow images (OR)
  device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)
  device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)
  
  # Fill small objects
  device, ab_fill = pcv.fill(ab, ab_cnt, 200, device, args.debug)
  
  # Apply mask (for vis images, mask_color=white)
  device, masked2 = pcv.apply_mask(masked, ab_fill, 'white', device, args.debug)
  
  # Identify objects
  device, id_objects,obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, args.debug)
  
  # Define ROI
  device, roi1, roi_hierarchy= pcv.define_roi(img,'rectangle', device, None, 'default', args.debug,False, 0, 0,0,0)
  
  # Decide which objects to keep
  device,roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img,'partial',roi1,roi_hierarchy,id_objects,obj_hierarchy,device, args.debug)
   
  # Object combine kept objects
  device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)
  
############## Analysis ################  
  
  # Find shape properties, output shape image (optional)
  device, shape_header,shape_data,shape_img = pcv.analyze_object(img, args.image, obj, mask, device,args.debug,True)
  
  # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
  device, color_header,color_data,norm_slice= pcv.analyze_color(img, args.image, kept_mask, 256, device, args.debug,'all','rgb','v')
  
  # Output shape and color data
  pcv.print_results(args.image, shape_header, shape_data)
  pcv.print_results(args.image, color_header, color_data)
Example n. 44
0
def main():

    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)

    # Pipeline step
    device = 0

    debug = args.debug

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 85, 255, 'light', device, debug)

    # Median Filter
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 160, 255, 'light', device,
                                            debug)
    device, b_cnt = pcv.binary_threshold(b, 160, 255, 'light', device, debug)

    # Fill small objects
    device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_or(s_mblur, b_cnt, device, debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 115, 255, 'dark',
                                                  device, debug)
    device, maskeda_thresh1 = pcv.binary_threshold(masked_a, 135, 255, 'light',
                                                   device, debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light',
                                                  device, debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    device, ab1 = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)
    device, ab = pcv.logical_or(maskeda_thresh1, ab1, device, debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh1, ab1, device, debug)

    # Fill small objects
    device, ab_fill = pcv.fill(ab, ab_cnt, 200, device, debug)

    # Apply mask (for vis images, mask_color=white)
    device, masked2 = pcv.apply_mask(masked, ab_fill, 'white', device, debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(
        masked2, ab_fill, device, debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device,
                                                 None, 'default', debug, True,
                                                 550, 0, -500, -1900)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device,
        debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3,
                                               device, debug)