Example #1

import cv2
import numpy as np
import plantcv as pcv

# options() parses the command-line arguments used below (args.image,
# args.debug, args.result); its definition is elided in this excerpt.


def main():
  # Get options
  args = options()
  path_mask = '/home/mfeldman/tester/mask/mask_brass_tv_z1_L0.png'
  
  # Read image
  img, path, filename = pcv.readimage(args.image)
  brass_mask = cv2.imread(path_mask)
  
  # Pipeline step
  device = 0

  # Mask pesky brass piece
  device, brass_mask1 = pcv.rgb2gray_hsv(brass_mask, 'v', device, args.debug)
  device, brass_thresh = pcv.binary_threshold(brass_mask1, 0, 255, 'light', device, args.debug)
  device, brass_inv = pcv.invert(brass_thresh, device, args.debug)
  device, masked_image = pcv.apply_mask(img, brass_inv, 'white', device, args.debug)
  
  # We can do a pretty good job of identifying the plant from the s channel
  device, s = pcv.rgb2gray_hsv(masked_image, 's', device, args.debug)
  s_thresh = cv2.inRange(s, 100, 190)

  # Let's blur the result a bit to remove unwanted noise
  s_blur = cv2.medianBlur(s_thresh, 5)
  
  # The a (green-magenta) channel is useful too
  device, a = pcv.rgb2gray_lab(masked_image, 'a', device, args.debug)
  a_thresh = cv2.inRange(a, 90, 120)
  a_blur = cv2.medianBlur(a_thresh, 5)
  
  # Combine the channel masks; bitwise AND keeps only pixels flagged in both
  plant_shape = cv2.bitwise_and(a_blur, s_blur)
  
  # Remove the unwanted regions on the sides of the image
  plant_shape[:,:330] = 0
  plant_shape[:,2100:] = 0
  plant_shape[:200,:] = 0
  
  # Remove remaining small points using erosion with a 3 x 3 kernel
  kernel = np.ones((3, 3), np.uint8)
  erosion = cv2.erode(plant_shape, kernel, iterations=1)

  # Dilate with the same kernel to fill in small holes
  dilation = cv2.dilate(erosion, kernel, iterations=1)
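
  # Note: erosion followed by dilation is a morphological opening, so the two
  # steps above could be collapsed into one call (a sketch, not part of the
  # original pipeline):
  # opened = cv2.morphologyEx(plant_shape, cv2.MORPH_OPEN, kernel)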
  
  # Apply the cleaned-up (dilated) mask to the image
  device, masked = pcv.apply_mask(masked_image, dilation, 'white', device, args.debug)
  
  # Identify objects
  device, id_objects, obj_hierarchy = pcv.find_objects(masked, dilation, device, args.debug)
  
  # Get ROI contours
  device, roi, roi_hierarchy = pcv.define_roi(masked_image, 'circle', device, None, 'default', args.debug, True, x_adj=0, y_adj=0, w_adj=0, h_adj=-1200)
  
  # ROI
  device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(masked_image, 'partial', roi, roi_hierarchy, id_objects, obj_hierarchy, device, args.debug)
  
  # Get object contour and masked object
  device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug)
  
  ############## Landmarks ################
  
  # Use the acute vertex fxn to estimate tip points
  device, points = pcv.acute_vertex(obj, 40, 40, 40, img, device, args.debug)
  boundary_line = 'NA'
  # Re-scale the tip points relative to the centroid and baseline
  device, points_r, centroid_r, bline_r = pcv.scale_features(obj, mask, points, boundary_line, device, args.debug)
  # Get number of points
  tips = len(points_r)
  # Use turgor_proxy fxn to get distances 
  device, vert_ave_c, hori_ave_c, euc_ave_c, ang_ave_c, vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b = pcv.turgor_proxy(points_r, centroid_r, bline_r, device, args.debug)
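  # (vert/hori/euc/ang are average vertical, horizontal, Euclidean, and angular
  # distances, measured to the centroid (_c) and to the baseline (_b))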
  # Get pseudomarkers along the y-axis
  device, left, right, center_h = pcv.y_axis_pseudolandmarks(obj, mask, img, device, args.debug)
  # Re-scale the points
  device, left_r, left_cr, left_br = pcv.scale_features(obj, mask, left, boundary_line, device, args.debug)
  device, right_r, right_cr, right_br = pcv.scale_features(obj, mask, right, boundary_line, device, args.debug)
  device, center_hr, center_hcr, center_hbr = pcv.scale_features(obj, mask, center_h, boundary_line, device, args.debug)
  
  # Get pseudomarkers along the x-axis
  device, top, bottom, center_v = pcv.x_axis_pseudolandmarks(obj, mask, img, device, args.debug)
  
  # Re-scale the points
  device, top_r, top_cr, top_br = pcv.scale_features(obj, mask, top, boundary_line, device, args.debug)
  device, bottom_r, bottom_cr, bottom_br = pcv.scale_features(obj, mask, bottom, boundary_line, device, args.debug)
  device, center_vr, center_vcr, center_vbr = pcv.scale_features(obj, mask, center_v, boundary_line, device, args.debug)
  
  # Convert the point arrays into lists of (x, y) tuples to match the scaled points
  points = [tuple(p) for p in points.reshape(len(points), 2).tolist()]
  left = [tuple(p) for p in left.reshape(20, 2).tolist()]
  right = [tuple(p) for p in right.reshape(20, 2).tolist()]
  center_h = [tuple(p) for p in center_h.reshape(20, 2).tolist()]
  top = [tuple(p) for p in top.reshape(20, 2).tolist()]
  bottom = [tuple(p) for p in bottom.reshape(20, 2).tolist()]
  center_v = [tuple(p) for p in center_v.reshape(20, 2).tolist()]
  
  # Store landmark data
  landmark_header=(
    'HEADER_LANDMARK',
    'tip_points',
    'tip_points_r',
    'centroid_r',
    'baseline_r',
    'tip_number',
    'vert_ave_c',
    'hori_ave_c',
    'euc_ave_c',
    'ang_ave_c',
    'vert_ave_b',
    'hori_ave_b',
    'euc_ave_b',
    'ang_ave_b',
    'left_lmk',
    'right_lmk',
    'center_h_lmk',
    'left_lmk_r',
    'right_lmk_r',
    'center_h_lmk_r',
    'top_lmk',
    'bottom_lmk',
    'center_v_lmk',
    'top_lmk_r',
    'bottom_lmk_r',
    'center_v_lmk_r'
    )

  landmark_data = (
    'LANDMARK_DATA',
    points,
    points_r,
    centroid_r,
    bline_r,
    tips,
    vert_ave_c,
    hori_ave_c,
    euc_ave_c,
    ang_ave_c,
    vert_ave_b,
    hori_ave_b,
    euc_ave_b,
    ang_ave_b,
    left,
    right,
    center_h,
    left_r,
    right_r,
    center_hr,
    top,
    bottom,
    center_v,
    top_r,
    bottom_r,
    center_vr
    )
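
  # For readability, the parallel header/data tuples can be zipped into a
  # dict (a sketch; not part of the original tab-separated output format):
  # landmark_record = dict(zip(landmark_header[1:], landmark_data[1:]))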

  ############## VIS Analysis ################

  outfile = False
  # if args.writeimg == True:
  #   outfile = args.outdir + "/" + filename
  
  # Find shape properties, output shape image (optional)
  device, shape_header, shape_data, shape_img = pcv.analyze_object(img, args.image, obj, mask, device, args.debug, outfile)
  
  # Shape properties relative to user boundary line (optional)
  device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(img, args.image, obj, mask, 330, device, args.debug, outfile)
  
  # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
  device, color_header, color_data, color_img = pcv.analyze_color(img, args.image, mask, 256, device, args.debug, None, 'v', 'img', 300, outfile)
  
  # Output shape and color data

  result = open(args.result, "a")
  result.write('\t'.join(map(str, shape_header)))
  result.write("\n")
  result.write('\t'.join(map(str, shape_data)))
  result.write("\n")
  for row in shape_img:
    result.write('\t'.join(map(str, row)))
    result.write("\n")
  result.write('\t'.join(map(str, color_header)))
  result.write("\n")
  result.write('\t'.join(map(str, color_data)))
  result.write("\n")
  result.write('\t'.join(map(str, boundary_header)))
  result.write("\n")
  result.write('\t'.join(map(str, boundary_data)))
  result.write("\n")
  result.write('\t'.join(map(str, boundary_img1)))
  result.write("\n")
  for row in color_img:
    result.write('\t'.join(map(str, row)))
    result.write("\n")
  result.write('\t'.join(map(str, landmark_header)))
  result.write("\n")
  result.write('\t'.join(map(str, landmark_data)))
  result.write("\n")
  result.close()


if __name__ == '__main__':
  main()
Example #2

import cv2
import numpy as np
import plantcv as pcv


# Tail of a background/foreground subtraction helper; the earlier
# segmentation steps are elided in this excerpt.
def back_for_ground_sub(img, debug):
    # ...
    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, debug=False)
    return device, ab_fill, mask, obj



# Read image
img, path, filename = pcv.readimage("yucca3.jpg")
img = cv2.resize(img, (0, 0), fx=0.2, fy=0.2)

device, ab_fill, masked2, obj = back_for_ground_sub(img, False)
device = 1
# obj, win, thresh, sep, img, device, debug=None

device, list_of_acute_points = pcv.acute_vertex(obj, 30, 90, 50, img, device, debug='plot')
# Segment image with watershed function
# device, watershed_header, watershed_data,analysis_images=pcv.watershed_segmentation(device, img,masked2,10,'./examples',debug=False)

# print(watershed_header)
#print(watershed_data)


# Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
# device, color_header, color_data, color_img = pcv.analyze_color(img, args.image, kept_mask, 256, device, False,'all', 'v', 'img', 300,False)
# plt.plot(shape_img)
# plt.show()
# cv2.imshow('shape',shape_img)
# cv2.imshow('color',color_img)
# cv2.imshow('boundary', boundary_img1)
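
# Visualize the detected tip points (a sketch; assumes a desktop session with
# GUI support -- not part of the original example):
vis = img.copy()
for pt in np.array(list_of_acute_points).reshape(-1, 2):
    cv2.circle(vis, (int(pt[0]), int(pt[1])), 5, (0, 0, 255), -1)
cv2.imshow('acute points', vis)
cv2.waitKey(0)
cv2.destroyAllWindows()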
Example #3

import cv2
import numpy as np
import plantcv as pcv

# options() is the same elided argument parser as in Example #1.


def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)

    # Pipeline step
    device = 0

    # Convert RGB to HSV and extract the Saturation channel
    device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)

    # Threshold the Saturation image
    device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device,
                                            args.debug)

    # Median filter (cv2.medianBlur requires an odd kernel size > 1; 5 is a
    # typical choice)
    device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
    device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)

    # Fill small objects
    #device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)

    # Convert RGB to LAB and extract the Blue channel
    device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)

    # Threshold the blue image
    device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device,
                                            args.debug)
    device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device,
                                         args.debug)

    # Fill small objects
    #device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug)

    # Join the thresholded saturation and blue-yellow images
    device, bs = pcv.logical_and(s_mblur, b_cnt, device, args.debug)

    # Apply Mask (for vis images, mask_color=white)
    device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug)
    device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, args.debug)

    # Threshold the green-magenta and blue images
    device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark',
                                                  device, args.debug)
    device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light',
                                                  device, args.debug)

    # Join the thresholded saturation and blue-yellow images (OR)
    device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device,
                                args.debug)
    device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device,
                                    args.debug)

    # Fill small noise
    device, ab_fill1 = pcv.fill(ab, ab_cnt, 2, device, args.debug)

    # Dilate to join small objects with larger ones
    device, ab_cnt1 = pcv.dilate(ab_fill1, 3, 2, device, args.debug)
    device, ab_cnt2 = pcv.dilate(ab_fill1, 3, 2, device, args.debug)

    # Fill dilated image mask
    device, ab_cnt3 = pcv.fill(ab_cnt2, ab_cnt1, 150, device, args.debug)
    img2 = np.copy(img)
    device, masked2 = pcv.apply_mask(img2, ab_cnt3, 'white', device,
                                     args.debug)

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, args.debug)
    device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, args.debug)

    # Threshold the green-magenta and blue images
    device, masked2a_thresh = pcv.binary_threshold(masked2_a, 127, 255, 'dark',
                                                   device, args.debug)
    device, masked2b_thresh = pcv.binary_threshold(masked2_b, 128, 255,
                                                   'light', device, args.debug)
    device, ab_fill = pcv.logical_or(masked2a_thresh, masked2b_thresh, device,
                                     args.debug)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(
        masked2, ab_fill, device, args.debug)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(masked2, 'rectangle', device,
                                                 None, 'default', args.debug,
                                                 True, 550, 10, -600, -907)

    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi1, roi_hierarchy, id_objects, obj_hierarchy, device,
        args.debug)

    # Object combine kept objects
    device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3,
                                               device, args.debug)

    ############## Landmarks    ################

    # Use the acute vertex fxn to estimate tip points
    device, points = pcv.acute_vertex(obj, 40, 40, 40, img, device, args.debug)
    boundary_line = 900
    # Re-scale the tip points relative to the centroid and baseline
    device, points_r, centroid_r, bline_r = pcv.scale_features(
        obj, mask, points, boundary_line, device, args.debug)
    # Get number of points
    tips = len(points_r)
    # Use turgor_proxy fxn to get distances
    device, vert_ave_c, hori_ave_c, euc_ave_c, ang_ave_c, vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b = pcv.turgor_proxy(
        points_r, centroid_r, bline_r, device, args.debug)
    # Get pseudomarkers along the y-axis
    device, left, right, center_h = pcv.y_axis_pseudolandmarks(
        obj, mask, img, device, args.debug)
    # Re-scale the points
    device, left_r, left_cr, left_br = pcv.scale_features(
        obj, mask, left, boundary_line, device, args.debug)
    device, right_r, right_cr, right_br = pcv.scale_features(
        obj, mask, right, boundary_line, device, args.debug)
    device, center_hr, center_hcr, center_hbr = pcv.scale_features(
        obj, mask, center_h, boundary_line, device, args.debug)

    # Get pseudomarkers along the x-axis
    device, top, bottom, center_v = pcv.x_axis_pseudolandmarks(
        obj, mask, img, device, args.debug)

    # Re-scale the points
    device, top_r, top_cr, top_br = pcv.scale_features(obj, mask, top,
                                                       boundary_line, device,
                                                       args.debug)
    device, bottom_r, bottom_cr, bottom_br = pcv.scale_features(
        obj, mask, bottom, boundary_line, device, args.debug)
    device, center_vr, center_vcr, center_vbr = pcv.scale_features(
        obj, mask, center_v, boundary_line, device, args.debug)

    # Convert the point arrays into lists of (x, y) tuples to match the scaled points
    points = [tuple(p) for p in points.reshape(len(points), 2).tolist()]
    left = [tuple(p) for p in left.reshape(20, 2).tolist()]
    right = [tuple(p) for p in right.reshape(20, 2).tolist()]
    center_h = [tuple(p) for p in center_h.reshape(20, 2).tolist()]
    top = [tuple(p) for p in top.reshape(20, 2).tolist()]
    bottom = [tuple(p) for p in bottom.reshape(20, 2).tolist()]
    center_v = [tuple(p) for p in center_v.reshape(20, 2).tolist()]

    # Store landmark data
    landmark_header = ('HEADER_LANDMARK', 'tip_points', 'tip_points_r',
                       'centroid_r', 'baseline_r', 'tip_number', 'vert_ave_c',
                       'hori_ave_c', 'euc_ave_c', 'ang_ave_c', 'vert_ave_b',
                       'hori_ave_b', 'euc_ave_b', 'ang_ave_b', 'left_lmk',
                       'right_lmk', 'center_h_lmk', 'left_lmk_r',
                       'right_lmk_r', 'center_h_lmk_r', 'top_lmk',
                       'bottom_lmk', 'center_v_lmk', 'top_lmk_r',
                       'bottom_lmk_r', 'center_v_lmk_r')

    landmark_data = ('LANDMARK_DATA', points, points_r, centroid_r, bline_r,
                     tips, vert_ave_c, hori_ave_c, euc_ave_c, ang_ave_c,
                     vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b, left, right,
                     center_h, left_r, right_r, center_hr, top, bottom,
                     center_v, top_r, bottom_r, center_vr)

    ############## VIS Analysis ################

    outfile = False
    # if args.writeimg == True:
    #     outfile = args.outdir + "/" + filename

    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, args.image, obj, mask, device, args.debug, outfile)

    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(
        img, args.image, obj, mask, 935, device, args.debug, outfile)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        img, args.image, mask, 256, device, args.debug, None, 'v', 'img', 300,
        outfile)

    # Output shape and color data

    result = open(args.result, "a")
    result.write('\t'.join(map(str, shape_header)))
    result.write("\n")
    result.write('\t'.join(map(str, shape_data)))
    result.write("\n")
    for row in shape_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.write('\t'.join(map(str, color_header)))
    result.write("\n")
    result.write('\t'.join(map(str, color_data)))
    result.write("\n")
    result.write('\t'.join(map(str, boundary_header)))
    result.write("\n")
    result.write('\t'.join(map(str, boundary_data)))
    result.write("\n")
    result.write('\t'.join(map(str, boundary_img1)))
    result.write("\n")
    for row in color_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.write('\t'.join(map(str, landmark_header)))
    result.write("\n")
    result.write('\t'.join(map(str, landmark_data)))
    result.write("\n")
    result.close()


def get_feature(img):
    print("step one")
    """
    Step one: background/foreground subtraction
    """
    # Get options
    args = options()
    debug = args.debug
    filename = args.result
    # Read image (img is passed in, so readimage is not needed here)
    # img, path, filename = pcv.readimage(args.image)
    # Pipeline step
    device = 0
    device, resize_img = pcv.resize(img, 0.4, 0.4, device, debug)
    # Classify the pixels as plant or background
    device, mask_img = pcv.naive_bayes_classifier(
        resize_img,
        pdf_file="/home/matthijs/PycharmProjects/SMR1/src/vision/ML_background/Trained_models/model_3/naive_bayes_pdfs.txt",
        device=0,
        debug='print')
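    # naive_bayes_classifier returns one binary mask per class defined in the
    # trained PDF file; mask_img.get('plant') below selects the plant mask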

    # Median Filter
    device, blur = pcv.median_blur(mask_img.get('plant'), 5, device, debug)
    print("step two")
    """
    Step one: Identifiy the objects, extract and filter the objects
    """

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(resize_img,
                                                         blur,
                                                         device,
                                                         debug=None)

    # Define ROI
    device, roi1, roi_hierarchy = pcv.define_roi(resize_img,
                                                 'rectangle',
                                                 device,
                                                 roi=True,
                                                 roi_input='default',
                                                 debug=True,
                                                 adjust=True,
                                                 x_adj=50,
                                                 y_adj=10,
                                                 w_adj=-100,
                                                 h_adj=0)
    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        resize_img, 'cutto', roi1, roi_hierarchy, id_objects, obj_hierarchy,
        device, debug)
    # print(roi_objects[0])
    # cv2.drawContours(resize_img, [roi_objects[0]], 0, (0, 255, 0), 3)
    # cv2.imshow("img",resize_img)
    # cv2.waitKey(0)
    i = 0
    object_list = []
    # a = np.array([[hierarchy3[0][0]]])
    hierarchy = []
    for cnt in roi_objects:
        area = cv2.contourArea(cnt)
        M = cv2.moments(cnt)
        if M["m10"] or M["m01"]:
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            # check if the location of the contour is between the constrains
            if cX > 200 and cX < 500 and cY > 25 and cY < 400:
                # cv2.circle(resize_img, (cX, cY), 5, (255, 0, 255), thickness=1, lineType=1, shift=0)
                # check if the size of the contour is bigger than 250
                if area > 450:
                    obj = np.vstack(roi_objects)
                    object_list.append(roi_objects[i])
                    hierarchy.append(hierarchy3[0][i])
                    print(i)
        i = i + 1
    a = np.array([hierarchy])
    # a = [[[-1,-1,-1,-1][-1,-1,-1,-1][-1,-1,-1,-1]]]
    # Object combine kept objects
    # device, obj, mask_2 = pcv.object_composition(resize_img, object_list, a, device, debug)

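    # Rebuild a binary mask from the kept contours: draw them filled on a blank
    # canvas, convert to grayscale, and threshold back to a 0/255 mask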
    mask_contours = np.zeros(resize_img.shape, np.uint8)
    cv2.drawContours(mask_contours, object_list, -1, (255, 255, 255), -1)
    gray_image = cv2.cvtColor(mask_contours, cv2.COLOR_BGR2GRAY)
    ret, mask_contours = cv2.threshold(gray_image, 127, 255, cv2.THRESH_BINARY)

    # Identify objects
    device, id_objects, obj_hierarchy = pcv.find_objects(resize_img,
                                                         mask_contours,
                                                         device,
                                                         debug=None)
    # Decide which objects to keep
    device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        resize_img,
        'cutto',
        roi1,
        roi_hierarchy,
        id_objects,
        obj_hierarchy,
        device,
        debug=None)
    # Object combine kept objects
    device, obj, mask = pcv.object_composition(resize_img,
                                               roi_objects,
                                               hierarchy3,
                                               device,
                                               debug=None)
    ############### Analysis ################
    masked = mask.copy()

    outfile = False
    if args.writeimg == True:
        outfile = args.outdir + "/" + filename

    print("step three")
    """
    Step three: Calculate all the features
    """
    # Find shape properties, output shape image (optional)
    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        resize_img, args.image, obj, mask, device, debug, filename="/file")
    print(shape_img)
    # Shape properties relative to user boundary line (optional)
    device, boundary_header, boundary_data, boundary_img1 = pcv.analyze_bound(
        resize_img, args.image, obj, mask, 1680, device)

    # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
    device, color_header, color_data, color_img = pcv.analyze_color(
        resize_img, args.image, kept_mask, 256, device, debug, 'all', 'v',
        'img', 300)
    mask_watershed = mask.copy()
    device, mask_watershed = pcv.erode(mask_watershed, 5, 1, device, debug)

    device, watershed_header, watershed_data, analysis_images = pcv.watershed_segmentation(
        device, resize_img, mask, 50, './examples', debug)
    device, list_of_acute_points = pcv.acute_vertex(obj, 30, 60, 10,
                                                    resize_img, device, debug)

    device, top, bottom, center_v = pcv.x_axis_pseudolandmarks(
        obj, mask, resize_img, device, debug)

    device, left, right, center_h = pcv.y_axis_pseudolandmarks(
        obj, mask, resize_img, device, debug)

    device, points_rescaled, centroid_rescaled, bottomline_rescaled = pcv.scale_features(
        obj, mask, list_of_acute_points, 225, device, debug)

    # Measure average distances between the rescaled tip points and the
    # centroid / baseline reference points
    device, vert_ave_c, hori_ave_c, euc_ave_c, ang_ave_c, vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b = pcv.landmark_reference_pt_dist(
        points_rescaled, centroid_rescaled, bottomline_rescaled, device, debug)

    landmark_header = [
        'HEADER_LANDMARK', 'tip_points', 'tip_points_r', 'centroid_r',
        'baseline_r', 'tip_number', 'vert_ave_c', 'hori_ave_c', 'euc_ave_c',
        'ang_ave_c', 'vert_ave_b', 'hori_ave_b', 'euc_ave_b', 'ang_ave_b',
        'left_lmk', 'right_lmk', 'center_h_lmk', 'left_lmk_r', 'right_lmk_r',
        'center_h_lmk_r', 'top_lmk', 'bottom_lmk', 'center_v_lmk', 'top_lmk_r',
        'bottom_lmk_r', 'center_v_lmk_r'
    ]
    landmark_data = [
        'LANDMARK_DATA', 0, 0, 0, 0,
        len(list_of_acute_points), vert_ave_c, hori_ave_c, euc_ave_c,
        ang_ave_c, vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0
    ]
    shape_data_train = list(shape_data)
    shape_data_train.pop(0)
    shape_data_train.pop(10)
    watershed_data_train = list(watershed_data)
    watershed_data_train.pop(0)
    landmark_data_train = [
        len(list_of_acute_points), vert_ave_c, hori_ave_c, euc_ave_c,
        ang_ave_c, vert_ave_b, hori_ave_b, euc_ave_b, ang_ave_b
    ]
    X = shape_data_train + watershed_data_train + landmark_data_train
    print("len X", len(X))
    print(X)
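
    # The combined vector X could feed a downstream model; a hypothetical
    # sketch using scikit-learn (clf, X_train, and y_train are assumptions,
    # not part of this pipeline):
    # from sklearn.ensemble import RandomForestClassifier
    # clf = RandomForestClassifier().fit(X_train, y_train)
    # prediction = clf.predict([X])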
    # Write shape and color data to the results file
    result = open(args.result, "a")
    result.write('\t'.join(map(str, shape_header)))
    result.write("\n")
    result.write('\t'.join(map(str, shape_data)))
    result.write("\n")
    result.write('\t'.join(map(str, watershed_header)))
    result.write("\n")
    result.write('\t'.join(map(str, watershed_data)))
    result.write("\n")
    result.write('\t'.join(map(str, landmark_header)))
    result.write("\n")
    result.write('\t'.join(map(str, landmark_data)))
    result.write("\n")
    for row in shape_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.write('\t'.join(map(str, color_header)))
    result.write("\n")
    result.write('\t'.join(map(str, color_data)))
    result.write("\n")
    for row in color_img:
        result.write('\t'.join(map(str, row)))
        result.write("\n")
    result.close()
    print("done")
    print(shape_img)
    return X, shape_img, masked
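

# Example usage of get_feature (hypothetical image file):
# img = cv2.imread("plant.jpg")
# X, shape_img, masked = get_feature(img)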