Example #1

import random

# KMeans, MAX_EPOCH, RAW_IMG_SIZE_X and RAW_IMG_SIZE_Y are module-level names
# in the original project and are not defined in this excerpt.
def kmeans_img(img):
  km = KMeans(MAX_EPOCH, False)

  pts = []
  for i in range(RAW_IMG_SIZE_X):
    for j in range(RAW_IMG_SIZE_Y):
      if (img[j][i] == 255):
        pts.append((i, j))

  km.add_data_pts(pts)

  # seed four clusters at random positions; random.randint is inclusive of its
  # upper bound, so cap the coordinates at size - 1 to stay inside the image
  for color in ("red", "green", "blue", "yellow"):
    km.add_cluster_pt([random.randint(0, RAW_IMG_SIZE_X - 1),
                       random.randint(0, RAW_IMG_SIZE_Y - 1)], color)

  km.run_alg()
  km.print_cluster_pts()
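
Both examples rely on a custom KMeans helper that is not shown in this excerpt. As a rough guide only, a minimal sketch of the interface implied by the calls above and in the next example — a constructor taking a maximum epoch count and a flag (assumed here to be a verbosity switch), add_data_pts, add_cluster_pt, run_alg, print_cluster_pts, the cluster_pts list, and get_farthest_x_and_y — could look like the following; the project's real class may differ.

import math


# Minimal sketch of the clustering helper the examples assume; only the
# interface used in this excerpt is mirrored, and the original project class
# may behave differently.
class KMeans:
  def __init__(self, max_epoch, verbose=False):
    self.max_epoch   = max_epoch
    self.verbose     = verbose   # assumed meaning of the second argument
    self.data_pts    = []        # list of (x, y) tuples
    self.cluster_pts = []        # list of [[cx, cy], name]
    self.assignments = []        # cluster index per data point

  def add_data_pts(self, pts):
    self.data_pts.extend(pts)

  def add_cluster_pt(self, pt, name):
    self.cluster_pts.append([list(pt), name])

  def run_alg(self):
    # plain Lloyd's iterations: assign each point to its nearest center,
    # then move each center to the mean of its assigned points
    for _ in range(self.max_epoch):
      self.assignments = [self._nearest(p) for p in self.data_pts]
      for ci, (center, _) in enumerate(self.cluster_pts):
        members = [p for p, a in zip(self.data_pts, self.assignments)
                   if a == ci]
        if members:
          center[0] = sum(p[0] for p in members) / len(members)
          center[1] = sum(p[1] for p in members) / len(members)

  def _nearest(self, p):
    return min(range(len(self.cluster_pts)),
               key=lambda ci: math.dist(p, self.cluster_pts[ci][0]))

  def get_farthest_x_and_y(self, ci):
    # largest per-axis distance from cluster ci's center to its members
    center, _ = self.cluster_pts[ci]
    members = [p for p, a in zip(self.data_pts, self.assignments) if a == ci]
    if not members:
      return (0, 0)
    return (max(abs(p[0] - center[0]) for p in members),
            max(abs(p[1] - center[1]) for p in members))

  def print_cluster_pts(self):
    for center, name in self.cluster_pts:
      print(name, center)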
Example #2

import random
from collections import Counter

import cv2
import imutils
import numpy as np

# MAX_EPOCH, MAX_NUM_CLUSTERS and the KMeans helper are module-level names in
# the original project and are not defined in this excerpt.
def center_contours(img):
  #trace_cnt = get_num_clusters_from_trace(img)

  img_orig = img.copy()
  RAW_IMG_SIZE_X = len(img[0])
  RAW_IMG_SIZE_Y = len(img)

  #img = adjust_gamma(img, 2.5)
  gray    = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
  #gray    = erode_img(img)
  blurred = cv2.GaussianBlur(gray, (3, 3), 0)

  # fixed threshold used for contour mapping
  thresh   = cv2.threshold(blurred, 195, 255, cv2.THRESH_BINARY)[1]
  # adaptive threshold used for finding number of cluster points
  thresh_a = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, \
                                 cv2.THRESH_BINARY, 1001, 35)
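  # (with ADAPTIVE_THRESH_MEAN_C, each pixel is compared against the mean of
  #  its 1001x1001 neighborhood minus the constant 35)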

  # get contours from the fixed threshold; cv2.findContours returns a 3-tuple
  # in OpenCV 3 and a 2-tuple in OpenCV 4, and imutils.grab_contours extracts
  # the contour list in either case
  cnts = cv2.findContours(thresh.copy(),
                          cv2.RETR_EXTERNAL,
                          cv2.CHAIN_APPROX_SIMPLE)
  cnts = imutils.grab_contours(cnts)

  # TODO: rework this too
  # at least 2 contours are needed, so if the fixed threshold does not yield
  # them, lower the threshold value in steps of 10 until it does (or until the
  # threshold range is exhausted)
  tv = 255
  while(len(cnts) < 2 and tv > 0):
    thresh = cv2.threshold(blurred, tv, 255, cv2.THRESH_BINARY)[1]
    cnts = cv2.findContours(thresh.copy(),
                            cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    tv -= 10

  # fall back to the adaptive threshold if the sweep above was exhausted
  if (tv < 0):
    #thresh = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, \
    #                               cv2.THRESH_BINARY, 1001, 35)

    cnts = cv2.findContours(thresh_a.copy(),
                            cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

  # get non-zero ratio
  num_nonzeros  = np.count_nonzero(thresh_a)
  nonzero_ratio = num_nonzeros / (RAW_IMG_SIZE_X * RAW_IMG_SIZE_Y)
  zero_ratio    = ((RAW_IMG_SIZE_X * RAW_IMG_SIZE_Y) - num_nonzeros) / \
                  (RAW_IMG_SIZE_X * RAW_IMG_SIZE_Y)
  #print(nonzero_ratio)

  #cv2.imshow("thresh", thresh)
  #cv2.waitKey(0)

  # TODO: need better way to automatically pick cluster amount
  # set number of clusters based on amount of contours
  num_clusters = int(MAX_NUM_CLUSTERS * nonzero_ratio)
  #num_clusters = int((np.log(nonzero_ratio) + 1) * MAX_NUM_CLUSTERS)
  if (num_clusters > len(cnts)):
    num_clusters = len(cnts) // 2

  if (num_clusters < 1):
    num_clusters = 1

  #print("num_clusters = ", num_clusters)

  # store list of contour center pts
  contour_pts = []
  contour_map = {}

  for c in cnts:
    area = cv2.contourArea(c)

    # drop contours that are too small (noise)
    if (area < 5):
      continue

    # compute the center of the contour
    M = cv2.moments(c)

    if (M["m00"] != 0):
      cX = int(M["m10"] / M["m00"])
      cY = int(M["m01"] / M["m00"])
    else:
      cX = 0
      cY = 0

    contour_pts.append((cX, cY))
    contour_map[(cX, cY)] = area

    # draw the contour and center of the shape on the image
    cv2.drawContours(img, [c], -1, (255, 0, 0), 2)
    cv2.circle(img, (cX, cY), 7, (255, 255, 255), -1)
    cv2.putText(img, "center", (cX - 20, cY - 20),
      cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

  # DEFAULT: if no contours survived, add a center point in the middle of
  # the image
  if (len(contour_pts) == 0):
    pt = (RAW_IMG_SIZE_X // 2, RAW_IMG_SIZE_Y // 2)
    contour_pts.append(pt)
    contour_map[pt] = RAW_IMG_SIZE_X * RAW_IMG_SIZE_Y

  # show image
  #cv2.imshow("contours", img)
  #cv2.waitKey(0)

  # perform kmeans on contour points
  km = KMeans(MAX_EPOCH, False)
  km.add_data_pts(contour_pts)

  # get the num_clusters largest contour points and store the points
  # as a list
  largest_contour_pts = \
    [pt for pt, _ in Counter(contour_map).most_common(num_clusters)]

  #print(len(contour_pts))

  if (num_clusters > len(contour_pts)):
    num_clusters = len(contour_pts)

  # create n clusters (based on non-zero pixel ratio)
  for n in range(num_clusters):
    # using random points
    #km.add_cluster_pt([random.randint(0, RAW_IMG_SIZE_X-1),
    #                   random.randint(0, RAW_IMG_SIZE_Y-1)], "CENTER"+str(n))

    # initialize initial cluster points to be the locations of the LARGEST
    # contours.
    try:             # TODO: revise this whole concept
      km.add_cluster_pt([largest_contour_pts[n][0],
                         largest_contour_pts[n][1]], "CENTER"+str(n))
    except IndexError:  # fall back to a random in-bounds point
      km.add_cluster_pt([random.randint(0, RAW_IMG_SIZE_X - 1),
                         random.randint(0, RAW_IMG_SIZE_Y - 1)], "CENTER"+str(n))

  km.run_alg()
  #km.print_cluster_pts()

  i = 0
  segment_imgs = []
  segment_pts  = []

  #print(len(km.cluster_pts))

  for cp in km.cluster_pts:
    farthest_pt = km.get_farthest_x_and_y(i)

    # if the farthest per-axis distance is 0, fall back to the full image
    # dimension so the crop rectangle is not degenerate
    fX = farthest_pt[0] if farthest_pt[0] != 0 else RAW_IMG_SIZE_X
    fY = farthest_pt[1] if farthest_pt[1] != 0 else RAW_IMG_SIZE_Y
    cN = cp[1]

    # skip clusters whose center is missing or is not a number (NaN)
    try:
      cX = int(cp[0][0])
      cY = int(cp[0][1])
    except (IndexError, ValueError):
      i += 1
      continue

    # draw the contour and center of the shape on the image
    cv2.circle(img, (cX, cY), 10, (255, 0, 0), -1)
    cv2.putText(img, cN, (cX - 20, cY - 20),
      cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)

    # coordinates for the top-left and bottom-right corners of the rectangle
    # centered at the cluster point
    pt1 = [int(cX-fX), int(cY-fY)]
    pt2 = [int(cX+fX), int(cY+fY)]

    # restrict coordinates to be within original images resolution
    if (pt1[0] < 0):
      pt1[0] = 0
    if (pt2[0] < 0):
      pt2[0] = 0
    if (pt1[1] < 0):
      pt1[1] = 0
    if (pt2[1] < 0):
      pt2[1] = 0

    if (pt1[0] >= RAW_IMG_SIZE_X):
      pt1[0] = RAW_IMG_SIZE_X-1
    if (pt1[1] >= RAW_IMG_SIZE_Y):
      pt1[1] = RAW_IMG_SIZE_Y-1
    if (pt2[0] >= RAW_IMG_SIZE_X):
      pt2[0] = RAW_IMG_SIZE_X-1
    if (pt2[1] >= RAW_IMG_SIZE_Y):
      pt2[1] = RAW_IMG_SIZE_Y-1

    pt1 = tuple(pt1)
    pt2 = tuple(pt2)
    #print(pt1, pt2)

    # draw on image
    cv2.rectangle(img, pt1, pt2, (0, 0, 255), 3)

    # crop original image and store in array
    crop_img = img_orig[pt1[1]:pt2[1], pt1[0]:pt2[0]]

    # exclude empty segments (zero-width or zero-height crops)
    if (crop_img.size != 0):
      segment_imgs.append(crop_img)
      segment_pts.append((pt1, pt2))

    #cv2.imshow("cropped", crop_img)
    #cv2.waitKey(0)

    i += 1

  # show image
  #cv2.imshow("Image", img)
  #cv2.waitKey(0)

  return segment_imgs, segment_pts
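
For context, a minimal driver for the second example — with a hypothetical input path and output naming, and assuming MAX_EPOCH, MAX_NUM_CLUSTERS, and the KMeans helper are defined in the same module — might look like this:

import cv2

# hypothetical input: any BGR image with bright regions on a dark background
img = cv2.imread("trace.png")

segment_imgs, segment_pts = center_contours(img)

# save each detected segment alongside its bounding coordinates
for idx, (crop, (pt1, pt2)) in enumerate(zip(segment_imgs, segment_pts)):
  print("segment", idx, "spans", pt1, "to", pt2)
  cv2.imwrite("segment_%d.png" % idx, crop)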