def main():
    """Run the object-recognition pipeline on the sample PGM images.

    Builds an object database from the two-object image, then recognizes
    those objects in the many-object image and displays the annotated
    overlay with matplotlib.
    """
    two_path = 'pgm/two_objects.pgm'
    many_path = 'pgm/many_objects_1.pgm'
    # many_path = 'pgm/many_objects_2.pgm'  # alternative test scene

    # Load both images in grayscale (flag 0 == cv2.IMREAD_GRAYSCALE).
    gray_in = cv2.imread(two_path, 0)
    many_in = cv2.imread(many_path, 0)

    # Convert the gray-level images to binary ones (threshold = 120).
    binary_in = p1(gray_in, 120)
    many_in = p1(many_in, 120)

    # Segment each binary image into labeled connected regions.
    two_labels = p2(binary_in)
    many_labels = p2(many_in)

    # Compute object attributes and build the model database from the
    # two-object image; its overlay image is not needed here.
    database_two, _ = p3(two_labels)

    # Recognize database objects in the many-object labeled image.
    overlays_out = p4(many_labels, database_two)

    # Display the annotated result.
    plt.axis("off")
    plt.imshow(overlays_out)
    plt.show()
def p4(labels_in, database_in):
    """Recognize known objects in a labeled image.

    Recomputes attributes for every region in ``labels_in`` (via ``p3``),
    compares each against the model objects in ``database_in`` using two
    scale/position-invariant features (minimum moment of inertia and
    roundness), and annotates recognized objects on the overlay image.

    Args:
        labels_in: labeled (segmented) image, as produced by ``p2``.
        database_in: list of object-attribute dicts, as produced by ``p3``.

    Returns:
        The overlay image with a circle and an orientation line drawn on
        every recognized object.
    """
    database_all, overlays_out = p3(labels_in)
    for obj in database_all:
        for target in database_in:
            # Compare the object with the recognition target: both
            # features must agree within 20% (relative to the candidate's
            # own values — assumes p3 yields nonzero min_moment/roundness;
            # TODO confirm).
            if abs(target['min_moment'] - obj['min_moment']) / obj['min_moment'] < 0.2 and \
                    abs(target['roundness'] - obj['roundness']) / obj['roundness'] < 0.2:
                # cv2 drawing primitives require integer coordinates.
                center = (int(obj['x_position']), int(obj['y_position']))
                radius = int(obj['radius'])
                color = 200

                # Mark the recognized object with a circle.
                cv2.circle(overlays_out, center, radius, color)

                # Draw a line from the centroid along the orientation axis
                # out to the circle's edge.
                theta = obj['orientation']
                (x, y) = center
                endpt = (int(x + radius * math.cos(theta)),
                         int(y + radius * math.sin(theta)))
                cv2.line(overlays_out, center, endpt, color)

                # One match is enough — stop so the identical marker is
                # not redrawn for every further matching target.
                break
    return overlays_out
def test_p3_example(self):
    """p3 on the sample file with argument (1, 3) should return 7."""
    result = p3('example.txt', (1, 3))
    self.assertEqual(result, 7)