Example #1
	def __init__(self, image_path, space, segment_radius = 3, segment_friction = 1, tolerance = 150):
		remove_base_points = False
		make_loop = False
		order_verts = False

		''' # not reduced
		layers = contours.find_contours(image_path)
		'''
		# reduced
		tmp = contours.find_contours(image_path)
		layers = []
		for layer in tmp:
			if remove_base_points:
				for pos in layer[:]:
					if pos[1] <= 2:
						layer.remove(pos)
				
			layer = pymunk.util.reduce_poly(layer, tolerance=tolerance)
			layers.append(layer)

		radius = segment_radius
		friction = segment_friction
		elasticity = 1

		self.points = []
		'''
		if order_verts:
			new_layers = []
			for layer in layers:
				new_layers.append(sorted(layer, key=lambda a: a[0]))
		else:
			new_layers = layers
		'''

		for layer in layers:
			i = 0
			for point in layer:
				if i == 0:
					self.points.append(point)

					# first point: close the outline by joining it to the last point
					seg = pymunk.Segment(space.static_body, point, layer[-1], radius)
					seg.group = 2
					seg.elasticity = elasticity
					seg.friction = friction
					space.add(seg)

				else:
					# connect this point to the previous point in the contour
					seg = pymunk.Segment(space.static_body, point, layer[i-1], radius)
					seg.group = 2
					seg.elasticity = elasticity
					seg.friction = friction
					space.add(seg)
	
					self.points.append(point)
				i += 1
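The snippet above is only the __init__ of a class that turns image contours into static pymunk segments; the class name and the contours helper module are not shown. A minimal usage sketch, assuming a hypothetical ContourTerrain class that owns this constructor and a standard pymunk Space:

# hypothetical usage sketch -- ContourTerrain and "level.png" are assumptions;
# pymunk.Space(), space.gravity and space.step() are standard pymunk API
import pymunk

space = pymunk.Space()
space.gravity = (0, -900)

# build static collision segments from the contours found in the level image
terrain = ContourTerrain("level.png", space, segment_radius=3, tolerance=150)

# step the simulation; dynamic bodies added to `space` now collide with the terrain
for _ in range(60):
    space.step(1 / 60.0)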
Example #2
	def create_polys(self, space, friction, elasticity):
		shapes = contours.find_contours(self.image_path)
		for shape in shapes:
			i = 0
			for point in shape:
				shape[i] = point[0]-(self.img_width/2)+.5, point[1]-(self.img_height/2)-.5
				i += 1

		for shape in shapes:
			tris = pymunk.util.triangulate(shape)
			for tri in tris:
				print(tri)
				poly = pymunk.Poly(space.static_body, tri)
				poly.group = 2
				poly.elasticity = elasticity
				poly.friction = friction
				space.add(poly)
Example #3
	def __init__(self, image_path):
		self.image_path = image_path
		img = pyglet.image.load(image_path)
		self.img_width, self.img_height = img.width, img.height

		tmp = contours.find_contours(image_path)
		self.layers = []
		
		# close each contour by repeating its first point
		for layer in tmp:
			layer.append(layer[0])
			self.layers.append(layer)

		# re-center contour coordinates on the image midpoint
		for layer in self.layers:
			i = 0
			for point in layer:
				layer[i] = point[0]-(self.img_width/2)+.5, point[1]-(self.img_height/2)-.5
				i += 1
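Examples #2 and #3 read like two methods of the same class: this constructor loads the image, closes each contour and re-centers its coordinates, while create_polys (Example #2) triangulates the contours into static convex polygons. A brief usage sketch, assuming a hypothetical ImageGeometry class that holds both methods:

# hypothetical usage sketch -- ImageGeometry and "terrain.png" are assumptions
import pymunk

space = pymunk.Space()
geometry = ImageGeometry("terrain.png")                     # load image, center contours
geometry.create_polys(space, friction=1.0, elasticity=0.5)  # add static Poly shapes to the space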
Example #4
    def create_polys(self, space, friction, elasticity):
        shapes = contours.find_contours(self.image_path)
        for shape in shapes:
            i = 0
            for point in shape:
                shape[i] = point[0] - (self.img_width / 2) + .5, point[1] - (
                    self.img_height / 2) - .5
                i += 1

        for shape in shapes:
            tris = pymunk.util.triangulate(shape)
            for tri in tris:
                print(tri)
                poly = pymunk.Poly(space.static_body, tri)
                poly.group = 2
                poly.elasticity = elasticity
                poly.friction = friction
                space.add(poly)
Example #5
    def __init__(self, image_path):
        self.image_path = image_path
        img = pyglet.image.load(image_path)
        self.img_width, self.img_height = img.width, img.height

        tmp = contours.find_contours(image_path)
        self.layers = []

        for layer in tmp:
            layer.append(layer[0])
            self.layers.append(layer)

        for layer in self.layers:
            i = 0
            for point in layer:
                layer[i] = point[0] - (self.img_width / 2) + .5, point[1] - (
                    self.img_height / 2) - .5
                i += 1
Example #6
def find_chars_water_extract(img, char_size, num_chars):
    char_size = (40, 60)  # hard-coded; overrides the char_size argument
    '''
    img has values in [0, 255], read in with imread(file, 0)
    '''
    num_chars = 4  # hard-coded; overrides the num_chars argument

    # Extract image contours
    contours0 = find_contours(img)
    contours = []

    # Remove contours which we predict don't have any char inside
    contours = [contour for contour in contours0 if contour.num_chars > 0]
    assert len(contours) > 0

    k = len(contours)

    # Sort frames by their horizontal position (from left to right)
    contours.sort(key=lambda contour: contour.bbox.left)

    # Now we create a 2D matrix where the element at index i,j
    # will be the probability of the frame i to contain j characters inside
    P = np.array([contour.num_chars_proba for contour in contours])

    # If n0, n1, ..., nk are the numbers of predicted characters inside each frame,
    # we look for the configuration with n0 + n1 + ... + nk = num_chars that
    # maximizes P[0, n0] * P[1, n1] * ... * P[k, nk]

    # All valid configurations (n0, n1, ..., nk) such that n0 + n1 + ... + nk = num_chars
    configs = filter(lambda x: np.sum(x) == num_chars,
                     combinations_with_replacement(range(0, num_chars + 1), k))
    configs = list(
        frozenset(
            chain.from_iterable(
                map(lambda config: permutations(config, k), configs))))

    configs = np.array(configs, dtype=np.uint8)
    nc = configs.shape[0]

    # Calculate a score function for each configuration
    scores = np.zeros([nc]).astype(np.float32)

    for i in range(0, nc):
        scores[i] = np.prod(P[np.arange(0, k), configs[i]])

    # Get the best configuration
    best_config = configs[np.argmax(scores)]

    # Split the contours into frames
    img = (img).astype(np.uint8)
    frames = []

    for k in range(0, k):
        if best_config[k] == 0:
            continue

        elif best_config[k] == 1:
            # The contour bounding box holds only 1 char.
            frame = contours[k].extract_bbox_pixels(img)
            frames.append(frame / 255)
        else:
            # Contour holds more than 1 char
            # Split it into multiple frames
            separators = find_char_separators(contours[k].bbox_mask,
                                              best_config[k])
            ### apply the drop-fall (water-drop) algorithm here ###
            con = 1 - contours[k].extract_bbox_pixels(img) / 255
            pic = dropFall(con, separators, best_config[k])
            extract = extract_contour(pic)
            frames.extend(extract)

    processed_frames = map(partial(process_image, dsize=char_size), frames)
    chars = np.stack([(frame.astype(np.float16))
                      for frame in processed_frames],
                     axis=0)
    chars = chars.astype(np.uint8)

    return chars
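The core of the function above is the configuration search: distribute num_chars characters over the k contours and keep the assignment whose per-contour probabilities multiply to the highest score. A standalone sketch of that step, using itertools.product instead of the combinations/permutations enumeration above, with a made-up probability matrix:

# self-contained sketch of the configuration search; P is invented for illustration
from itertools import product
import numpy as np

num_chars, k = 4, 2
# P[i, j] = predicted probability that contour i contains exactly j characters
P = np.array([[0.1, 0.2, 0.7, 0.0, 0.0],
              [0.0, 0.2, 0.6, 0.2, 0.0]])

# every way of assigning num_chars characters to k contours
configs = [c for c in product(range(num_chars + 1), repeat=k) if sum(c) == num_chars]
# score each assignment by the product of the per-contour probabilities
scores = [np.prod(P[np.arange(k), list(c)]) for c in configs]
print(configs[int(np.argmax(scores))])   # -> (2, 2) for the probabilities above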
Example #7
'''
def generate(operation_func, operation_name, *a):
    logging.basicConfig(filename=r"Logs" "\\" + operation_name + ".log",
                        filemode="w",
                        level=logging.INFO,
                        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s')

    for filename in os.listdir(r"Pics" "\\" + operation_name):
        if filename.endswith(".bmp"):
            img = Image.open(r"Pics" "\\" + operation_name + "\\" + filename)
            new_img = operation_func(img, *a)
            new_img.save(r"Results" "\\" + operation_name + r"\new_" + filename)
        else:
            print("There are no .bmp files")
'''

if __name__ == '__main__':
    img = Image.open('Img2.bmp')
    res_imgs = contours.find_contours(img)
    res_imgs[0].save('new_img2_ht.bmp')
    res_imgs[1].save('new_img2.bmp')
    res_imgs[2].save('new_img2_x.bmp')
    res_imgs[3].save('new_img2_y.bmp')
    res_imgs[4].save('new_img2_BIN.bmp')
    #generate(filter.median_filter_mid, 'Median Filter Mid', 5)
    #generate(filter.median_filter_edg, 't3', 5)
    #Image.open("Img6.jpg").save("Img6.bmp")
    #img = Image.open('Img6.bmp')
    #print(np.array(img).shape)
    #filter.median_filter_edg(img, 3).save('NEW.bmp')
Example #8
def find_chars(img, char_size, num_chars=5):
    '''
    Takes a grayscale image and detects the text characters in it
    (optimized for the captcha dataset).
    :param img: Grayscale image (2D array with float32 values in the range [0, 1])
    :param char_size: Size of each extracted character image
    :param num_chars: Number of characters to be extracted from the image
    :return: A 3D array of shape num_chars x n x m, where n and m are the
        shape of the output images (can be specified using char_size)
    '''
    # Extract image contours
    contours = find_contours(img)

    # Remove contours which we predict don't have any char inside
    contours = [contour for contour in contours if contour.num_chars > 0]
    assert len(contours) > 0

    k = len(contours)

    # Sort frames by their horizontal position (from left to right)
    contours.sort(key=lambda contour: contour.bbox.left)

    # Now we create a 2D matrix where the element at index i,j
    # will be the probability of the frame i to contain j characters inside
    P = np.array([contour.num_chars_proba for contour in contours])

    # If n0, n1, ..., nk are the numbers of predicted characters inside each frame,
    # we look for the configuration with n0 + n1 + ... + nk = num_chars that
    # maximizes P[0, n0] * P[1, n1] * ... * P[k, nk]

    # All valid configurations (n0, n1, ..., nk) such that n0 + n1 + ... + nk = num_chars
    configs = filter(lambda x: np.sum(x) == num_chars,
                     combinations_with_replacement(range(0, num_chars + 1), k))
    configs = list(
        frozenset(
            chain.from_iterable(
                map(lambda config: permutations(config, k), configs))))
    assert len(configs) > 0

    configs = np.array(configs, dtype=np.uint8)
    nc = configs.shape[0]

    # Calculate a score function for each configuration
    scores = np.zeros([nc]).astype(np.float32)

    for i in range(0, nc):
        scores[i] = np.prod(P[np.arange(0, k), configs[i]])

    # Get the best configuration
    best_config = configs[np.argmax(scores)]

    # Split the contours into frames
    img = (img * 255).astype(np.uint8)
    frames = []

    for k in range(0, k):
        if best_config[k] == 0:
            continue

        elif best_config[k] == 1:
            # The contour bounding box holds only 1 char.
            frame = contours[k].extract_bbox_pixels(img)
            frames.append(frame)
        else:
            # Contour holds more than 1 char
            # Split it into multiple frames
            separators = find_char_separators(contours[k].bbox_mask,
                                              best_config[k])
            splits = split_array(contours[k].extract_bbox_pixels(img),
                                 separators,
                                 axis=1)
            frames.extend(splits)

    processed_frames = map(partial(process_image, dsize=char_size), frames)

    return np.stack([(frame.astype(np.float32) / 255)
                     for frame in processed_frames],
                    axis=0)
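For reference, a minimal call sketch for find_chars, assuming a grayscale image already scaled to [0, 1] as the docstring requires; the file name is made up:

# hypothetical usage sketch -- "captcha.png" is an assumption; find_chars expects
# a float grayscale image in [0, 1] and returns one normalized image per character
import cv2 as cv
import numpy as np

img = cv.imread("captcha.png", cv.IMREAD_GRAYSCALE).astype(np.float32) / 255
chars = find_chars(img, char_size=(40, 60), num_chars=5)
print(chars.shape)   # num_chars x n x m, one image per detected character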
if __name__ == '__main__':
    from input import InputFlow
    from dataset import CaptchaDataset
    from utils import waitKey
    from contours import find_contours
    import cv2 as cv
    import numpy as np

    dataset = CaptchaDataset()
    batch_generator = iter(InputFlow(dataset.X, dataset.y, batch_size=1))

    while True:
        X_batch, y_batch = next(batch_generator)
        img = X_batch[0, :, :, 0]
        contours = find_contours(img)

        for contour in contours:
            cv.imshow(
                'Contour',
                contour.draw_bbox(
                    cv.cvtColor((img * 255).astype(np.uint8),
                                cv.COLOR_GRAY2RGB)))

            while True:
                ch = waitKey()
                if ch == 'q':
                    exit()
                elif ord(ch) >= ord('0') and ord(ch) <= ord('5'):
                    num_chars = ord(ch) - ord('0')
                    attrs = contour.properties