Example #1
def isolate_object_of_interest(points, image, cam_matrix, trans, rot):
    segmented_image, found_box = segment_image(image)
    if not found_box:
        return None, found_box
    points = segment_pointcloud(points, segmented_image, cam_matrix, trans,
                                rot)
    return points, found_box
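segment_pointcloud itself is not reproduced in any of these examples. As a rough sketch of the usual technique (not any of these projects' actual code), assuming points is an N x 3 NumPy array in the cloud frame, rot and trans are the rotation matrix and translation vector into the camera frame, and the mask is nonzero on the object: project each point through the pinhole model and keep the ones that land on a mask pixel.

import numpy as np

def segment_pointcloud(points, segmented_image, cam_matrix, trans, rot):
    # Transform the points from the cloud frame into the camera frame
    R = np.asarray(rot).reshape(3, 3)
    t = np.asarray(trans).reshape(3, 1)
    cam = R @ points[:, :3].T + t  # 3 x N

    # Pinhole projection into pixel coordinates
    uv = cam_matrix @ cam
    u = np.round(uv[0] / uv[2]).astype(int)
    v = np.round(uv[1] / uv[2]).astype(int)

    # Keep points in front of the camera that land on a nonzero mask pixel
    h, w = segmented_image.shape[:2]
    ok = (cam[2] > 0) & (u >= 0) & (u < w) & (v >= 0) & (v < h)
    keep = np.zeros(len(points), dtype=bool)
    keep[ok] = segmented_image[v[ok], u[ok]] > 0
    return points[keep]

The variants below differ mainly in what they pass as the mask (one binary image, or one mask per detected object) and in what they return alongside the filtered cloud.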
Example #2
import numpy as np

def isolate_objects_of_interest(points, image, cam_matrix, trans, rot):
    segmented_image = segment_image(image)
    cloudList = []
    for mask in segmented_image:
        cloud = segment_pointcloud(np.copy(points), mask, cam_matrix, trans,
                                   rot)
        cloudList.append(cloud)
    return cloudList
Example #3
import pickle

import cv2
import numpy as np
from keras.models import load_model  # assumed: a Keras model, per load_model/predict

def solve_captcha(image):
    # Load up the model labels
    with open(MODEL_LABELS_FILENAME, "rb") as f:
        lb = pickle.load(f)

    # Load up the trained model
    model = load_model(MODEL_FILENAME)

    # We do not know the number of characters here
    chars = segment_image(image, -1)

    if len(chars) > 0:
        # Stack the grayscale input into 3 channels so we can draw in color
        output = cv2.merge([image] * 3)
        predictions = []

        # Loop over the characters
        for bounding_box in chars:
            x, y, w, h = bounding_box
            # Extract the char from the input image
            char_image = image[max(0, y - 2):y + h + 2, max(0, x - 2):x + w + 2]
            # Re-size the letter image to 60x60 pixels to match training data
            char_image = resize_to_fit(char_image, 60, 60)

            if char_image is not None:
                # Expand dimensions
                char_image = np.expand_dims(char_image, axis=2)
                char_image = np.expand_dims(char_image, axis=0)

                # Use the model to make a prediction
                prediction = model.predict(char_image)

                # Convert the encoded prediction to specific label
                label = lb.inverse_transform(prediction)[0]
                predictions.append(label)

                # draw the prediction on the output image
                cv2.rectangle(output, (x - 2, y - 2), (x + w + 4, y + h + 4),
                              (0, 255, 0), 1)
                cv2.putText(output, label, (x - 5, y - 5),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2)

        # Print captcha
        captcha_text = "".join(predictions)
        print("CAPTCHA is: {}".format(captcha_text))

        return output, captcha_text

    return None, ''
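resize_to_fit is a helper none of the examples define. A minimal sketch of the usual approach, assuming a grayscale crop and a white captcha background (both assumptions): scale the larger side to the target, pad out to 60x60, and return None for empty crops so the check above still works.

import cv2

def resize_to_fit(image, width, height):
    # Border crops can come out empty; mirror the None check used above
    if image is None or image.size == 0:
        return None

    # Scale the larger side to the target, preserving aspect ratio
    h, w = image.shape[:2]
    if w >= h:
        image = cv2.resize(image, (width, max(1, int(h * width / w))))
    else:
        image = cv2.resize(image, (max(1, int(w * height / h)), height))

    # Pad with white (assumed background) so the output is exactly width x height
    h, w = image.shape[:2]
    top, bottom = (height - h) // 2, height - h - (height - h) // 2
    left, right = (width - w) // 2, width - w - (width - w) // 2
    image = cv2.copyMakeBorder(image, top, bottom, left, right,
                               cv2.BORDER_CONSTANT, value=255)
    # Final resize guards against any off-by-one from integer rounding
    return cv2.resize(image, (width, height))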
Example #4
import glob
import os

import cv2

def segmentation():
    print("Segmenting data")
    image_qty = 0
    char_qty = 0
    image_files = glob.glob(os.path.join(TRAINING_FOLDER, "*"))
    counts = {}

    for (i, captcha_file) in enumerate(image_files):
        print("[INFO] processing image {}/{}".format(i + 1, len(image_files)))

        img = load_image(captcha_file)

        # Get the base filename without the extension ("0147.jpeg" -> "0147")
        filename = os.path.basename(captcha_file)
        captcha_text = os.path.splitext(filename)[0]

        # The label text tells us how many characters to segment
        chars = segment_image(img, len(captcha_text))

        if len(chars) > 0:
            image_qty += 1
            char_qty += len(chars)

            # Save out each letter as a single image
            for letter_bounding_box, letter_text in zip(chars, captcha_text):
                # Grab the coordinates of the letter in the image
                x, y, w, h = letter_bounding_box

                # Extract the letter from the original image with a 2-pixel margin around the edge
                letter_image = img[max(0, y - 2):y + h + 2, max(0, x - 2):x + w + 2]

                # Get the folder to save the image in
                save_path = os.path.join(OUTPUT_FOLDER, letter_text)

                # if the output directory does not exist, create it
                if not os.path.exists(save_path):
                    os.makedirs(save_path)

                # write the letter image to a file
                count = counts.get(letter_text, 1)
                p = os.path.join(save_path,
                                 "{}.png".format(str(count).zfill(6)))
                cv2.imwrite(p, letter_image)

                # increment the count for the current key
                counts[letter_text] = count + 1

    print(image_qty, char_qty)
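load_image is also project code that is not shown here. Since solve_captcha in Example #3 merges the image into three channels before drawing on it, a grayscale read is a plausible stand-in (an assumption, not the project's actual helper):

import cv2

def load_image(path):
    # Grayscale read, inferred from how Example #3 treats the image
    return cv2.imread(path, cv2.IMREAD_GRAYSCALE)

The per-letter folders this script produces (OUTPUT_FOLDER/<letter>/000001.png, ...) are the training layout the model in Example #3 would be fit on.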
Example #5
def isolate_object_of_interest(points, image, cam_matrix, trans, rot):
    segmented_image = segment_image(image)
    points = segment_pointcloud(points, segmented_image, cam_matrix, trans, rot)
    return points
Example #6
File: main.py Project: Qube5/Molkky
def isolate_object_of_interest(points, image, cam_matrix, trans, rot):
    segmented_image, info = segment_image(image, num_pins + 1)
    segmented_image_binary = segmented_image_to_binary(segmented_image)
    points = segment_pointcloud(points, segmented_image_binary, cam_matrix, trans, rot)
    return points, segmented_image, info
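segmented_image_to_binary is likewise left to the project. Assuming the segmenter returns an integer label image and that any nonzero label counts as foreground (both assumptions), a one-line stand-in:

import numpy as np

def segmented_image_to_binary(segmented_image):
    # Nonzero label -> foreground; a hypothetical convention for this sketch
    return (np.asarray(segmented_image) > 0).astype(np.uint8)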
Example #7
	with open(parameter_file, 'r') as yml_file:
		params = yaml.safe_load(yml_file)
	sigma = params["segmentation_params"]["sigma"]
	min_size = params["segmentation_params"]["min_size"]
	th = params["segmentation_params"]["th"]
	print('Using Found Parameters: sigma={}, min_size={}, th={}'.format(sigma, min_size, th))

try:
	im = Image.open(image_name)
except IOError:
	print("Image name {} is not a valid image format.".format(image_name))
	sys.exit(2)
im = im.convert('RGB')
im = np.array(im)

seg_image, pixel_class, disjoint_set = image_segmentation.segment_image(im, sigma, th, min_size)

print("Getting simularity set...")
sim_set = simularity_set.simularity_set(region_image=pixel_class, image=im,
										disjoint_set=disjoint_set, seg_image=seg_image)
print("Got simularity set.")

print('Getting the region information...')
bounding_boxes = set()
# Get all of the initial regions
for region, bbox in sim_set.bounding_box.items():
	bounding_boxes.add(bbox)

# Get the rest of the regions as you combine the best ones
while sim_set.disjoint_set.num_sets > 1:
	reg_a, reg_b = sim_set.get_most_similar_regions()