Code example #1
    def test_calc_hist_in_area(self):
        ip = Image_operations()

        p0 = (0, 0)
        p1 = (10, 10)
        p2 = (8, 2)
        poly = [p0, p1, p2]

        img_black = np.zeros((20, 20, 3))
        cv2.fillConvexPoly(img_black, np.array(poly, 'int32'), (0, 0, 0))

        color = (255, 255, 255)
        img_color = np.zeros((20, 20, 3))
        cv2.fillConvexPoly(img_color, np.array(poly, 'int32'), color)

        hist_black = ip.calc_hist_in_area(img_black, poly)
        hist_color = ip.calc_hist_in_area(img_color, poly)

        match = ip.compare_hists(hist_black, hist_color)
        print match

        match = ip.compare_hists(hist_color, hist_color)
        print match

        match = ip.compare_hists(hist_black, hist_black)
        print match
Code example #2
    def test_calc_bounding_box(self):
        """The bounding box must span the extreme x/y coordinates of the points."""
        ops = Image_operations()
        points = [(0, 0), (1, 4), (2, -1), (3, 3)]
        x_lo, x_hi, y_lo, y_hi = ops.calc_bounding_box(points)

        self.assertEqual(x_lo, 0)
        self.assertEqual(x_hi, 3)
        self.assertEqual(y_lo, -1)
        self.assertEqual(y_hi, 4)
Code example #3
File: input_process.py — Project: AlexSchwank/CV_Game
    def __init__(self, create_windows=True, difficulty=1):
        """Set up output windows, image helpers, camera and shape collection.

        create_windows -- when False, skip creating the OpenCV windows.
        difficulty     -- stored on the instance; 2 selects the 5-edge
                          shape file, anything else the 4-edge one.
        """
        # create windows for output
        if create_windows:
            self.input_window = self.create_window("Input")
            self.shape_window = self.create_window("Shape")
            self.output_window = self.create_window("Processed Output")

        self.img_ops = Image_operations()
        self.mode = "ShapeMatch"
        self.color_calibration = None
        self.difficulty = difficulty

        self.shape_collection = Shape_Collection(difficulty)
        self.init_camera()

        # NOTE(review): open() here only probes that the pkl file exists;
        # deserialize() presumably locates the file on its own -- confirm.
        try:
            if difficulty == 2:
                filename = SHAPES_FILENAME_PARTS[0] + str(5) + SHAPES_FILENAME_PARTS[1]
            else:
                filename = SHAPES_FILENAME_PARTS[0] + str(4) + SHAPES_FILENAME_PARTS[1]
            with open(filename):
                self.shape_collection.deserialize()
        except IOError:
            # missing/unreadable file: regenerate the shapes, then load them
            print "No valid pkl found. Generating shapes..."
            self.shape_collection.serialize()
            self.shape_collection.deserialize()
Code example #4
    def test_compare_colors(self):
        """Color distance: zero for identical colors, growing with difference."""
        ops = Image_operations()

        yellow = (255, 255, 0)

        # identical colors -> distance exactly 0
        self.assertTrue(ops.compare_colors(yellow, yellow) == 0)

        # barely different -> tiny distance
        almost_yellow = (255, 254, 0)
        self.assertTrue(ops.compare_colors(yellow, almost_yellow) < 2)

        # moderately different -> mid-range distance
        dull_yellow = (230, 230, 10)
        self.assertTrue(5 < ops.compare_colors(dull_yellow, yellow) < 20)

        # very different -> large distance
        red = (255, 0, 0)
        self.assertTrue(ops.compare_colors(red, yellow) > 20)
Code example #5
File: cam_process.py — Project: AlexSchwank/CV_Game
    def calibrate(self):
        img_ops = Image_operations()
        calib_window = cv2.namedWindow("CALIBRATION", cv2.CV_WINDOW_AUTOSIZE)

        self.color_calibration = {"background": None,
                                  "color_triangle": None}

        # find middle
        stream, img = self.read_cam()
        print "shape", img.shape
        x = int(img.shape[1] * 0.5)
        y = int(img.shape[0] * 0.5)
        print x, y
        radius = 20
        detection_poly = [(x + radius, y), (x, y + radius), (x - radius, y), (x, y - radius)]

        mask = np.zeros(img.shape, dtype="uint8")
        cv2.fillConvexPoly(mask, np.array([detection_poly], 'int32'), (1, 1, 1))

        for requested_color in self.color_calibration.keys():
            img = None
            while True:
                stream, img = self.read_cam()
                img *= 0.5
                show_img = img * mask + img
                cv2.putText(show_img, requested_color, (100, 100), cv2.FONT_HERSHEY_PLAIN, 3.0, (255, 0, 0),
                            thickness=2)
                cv2.imshow(calib_window, show_img)

                key = cv2.waitKey(100)
                # enter = calc color now
                if key == 13:
                    break
                    # esc = cancel
                if key == 27:
                    cv2.destroyWindow(calib_window)
                    return

            self.color_calibration[requested_color] = img_ops.calc_hist_in_area(img, detection_poly)
Code example #6
File: input_process.py — Project: AlexSchwank/CV_Game
class Input_process():
    """Grabs camera frames, detects contours and matches them against a
    pre-generated collection of shapes (CV_Game input pipeline)."""

    def __init__(self, create_windows=True, difficulty=1):
        """Create output windows (optional), open the camera and load or
        (re)generate the serialized shape collection.

        create_windows -- when False, run without OpenCV output windows.
        difficulty     -- 2 selects the 5-edge shape file, anything else
                          the 4-edge one; also forwarded to Shape_Collection.
        """
        # create windows for output
        if create_windows:
            self.input_window = self.create_window("Input")
            self.shape_window = self.create_window("Shape")
            self.output_window = self.create_window("Processed Output")

        self.img_ops = Image_operations()
        self.mode = "ShapeMatch"
        self.color_calibration = None
        self.difficulty = difficulty

        self.shape_collection = Shape_Collection(difficulty)
        self.init_camera()

        # NOTE(review): the with-open only checks the file exists;
        # deserialize() presumably finds the file itself -- confirm.
        try:
            if difficulty == 2:
                filename = SHAPES_FILENAME_PARTS[0] + str(5) + SHAPES_FILENAME_PARTS[1]
            else:
                filename = SHAPES_FILENAME_PARTS[0] + str(4) + SHAPES_FILENAME_PARTS[1]
            with open(filename):
                self.shape_collection.deserialize()
        except IOError:
            # no usable pkl: generate the shapes, then load them back
            print "No valid pkl found. Generating shapes..."
            self.shape_collection.serialize()
            self.shape_collection.deserialize()

    def init_camera(self):
        self.cam = cv2.VideoCapture(CAMERA_PORT)
        self.cam.set(3, CAM_RES_WIDTH)
        self.cam.set(4, CAM_RES_HEIGHT)
        self.debug_img = None

        stream = None
        cnt = 0
        while not stream and cnt < 5:
            print "Waiting for camera..."
            sleep(0.5)
            cnt += 1
            stream, img = self.read_cam()

    def read_cam(self):
        if DEBUG:
            if self.debug_img is None:
                print os.path.join("Resources", "Images", "debug_input.png")
                self.debug_img = cv2.imread(os.path.join("Resources", "Images", "debug_input.png"))
            return True, self.debug_img.copy()
        else:
            return self.cam.read()

    def run(self):
        """Placeholder main loop; currently a no-op."""
        pass

    def angle_between(self, a, b):
        # based on http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python
        """Return the angle in DEGREES between the slope vectors of lines a and b.

        Fixes of the previous version:
        * the docstring claimed radians while np.degrees() was returned;
        * float rounding could push the dot product of (anti-)parallel unit
          vectors slightly outside [-1, 1], making arccos() return NaN; the
          old NaN work-around then reported 360.0 for anti-parallel vectors
          instead of the correct 180.0.  Clipping the dot product removes
          the NaN case entirely (equal vectors -> 0.0, opposite -> 180.0).
        """
        v0 = np.array([a.slope_v.x, a.slope_v.y], dtype=float)
        v1 = np.array([b.slope_v.x, b.slope_v.y], dtype=float)
        # normalize (inlined, so no external unit_vector() helper is needed)
        v0_u = v0 / np.linalg.norm(v0)
        v1_u = v1 / np.linalg.norm(v1)
        cos_angle = np.clip(np.dot(v0_u, v1_u), -1.0, 1.0)
        return np.degrees(np.arccos(cos_angle))

    def calc_angles(self, lines):
        """Return the angle between each line and its cyclic successor."""
        count = len(lines)
        return [self.angle_between(lines[i], lines[(i + 1) % count])
                for i in range(count)]

    def compare_both(self, angles0, angles1, length0, length1):
        """Cyclically compare two contours by corner angles AND edge lengths.

        Tries every rotation offset of (angles1, length1) against
        (angles0, length0); a rotation is abandoned as soon as a single
        angle or length difference exceeds its per-item threshold.

        Returns (True, best_start_offset) when the best rotation's summed
        differences stay below both MAX_*_SUM_FOR_MATCH caps, else
        (False, None).

        NOTE(review): a rotation only becomes the new best when BOTH its
        angle sum and its length sum improve simultaneously -- a rotation
        better in one metric but worse in the other is skipped.  Presumably
        intentional, but worth confirming.
        """
        best_angles_diff = None
        best_length_diff = None
        best_start = None
        count = len(angles0)

        for start in range(0, count):
            diff_angles_sum = 0
            diff_length_sum = 0
            found_too_big_angle = False
            found_too_big_length = False
            for i in range(0, count):
                # ANGLES: compare magnitudes only, ignoring sign
                diff_angles = abs(abs(angles0[i]) - abs(angles1[(i + start) % count]))
                if diff_angles < MAX_ANGLE_DIFF_FOR_MATCH:
                    diff_angles_sum += diff_angles
                else:
                    found_too_big_angle = True
                    break

                # LENGTH: plain absolute difference per edge
                diff_length = abs(length0[i] - length1[(i + start) % count])
                if diff_length < MAX_EDGE_DIFF_FOR_MATCH:
                    diff_length_sum += diff_length
                else:
                    found_too_big_length = True
                    break

            if not found_too_big_angle \
                and not found_too_big_length \
                and (best_angles_diff is None or diff_angles_sum < best_angles_diff) \
                and (best_length_diff is None or diff_length_sum < best_length_diff):
                best_angles_diff = diff_angles_sum
                best_length_diff = diff_length_sum
                best_start = start

        # accept only if both accumulated differences stay below their caps
        if best_angles_diff is not None and best_angles_diff < MAX_ANGLE_DIFF_SUM_FOR_MATCH \
            and best_length_diff is not None and best_length_diff < MAX_EDGE_DIFF_SUM_FOR_MATCH:
            return True, best_start
        else:
            return False, None

    def compare_angles(self, a, b, max_single_diff=20, max_total_diff=50):
        """Cyclically compare two angle sequences.

        Tries every rotation of ``a`` against ``b``; a rotation is rejected
        as soon as one angle magnitude differs by ``max_single_diff`` or
        more.  Returns (True, best_start) when the best rotation's summed
        difference stays below ``max_total_diff``, otherwise (False, None).

        Generalization: the thresholds used to be hard-coded (20 / 50);
        they are now keyword parameters with the same defaults, so existing
        callers behave identically.
        """
        best_diff = None
        best_start = None

        for start in range(0, len(a)):
            diff_sum = 0
            found_too_big_angle = False
            for i in range(0, len(a)):
                diff = abs(abs(b[i]) - abs(a[(i + start) % len(a)]))
                if diff < max_single_diff:
                    diff_sum += diff
                else:
                    found_too_big_angle = True
                    break
            if not found_too_big_angle and (best_diff is None or diff_sum < best_diff):
                best_diff = diff_sum
                best_start = start

        if best_diff is not None and best_diff < max_total_diff:
            return True, best_start
        else:
            return False, None

    def compare_length(self, shape, combined_lines):
        """Cyclically compare the normalized edge lengths of ``shape``
        against those of the detected ``combined_lines``.

        Both length lists are normalized to sum 1 before comparison.
        Returns True when the best rotation's accumulated difference is
        below MAX_EDGE_DIFF_SUM_FOR_MATCH.

        BUG FIX: the previous version aliased ``shape.lengths`` and
        normalized it IN PLACE, permanently mutating the shared shape
        object as a side effect of a comparison.  A copy is normalized
        instead; since the normalization is idempotent the returned
        result is unchanged.
        """
        # normalize a COPY so the shape's stored lengths stay untouched
        orig_length = list(shape.lengths)
        orig_sum = sum(orig_length)
        orig_length = [l / orig_sum for l in orig_length]

        # normalized lengths of the detected contour
        input_length = [abs(line.length()) for line in combined_lines]
        input_sum = sum(input_length)
        input_length = [l / input_sum for l in input_length]

        best_diff = None
        for start in range(0, len(orig_length)):
            diff_sum = 0
            found_too_big_length = False
            for i in range(0, len(input_length)):
                diff = abs(input_length[i] - orig_length[(i + start) % len(orig_length)])
                # per-edge tolerance: 10% of the total contour length
                if diff < 0.1:
                    diff_sum += diff
                else:
                    found_too_big_length = True
                    break
            if not found_too_big_length and (best_diff is None or diff_sum < best_diff):
                best_diff = diff_sum

        return best_diff is not None and best_diff < MAX_EDGE_DIFF_SUM_FOR_MATCH

    def process_shape_matching(self, img):
        """Detect polygons in ``img`` and match each one against the shape
        collection; optionally also verify the color triangle.

        Returns (annotated_image, matched_shape_indices, detection_status);
        matched contours are drawn green, unmatched ones red.
        """
        succ_status = DETECTION_NONE
        # NOTE(review): out_img is never used below -- presumably only the
        # polygon list matters here; confirm.
        out_img, polys = self.img_ops.exec_shape_match(img, show=True)
        combined_lines_img = img.copy()
        indices = set()

        for contour in polys:
            succ_status = max(DETECTION_SHAPE, succ_status)
            color = RED_BGR
            # build a closed loop of Line objects from consecutive contour points
            lines = []
            for i, point in enumerate(contour):
                lines.append(Line(Vector2D(x=point[0], y=point[1]),
                                  Vector2D(x=contour[(i + 1) % len(contour)][0], y=contour[(i + 1) % len(contour)][1])))

            combined_lines = self.combine_line(lines)

            # found the right count of edges.
            if len(combined_lines) == TOWER_EDGE_COUNT:
                index, start, succ_status = self.match_contour(combined_lines)
                if index is not None:
                    if MATCH_COLOR_TRIANGLE:
                        # difficulty 0 uses the simple triangle-vs-rest check
                        if self.difficulty == 0:
                            match_color, succ_status = self.match_color_triangle_simple(combined_lines_img, combined_lines,
                                                                             self.shape_collection.shapes[index], start)
                        else:
                            match_color, succ_status = self.match_color_triangle(combined_lines_img, combined_lines,
                                                                             self.shape_collection.shapes[index], start, self.difficulty)
                        if match_color:
                            color = GREEN_BGR
                            indices.add(index)
                        else:
                            color = RED_BGR
                    else:
                        color = GREEN_BGR
                        indices.add(index)

            for i, line in enumerate(combined_lines):
                line.draw(combined_lines_img, color)

        return combined_lines_img, indices, succ_status

    def process_img(self, img):
        out_img = None

        if self.mode == "Canny":
            out_img = self.img_ops.canny_filter(img)
        elif self.mode == "GoodFeaturesToTrack":
            out_img = self.img_ops.exec_goodFeaturesToTrack_filter(img)
        elif self.mode == "HoughCircles":
            out_img = self.img_ops.exec_houghCircles_filter(img)
        elif self.mode == "HoughLines":
            out_img, lines = self.img_ops.exec_houghLines_filter(img, show=True)
        elif self.mode == "HoughLinesP":
            out_img, lines = self.img_ops.exec_houghLinesP_filter(img, show=True)
        elif self.mode == "FindContours":
            out_img = self.img_ops.exec_find_contours(img)
        elif self.mode == "FindContoursMod":
            out_img = self.img_ops.exec_find_contours_mod(img)
        elif self.mode == "ContourArea":
            out_img = self.img_ops.exec_contour_area(img)
        elif self.mode == "ShapeMatch":
            out_img, polys = self.img_ops.exec_shape_match(img, self.shape_collection, show=True)

            cv2.imshow(self.shape_window, self.shape_collection.get_combined_img("original"))

            combined_lines_img = out_img.copy()

            for contour in polys:
                color = RED_BGR
                lines = []
                for i, point in enumerate(contour):
                    lines.append(Line(Vector2D(x=point[0], y=point[1]), Vector2D(x=contour[(i + 1) % len(contour)][0],
                                                                                 y=contour[(i + 1) % len(contour)][1])))

                combined_lines = self.combine_line(lines)

                # found the right count of edges.
                if len(combined_lines) == TOWER_EDGE_COUNT:
                    index, start, succ_status = self.match_contour(combined_lines)
                    if index is not None:
                        print "found match at", index
                        if MATCH_COLOR_TRIANGLE:
                            if self.match_color_triangle(img, combined_lines, self.shape_collection.shapes[index],
                                                         start):
                                print "found color triangle match"
                                color = GREEN_BGR
                            else:
                                print "found contour but no color match"
                                color = RED_BGR
                        else:
                            color = GREEN_BGR

                for i, line in enumerate(combined_lines):
                    line.draw(combined_lines_img, color)

            out_comb = self.combine_image_output([out_img, combined_lines_img])
            cv2.imshow(self.output_window, out_comb)
        if self.mode != "ShapeMatch":
            cv2.imshow(self.output_window, out_img)
        cv2.imshow(self.input_window, img)


    def save_last_images(self, images):
        # save last images as file

        directory = "../tmp/images/"
        if not os.path.exists(directory):
            os.makedirs(directory)

        for i, image in enumerate(images):
            filename = directory + str(i) + ".png"
            print "saving ", filename
            cv2.imwrite(filename, image)

    def create_window(self, name):
        """Create a named OpenCV window and return its name -- the handle
        later passed to cv2.imshow()."""
        cv2.namedWindow(name, cv2.CV_WINDOW_AUTOSIZE)
        return name

    def combine_image_output(self, imgs):
        """Tile the given images horizontally into one uint8 canvas.

        The first image dictates the canvas height.
        """
        height = imgs[0].shape[0]
        total_width = sum(img.shape[1] for img in imgs)

        canvas = np.zeros((height, total_width, 3), np.uint8)
        offset = 0
        for img in imgs:
            width = img.shape[1]
            canvas[:height, offset:offset + width, :] = img
            offset += width
        return canvas

    def process_key_input(self):
        key = cv2.waitKey(10)
        if key != -1:
            if key == ord('1'):
                self.mode = "Canny"
            elif key == ord('2'):
                self.mode = "GoodFeaturesToTrack"
            elif key == ord('3'):
                self.mode = "HoughCircles"
            elif key == ord('4'):
                self.mode = "HoughLines"
            elif key == ord('5'):
                self.mode = "HoughLinesP"
            elif key == ord('6'):
                self.mode = "FindContours"
            elif key == ord('7'):
                self.mode = "FindContoursMod"
            elif key == ord('8'):
                self.mode = "ShapeMatch"
            elif key == ord('9'):
                self.mode = "ShapeMatchExtended"
            elif key == ESC_KEY:
                return True

            print "Using now: " + self.mode
        return False


    def combine_line(self, lines):
        """Clean up a polygon's line list: drop lines shorter than 5px
        (gluing their start onto the following line) and merge similar
        lines until a fixed point is reached.  ``lines`` is modified in
        place and also returned.

        BUG FIX: the old code removed elements from ``lines`` while
        iterating it with enumerate(), which skips the element after every
        removal and computed the successor index against the already
        mutated list.  Iteration now runs over a snapshot and the
        successor is looked up in the current list; the merge loop
        restarts its scan after each removal instead of continuing to
        iterate a mutated list.
        """
        # remove too short lines, extending each one's successor backwards
        for line in list(lines):
            if line.length() < 5:
                idx = lines.index(line)
                lines[(idx + 1) % len(lines)].start = line.start
                lines.remove(line)

        # merge similar lines until no further merge happens
        finished = False
        while not finished:
            finished = True
            for line in lines:
                merged_other = None
                for other in lines:
                    if line is not other and line.is_similar(other):
                        line.merge(other)
                        merged_other = other
                        finished = False
                        break
                if merged_other is not None:
                    lines.remove(merged_other)
                    break

        return lines

    def shutdown(self):
        """Close all OpenCV windows."""
        cv2.destroyAllWindows()

    def match_contour(self, combined_lines):
        """Match a detected contour against every shape in the collection.

        Returns (shape_index, rotation_start, DETECTION_LENGTH) for the
        first matching shape, else (None, None, DETECTION_EDGECOUNT).

        BUG FIX: the previous version aliased ``shape.lengths`` and
        normalized it IN PLACE, mutating every shape on each call.  The
        normalization is idempotent so results are unchanged, but the side
        effect on shared shape objects is removed by normalizing a copy.
        """
        succ_status = DETECTION_EDGECOUNT
        combined_lines_angles = self.calc_angles(combined_lines)

        # normalized edge lengths of the detected contour (sum == 1)
        combined_lines_length = [abs(line.length()) for line in combined_lines]
        input_sum = sum(combined_lines_length)
        combined_lines_length = [l / input_sum for l in combined_lines_length]

        for i, shape in enumerate(self.shape_collection.shapes):
            # normalize a COPY of the stored lengths
            shape_length = list(shape.lengths)
            orig_sum = sum(shape_length)
            shape_length = [l / orig_sum for l in shape_length]

            match, start = self.compare_both(shape.angles, combined_lines_angles, shape_length, combined_lines_length)
            if match:
                return i, start, DETECTION_LENGTH
        return None, None, succ_status

    def match_color_triangle_simple(self, img, combined_lines, shape, start_index):
        p0 = combined_lines[(start_index + shape.triangle_start + 1) % len(combined_lines)].start
        p1 = combined_lines[(start_index + shape.triangle_start + 1) % len(combined_lines)].end
        p2 = combined_lines[(start_index + shape.triangle_start) % len(combined_lines)].start
        triangle = [(p0.x, p0.y), (p1.x, p1.y), (p2.x, p2.y)]

        poly = []
        for p in combined_lines:
            poly.append((p.start.x, p.start.y))

        hist_color_triangle = self.img_ops.calc_hist_in_area(img, triangle)
        hist_shape = self.img_ops.calc_hist_in_shape_outside_area(img, poly, triangle)
        color_match = self.img_ops.compare_hists(hist_color_triangle, hist_shape)
        print color_match
        if color_match < MAX_CORRELATION_COEFFICIENT:
            return True, DETECTION_COLOR_REST
        else:
            return False, DETECTION_LENGTH



    def match_color_triangle(self, img, combined_lines, shape, start_index, difficulty=0):
        """Calibrated color check: the triangle region must correlate with
        the calibrated 'color_triangle' histogram and the remainder with
        the calibrated 'background'.  Higher difficulty raises the
        required correlation.  Also paints the triangle blue into ``img``
        as visual feedback.

        Returns (matched, detection_status).
        """
        count = len(combined_lines)
        tri_line = combined_lines[(start_index + shape.triangle_start + 1) % count]
        prev_line = combined_lines[(start_index + shape.triangle_start) % count]
        triangle = [(tri_line.start.x, tri_line.start.y),
                    (tri_line.end.x, tri_line.end.y),
                    (prev_line.start.x, prev_line.start.y)]

        # difficulty scales the correlation threshold
        threshold = MIN_CORRELATION_COEFFICIENT * (difficulty + 1)

        hist_triangle = self.img_ops.calc_hist_in_area(img, triangle)
        triangle_match = self.img_ops.compare_hists(hist_triangle, self.color_calibration['color_triangle'])
        cv2.fillConvexPoly(img, np.array([triangle], 'int32'), (255, 0, 0))
        if not triangle_match > threshold:
            return False, DETECTION_LENGTH

        poly = [(line.start.x, line.start.y) for line in combined_lines]
        hist_rest = self.img_ops.calc_hist_in_shape_outside_area(img, poly, triangle)
        background_match = self.img_ops.compare_hists(hist_rest, self.color_calibration['background'])
        if background_match > threshold:
            return True, DETECTION_COLOR_REST
        return False, DETECTION_COLOR_TRIANGLE
Code example #7
File: recognizer.py — Project: AlexSchwank/CV_Game
    def calibrate_pygame(self, screen, player):
        font = pygame.font.SysFont("sourcecodepro", 32)
        img_ops = Image_operations()

        # find middle
        stream = None
        while not stream:
            stream, img = self.read_cam()
            print stream
        x = int(img.shape[1] * 0.5)
        y = int(img.shape[0] * 0.5)
        radius = 20
        detection_poly = [(x + radius, y), (x, y + radius), (x - radius, y), (x, y - radius)]

        mask = np.zeros(img.shape, dtype="uint8")
        cv2.fillConvexPoly(mask, np.array([detection_poly], 'int32'), (1, 1, 1))
        clock = pygame.time.Clock()

        for requested_color in player.color_calibration.keys():
            waiting_for_user = True
            color_taken = False
            while waiting_for_user:
                clock.tick(FPS)
                stream, img = self.read_cam()
                img *= 0.5
                show_img = img * mask + img
                cv2.polylines(show_img, np.array([detection_poly], 'int32'), True, RED_BGR, thickness=1)
                show_img = cv2.resize(show_img, WINDOW_SIZE)
                screen.blit(pygame.surfarray.make_surface(np.rot90(show_img[:, :, ::-1])), (0, 0))

                text0 = font.render("Please define your color for the ", True, GREEN_RGB)
                text0_rect = text0.get_rect()
                text0_rect.center = (WIDTH / 2, 200)
                screen.blit(text0, text0_rect)

                text1 = font.render(requested_color, True, GREEN_RGB)
                text1_rect = text1.get_rect()
                text1_rect.center = (WIDTH / 2, 240)
                screen.blit(text1, text1_rect)

                if color_taken:
                    text2 = font.render("Color already taken.", True, RED_RGB)
                    text2_rect = text2.get_rect()
                    text2_rect.center = (WIDTH / 2, 540)
                    screen.blit(text2, text2_rect)

                pygame.display.update(text0_rect)
                pygame.display.update(text1_rect)

                for event in pygame.event.get():
                    if event.type == pygame.KEYDOWN:
                        if event.key == pygame.K_RETURN:
                            player.color_calibration[requested_color] = img_ops.calc_hist_in_area(img, detection_poly)

                            #check if new color is already in calibrated colors
                            for other in player.color_calibration.keys():
                                if other != requested_color:
                                    h0 = player.color_calibration[requested_color]
                                    h1 = player.color_calibration[other]
                                    if h1 is None or img_ops.compare_hists(h0, h1) < MIN_CORRELATION_COEFFICIENT:
                                        waiting_for_user = False
                                    else:
                                        color_taken = True
                        if event.key == pygame.K_ESCAPE:
                            return -1
                    if event.type == pygame.QUIT:
                        return -1

                pygame.display.update()

        self.color_calibration = player.color_calibration
        return 0