Code example #1
    def __init__(self, start=None, end=None):
        # avoid mutable default arguments: a shared default Point instance
        # would be reused by every Line created without arguments
        self.start = start if start is not None else pi_point.Point()
        self.end = end if end is not None else pi_point.Point()
        self.ref_point = None

        self.line = []

        self.m = 0.0
        self.angle = 0.0
        self.length = 0.0

        self.compute()
Code example #2
    def translate(self, x, y, z=0):
        translation_vector = pi_point.Point(x=x, y=y, z=z)

        self.start = self.start + translation_vector
        self.end = self.end + translation_vector

        return self.compute()
Code example #3
	def __rotate_coord(self, coord, angle, about_point):
		# rotation is in the x-y plane; the z coordinate is not used
		c1 = about_point.x
		c2 = about_point.y

		x_old = coord.x
		y_old = coord.y

		# pivot point as a column vector
		x = np.matrix([
			[c1],
			[c2]
			])

		# 2D rotation matrix
		y = np.matrix([
			[math.cos(angle), -1.0 * math.sin(angle)],
			[math.sin(angle),        math.cos(angle)]
			])

		# coordinate relative to the pivot, as a column vector
		z = np.matrix([
			[x_old - c1],
			[y_old - c2]
			])

		# rotate about the pivot, then translate back
		a = x + (y * z)

		return pi_point.Point(x=a[0, 0], y=a[1, 0])
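For reference, the pivot-then-rotate computation above can be checked in isolation. The snippet below is a minimal sketch assuming only numpy and math (rotate_xy is an illustrative name, not part of the project): rotating (1, 0) by 90 degrees about the origin should give approximately (0, 1).

import math
import numpy as np

def rotate_xy(px, py, angle, cx=0.0, cy=0.0):
    # new = pivot + R(angle) * (old - pivot)
    pivot = np.array([[cx], [cy]])
    rot = np.array([[math.cos(angle), -math.sin(angle)],
                    [math.sin(angle),  math.cos(angle)]])
    rel = np.array([[px - cx], [py - cy]])
    out = pivot + rot @ rel
    return out[0, 0], out[1, 0]

print(rotate_xy(1.0, 0.0, math.radians(90)))  # approximately (0.0, 1.0)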
Code example #4
    def get_intersecting_point(self, other):
        x1 = self.start.x
        x2 = self.end.x
        y1 = self.start.y
        y2 = self.end.y

        x3 = other.start.x
        x4 = other.end.x
        y3 = other.start.y
        y4 = other.end.y

        a = np.matrix([[x1, y1], [x2, y2]])

        b = np.matrix([[x1, 1], [x2, 1]])

        c = np.matrix([[x3, y3], [x4, y4]])

        d = np.matrix([[x3, 1], [x4, 1]])

        e = np.matrix([[y1, 1], [y2, 1]])

        f = np.matrix([[y3, 1], [y4, 1]])

        A = np.matrix([[np.linalg.det(a), np.linalg.det(b)],
                       [np.linalg.det(c), np.linalg.det(d)]])

        B = np.matrix([[np.linalg.det(b), np.linalg.det(e)],
                       [np.linalg.det(d), np.linalg.det(f)]])

        C = np.matrix([[np.linalg.det(a), np.linalg.det(e)],
                       [np.linalg.det(c), np.linalg.det(f)]])

        x = y = 0
        with warnings.catch_warnings(record=True) as w:
            # det(B) is zero when the segments are parallel; the resulting numpy
            # divide warning is captured here instead of being printed
            x = np.linalg.det(A) / np.linalg.det(B)
            y = np.linalg.det(C) / np.linalg.det(B)

        point = pi_point.Point(x, y)
        if not (self.is_on_line(point) and other.is_on_line(point)):
            new_point = pi_point.Point()
            new_point.set_inf()

            return new_point

        return point
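The intersection above is the determinant form of the line-line intersection formula. A minimal standalone check in plain Python (intersect_lines is an illustrative name, not part of the project): the lines through (0, 0)-(2, 2) and (0, 2)-(2, 0) should meet at (1, 1).

def intersect_lines(p1, p2, p3, p4):
    (x1, y1), (x2, y2), (x3, y3), (x4, y4) = p1, p2, p3, p4
    det = lambda a, b, c, d: a * d - b * c  # determinant of [[a, b], [c, d]]
    denom = det(x1 - x2, y1 - y2, x3 - x4, y3 - y4)
    if denom == 0:
        return None  # parallel or coincident lines
    d1 = det(x1, y1, x2, y2)
    d2 = det(x3, y3, x4, y4)
    x = det(d1, x1 - x2, d2, x3 - x4) / denom
    y = det(d1, y1 - y2, d2, y3 - y4) / denom
    return x, y

print(intersect_lines((0, 0), (2, 2), (0, 2), (2, 0)))  # (1.0, 1.0)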
Code example #5
File: pi_path.py Project: EurobotMDX/eurobot2020_ws
    def calculate(self):
        if not self.closed: self.data_points.append(self.data_points[0])

        for i in range(len(self.data_points)):
            point = self.data_points[i]
            self.xs.append(point.x)
            self.ys.append(point.y)

        self.boundary_lines = []
        perimeter = 0

        for i in range(len(self.data_points) - 1):
            start = self.data_points[i]
            end = self.data_points[i + 1]

            self.line = pi_line.Line(start, end)
            self.boundary_lines.append(self.line)

            perimeter += abs(self.line.get_length())

        # the bounding-rectangle information only depends on the full point set,
        # so it is computed once after the segment loop
        self.rect_info.top_left = pi_point.Point(min(self.xs), min(self.ys))
        self.rect_info.width = max(self.xs) - self.rect_info.top_left.x
        self.rect_info.height = max(self.ys) - self.rect_info.top_left.y
        self.rect_info.bottom_right = pi_point.Point(
            self.rect_info.top_left.x + self.rect_info.width,
            self.rect_info.top_left.y + self.rect_info.height)
        self.rect_info.area = self.rect_info.width * self.rect_info.height
        self.rect_info.radius = math.sqrt(
            pow(self.rect_info.width / 2.0, 2) +
            pow(self.rect_info.height / 2.0, 2)) / 2.0
        self.rect_info.center = pi_point.Point(
            self.rect_info.top_left.x + (self.rect_info.width / 2.0),
            self.rect_info.top_left.y + (self.rect_info.height / 2.0))
        self.rect_info.perimeter = perimeter

        mn = min(self.rect_info.width, self.rect_info.height)
        mx = max(self.rect_info.width, self.rect_info.height)

        self.ratio = 0 if ((mn <= 0) or (mx <= 0)) else mn / mx

        return True
Code example #6
File: pi_path.py Project: EurobotMDX/eurobot2020_ws
    def is_within(self, pi_object, resolution=0.0001):
        if isinstance(pi_object, pi_point.Point):
            # uses ray casting algorithm to solve point in polygon

            inf_point = pi_point.Point(
                pi_object.x + self.rect_info.top_left.x +
                self.rect_info.width + 10, pi_object.y)
            ray_line = pi_line.Line(pi_object, inf_point)

            intersecting_points = self.get_intersecting_points(ray_line)

            if len(intersecting_points) % 2 == 0:
                return False

            elif len(intersecting_points) == 1:
                if intersecting_points[0].equal(pi_object):
                    # it's just touching
                    return False

            return True
        elif isinstance(pi_object, pi_line.Line):
            points = self.get_intersecting_points(pi_object)

            if (len(points)) == 1:
                if (self.is_touching(pi_object.start)
                        and not (pi_object.start.equal(points[0]))):
                    return True

                elif (self.is_touching(pi_object.end)
                      and not (pi_object.end.equal(points[0]))):
                    return True
            elif len(points) == 2:
                other_points = [pi_object.start, pi_object.end]

                # sort both the segment endpoints and the intersection points by x
                # so the pairwise dx comparisons below line up
                other_points = sorted(other_points, key=self.__filter_x_function)
                points = sorted(points, key=self.__filter_x_function)

                dx_0 = other_points[0].x - points[0].x
                dx_1 = other_points[1].x - points[1].x

                resolution = -1.0 * resolution
                return (dx_0 >= resolution) and (dx_1 >= resolution)

            if self.is_within(pi_object.start) or self.is_within(
                    pi_object.end):
                return True
            elif self.is_touching(pi_object.start) and self.is_touching(
                    pi_object.end):
                return True
            else:
                return False

        return False
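The point containment test above relies on ray casting: a ray from the query point to a point well outside the bounding rectangle crosses the boundary an odd number of times exactly when the point is inside. A minimal standalone sketch of the same idea in plain Python (point_in_polygon is an illustrative name, not part of the project):

def point_in_polygon(px, py, polygon):
    # polygon: list of (x, y) vertices; counts crossings of a horizontal ray to the right
    inside = False
    n = len(polygon)
    for i in range(n):
        x1, y1 = polygon[i]
        x2, y2 = polygon[(i + 1) % n]
        crosses = (y1 > py) != (y2 > py)
        if crosses and px < x1 + (py - y1) * (x2 - x1) / (y2 - y1):
            inside = not inside
    return inside

print(point_in_polygon(0.5, 0.5, [(0, 0), (1, 0), (1, 1), (0, 1)]))  # True
print(point_in_polygon(1.5, 0.5, [(0, 0), (1, 0), (1, 1), (0, 1)]))  # False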
Code example #7
    def scale(self, **kwargs):
        mag = None
        mag_x = None
        mag_y = None
        mag_z = None
        about_point = None

        items_gen = None

        if sys.version.startswith("2"):
            items_gen = kwargs.iteritems()
        else:
            items_gen = kwargs.items()

        for key, value in items_gen:
            if key.lower() == "mag":
                mag = value
            elif key.lower() == "mag_x":
                mag_x = value
            elif key.lower() == "mag_y":
                mag_y = value
            elif key.lower() == "mag_z":
                mag_z = value
            elif key.lower() == "about_point":
                about_point = about_point

        if (mag_x is not None) and (mag_y
                                    is not None) and (about_point is None):
            scale_vector = None
            if mag_z is not None:
                scale_vector = pi_point.Point(x=mag_x, y=mag_y, z=mag_z)
            else:
                scale_vector = pi_point.Point(x=mag_x, y=mag_y)

            self.start *= scale_vector
            self.end *= scale_vector
        elif (mag_x is not None) and (mag_y is not None) and (about_point
                                                              is not None):
            dx = mag_x * self.length * 0.5 * math.cos(self.angle)
            dy = mag_y * self.length * 0.5 * math.sin(self.angle)

            self.start = pi_point.Point(about_point.x - dx, about_point.y - dy)
            self.end = pi_point.Point(about_point.x + dx, about_point.y + dy)
        elif mag is not None:
            if about_point is None:
                about_point = self.get_midpoint()

            new_length = mag * self.length * 0.5
            dx = new_length * math.cos(self.angle)
            dy = new_length * math.sin(self.angle)

            self.start = pi_point.Point(about_point.x - dx, about_point.y - dy)
            self.end = pi_point.Point(about_point.x + dx, about_point.y + dy)
        else:
            raise ValueError(
                "Incorrect arguments: use 'mag' or ('mag_x', 'mag_y', ['mag_z']), "
                "optionally together with 'about_point'"
            )

        return self.compute()
Code example #8
File: pi_path.py Project: EurobotMDX/eurobot2020_ws
    def get_shading_lines(self, spacing=10.0, angle=0.0, padding=0.0):
        lines = []

        min_x = self.rect_info.top_left.x - padding
        min_y = self.rect_info.top_left.y - padding

        max_x = self.rect_info.bottom_right.x + padding
        max_y = self.rect_info.bottom_right.y + padding

        cx = (max_x - min_x) / 2.0 + min_x
        cy = (max_y - min_y) / 2.0 + min_y

        about_point = pi_point.Point(cx, cy)

        # range() does not accept float arguments, so step through the y span manually
        y = min_y
        while y < max_y:
            start = pi_point.Point(min_x, y)
            end = pi_point.Point(max_x, y)
            line = pi_line.Line(start, end)

            line.rotate(angle, about_point)
            lines.append(line)

            y += spacing

        return lines
Code example #9
File: pi_path.py Project: EurobotMDX/eurobot2020_ws
    def calculate_centroid(self):
        # for a non-self-intersecting path

        cx = cy = 0
        scale = 1.0 / (6.0 * self.calculate_signed_area())

        for i in range(len(self.data_points)):
            next_index = (i + 1) % len(self.data_points)
            cx += (self.xs[i] + self.xs[next_index]) * (
                (self.xs[i] * self.ys[next_index]) -
                (self.xs[next_index] * self.ys[i]))
            cy += (self.ys[i] + self.ys[next_index]) * (
                (self.xs[i] * self.ys[next_index]) -
                (self.xs[next_index] * self.ys[i]))

        return pi_point.Point(cx * scale, cy * scale)
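As a sanity check, the same shoelace-based centroid formula can be run on its own. The sketch below is plain Python (polygon_centroid is an illustrative name, not part of the project) and should report (0.5, 0.5) for the unit square.

def polygon_centroid(points):
    # points: list of (x, y) vertices of a non-self-intersecting polygon
    area = 0.0
    cx = cy = 0.0
    n = len(points)
    for i in range(n):
        x0, y0 = points[i]
        x1, y1 = points[(i + 1) % n]
        cross = x0 * y1 - x1 * y0
        area += cross
        cx += (x0 + x1) * cross
        cy += (y0 + y1) * cross
    area *= 0.5  # signed area
    scale = 1.0 / (6.0 * area)
    return cx * scale, cy * scale

print(polygon_centroid([(0, 0), (1, 0), (1, 1), (0, 1)]))  # (0.5, 0.5)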
Code example #10
File: pi_path.py Project: EurobotMDX/eurobot2020_ws
    def is_touching(self, pi_object):
        if isinstance(pi_object, pi_point.Point):
            # uses ray casting algorithm to solve point in polygon
            inf_point = pi_point.Point(
                pi_object.x + self.rect_info.top_left.x +
                self.rect_info.width + 10, pi_object.y)
            ray_line = pi_line.Line(pi_object, inf_point)

            intersecting_points = self.get_intersecting_points(ray_line)
            return not (len(intersecting_points) % 2 == 0)
        elif isinstance(pi_object, pi_line.Line):
            points = self.get_intersecting_points(pi_object)
            return self.is_touching(pi_object.start) or self.is_touching(
                pi_object.end) or (len(points) > 0)
        else:
            return False
Code example #11
    def process_contours(self, contours, in_img, out_img):
        # This method FILTERS and LABELS the given contours ----- THIS is the method that handles the card prediction
        
        def _get_coutour_image(img, path, contour):
            #  This function returns the subset (with padding) of the given image that contains the contour
            #    Two subsets are returned, (1. subset in original image), (2. subset in original image as a mask)

            width = path.rect_info.width
            height = path.rect_info.height

            im_h, im_w = img.shape[:2]
            mask = np.zeros((im_h, im_w))
            cv2.drawContours(mask, [contour], -1, 255, -1)

            padding = 10
            top_x = max(path.rect_info.top_left.x - padding, 0)
            bottom_x = min(path.rect_info.bottom_right.x + padding, im_w)

            top_y = max(path.rect_info.top_left.y - padding, 0)
            bottom_y = min(path.rect_info.bottom_right.y + padding, im_h)

            n_mask = mask[top_y:bottom_y, top_x:bottom_x]
            n_img  = img[top_y:bottom_y, top_x:bottom_x]
            
            return n_img, n_mask
        
        def _constrain(x, mnx, mxx):
            return min(mxx, max(x, mnx))
        
        def determine_prediction(labels, probabilities):
            #  Given an arbitrary collection of labels and probabilities, this function returns a list containing each label and its mean probability
            #  Don't try to comprehend how this works --> you can't :) (it's magic)

            f = lambda a, b : [list(filter(lambda x: x[0] == i, sorted(list(zip(a, b)), key=lambda x: x[0]))) for i in list(set(a))]

            def _get_prediction(foo, n):
                label, preds = list(zip(*foo))
                label = label[0]
                preds = list(sorted(preds)[-n:])
                return label, sum(preds) / float(len(preds))

            data = f(labels, probabilities)
            n_data = []

            for d in data:
                r = _get_prediction(d, 3)
                n_data.append((r[0], r[1]))
            
            # rearrange the list to ensure that label with the highest probability is first
            return list(sorted(n_data, key=lambda x : x[1], reverse=True))
        
        contours_list = [contour.reshape((contour.shape[0], 2)).tolist() for contour in contours]
        paths_list    = [pi_path.Path(raw_point_data=[pi_point.Point(x=point[0], y=point[1]) for point in contour_points], is_closed=True) for contour_points in contours_list]

        filtered_paths = []
        filtered_cnts = []
        labels = []
        probs = []


        # do not consider contours that have areas less than {@ min_allowed_area}
        min_allowed_area = 100
        paths_attributes  = [(path, path.rect_info.area, path.rect_info.perimeter) for path in paths_list if path.rect_info.area > min_allowed_area]
        paths_list, area_list, perimeter_list = zip(*paths_attributes)

        for path in paths_list:
            rect_info = path.rect_info

            # do not add contours with an invalid axes ratio or area
            if path.ratio < 0.6: continue
            if rect_info.area < 800: continue
            if rect_info.area > 7000: continue
            
            # convert path to contour
            cnt = path.get_as_contour()

            # extract a padded section of image with the contour
            n_img, n_mask = _get_coutour_image(in_img, path, cnt)


            # THIS IS WHERE THE PREDICTION IS DONE
            if self.current_classifier is not None:
                label, prob = self.current_classifier.predict(n_img)
                
                if prob < 0.9: continue  # do not accept contours with probability less than 0.9

                labels.append(label)
                probs.append(prob)

            filtered_cnts.append(cnt)
            filtered_paths.append(path)
        
        preds = determine_prediction(labels, probs)
        number_labels = ["Ace", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine", "Ten"]

        if len(preds) > 0:
            label, prob = preds[0] # select the prediction with the highest probability
            
            self.ed_card_suit.setText(label.capitalize())

            # set card number to a selection from {@ number_labels}, based on the number of filtered paths
            self.ed_card_number.setText(number_labels[_constrain(len(filtered_paths)-1, 0, len(number_labels)-1)])
        else:
            self.ed_card_suit.setText("None")
            self.ed_card_number.setText("None")
        
        cv2.drawContours(out_img, filtered_cnts, -1, (0,0,255), 1)
        return out_img, filtered_paths, filtered_cnts
Code example #12
 def get_point(self, position):
     x = self.start.x + ((self.end.x - self.start.x) * position)
     y = self.start.y + ((self.end.y - self.start.y) * position)
     z = self.start.z + ((self.end.z - self.start.z) * position)
     return pi_point.Point(x, y, z)
Code example #13
def process_contours(contours, in_img, out_img, prediction_model=None):
    def _get_coutour_image(img, path, contour):
        width = path.rect_info.width
        height = path.rect_info.height

        im_h, im_w = img.shape[:2]
        mask = np.zeros((im_h, im_w))
        cv2.drawContours(mask, [contour], -1, 255, -1)

        padding = 10
        top_x = max(path.rect_info.top_left.x - padding, 0)
        bottom_x = min(path.rect_info.bottom_right.x + padding, im_w)

        top_y = max(path.rect_info.top_left.y - padding, 0)
        bottom_y = min(path.rect_info.bottom_right.y + padding, im_h)

        n_mask = mask[top_y:bottom_y, top_x:bottom_x]
        n_img = img[top_y:bottom_y, top_x:bottom_x]

        return n_img, n_mask

    # cv2 images are indexed (rows, cols, channels), i.e. (height, width, channels)
    in_img_h, in_img_w, _ = (0, 0, 0)

    if len(in_img.shape) == 3:
        in_img_h, in_img_w, _ = in_img.shape
    else:
        raise ValueError("The input image must be a 3-channel BGR image (cv2 Mat)")

    contours_list = [
        contour.reshape((contour.shape[0], 2)).tolist() for contour in contours
    ]
    paths_list = [
        pi_path.Path(raw_point_data=[
            pi_point.Point(x=point[0], y=point[1]) for point in contour_points
        ],
                     is_closed=True) for contour_points in contours_list
    ]

    filtered_paths = []
    filtered_cnts = []
    labels = []
    probs = []

    min_allowed_area = (in_img_w * in_img_h) * (500.0 / 66240.0)
    paths_attributes = [(path, path.rect_info.area, path.rect_info.perimeter)
                        for path in paths_list
                        if path.rect_info.area > min_allowed_area]
    paths_list, area_list, perimeter_list = zip(*paths_attributes)

    for path in paths_list:
        rect_info = path.rect_info

        if path.ratio < 0.6: continue
        if rect_info.area > (in_img_w * in_img_h) * (7000.0 / 66240.0):
            continue

        cnt = path.get_as_contour()

        n_img, n_mask = _get_coutour_image(in_img, path, cnt)
        if prediction_model is not None:
            label, prob = prediction_model.predict(n_img)
            if prob < 0.9: continue

            labels.append(label)
            probs.append(prob)

        filtered_cnts.append(cnt)
        filtered_paths.append(path)

    def _constrain(x, mnx, mxx):
        return min(mxx, max(x, mnx))

    def determine_prediction(labels, probabilities):
        f = lambda a, b: [
            list(
                filter(lambda x: x[0] == i,
                       sorted(list(zip(a, b)), key=lambda x: x[0])))
            for i in list(set(a))
        ]

        def _get_prediction(foo, n):
            label, preds = list(zip(*foo))
            label = label[0]
            preds = list(sorted(preds)[-n:])
            return label, sum(preds) / float(len(preds))

        data = f(labels, probabilities)
        n_data = []

        for d in data:
            r = _get_prediction(d, 3)
            n_data.append((r[0], r[1]))

        return list(sorted(n_data, key=lambda x: x[1], reverse=True))

    preds = determine_prediction(labels, probs)
    number_labels = [
        "Ace", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine",
        "Ten"
    ]

    label = "None"
    number = "None"
    probability = 1.0

    if len(preds) > 0:
        label, probability = preds[0]
        label = label.capitalize()
        number = number_labels[_constrain(
            len(filtered_paths) - 1, 0,
            len(number_labels) - 1)]

    cv2.drawContours(out_img, filtered_cnts, -1, (0, 255, 0), 1)
    return out_img, [label, number, probability]
Code example #14
def process_contours(contours, in_img, out_img):
    # contours = [approx_contour(contour) for contour in contours]
    contours_list = [
        contour.reshape((contour.shape[0], 2)).tolist() for contour in contours
    ]
    paths_list = [
        pi_path.Path(raw_point_data=[
            pi_point.Point(x=point[0], y=point[1]) for point in contour_points
        ],
                     is_closed=True) for contour_points in contours_list
    ]

    filtered_paths = []
    c_h = []
    c_s = []
    c_v = []

    min_allowed_area = 100
    paths_attributes = [(path, path.rect_info.area, path.rect_info.perimeter)
                        for path in paths_list
                        if path.rect_info.area > min_allowed_area]
    paths_list, area_list, perimeter_list = zip(*paths_attributes)

    area_list = list(area_list)
    perimeter_list = list(perimeter_list)

    if len(paths_list) > 2:
        area_std = np.std(area_list)
        perimeter_std = np.std(perimeter_list)

        print "area_std: ", area_std, " vs perimeter_std: ", perimeter_std, " # ", len(
            paths_list)

        area_list.remove(max(area_list))
        area_list.remove(min(area_list))

    area_list = np.array(area_list)
    perimeter_list = np.array(perimeter_list)

    area_mean = np.mean(area_list)
    perimeter_mean = np.mean(perimeter_list)

    th = 0.95
    area_max = area_mean + (th * area_mean)
    area_min = area_mean - (th * area_mean)

    perimeter_max = perimeter_mean + (th * perimeter_mean)
    perimeter_min = perimeter_mean - (th * perimeter_mean)

    # print "-"*10

    for path in paths_list:
        rect_info = path.rect_info

        # if path.ratio < 0.6: continue
        if (rect_info.area < area_min or rect_info.area > area_max):
            continue

        if (rect_info.perimeter < perimeter_min
                or rect_info.perimeter > perimeter_max):
            continue

        cnt = path.get_as_contour()
        h, s, v = get_contour_color_from_image(cnt, in_img)

        filtered_paths.append(path)
        c_h.append(h)
        c_s.append(s)
        c_v.append(v)

        print("[INFO] ({}, {}, {})".format(len(filtered_paths), h, s, v))

        cv2.drawContours(out_img, [cnt], -1, (0, 0, 255), 1)
        # print path.count_edges(math.radians(100))
        # print ratio, rect_info.area, rect_info.perimeter, cv2.moments(cnt), cv2.isContourConvex(cnt)

        save_image(in_img, path, cnt)

    # text = ""

    # if len(c_h) > 0:
    #     text = "#{} ".format(len(filtered_paths))
    #     if np.mean(c_s) < 100:
    #         text += "SPADES or THREE-SISTERS"
    #         # print (text, "c ({}, {}, {})".format(len(filtered_paths), np.mean(c_h), np.mean(c_s), np.mean(c_v)))
    #     else:
    #         text += "HEART or DIAMOND"
    #         # print (text, "c ({}, {}, {})".format(len(filtered_paths), np.mean(c_h), np.mean(c_s), np.mean(c_v)))

    # cv2.putText(out_img,text,(0,20), font, 0.4,(255,255,255),1,cv2.LINE_AA)

    return filtered_paths
Code example #15
File: pi_path.py Project: EurobotMDX/eurobot2020_ws
    def __init__(self, **kwargs):
        self.__filter_x_function = lambda cur_point: cur_point.x
        self.__filter_y_function = lambda cur_point: cur_point.y
        self.__filter_z_function = lambda cur_point: cur_point.z

        self.ratio = 0

        self.data_points = []
        self.xs = []
        self.ys = []
        self.boundary_lines = []

        self.closed = True
        self.rect_info = pi_arithmetic.RectInfo()

        items_gen = None

        if sys.version.startswith("2"):
            items_gen = kwargs.iteritems()
        else:
            items_gen = kwargs.items()

        _raw_point_data = None
        _is_closed = None

        _rect = None

        _center = None
        _radius = None
        _angle = 2 * math.pi
        _steps = 32

        _rx = None
        _ry = None

        def __throw_error():
            raise ValueError(
                "Incorrect arguments: use 'raw_point_data', 'rect', "
                "('center' and 'radius') or ('center', 'rx' and 'ry'), "
                "together with 'is_closed' and optionally 'angle' and 'steps'"
            )

        for key, value in items_gen:
            if key.lower() == "raw_point_data":
                _raw_point_data = value
            elif key.lower() == "is_closed":
                _is_closed = value
            elif key.lower() == "rect":
                _rect = value
            elif key.lower() == "center":
                _center = value
            elif key.lower() == "radius":
                _radius = value
            elif key.lower() == "angle":
                _angle = value
            elif key.lower() == "steps":
                _steps = value
            elif key.lower() == "rx":
                _rx = value
            elif key.lower() == "ry":
                _ry = value

        if (_raw_point_data is not None) and (_is_closed is not None):
            if (len(_raw_point_data) > 0) and (not isinstance(
                    _raw_point_data[0], pi_point.Point)):
                points = []
                for i in range(len(_raw_point_data)):
                    cv_point = _raw_point_data[i]
                    points.append(pi_point.Point(x=cv_point.x, y=cv_point.y))

                self.data_points = points
            else:
                self.data_points = _raw_point_data

            self.closed = _is_closed

        elif (_raw_point_data is not None) and (_is_closed is None):
            raise ValueError(
                "Incorrect variables given - must include is_closed")

        elif _rect is not None:
            point_1 = pi_point.Point(_rect.x, _rect.y)
            point_2 = pi_point.Point(_rect.x + _rect.width, _rect.y)
            point_3 = pi_point.Point(_rect.x + _rect.width,
                                     _rect.y + _rect.height)
            point_4 = pi_point.Point(_rect.x, _rect.y + _rect.height)

            points = [point_1, point_2, point_3, point_4]

            self.data_points = points
            self.closed = False
        elif _center is not None:
            points = []

            if _radius is not None:
                _rx = _ry = _radius
            elif (_rx is not None) and (_ry is not None):
                pass
            else:
                __throw_error()

            theta = 0.0
            increment = 2.0 * math.pi / _steps
            _rx = abs(_rx)
            _ry = abs(_ry)

            while theta < _angle:
                x = _center.x + _rx * math.cos(theta)
                y = _center.y + _ry * math.sin(theta)

                points.append(pi_point.Point(x, y))
                theta += increment

            self.data_points = points
            self.closed = False

        self.calculate()
Code example #16
File: pi_line.py Project: EurobotMDX/eurobot2020_ws
 def get_midpoint(self):
     x = self.start.x + ((self.end.x - self.start.x) * 0.5)
     y = self.start.y + ((self.end.y - self.start.y) * 0.5)
     return pi_point.Point(x, y)
Code example #17
    def predict_card_from_image(self, card_img):
        result = {
            "suit": None,
            "number": None,
            "probability": 1.0,
            "status": False,
            "error_code": -404,
            "error_msg": "No card found"
        }

        if not self.is_image_valid(card_img):
            return result

        frame = card_img.copy()

        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray_frame = cv2.GaussianBlur(gray_frame, (5, 5), 0)

        gray_mean = int(np.mean(gray_frame.ravel()))
        _, gray_th = cv2.threshold(gray_frame, gray_mean, 255,
                                   cv2.THRESH_BINARY)

        kernel = np.ones((3, 3), np.uint8)
        gray_th = cv2.erode(gray_th, kernel, iterations=1)

        _, contours, _ = cv2.findContours(gray_th, cv2.RETR_TREE,
                                          cv2.CHAIN_APPROX_SIMPLE)

        def __get_image_in_contour(img, path, contour):
            '''
                Returns the sub-section (with padding) of the given image contained within the
                given contour, alongside its mask.
            '''

            im_h, im_w = img.shape[:2]
            mask = np.zeros((im_h, im_w))
            cv2.drawContours(mask, [contour], -1, 255, -1)

            padding = 10
            top_x = max(path.rect_info.top_left.x - padding, 0)
            bottom_x = min(path.rect_info.bottom_right.x + padding, im_w)

            top_y = max(path.rect_info.top_left.y - padding, 0)
            bottom_y = min(path.rect_info.bottom_right.y + padding, im_h)

            n_mask = mask[top_y:bottom_y, top_x:bottom_x]
            n_img = img[top_y:bottom_y, top_x:bottom_x]

            return n_img, n_mask

        def __constrain(x, mn_x, mx_x):
            return min(mx_x, max(x, mn_x))

        def __determine_prediction(labels, probabilities):
            #  Given an arbitrary collection of labels and probabilities, this function returns a list containing each label and its mean probability
            #  Don't try to comprehend how this works --> (it's magic)
            #
            # e.g. takes [a, b, c, a, c, b, a, b ...], [0.1, 0.5, 0.2, 0.3, 0.4, 0.1, 0.9, 0.1 ...]
            # and returns [(a, prob), (b, prob), (c, prob)]    # something like this --> :)

            f = lambda a, b: [
                list(
                    filter(lambda x: x[0] == i,
                           sorted(list(zip(a, b)), key=lambda x: x[0])))
                for i in list(set(a))
            ]

            def __get_prediction(foo, n):
                label, preds = list(zip(*foo))
                label = label[0]
                preds = list(sorted(preds)[-n:])
                return label, sum(preds) / float(len(preds))

            # pre-format data
            data = f(labels, probabilities)
            n_data = []

            for datum in data:
                r = __get_prediction(datum, 3)
                n_data.append((r[0], r[1]))

            # rearrange the list to ensure that label with the highest probability is first
            return list(sorted(n_data, key=lambda x: x[1], reverse=True))

        # frame.shape is (height, width, channels)
        in_img_h, in_img_w, _ = frame.shape

        contours_list = [
            contour.reshape((contour.shape[0], 2)).tolist()
            for contour in contours
        ]
        paths_list = [
            pi_path.Path(raw_point_data=[
                pi_point.Point(x=point[0], y=point[1])
                for point in contour_points
            ],
                         is_closed=True) for contour_points in contours_list
        ]

        filtered_paths = []
        filtered_cnts = []
        labels = []
        probs = []

        # do not consider contours that have areas less than {@ min_allowed_area}
        min_allowed_area = (in_img_w * in_img_h) * (500.0 / 66240.0)
        paths_attributes = [(path, path.rect_info.area,
                             path.rect_info.perimeter) for path in paths_list
                            if path.rect_info.area > min_allowed_area]
        paths_list, area_list, perimeter_list = zip(*paths_attributes)

        # getting a prediction for every shape found in the image
        for path in paths_list:
            rect_info = path.rect_info

            # do not add contours with an invalid axes ratio or area
            if path.ratio < 0.6: continue
            if rect_info.area > (in_img_w * in_img_h) * (7000.0 / 66240.0):
                continue

            # convert path to contour
            cnt = path.get_as_contour()

            # extract a padded section of image with the contour
            n_img, n_mask = __get_image_in_contour(frame, path, cnt)

            # THIS IS WHERE THE PREDICTION IS DONE
            if self.current_classifier is not None:
                label, prob = self.current_classifier.predict(n_img)

                if prob < 0.9:
                    continue  # do not accept contours with probability less than 0.9

                labels.append(label)
                probs.append(prob)

            filtered_cnts.append(cnt)
            filtered_paths.append(path)

        preds = __determine_prediction(labels, probs)
        number_labels = [
            "Ace", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight",
            "Nine", "Ten"
        ]

        if len(preds) > 0:
            label, prob = preds[
                0]  # select the prediction with the highest probability

            result["suit"] = label
            result["number"] = number_labels[__constrain(
                len(filtered_paths) - 1, 0,
                len(number_labels) - 1)]
            result["probability"] = prob
            result["status"] = True
            result["error_code"] = 1
            result["error_msg"] = ""

            out_img = frame.copy()
            cv2.drawContours(out_img, filtered_cnts, -1, (0, 0, 255), 1)
            result["img_out"] = out_img

        return result