Example #1
    def show_gray_histogram(self):
        numbins = 256
        ranges = [0.0, 256.0]  # calcHist's upper bound is exclusive, so 256 covers value 255

        width = 256
        height = 256

        bytes_per_line = 3 * width

        hist_image = np.zeros([height, width, 3], np.uint8)

        # hist_image = np.zeros((256,256,3))  # all-zero image used to draw the histogram

        bins = np.arange(numbins).reshape(numbins, 1)  # x coordinate of each histogram bin

        color = [(255, 0, 0)]  # single BGR color (blue) for the grayscale curve

        for ch, col in enumerate(color):
            origin_hist = cv2.calcHist([self.gray_image], [ch], None, [numbins], ranges)
            cv2.normalize(origin_hist, origin_hist, 0, 255 * 0.9, cv2.NORM_MINMAX)
            hist = np.int32(np.around(origin_hist))
            pts = np.int32(np.column_stack((bins, hist)))  # polylines needs int32 points
            cv2.polylines(hist_image, [pts], False, col)

        # print(type(hist_image.data))
        hist_image = np.flipud(hist_image)
        # cv2.imshow("histogram", hist_image)
        demo_utils.show_cvimage_to_label(hist_image, self.gray_histogram_label)
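A minimal standalone sketch of the same calcHist-plus-polylines pattern outside the class, assuming a grayscale image loaded from a placeholder path:

import cv2
import numpy as np

gray = cv2.imread("gray.png", cv2.IMREAD_GRAYSCALE)  # placeholder path
hist = cv2.calcHist([gray], [0], None, [256], [0.0, 256.0])
cv2.normalize(hist, hist, 0, 255 * 0.9, cv2.NORM_MINMAX)
bins = np.arange(256).reshape(256, 1)
pts = np.int32(np.column_stack((bins, np.around(hist))))
canvas = np.zeros((256, 256, 3), np.uint8)
cv2.polylines(canvas, [pts], False, (255, 0, 0))
canvas = np.flipud(canvas)  # flip so bin heights grow upwards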
Example #2
def draw_ackerman_model(image, center, object, mtp_ratio, pursuit_point=None):

    corners = object.get_axel_corners()
    corners = center + np.array(corners) * mtp_ratio
    corners = corners.reshape((-1, 1, 2))

    # Draw the axle frame
    cv2.polylines(image, [corners.astype(np.int32)], True, (255, 50, 255), 1)

    front_left_tyre = center + np.array(object.front_left_tyre) * mtp_ratio
    front_right_tyre = center + np.array(object.front_right_tyre) * mtp_ratio
    rear_left_tyre = center + np.array(object.rear_left_tyre) * mtp_ratio
    rear_right_tyre = center + np.array(object.rear_right_tyre) * mtp_ratio

    cv2.line(image, tuple(front_left_tyre[0].astype(np.int32)),
             tuple(front_left_tyre[1].astype(np.int32)), (0, 0, 255), 3,
             cv2.LINE_AA)
    cv2.line(image, tuple(front_right_tyre[0].astype(np.int32)),
             tuple(front_right_tyre[1].astype(np.int32)), (0, 0, 255), 3,
             cv2.LINE_AA)
    cv2.line(image, tuple(rear_left_tyre[0].astype(np.int32)),
             tuple(rear_left_tyre[1].astype(np.int32)), (255, 0, 0), 3,
             cv2.LINE_AA)
    cv2.line(image, tuple(rear_right_tyre[0].astype(np.int32)),
             tuple(rear_right_tyre[1].astype(np.int32)), (255, 0, 0), 3,
             cv2.LINE_AA)

    if pursuit_point is not None:
        # cast after adding center so a float center can't leak into the tuple
        point = (center + pursuit_point * mtp_ratio).astype(np.int32)
        cv2.circle(image, tuple(point), 3, (0, 100, 255), -1)
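A hedged usage sketch; DummyVehicle is a hypothetical stand-in exposing only the attributes the function reads, and every coordinate here is illustrative:

import cv2
import numpy as np

class DummyVehicle:
    # Hypothetical stand-in: each tyre is a pair of (x, y) endpoints in
    # metres, and the corners outline the axle frame.
    front_left_tyre = [(-1.0, 1.4), (-1.0, 2.0)]
    front_right_tyre = [(1.0, 1.4), (1.0, 2.0)]
    rear_left_tyre = [(-1.0, -2.0), (-1.0, -1.4)]
    rear_right_tyre = [(1.0, -2.0), (1.0, -1.4)]

    def get_axel_corners(self):
        return [(-1.0, 1.7), (1.0, 1.7), (1.0, -1.7), (-1.0, -1.7)]

canvas = np.zeros((400, 400, 3), np.uint8)
draw_ackerman_model(canvas, np.array([200.0, 200.0]), DummyVehicle(),
                    mtp_ratio=40.0)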
Example #3
    def road_mask(self) -> Mask:
        canvas = self.make_empty_mask()
        # FIXME Refactor that crap
        for road_waypoints in self._each_road_waypoints:
            road_left_side = [
                lateral_shift(w.transform, -w.lane_width * 0.5)
                for w in road_waypoints
            ]
            road_right_side = [
                lateral_shift(w.transform, w.lane_width * 0.5)
                for w in road_waypoints
            ]

            polygon = road_left_side + list(reversed(road_right_side))
            polygon = [self.location_to_pixel(x) for x in polygon]
            if len(polygon) > 2:
                polygon = np.array([polygon], dtype=np.int32)

                # FIXME Hard to notice the difference without polylines
                cv.polylines(img=canvas,
                             pts=polygon,
                             isClosed=True,
                             color=COLOR_ON,
                             thickness=5)
                cv.fillPoly(img=canvas, pts=polygon, color=COLOR_ON)
        return canvas
Example #4
def create_emoticon():
    emoticon = np.zeros([512, 512, 3], np.uint8)
    emoticon = cv2.circle(emoticon, (256, 256), 200, (0, 215, 255), -1)
    emoticon = cv2.ellipse(emoticon, (166, 200), (40, 30), 90, 0, 360,
                           (0, 0, 0), -1)
    emoticon = cv2.ellipse(emoticon, (166, 200), (40, 30), 90, 0, 360,
                           (255, 255, 255), 1)
    emoticon = cv2.ellipse(emoticon, (346, 200), (40, 30), 90, 0, 360,
                           (0, 0, 0), -1)
    emoticon = cv2.ellipse(emoticon, (346, 200), (40, 30), 90, 0, 360,
                           (255, 255, 255), 1)
    emoticon = cv2.circle(emoticon, (180, 175), 7, (255, 255, 255), -1)
    emoticon = cv2.circle(emoticon, (360, 175), 7, (255, 255, 255), -1)
    pts = [(130, 260), (150, 390), (362, 390), (382, 260), (256, 248)]
    cv2.fillPoly(emoticon, np.array([pts]), (208, 224, 64))
    cv2.polylines(emoticon, np.array([pts]), True, (255, 255, 255), 2)
    cv2.line(emoticon, (150, 290), (362, 290), (209, 206, 0), 2)
    cv2.line(emoticon, (155, 330), (357, 330), (209, 206, 0), 2)
    cv2.line(emoticon, (160, 360), (352, 360), (209, 206, 0), 2)
    cv2.line(emoticon, (130, 260), (65, 200), (255, 255, 255), 2)
    cv2.line(emoticon, (382, 260), (447, 200), (255, 255, 255), 2)
    cv2.line(emoticon, (150, 390), (125, 405), (255, 255, 255), 2)
    cv2.line(emoticon, (362, 390), (387, 405), (255, 255, 255), 2)
    cv2.imshow('emoticon', emoticon)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #5
    def test_bbox_generator(self, img):
        img_name = format_name(img)
        full_img_name = os.path.join(self.labels_folder, img_name)
        img = self.datahandle.images[img_name][0]
        height, width = img.shape[:2]

        with open(full_img_name + '.txt', 'r') as file:
            coords = file.readlines()
        thick = 2
        color = (0, 0, 255)
        green = (0, 255, 0)
        for coord in coords:
            cds = np.array(coord.strip('\n').split(' ')).astype(float)[1:]
            x_tl = int((cds[0] - cds[2] / 2) * width)
            y_tl = int((cds[1] - cds[3] / 2) * height)
            x_br = int((cds[0] + cds[2] / 2) * width)
            y_br = int((cds[1] + cds[3] / 2) * height)
            start = (x_tl, y_tl)
            end = (x_br, y_br)
            img = cv2.rectangle(img, start, end, color, thick)

        coords_img = self.datahandle.images[img_name][2]
        for coords in coords_img:

            # polylines needs 32-bit integer points
            img_gate_old = np.array([(coords[0], coords[1]),
                                     (coords[2], coords[3]),
                                     (coords[4], coords[5]),
                                     (coords[6], coords[7]),
                                     (coords[0], coords[1])], dtype=np.int32)
            cv2.polylines(img, [img_gate_old], False, green, 2)

        cv2.imshow('image', img)
        cv2.waitKey(0)
Example #6
    def draw(self, output_image):
        for point in self.centroids:
            # circle needs integer coordinates; centroids may be floats
            cv2.circle(output_image, tuple(np.int32(point)), 2,
                       self.path_color, -1)
        cv2.polylines(output_image, [np.int32(self.centroids)], False,
                      self.path_color, 1)

        if len(self.speed) != 0:
            cv2.putText(output_image, ("%1.2f" % self.last_speed),
                        self.last_position_centroid, cv2.FONT_HERSHEY_PLAIN,
                        0.7, (127, 255, 255), 1)
Example #7
def draw_solid_line(canvas, color, closed, points, width):
    """Draws solid lines in a surface given a set of points, width and color"""
    if len(points) >= 2:
        cv.polylines(
            img=canvas,
            pts=np.int32([points]),
            isClosed=closed,
            color=color,
            thickness=width,
        )
Example #8
def draw_flow(img, flow, step=16):
    h, w = img.shape[:2]
    y, x = np.mgrid[step / 2:h:step, step / 2:w:step].reshape(2,
                                                              -1).astype(int)
    fx, fy = flow[y, x].T
    lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
    cv.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (_x2, _y2) in lines:
        cv.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis
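A hedged sketch of how the flow argument is typically produced, using Farneback dense optical flow between two grayscale frames (the file names are placeholders):

import cv2 as cv

prev = cv.imread("frame1.png", cv.IMREAD_GRAYSCALE)  # placeholder paths
curr = cv.imread("frame2.png", cv.IMREAD_GRAYSCALE)
flow = cv.calcOpticalFlowFarneback(prev, curr, None, pyr_scale=0.5,
                                   levels=3, winsize=15, iterations=3,
                                   poly_n=5, poly_sigma=1.2, flags=0)
vis = draw_flow(curr, flow)
cv.imshow("flow", vis)
cv.waitKey(0)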
Example #9
    def centerlines_mask(self) -> Mask:
        canvas = self.make_empty_mask()
        for road_waypoints in self._each_road_waypoints:
            polygon = [
                self.location_to_pixel(wp.transform.location) for wp in road_waypoints
            ]
            if len(polygon) > 2:
                polygon = np.array([polygon], dtype=np.int32)
                cv.polylines(
                    img=canvas, pts=polygon, isClosed=False, color=COLOR_ON, thickness=1
                )
        return canvas
Example #10
    def __init__(self, path):
        # Clearing shapes from last time
        self.shapes = []
        self.points = []

        # Setting up the cv2 window
        self.path = path
        self.image = cv2.imread(path)
        cv2.namedWindow("Focus")
        cv2.setMouseCallback("Focus", self.create_polygon)
        
        # Infinite loop to run polygon creation
        while True:
            # Drawing the original/modified image every time
            cv2.imshow('Focus', self.image)

            # Key presses
            command = cv2.waitKey(10) & 0xFF
            
            # Local parameters
            pts = np.asarray(self.points, np.int32).reshape(-1, 1, 2)
            color = (0, 0, 255)
            thickness = 2

            # Submit shape
            if command == ord('a'):
                # Save the points as a new shape
                self.shapes.append(Shape(len(self.shapes), self.points))

                # Debugging lines
                # print(self.shapes)
                
                # Draw the polygon
                cv2.polylines(self.image, [pts], True, color, thickness,
                              lineType=cv2.LINE_AA)

                # Clear the points for a new polygon
                self.points = []

            # Delete previous shape if a mistake was made
            elif command == ord('d'):
                self.shapes = self.shapes[:-1]
                self.image = cv2.imread(path)
                for shape in self.shapes:
                    shape_pts = np.asarray(shape.coordinates,
                                           np.int32).reshape(-1, 1, 2)
                    cv2.polylines(self.image, [shape_pts], True, color,
                                  thickness, lineType=cv2.LINE_AA)
           
            # Exit condition
            elif command == ord('q'):
                break
            
        # Close all windows after exit
        cv2.destroyAllWindows() 
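The mouse callback create_polygon is not shown in this snippet; a plausible hedged sketch of it (recording one vertex per left click and marking it is an assumption) could be:

    def create_polygon(self, event, x, y, flags, param):
        # Hypothetical callback: append a vertex on each left click and
        # mark it on the image so the user can see it.
        if event == cv2.EVENT_LBUTTONDOWN:
            self.points.append((x, y))
            cv2.circle(self.image, (x, y), 2, (0, 0, 255), -1)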
Example #11
def upload():
    # file=request.files['temp']
    f = request.files['temp']
    tempname = request.form['tempname']
    temp_path = '../templates/'
    name = f.filename.replace(' ', '_')
    # print(tempname)
    f.save(secure_filename(f.filename))

    inputImage = cv2.imread(name)
    inputImageGray = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(inputImageGray, 150, 200, apertureSize=3)

    # print(edges)
    edges = abs(cv2.subtract(255, edges))

    minLineLength = 30
    maxLineGap = 5
    # rho is 1 pixel; passing the length/gap limits as keywords keeps them
    # out of the wrong positional slots of HoughLinesP
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 30,
                            minLineLength=minLineLength, maxLineGap=maxLineGap)
    if lines is not None:  # HoughLinesP returns None when nothing is found
        for line in lines:
            for x1, y1, x2, y2 in line:
                pts = np.array([[x1, y1], [x2, y2]], np.int32)
                cv2.polylines(inputImage, [pts], True, (0, 255, 0))

    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(inputImage, "Tracks Detected", (500, 250), font, 0.5, 255)

    os.remove(name)

    filename = tempname + '.png'

    # The following converts white pixels to transparent
    imagePIL = Image.fromarray(edges)
    imagePIL = imagePIL.convert("RGBA")
    datas = imagePIL.getdata()

    newData = []
    for item in datas:
        if item[0] == 255 and item[1] == 255 and item[2] == 255:
            newData.append((255, 255, 255, 0))
        else:
            if item[0] > 150:
                newData.append((0, 0, 0, 255))
            else:
                newData.append(item)

    imagePIL.putdata(newData)
    imagePIL.save(temp_path + filename, "PNG")

    return send_file(temp_path + filename, mimetype='image/png')
Example #12
def hist_curve(im):
    bins = np.arange(256).reshape(256, 1)  # x coordinate of each bin
    h = np.zeros((300, 256, 3))
    if len(im.shape) == 2:
        color = [(255, 255, 255)]
    elif im.shape[2] == 3:
        color = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
    for ch, col in enumerate(color):
        hist_item = cv2.calcHist([im], [ch], None, [256], [0, 256])
        cv2.normalize(hist_item, hist_item, 0, 255, cv2.NORM_MINMAX)
        hist = np.int32(np.around(hist_item))
        pts = np.int32(np.column_stack((bins, hist)))
        cv2.polylines(h, [pts], False, col)
    y = np.flipud(h)
    return y
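Usage is a one-liner; note that h is a float array, so the result is cast to uint8 before display (the image path is a placeholder):

import cv2
import numpy as np

im = cv2.imread("photo.jpg")  # placeholder path
curve = hist_curve(im)
cv2.imshow("histogram", curve.astype(np.uint8))  # imshow expects 0..1 for floats
cv2.waitKey(0)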
Example #13
def draw_match(result_title, img1, img2, kp_pairs, status=None, H=None):
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]

    # Create visualized result image
    vis = np.zeros((max(h1, h2), w1 + w2), np.uint8)
    vis[:h1, :w1] = img1
    vis[:h2, w1:w1 + w2] = img2
    vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)

    if H is not None:
        corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
        corners = np.int32(
            cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(
                -1, 2) + (w1, 0))
        cv2.polylines(vis, [corners], True, (255, 255, 255))

    if status is None:
        status = np.ones(len(kp_pairs), np.bool_)
    p1, p2 = [], []  # python 2 / python 3 change of zip unpacking

    for kpp in kp_pairs:
        p1.append(np.int32(kpp[0].pt))
        p2.append(np.int32(np.array(kpp[1].pt) + [w1, 0]))

    green = (0, 255, 0)
    red = (0, 0, 255)

    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            color = green
            cv2.circle(vis, (x1, y1), 2, color, -1)
            cv2.circle(vis, (x2, y2), 2, color, -1)
        else:
            color = red
            r = 2
            thickness = 3
            cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), color, thickness)
            cv2.line(vis, (x1 - r, y1 + r), (x1 + r, y1 - r), color, thickness)
            cv2.line(vis, (x2 - r, y2 - r), (x2 + r, y2 + r), color, thickness)
            cv2.line(vis, (x2 - r, y2 + r), (x2 + r, y2 - r), color, thickness)

    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            cv2.line(vis, (x1, y1), (x2, y2), green)

    cv2.imshow(result_title, vis)

    return vis
Example #14
def draw_object(img1, kp1, img2, kps2, matches):
    """
    Draws the object found on the second image

    :param img1: First image
    :param kp1: List of keypoints from the first image
    :param img2: Second image
    :param kps2: List of keypoints from the second image
    :param matches: List of matches between the two images
    :return: modified image
    """
    src_pts = np.float32([kp1[m.queryIdx].pt
                          for m in matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kps2[m.trainIdx].pt
                          for m in matches]).reshape(-1, 1, 2)

    M = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)[0]

    h, w = img1.shape[:2]
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                      [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)
    img_res = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)

    return img_res
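A hedged sketch of producing the inputs draw_object expects, using ORB keypoints and a brute-force Hamming matcher (the file names are placeholders):

import cv2

img1 = cv2.imread("object.png", cv2.IMREAD_GRAYSCALE)  # placeholder paths
img2 = cv2.imread("scene.png", cv2.IMREAD_GRAYSCALE)

orb = cv2.ORB_create(1000)
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(matcher.match(des1, des2), key=lambda m: m.distance)

result = draw_object(img1, kp1, img2, kp2, matches[:50])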
Example #15
def feature_match(img1, img2):
    original = img2
    img1 = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
    img2 = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)
    h, w = img1.shape
    h2, w2 = img2.shape

    if h > h2 and w > w2:
        img1 = resize_with_aspect_ratio(img1, height=h2 // 2)

    pts1, desc1 = ORB_descriptor(img1, 1000)
    pts2, desc2 = ORB_descriptor(img2, 10000)

    dmatches = get_matches(desc1, desc2)

    h, w = img1.shape
    dst = find_image_in_frame(dmatches, pts1, pts2, h, w)

    img2 = cv2.polylines(original, [np.int32(dst)], True, (0, 0, 255), 10,
                         cv2.LINE_AA)
    res = cv2.drawMatches(img1,
                          pts1,
                          img2,
                          pts2,
                          dmatches[:5],
                          None,
                          flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)

    return res
Example #16
def draw_broken_line(canvas, color, closed, points, width):
    """Draws broken lines in a surface given a set of points, width and color"""
    # Select which lines are going to be rendered from the set of lines
    broken_lines = [
        x for n, x in enumerate(zip(*(iter(points), ) * 20)) if n % 3 == 0
    ]

    # Draw selected lines
    for line in broken_lines:
        cv.polylines(
            img=canvas,
            pts=np.int32([line]),
            isClosed=closed,
            color=color,
            thickness=width,
        )
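A hedged usage sketch for the two line helpers above, drawing both styles onto a blank canvas (cv is assumed to be the usual import cv2 as cv alias; the point list is illustrative):

import cv2 as cv
import numpy as np

canvas = np.zeros((200, 400, 3), np.uint8)
points = [(10 + 4 * i, 80) for i in range(90)]  # a long horizontal polyline

draw_solid_line(canvas, (255, 255, 255), False, points, 2)
draw_broken_line(canvas, (0, 255, 255), False,
                 [(px, py + 40) for px, py in points], 2)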
Example #17
    def get_comp_outline(self, show=False):
        """
        Function to retrieve the outer contour of Connected Component.

        parameters
        --------------------------------------
        show : bool, optional, default : False
            Whether to show the annotated image with the outline
        
        returns
        --------------------------------------
        tuple - (outlines, annotated_img)

        Returns outline and Annotated Image
        """

        if not hasattr(self, 'swt_mat'):
            raise Exception(
                "Call 'swttransform' on the image before calling this function"
            )

        outlines = []
        temp = self.swtlabelled_pruned13C.copy()
        for label, labelprops in self.components_props.items():
            loutline = labelprops['bbm_outline']
            outlines.append(loutline)

            temp = cv2.polylines(temp, loutline, True, (0, 0, 255), 1, 4)

        if show:
            imgshow(temp, 'Component Outlines')

        return outlines, temp
Example #18
def plot_pose_box(image, P, kpt, color=(0, 255, 0), line_width=2):
    ''' Draw a 3D box as annotation of pose. Ref:https://github.com/yinguobing/head-pose-estimation/blob/master/pose_estimator.py
    Args: 
        image: the input image
        P: (3, 4). Affine Camera Matrix.
        kpt: (68, 3).
    '''
    image = image.copy()

    point_3d = []
    rear_size = 90
    rear_depth = 0
    point_3d.append((-rear_size, -rear_size, rear_depth))
    point_3d.append((-rear_size, rear_size, rear_depth))
    point_3d.append((rear_size, rear_size, rear_depth))
    point_3d.append((rear_size, -rear_size, rear_depth))
    point_3d.append((-rear_size, -rear_size, rear_depth))

    front_size = 105
    front_depth = 110
    point_3d.append((-front_size, -front_size, front_depth))
    point_3d.append((-front_size, front_size, front_depth))
    point_3d.append((front_size, front_size, front_depth))
    point_3d.append((front_size, -front_size, front_depth))
    point_3d.append((-front_size, -front_size, front_depth))
    point_3d = np.array(point_3d, dtype=np.float64).reshape(-1, 3)  # np.float is removed in NumPy >= 1.24

    # Map to 2d image points
    point_3d_homo = np.hstack((point_3d, np.ones([point_3d.shape[0],
                                                  1])))  #n x 4
    point_2d = point_3d_homo.dot(P.T)[:, :2]
    point_2d[:, :2] = point_2d[:, :2] - np.mean(point_2d[:4, :2], 0) + np.mean(
        kpt[:27, :2], 0)
    point_2d = np.int32(point_2d.reshape(-1, 2))

    # Draw all the lines
    cv2.polylines(image, [point_2d], True, color, line_width, cv2.LINE_AA)
    cv2.line(image, tuple(point_2d[1]), tuple(point_2d[6]), color, line_width,
             cv2.LINE_AA)
    cv2.line(image, tuple(point_2d[2]), tuple(point_2d[7]), color, line_width,
             cv2.LINE_AA)
    cv2.line(image, tuple(point_2d[3]), tuple(point_2d[8]), color, line_width,
             cv2.LINE_AA)

    return image
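A hedged sketch of calling plot_pose_box with a synthetic affine camera matrix and landmark array, just to show the expected shapes (all values are illustrative; a real P comes from pose estimation):

import numpy as np

image = np.zeros((480, 640, 3), np.uint8)
P = np.array([[1.0, 0.0, 0.0, 320.0],   # illustrative 3 x 4 affine camera matrix
              [0.0, -1.0, 0.0, 240.0],
              [0.0, 0.0, 1.0, 0.0]])
kpt = np.tile([320.0, 240.0, 0.0], (68, 1))  # illustrative 68 x 3 landmarks
boxed = plot_pose_box(image, P, kpt)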
Example #19
    def run(self):
        escape = False  # Flag to quit program
        # Let's create our working window and set a mouse callback to handle events
        cv2.namedWindow(self.window_name, flags=cv2.WINDOW_AUTOSIZE)
        #cv2.imshow(self.window_name, np.zeros(CANVAS_SIZE, np.uint8))

        frame = cv2.imread(basic_img)
        frame = frame[y:y + h, x:x + w]  # crop image
        cv2.imshow(self.window_name, frame)
        cv2.waitKey(1)
        cv2.setMouseCallback(self.window_name, self.on_mouse)

        while not escape:

            # This is our drawing loop, we just continuously draw new images
            # and show them in the named window
            frame = cv2.imread(basic_img)
            frame = frame[y:y + h, x:x + w]  # crop image
            canvas = frame
            if len(self.points) > 0:
                # Draw all the current polygon segments
                cv2.polylines(canvas, np.array([self.points]), False,
                              FINAL_LINE_COLOR, 3)
                # And also show what the current segment would look like
                cv2.line(canvas, self.points[-1], self.current,
                         WORKING_LINE_COLOR, 3)

            for i in range(len(polygon_points)):
                if len(polygon_points[i]) > 0:
                    cv2.fillPoly(canvas, np.array([polygon_points[i]]),
                                 FINAL_LINE_COLOR)

            # Update the window
            cv2.imshow(self.window_name, canvas)
            # And wait 50ms before next iteration (this will pump window messages meanwhile)
            if cv2.waitKey(50) == 27:  # ESC hit
                # todo: maybe add txt export here
                self.done = True
                escape = True

        # Waiting for the user to press any key
        cv2.waitKey()

        cv2.destroyWindow(self.window_name)
        return canvas
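This method leans on module-level names defined elsewhere (basic_img, the crop window x, y, w, h, polygon_points, and the two colors); a hedged sketch of plausible definitions, every value illustrative:

basic_img = "floorplan.png"   # placeholder image path
x, y, w, h = 0, 0, 640, 480   # crop window applied to each frame
polygon_points = []           # completed polygons, filled in elsewhere
FINAL_LINE_COLOR = (0, 255, 0)
WORKING_LINE_COLOR = (0, 0, 255)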
Example #20
    def addLocalization(self, image: np.ndarray, localize_type: str,
                        fill: bool, radius_multiplier: float = 1.0) -> np.ndarray:
        """
        Add a specific `localize_type` of localization to the input `image`. `fill` parameter tells whether to
        fill the component or not.

        Args:
            image (np.ndarray) : Image on which localization needs to be added
            localize_type (str) : Type of the localization that will be added. Can be only one of
             ['min_bbox', 'ext_bbox', 'outline', 'circular']. Where :
                - `min_bbox` : Minimum Bounding Box
                - `ext_bbox` : External Bounding Box
                - `outline` : Contour
                - `circular` : Circle - With Minimum Bounding Box Centre coordinate and
                 radius = Minimum Bounding Box Circum Radius * radius_multiplier
            fill (bool) : Whether to fill the added localization or not
            radius_multiplier (float) : Minimum Bounding Box Circum Radius inflation parameter. [default = 1.0].
        Returns:
            (np.ndarray) - annotated image
        """
        _color = (0, 0, 255)
        if fill:
            _color = (255, 255, 255)
        _thickness = (np.sqrt(self.image_height ** 2 + self.image_width ** 2)) * (4 / np.sqrt(768 ** 2 + 1024 ** 2))
        _thickness = int(_thickness)
        if _thickness == 0:
            _thickness = 1
        if localize_type == 'min_bbox' and not fill:
            image = cv2.polylines(img=image, pts=[self.min_bbox], isClosed=True, color=_color, thickness=_thickness)
        elif localize_type == 'ext_bbox' and not fill:
            image = cv2.polylines(img=image, pts=[self.ext_bbox], isClosed=True, color=_color, thickness=_thickness)
        elif localize_type == 'outline' and not fill:
            image = cv2.polylines(img=image, pts=self.outline, isClosed=True, color=_color, thickness=_thickness)
        elif localize_type == 'min_bbox' and fill:
            image = cv2.fillPoly(img=image, pts=[self.min_bbox], color=_color)
        elif localize_type == 'ext_bbox' and fill:
            image = cv2.fillPoly(img=image, pts=[self.ext_bbox], color=_color)
        elif localize_type == 'outline' and fill:
            image = cv2.fillPoly(img=image, pts=self.outline, color=_color)
        elif localize_type == 'circular' and fill:
            image = cv2.circle(img=image, center=tuple(np.uint32(self.min_bbox_centre)),
                               radius=np.uint32(self.min_bbox_circum_radii * radius_multiplier), color=255,
                               thickness=-1)
        return image
Example #21
    def test_threshold(self, img_name):
        img_name = format_name(img_name)
        full_img_name = os.path.join(self.folder_imgs, img_name)
        img = cv2.imread(full_img_name + '.png')
        height, width = img.shape[:2]

        with open(full_img_name + '.txt', 'r') as file:
            coords = file.readlines()
        thick = 2
        red = (0, 0, 255)
        green = (0, 255, 0)

        for gate in self.gate_pairs[img_name]:
            if gate[0] is not None:
                coord_img = self.img_gates[img_name][int(gate[0])]
                coord_img_old = self.new_old_gate_match[img_name][tuple(coord_img)]

                # polylines needs 32-bit integer points
                img_gate_old = np.array([(coord_img_old[0], coord_img_old[1]),
                                         (coord_img_old[2], coord_img_old[3]),
                                         (coord_img_old[4], coord_img_old[5]),
                                         (coord_img_old[6], coord_img_old[7]),
                                         (coord_img_old[0], coord_img_old[1])],
                                        dtype=np.int32)
                cv2.polylines(img, [img_gate_old], False, green, 2)

            if gate[1] is not None:
                coord_pred = self.pred_gates[img_name][int(gate[1]), 0]
                pred_lef = int((coord_pred[0] - coord_pred[2] / 2
                                / self.gate_area_rescale_x) * self.rx)
                pred_rig = int((coord_pred[0] + coord_pred[2] / 2
                                / self.gate_area_rescale_x) * self.rx)
                pred_bot = int((coord_pred[1] - coord_pred[3] / 2
                                / self.gate_area_rescale_y) * self.ry)
                pred_top = int((coord_pred[1] + coord_pred[3] / 2
                                / self.gate_area_rescale_y) * self.ry)

                pred_gate = np.array([(pred_lef, pred_top), (pred_rig, pred_top),
                                      (pred_rig, pred_bot), (pred_lef, pred_bot),
                                      (pred_lef, pred_top)], dtype=np.int32)
                cv2.polylines(img, [pred_gate], False, red, 2)

        cv2.imshow('image', img)
        cv2.waitKey(0)
Example #22
    def get_min_bbox(self, show=False, padding=5):
        """
        Function to retrieve the vertices of the BBox occupying the minimum
        area rectangle for a Connected Component.

        parameters
        --------------------------------------
        show : bool, optional, default : False
            Whether to show the annotated image with min_bboxes
        
        padding : int, optional, default : 5
            Expansion coefficient (in the diagonal direction) for each
            bbox
        
        returns
        --------------------------------------
        tuple - (min_bboxes, annotated_img)

        Returns Minimum Area BBoxes vertices and Annotated Image
        """
        if not hasattr(self, 'swt_mat'):
            raise Exception(
                "Call 'swttransform' on the image before calling this function"
            )

        min_bboxes = []
        annotated_img = self.swtlabelled_pruned13C.copy()

        for label, labelprops in self.components_props.items():
            bbm_bbox = np.int32(labelprops['bbm_bbox'])

            # Calculate centre coordinates
            _tr, _br, _bl, _tl = bbm_bbox.copy()
            _d1_vec = _tr - _bl
            _d2_vec = _tl - _br
            _d1_ang = -math.atan2(_d1_vec[1], _d1_vec[0])
            _d2_ang = -math.atan2(_d2_vec[1], _d2_vec[0])

            _tr = _tr + padding * np.array([np.cos(_d1_ang), -np.sin(_d1_ang)])
            _br = _br - padding * np.array(
                [-np.cos(np.pi - _d2_ang), -np.sin(np.pi - _d2_ang)])
            _bl = _bl - padding * np.array(
                [-np.cos(np.pi - _d1_ang), -np.sin(np.pi - _d1_ang)])
            _tl = _tl + padding * np.array([np.cos(_d2_ang), -np.sin(_d2_ang)])
            bbm_bbox = np.c_[_tr, _br, _bl, _tl].T.astype(int)

            min_bboxes.append(bbm_bbox)
            annotated_img = cv2.polylines(annotated_img, [bbm_bbox], True,
                                          (0, 0, 255), 1)

        if show:
            imgshow(annotated_img, 'Minimum Bounding Box')

        return min_bboxes, annotated_img
Example #23
    def __draw_poly(self, image, filled, translation):
        params = self.dict_parameters
        keys = params.keys()

        assert all(key in keys for key in ['pts', 'color'])
        if filled:
            cv2.fillPoly(image,
                         pts=[params['pts']],
                         color=params['color'],
                         lineType=params.get('lineType') or 8,
                         shift=params.get('shift') or 0,
                         offset=params.get('offset') or translation)
        else:
            cv2.polylines(image,
                          pts=[params['pts'] + translation],
                          isClosed=True,
                          color=params['color'],
                          thickness=params.get('thickness') or 1,
                          lineType=params.get('lineType') or 8,
                          shift=params.get('shift') or 0)
Example #24
    def run_grouping(self):
        self.generate_comp_bubble()
        while len(self.ungrouped_labels) > 0:
            curr_label = list(self.ungrouped_labels)[0]
            curr_bucket = self.grouplabel(label=curr_label,
                                          bucket=[curr_label])
            self.grouped_labels.append(curr_bucket)
            self.ungrouped_labels = self.ungrouped_labels.difference(
                set(curr_bucket))

        self.grouped_bubblebbox = []

        self.grouped_annot_bubble = np.zeros(self.labelmask.shape,
                                             dtype=np.uint8)
        self.grouped_annot_bubble = cv2.cvtColor(self.grouped_annot_bubble,
                                                 cv2.COLOR_GRAY2BGR)

        self.grouped_annot = np.zeros(self.labelmask.shape, dtype=np.uint8)
        self.grouped_annot = cv2.cvtColor(self.grouped_annot,
                                          cv2.COLOR_GRAY2BGR)

        for each_group in self.grouped_labels:
            mask = np.zeros(self.labelmask.shape, dtype=np.uint8)
            for each_label in each_group:
                label_ct, label_bx = self.get_attr(each_label,
                                                   mode='proximity')
                radii = max([
                    np.linalg.norm(epnt[::-1] - label_ct) for epnt in label_bx
                ])
                mask += self.create_circular_mask(label_ct[::-1], radii)

            contours, hierarchy = cv2.findContours(mask.copy(),
                                                   cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_NONE)
            self.grouped_bubblebbox.append(contours)

            mask = np.zeros(self.labelmask.shape, dtype=np.uint8)
            for each_label in each_group:
                mask += self.labelmask == each_label
            mask *= 255

            self.grouped_annot_bubble += cv2.cvtColor(mask.copy(),
                                                      cv2.COLOR_GRAY2BGR)
            self.grouped_annot += cv2.cvtColor(mask.copy(), cv2.COLOR_GRAY2BGR)
            cv2.drawContours(self.grouped_annot_bubble, contours, -1,
                             (0, 0, 255), self.bubble_width)

            rotrect = cv2.minAreaRect(contours[0])
            combbbox = cv2.boxPoints(rotrect)
            # polylines draws in place and returns the image itself, so plain
            # assignment avoids doubling the pixel values
            self.grouped_annot = cv2.polylines(self.grouped_annot,
                                               np.int32([combbbox]), True,
                                               (0, 0, 255), 2)

        return self.grouped_labels, self.grouped_bubblebbox, self.grouped_annot_bubble, self.grouped_annot, self.maskviz, self.maskcomb
Example #25
    def generate_comp_bubble(self):

        for label, props in self.comp_props.items():

            label_ct = np.array([props['bbm_cy'],
                                 props['bbm_cx']]).astype(np.uint16)
            label_bx = props['bbm_bbox']
            label_an = props['bbm_anchor']
            radii = max(
                [np.linalg.norm(epnt[::-1] - label_ct)
                 for epnt in label_bx]) * self.lookup_radii_multiplier

            cv2.putText(self.maskviz, str(label), tuple(label_an),
                        cv2.FONT_HERSHEY_PLAIN, 5, 2, 1, cv2.LINE_AA)
            cv2.polylines(self.maskviz, np.int32([label_bx]), True, 1, 1)
            mask = self.create_circular_mask(label_ct[::-1], radii)
            self.comp_dstack.append(mask * label)
            self.maskcomb += mask

        self.comp_dstack = np.dstack(tuple(self.comp_dstack))
Example #26
    def get_gaze_ratio(self, lmk):
        eye_region = np.array([(lmk[0].x, lmk[0].y), (lmk[1].x, lmk[1].y),
                               (lmk[2].x, lmk[2].y), (lmk[3].x, lmk[3].y),
                               (lmk[4].x, lmk[4].y), (lmk[5].x, lmk[5].y)],
                              np.int32)
        # cv.polylines(frame, [eye_region], True, (0, 0, 255), 2)
        height, width, _ = self.frame.shape
        mask = np.zeros((height, width), np.uint8)
        cv.polylines(mask, [eye_region], True, 255, 2)
        cv.fillPoly(mask, [eye_region], 255)

        gray = cv.cvtColor(self.frame, cv.COLOR_BGR2GRAY)
        eye = cv.bitwise_and(gray, gray, mask=mask)

        min_x = max(0, np.min(eye_region[:, 0]))
        max_x = np.max(eye_region[:, 0])
        min_y = max(0, np.min(eye_region[:, 1]))
        max_y = np.max(eye_region[:, 1])

        gray_eye = eye[min_y:max_y, min_x:max_x]
        _, threshold_eye = cv.threshold(gray_eye, 40, 255, cv.THRESH_BINARY)
        h, w = threshold_eye.shape

        left_side_threshold = threshold_eye[0:h, 0:int(w / 2)]
        left_side_white = cv.countNonZero(left_side_threshold)

        right_side_threshold = threshold_eye[0:h, int(w / 2):w]
        right_side_white = cv.countNonZero(right_side_threshold)

        # print('left white:', left_side_white)
        # print('right white:', right_side_white)

        if left_side_white == 0 and right_side_white == 0:
            gaze_ratio = 1
        elif left_side_white == 0:
            gaze_ratio = 0.1
        elif right_side_white == 0:
            gaze_ratio = 10
        else:
            gaze_ratio = left_side_white / right_side_white
        return gaze_ratio
Example #27
    def show(image):
        result = Recognition().recognize(image)

        polygon = []
        for qr in result['qr']:
            pts = np.array(
                list(
                    map(lambda point: [point.x, point.y],
                        qr['location'].points)), np.int32).reshape((-1, 1, 2))
            polygon.append(pts)

        for ring in result['rings']:
            cv2.ellipse(img=image,
                        center=(int(ring.center.x), int(ring.center.y)),
                        axes=(int(ring.maxLength), int(ring.minLength)),
                        angle=ring.angle,
                        color=(0, 255, 255),
                        startAngle=0,
                        endAngle=360)

        cv2.polylines(image, polygon, True, (0, 255, 255))
        cv2.imshow('detected circles', image)
        cv2.moveWindow('detected circles', 20, 20)
Example #28
    def addLocalization(self, image: np.ndarray, localize_type: str, fill: bool) -> np.ndarray:
        """
        Add a specific `localize_type` of localization to the input `image`. `fill` parameter tells whether to
        fill the component or not.

        Args:
            image (np.ndarray) : Image on which localization needs to be added
            localize_type (str) : Type of the localization that will be added. Can be only one of
             ['bbox', 'bubble', 'polygon']. Where
                - `bbox` : Bounding Box
                - `bubble` : Bubble Boundary
                - `polygon` : Contour Boundary
            fill (bool) : Whether to fill the added localization or not
        Returns:
            (np.ndarray) - annotated image
        """
        _color = (0, 0, 255)
        if fill:
            _color = (255, 255, 255)
        _thickness = (np.sqrt(self.image_height ** 2 + self.image_width ** 2)) * (4 / np.sqrt(768 ** 2 + 1024 ** 2))
        _thickness = int(_thickness)
        if _thickness == 0:
            _thickness = 1
        if localize_type == 'bbox' and not fill:
            image = cv2.polylines(img=image, pts=[self.bbox], isClosed=True, color=_color, thickness=_thickness)
        elif localize_type == 'bubble' and not fill:
            image = cv2.polylines(img=image, pts=self.bubble, isClosed=True, color=_color, thickness=_thickness)
        elif localize_type == 'polygon' and not fill:
            image = cv2.polylines(img=image, pts=self.polygon, isClosed=True, color=_color, thickness=_thickness)
        elif localize_type == 'bbox' and fill:
            image = cv2.fillPoly(img=image, pts=[self.bbox], color=_color)
        elif localize_type == 'bubble' and fill:
            image = cv2.fillPoly(img=image, pts=self.bubble, color=_color)
        elif localize_type == 'polygon' and fill:
            image = cv2.fillPoly(img=image, pts=self.polygon, color=_color)
        return image
Example #29
def upload():
    # file=request.files['temp']
    f = request.files['temp']
    tempname = request.form['tempname']
    temp_path = '../templates/'
    name = f.filename.replace(' ', '_')
    print(tempname)
    f.save(secure_filename(f.filename))

    inputImage = cv2.imread(name)
    inputImageGray = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)

    edges = cv2.Canny(inputImageGray, 150, 200, apertureSize=3)

    print(edges)
    edges = abs(cv2.subtract(255, edges))

    minLineLength = 30
    maxLineGap = 5
    # rho is 1 pixel; passing the length/gap limits as keywords keeps them
    # out of the wrong positional slots of HoughLinesP
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 30,
                            minLineLength=minLineLength, maxLineGap=maxLineGap)
    if lines is not None:  # HoughLinesP returns None when nothing is found
        for line in lines:
            for x1, y1, x2, y2 in line:
                pts = np.array([[x1, y1], [x2, y2]], np.int32)
                cv2.polylines(inputImage, [pts], True, (0, 255, 0))

    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(inputImage, "Tracks Detected", (500, 250), font, 0.5, 255)

    cv2.imwrite(temp_path + tempname + '.jpeg', edges)
    cv2.waitKey(0)

    os.remove(name)

    filename = tempname + '.jpeg'
    return send_file(temp_path + filename, mimetype='image/jpeg')
Example #30
    def get_extreme_bbox(self, show=False, padding=5):
        """
        Function to retrieve the vertices of the BBox of a Connected Component.

        parameters
        --------------------------------------
        show : bool, optional, default : False
            Whether to show the annotated image with extreme_bboxes
        
        padding : int, optional, default : 5
            Expansion coefficient for each bbox
        
        returns
        --------------------------------------
        tuple - (ext_bboxes, annotated_img)

        Returns BBoxes vertices and Annotated Image
        """

        if not hasattr(self, 'swt_mat'):
            raise Exception(
                "Call 'swttransform' on the image before calling this function"
            )

        ext_bboxes = []
        temp1 = self.swtlabelled_pruned1.copy()
        annotated_img = self.swtlabelled_pruned13C.copy()

        for label, labelprops in self.components_props.items():
            lmask = (temp1 == label).astype(np.uint16)
            if np.sum(lmask) > 0:
                _iy, _ix = lmask.nonzero()
                _tr = [max(_ix) + padding, min(_iy) - padding]
                _br = [max(_ix) + padding, max(_iy) + padding]
                _bl = [min(_ix) - padding, max(_iy) + padding]
                _tl = [min(_ix) - padding, min(_iy) - padding]
                bbe_bbox = np.c_[_tr, _br, _bl, _tl].T.astype(int)
                ext_bboxes.append(bbe_bbox)

                annotated_img = cv2.polylines(annotated_img, [bbe_bbox], True,
                                              (0, 0, 255), 1)

        if show:
            imgshow(annotated_img, 'Extreme Bounding Box')

        return ext_bboxes, annotated_img