Example #1
def getxy(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        contourList.append((y, x))
        # cv2.drawContours( imageContour, contour, -1, ( 128,128,128 )  )
        print "(row, col) = ", (y, x)
        cv2.drawMarker(imageColor, (x, y), (0, 0, 0))
        cv2.imshow('image', imageColor)
    elif event == cv2.EVENT_RBUTTONDOWN:
        contourPoints = np.array(contourList)
        startSnake(contourPoints)
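
For context, a callback like this only fires after it is attached to a window. A minimal sketch of the assumed setup (the image path and the module-level names imageColor, contourList, and startSnake are assumptions based on the snippet above):

import cv2
import numpy as np

imageColor = cv2.imread('input.png')  # image the markers are drawn onto
contourList = []                      # clicked (row, col) points

cv2.imshow('image', imageColor)
cv2.setMouseCallback('image', getxy)  # left-click adds a point, right-click starts the snake
cv2.waitKey(0)
cv2.destroyAllWindows()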
Example #2
                        x, y, w, h = cv2.boundingRect(cnt)

                        # creates a rectangle around contour
                        cv2.rectangle(image, (x, y), (x + w, y + h),
                                      (255, 0, 0), 2)

                        # Prints centroid text in order to double check later on
                        cv2.putText(image,
                                    str(cx) + "," + str(cy),
                                    (cx + 10, cy + 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, .3, (0, 0, 255),
                                    1)

                        cv2.drawMarker(image, (cx, cy), (0, 0, 255),
                                       cv2.MARKER_STAR,
                                       markerSize=5,
                                       thickness=1,
                                       line_type=cv2.LINE_AA)

                        # adds centroids that passed previous criteria to centroid list
                        cxx[i] = cx
                        cyy[i] = cy

        # eliminates zero entries (centroids that were not added)
        cxx = cxx[cxx != 0]
        cyy = cyy[cyy != 0]

        # empty list to later check which centroid indices were added to dataframe
        minx_index2 = []
        miny_index2 = []
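
This fragment assumes cxx and cyy were preallocated with one slot per contour, so that centroids failing the area test stay at 0 and are stripped out above. A hedged sketch of that assumed setup:

import numpy as np

# hypothetical preallocation: one slot per detected contour,
# left at 0 whenever a contour fails the area criteria
cxx = np.zeros(len(contours))
cyy = np.zeros(len(contours))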
Example #3
def gen():
    """Video streaming generator function."""
    cap = cv2.VideoCapture('768x576.avi')

    # Read until video is completed
    while cap.isOpened():
        ret, frame = cap.read()  # import image
        if not ret:  # if the video finished, reopen it and repeat
            cap = cv2.VideoCapture('768x576.avi')
            continue
        if ret:  # if there is a frame continue with code
            image = cv2.resize(frame, (0, 0), None, 1, 1)  # resize image
            gray = cv2.cvtColor(image,
                                cv2.COLOR_BGR2GRAY)  # converts image to gray
            fgmask = sub.apply(gray)  # background subtraction ('sub' is a module-level subtractor)
            kernel = cv2.getStructuringElement(
                cv2.MORPH_ELLIPSE, (5, 5))  # kernel to apply to the morphology
            closing = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
            opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)
            dilation = cv2.dilate(opening, kernel)
            retvalbin, bins = cv2.threshold(
                dilation, 220, 255, cv2.THRESH_BINARY)  # removes the shadows
            contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_SIMPLE)
            minarea = 400
            maxarea = 50000
            # cycle through all contours in the current frame
            for i in range(len(contours)):
                # use hierarchy to count only parent contours (contours not within others)
                if hierarchy[0, i, 3] == -1:
                    area = cv2.contourArea(contours[i])  # area of contour
                    if minarea < area < maxarea:  # area threshold for contour
                        # calculating centroids of contours
                        cnt = contours[i]
                        M = cv2.moments(cnt)
                        cx = int(M['m10'] / M['m00'])
                        cy = int(M['m01'] / M['m00'])
                        # gets bounding points of contour to create rectangle
                        # x,y is top left corner and w,h is width and height
                        x, y, w, h = cv2.boundingRect(cnt)
                        # creates a rectangle around contour
                        cv2.rectangle(image, (x, y), (x + w, y + h),
                                      (0, 255, 0), 2)
                        # Prints centroid text in order to double check later on
                        cv2.putText(image,
                                    str(cx) + "," + str(cy),
                                    (cx + 10, cy + 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, .3, (0, 0, 255),
                                    1)
                        cv2.drawMarker(image, (cx, cy), (0, 255, 255),
                                       cv2.MARKER_CROSS,
                                       markerSize=8,
                                       thickness=3,
                                       line_type=cv2.LINE_8)
        #cv2.imshow("countours", image)
        frame = cv2.imencode('.jpg', image)[1].tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
        #time.sleep(0.1)
        key = cv2.waitKey(20)
        if key == 27:
            break
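
A generator that yields multipart JPEG chunks like this is typically served with Flask; a minimal sketch (the route name and app object are assumptions, not part of the original):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    # each chunk yielded by gen() is one JPEG frame of the MJPEG stream
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')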
Example #4
    def draw_vector(self, event, x, y, flags, params):
        # self.show_img = self.img_copy()
        values = None
        x_list = []
        y_list = []
        z_list = []

        if event == cv2.EVENT_MOUSEWHEEL:  # event code 10
            # the sign of the flag gives the scroll direction
            if flags > 0:
                self.vector_depth += 10
            else:
                self.vector_depth -= 10

        front_point = np.float32([[x, 0, y]])

        front, jac = cv2.projectPoints(front_point, self.rotation_vector,
                                       self.translation_vector,
                                       self.camera_matrix,
                                       self.distioriton_matrix)

        back_point = np.float32([[x, self.room_length, y]])
        back, jac = cv2.projectPoints(back_point, self.rotation_vector,
                                      self.translation_vector,
                                      self.camera_matrix,
                                      self.distioriton_matrix)

        floater_point = np.float32([[x, self.vector_depth, y]])
        floater, jac = cv2.projectPoints(floater_point, self.rotation_vector,
                                         self.translation_vector,
                                         self.camera_matrix,
                                         self.distioriton_matrix)

        # print(x,y)

        front_point = (int(front[0][0][0]), int(front[0][0][1]))
        back_point = (int(back[0][0][0]), int(back[0][0][1]))
        floater_point = (int(floater[0][0][0]), int(floater[0][0][1]))

        self.show_img = cv2.circle(self.show_img, front_point, 8,
                                   (125, 0, 125), 2, 0)
        self.show_img = cv2.drawMarker(self.show_img,
                                       back_point,
                                       color=(255, 255, 0),
                                       markerType=cv2.MARKER_TILTED_CROSS)  # type 1
        self.show_img = cv2.line(self.show_img, front_point, back_point,
                                 (100, 100, 0), 2)
        self.show_img = cv2.circle(self.show_img, front_point, 8,
                                   (255, 255, 0), 2, 0)
        self.show_img = cv2.circle(self.show_img, floater_point, 8,
                                   (255, 0, 0), 2, 0)

        ifp = (int(front_point[0]), int(front_point[1]))
        if (x, y) in self.mapped_dictionary:
            values = self.mapped_dictionary[(x, y)]
            self.show_img = cv2.drawMarker(self.show_img, (x, y),
                                           color=(0, 0, 255),
                                           markerType=cv2.MARKER_DIAMOND)  # type 3
            x_list = [v[0] for v in values]
            y_list = [v[2] for v in values]
            z_list = [v[1] for v in values]

        cv2.imshow(self.window_name, self.show_img)
        cv2.waitKey(1)
        if self.show_3D_plot:
            plt.ion()  # interactive mode so the plot updates without blocking

            if self.fig is None:
                self.fig = plt.figure(1)
                self.ax = ax = plt.axes(projection='3d')
                self.front_plt = ax.scatter3D(front_point[0], front_point[1],
                                              0)
                self.back_plt = ax.scatter3D(back_point[0], back_point[1],
                                             self.vector_depth)
                self.float_plt = ax.scatter3D(floater_point[0],
                                              floater_point[1],
                                              self.vector_depth)
                self.line_plt = ax.plot3D([front_point[0], back_point[0]],
                                          [front_point[1], back_point[1]],
                                          [0, self.room_length],
                                          color='teal')

                # plt.quiver(*origin, V[:,0], V[:,1], color=['r','b','g'], scale=21)
                self.ax.set_xlim(-self.room_width, self.room_width)
                self.ax.set_ylim(-self.room_height, self.room_width)
                self.ax.set_zlim(-self.room_length, self.room_width)
                self.fig.canvas.draw()
                self.fig.canvas.flush_events()
                self.fig.show()
            else:

                self.ax.cla()
                if values:
                    self.points_plt = self.ax.scatter3D(x_list,
                                                        y_list,
                                                        z_list,
                                                        marker="D",
                                                        color="red")
                # plt.quiver((0,0,0), (0,1,0), , color=['r','b','g'], scale=21)
                self.ax.set_xlim(-self.room_width, self.room_width)
                self.ax.set_ylim(-self.room_height, self.room_width)
                self.ax.set_zlim(-self.room_length, self.room_width)
                self.ax.set_xlabel("X")
                self.ax.set_ylabel("Y")
                self.ax.set_zlabel("Z")
                self.float_plt = self.ax.scatter3D(floater_point[0],
                                                   floater_point[1],
                                                   self.vector_depth,
                                                   marker="o",
                                                   color="blue")
                self.front_plt = self.ax.scatter3D(front_point[0],
                                                   front_point[1],
                                                   0,
                                                   marker=("o"),
                                                   color="cyan")
                self.back_plt = self.ax.scatter3D(back_point[0],
                                                  back_point[1],
                                                  self.room_length,
                                                  marker="x",
                                                  color="cyan")
                self.line_plt = self.ax.plot3D([front_point[0], back_point[0]],
                                               [front_point[1], back_point[1]],
                                               [0, self.room_length],
                                               color='teal')
                self.quiver = self.ax.quiver([0, 0, 0], [0, 0, 0], [0, 0, 0],
                                             [self.room_width, 0, 0],
                                             [0, self.room_height, 0],
                                             [0, 0, self.room_length],
                                             length=0.1,
                                             normalize=False)
                self.fig.canvas.draw()
                self.fig.canvas.flush_events()

        return self.show_img
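
As in Example #1, this handler does nothing until it is registered on a window; note also that cv2.EVENT_MOUSEWHEEL is only delivered on GUI backends that support it (e.g. Windows or Qt builds). A hedged sketch of the assumed registration:

cv2.namedWindow(self.window_name)
cv2.setMouseCallback(self.window_name, self.draw_vector)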
Example #5
    def find_cube_side(self):
        cam_idx = [1, 2]
        # tracking_points_all = np.array([self.views[j].find_right_top_bottom_tracking_points() for j in cam_idx])

        tracking_points_all = np.array([[[137, 55], [137, 100], [102, 127],
                                         [71, 104], [60, 58], [96, 71],
                                         [103, 47], [137, 79], [98, 99]],
                                        [[134, 61], [133, 95], [125, 113],
                                         [86, 109], [81, 68], [125, 70],
                                         [101, 58], [134, 79], [125, 91]]])
        i = 0
        colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (0, 0, 0),
                  (255, 255, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255),
                  (128, 128, 255)]
        ims = []
        for j in cam_idx:
            im = self.views[j].im.copy()
            for k in range(tracking_points_all[i].shape[0]):
                x, y = tracking_points_all[i][k].astype(int)
                cv2.drawMarker(im, (int(x), int(y)), colors[k], markerSize=5)
                cv2.putText(im,
                            str(k), (int(x), int(y)),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            .5,
                            colors[k],
                            thickness=1)
            i += 1
            ims.append(im)
            # cv2.imwrite(str(j) + '.png', im)
        mapped_xyzs = []
        mapped_enus = []
        errs = []
        for i in range(tracking_points_all.shape[1]):
            xyz, enu, err = self.extract_3d_points(
                tracking_points_all[:,
                                    i], [self.views[j].cam for j in cam_idx])
            mapped_xyzs.append(xyz)
            mapped_enus.append(enu)
            errs.append(err)
            for k in range(len(cam_idx)):
                im_ = self.views[cam_idx[k]].cam.viz_plane(200, 1)
                im = (.5 * ims[k] + .5 * im_).astype(np.uint8)
                cv2.imwrite(str(cam_idx[k]) + '-en_overlay.png', im)
        mapped_xyzs = np.array(mapped_xyzs)
        mapped_enus = np.array(mapped_enus)
        side_lens = np.linalg.norm(mapped_xyzs[1:] - mapped_xyzs[:-1], axis=1)
        side_len = np.mean(side_lens[[0, 1, 2, 3, 4, 7]])
        side_len_uncertainty = side_lens.std()
        top_center_enu_estimates = np.vstack([
            mapped_enus[[4, 0]].mean(axis=0), mapped_enus[[5, 6]].mean(axis=0)
        ])
        top_center_enu = top_center_enu_estimates.mean(axis=0)
        bottom_center_enu = mapped_enus[[1, 3]].mean(axis=0)
        top_bottom_centroid_enu = np.mean([top_center_enu, bottom_center_enu],
                                          axis=0)
        corner_centroid_enu_estimates = np.array([
            np.mean(mapped_enus[[0, 3]], axis=0),
            np.mean(mapped_enus[[2, 6]], axis=0),
            np.mean(mapped_enus[[1, 4]], axis=0)
        ])
        centroid_enu = np.vstack(
            [top_bottom_centroid_enu,
             corner_centroid_enu_estimates]).mean(axis=0)
        corner_vertices = mapped_enus[:7]
        centroid_corner_dists = np.linalg.norm(corner_vertices - centroid_enu,
                                               axis=1)
        centroid_estimation_uncertainty = centroid_corner_dists.std()

        return side_len, centroid_enu, corner_vertices, mapped_enus,\
               centroid_estimation_uncertainty, side_len_uncertainty
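
The extract_3d_points helper is not shown. One plausible minimal version triangulates a point seen in two calibrated views with cv2.triangulatePoints (the cam.P projection-matrix attribute, and skipping the ENU conversion, are assumptions):

import cv2
import numpy as np

def extract_3d_points_sketch(pts_2d, cams):
    """Hypothetical two-view triangulation; cams[k].P is assumed to be
    the 3x4 projection matrix of view k."""
    p1 = np.float64(pts_2d[0]).reshape(2, 1)
    p2 = np.float64(pts_2d[1]).reshape(2, 1)
    X_h = cv2.triangulatePoints(cams[0].P, cams[1].P, p1, p2)
    return (X_h[:3] / X_h[3]).ravel()  # dehomogenize to a 3-vector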
Example #6
    def _state_follow(self):
        """
        Spot will position its body to follow the light
        """
        from simple_pid import PID

        try:
            # Get an image from the robot
            img = self._robot.get_image()

            # Find the center of the image
            img_height, img_width, _ = img.shape
            img_center_x = img_width // 2
            img_center_y = img_height // 2

            # The ROI covers most of the frame; the crop below excludes the
            # corner where the plastic reflects the cmd light
            x1 = 0
            x2 = int(0.8 * img_width)
            y1 = 0
            y2 = int(0.8 * img_height)

            # Crop it to the ROI
            roi = img[y1:y2, x1:x2]

            # Detect a blob
            light_found, img_with_keypoints, pt = self._detect_blob(roi, 3500)

            # Check to see if we see a blob
            if not light_found:

                # If we have an initial point, that means we were following
                # and now the light is gone, so we'll stop
                if self._init_pt is not None and self._light_count > 50:
                    self._state_idx = self._state_idx + 1
                    self._init_pt = None
                    self._light_count = 0

                self._light_count = self._light_count + 1
                cv2.imshow('', roi)
                cv2.waitKey(5)
                return

            # Save the initial point if we don't have one yet
            if self._init_pt is None:
                self._init_pt = pt

            # Mark the 2 points for visualization
            cv2.drawMarker(img_with_keypoints, tuple(map(int, pt)),
                           (255, 0, 0))
            cv2.drawMarker(img_with_keypoints, tuple(map(int, self._init_pt)),
                           (0, 255, 0))

            # Maximum angle range is +/- 0.5 [rad]
            MAX_ANGLE = 0.50
            self._pitch_pid.output_limits = (-MAX_ANGLE, MAX_ANGLE)

            # Figure out how much to yaw and pitch
            adj = numpy.divide(numpy.subtract(self._init_pt, pt),
                               self._init_pt) * MAX_ANGLE
            yaw_cmd = self._yaw_pid(-adj[0])
            pitch_cmd = self._pitch_pid(adj[1])

            # Add text to show adjustment value
            font = cv2.FONT_HERSHEY_SIMPLEX
            bottom_left = (25, 25)
            scale = 0.5
            color = (0, 255, 0)
            thickness = 1
            disp_text = 'Yaw: {}     Pitch: {}'.format(adj[0], pitch_cmd)
            cv2.putText(img_with_keypoints, disp_text, bottom_left, font,
                        scale, color, thickness)

            cv2.imshow('', img_with_keypoints)
            cv2.waitKey(5)

            self._robot.orient(yaw_cmd, pitch_cmd, 0)

        except TypeError as err:
            print(err)
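
The _yaw_pid and _pitch_pid controllers come from the simple_pid package imported above. A hedged sketch of how they might be constructed elsewhere in the class (the gains are placeholder assumptions):

from simple_pid import PID

# hypothetical gains; setpoint=0 drives the adjustment error to zero
self._yaw_pid = PID(Kp=1.0, Ki=0.0, Kd=0.05, setpoint=0)
self._pitch_pid = PID(Kp=1.0, Ki=0.0, Kd=0.05, setpoint=0)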
Example #7
File: FAST.py  Project: LouisChen1905/FAST
def FAST(argv=sys.argv[1:]):
    img = cv2.imread(argv[1])

    height, width, channels = img.shape
    height = int(height / 2)
    width = int(width / 2)
    dim = (width, height)
    img = cv2.resize(img, dim, interpolation=cv2.INTER_LINEAR)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # imread returns BGR
    cv2.GaussianBlur(gray, (3, 3), 0, gray)
    result = img.copy()

    keypoints = {}
    filtered_keypoints = []
    keypoints_score = np.zeros(shape=[0, width], dtype=int)
    for i in range(radius, height - radius):
        score_line = np.zeros(shape=[1, width], dtype=int)
        keypoints_line = np.empty(shape=[0, 2], dtype=int)
        for j in range(radius, width - radius):
            center = gray[i, j]
            a = 0
            pixel_diff = np.int16(gray[i + fast_index[0][0],
                                       j + fast_index[0][1]]) - center
            a += 1 if math.fabs(pixel_diff) > pixel_t else 0
            pixel_diff = np.int16(gray[i + fast_index[8][0],
                                       j + fast_index[8][1]]) - center
            a += 1 if math.fabs(pixel_diff) > pixel_t else 0
            if a < 2:
                continue
            pixel_diff = np.int16(gray[i + fast_index[4][0],
                                       j + fast_index[4][1]]) - center
            a += 1 if math.fabs(pixel_diff) > pixel_t else 0
            pixel_diff = np.int16(gray[i + fast_index[11][0],
                                       j + fast_index[11][1]]) - center
            a += 1 if math.fabs(pixel_diff) > pixel_t else 0

            if a < 3:
                continue

            same_pixel_index = []
            for k in range(len(fast_index)):
                pixel_diff = np.int16(gray[i + fast_index[k][0],
                                           j + fast_index[k][1]]) - center
                if math.fabs(pixel_diff) <= pixel_t:
                    same_pixel_index.append(k)

            if len(same_pixel_index) > 1:
                for l in range(len(same_pixel_index)):
                    dist = 0
                    if l >= 1:
                        dist = same_pixel_index[l] - same_pixel_index[l - 1]
                    else:
                        dist = 16 - (same_pixel_index[len(same_pixel_index) -
                                                      1] - same_pixel_index[l])
                    if math.fabs(dist) >= threshold_num:
                        # print("Found key point")
                        # cv2.drawMarker(result, (j,i), (255, 0, 0),
                        #                markerType=cv2.MARKER_SQUARE,
                        #                markerSize=6, thickness=1)
                        score = corner_score((i, j), gray)
                        score_line[0, j] = score
                        keypoints_line = np.append(keypoints_line, [[i, j]],
                                                   axis=0)
                        # keypoints = np.sort(keypoints, axis=0, kind='quicksort')
                        break
            else:
                score = corner_score((i, j), gray)
                score_line[0, j] = score
                keypoints_line = np.append(keypoints_line, [[i, j]], axis=0)

        keypoints_score = np.append(keypoints_score, score_line, axis=0)
        keypoints[i] = keypoints_line

        # not enough rows accumulated yet for non-maximal suppression
        if np.size(keypoints_score, axis=0) < 3:
            continue

        prev_idx = i - 2
        curr_idx = i - 1
        next_idx = i
        try:
            for k in range(len(keypoints[curr_idx])):
                index = keypoints[curr_idx][k]
                score = keypoints_score[curr_idx - 3, index[1]]
                if score > keypoints_score[prev_idx-3, index[1] - 1] \
                        and score > keypoints_score[prev_idx-3, index[1]] \
                        and score > keypoints_score[prev_idx-3, index[1] + 1] \
                        and score > keypoints_score[curr_idx-3, index[1] - 1] \
                        and score > keypoints_score[curr_idx-3, index[1] + 1] \
                        and score > keypoints_score[next_idx-3, index[1] - 1] \
                        and score > keypoints_score[next_idx-3, index[1]] \
                        and score > keypoints_score[next_idx-3, index[1] + 1]:
                    filtered_keypoints.append((int(index[1]), int(index[0])))
        except KeyError:
            continue

    for i in range(len(filtered_keypoints)):
        cv2.drawMarker(result,
                       filtered_keypoints[i], (255, 0, 0),
                       markerType=cv2.MARKER_SQUARE,
                       markerSize=6,
                       thickness=1)
    cv2.imshow("FAST result", result)

    # Initiate FAST object with default values
    fast = cv2.FastFeatureDetector_create()
    # find and draw the keypoints
    kp = fast.detect(img, None)
    img2 = cv2.drawKeypoints(img, kp, None, color=(255, 0, 0))
    cv2.imshow("FAST opencv result", img2)

    while True:
        if cv2.waitKey(20) & 0xFF == 27:
            break
    cv2.destroyAllWindows()
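
radius, pixel_t, threshold_num, fast_index, and corner_score are module-level names not shown above. A plausible minimal sketch of them (the exact score formula the original uses is an assumption):

import numpy as np

radius = 3
pixel_t = 20          # intensity-difference threshold
threshold_num = 12    # required arc length on the 16-pixel circle

# (d_row, d_col) offsets of the 16-pixel Bresenham circle of radius 3
fast_index = [(-3, 0), (-3, 1), (-2, 2), (-1, 3), (0, 3), (1, 3), (2, 2),
              (3, 1), (3, 0), (3, -1), (2, -2), (1, -3), (0, -3), (-1, -3),
              (-2, -2), (-3, -1)]

def corner_score(pt, gray):
    """Hypothetical score: sum of absolute differences between the center
    pixel and the 16 circle pixels."""
    i, j = pt
    center = np.int16(gray[i, j])
    return int(sum(abs(np.int16(gray[i + di, j + dj]) - center)
                   for di, dj in fast_index))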
"""
特征提取之关键点检测(GFTTDetector)
"""

import cv2 as cv

image = cv.imread("images/test4.jpg")
cv.imshow("input", image)

# create the GFTT feature detector
gftt = cv.GFTTDetector_create(100, 0.01, 1, 3, False, 0.04)
kp1 = gftt.detect(image, None)
result = image
for marker in kp1:
    result = cv.drawMarker(result,
                           tuple(int(i) for i in marker.pt),
                           color=(0, 255, 0))

cv.imshow("GFTT-Keypoint-Detect", result)
cv.waitKey(0)
cv.destroyAllWindows()
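
The six positional arguments to GFTTDetector_create are maxCorners, qualityLevel, minDistance, blockSize, useHarrisDetector, and k. Roughly the same result can be obtained through the functional goodFeaturesToTrack API; a hedged sketch (the grayscale conversion is added here because the functional form expects a single-channel image):

import cv2 as cv

gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
corners = cv.goodFeaturesToTrack(gray, maxCorners=100, qualityLevel=0.01,
                                 minDistance=1, blockSize=3)
for c in corners:
    x, y = c.ravel()
    cv.drawMarker(image, (int(x), int(y)), color=(0, 255, 0))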
Example #9
    def update(self):
        ret, img = self.cap.read()

        preprocessed = None

        if color_keying:
            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, (4, 15, 100), (70, 170, 255))
            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            mask2 = cv2.inRange(hsv, (140, 0, 50), (165, 160, 255))

            if show_detailed:
                cv2.imshow("1", mask)
                cv2.imshow("2", mask2)

            imask = mask + mask2 > 0
            skin = np.zeros_like(img, np.uint8)
            skin[imask] = img[imask]
            preprocessed = skin
        else:
            preprocessed = img

        fg_mask = self.fgbg.apply(preprocessed)

        whites_count = cv2.countNonZero(fg_mask)

        blur = cv2.GaussianBlur(fg_mask, (11, 11), 0)
        _, gaussian = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        if show_detailed:
            cv2.imshow("gaussian", gaussian)

        contours, hierarchy = cv2.findContours(fg_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        max_area = 0
        ci = None
        for i in range(len(contours)):
            cnt = contours[i]
            area = cv2.contourArea(cnt)
            if area > max_area:
                max_area = area
                ci = i
        if ci is None:
            # no contour found in this frame; nothing to track
            return cv2.cvtColor(img, cv2.COLOR_BGR2RGB), None
        cnt = contours[ci]

        hull = cv2.convexHull(cnt)

        x, y, w, h = cv2.boundingRect(cnt)

        cropped_gaussian = gaussian[y:y + h, x:x + w]
        if show_detailed:
            cv2.imshow("crop", cropped_gaussian)

        drawing = np.zeros(img.shape, np.uint8)
        if whites_count > minimal_count:
            cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 2)
            cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 2)
            cv2.drawContours(img, [cnt], 0, (0, 255, 0), 2)
            cv2.drawContours(img, [hull], 0, (0, 0, 255), 2)
        if DEBUG:
            cv2.drawContours(img, [surfer_thing_shape_hull], 0, (255, 0, 255), 2)

        x_previous = self.x_center
        y_previous = self.y_center

        self.x_center = 0
        self.y_center = 0
        for i in hull:
            self.x_center += i[0][0]
            self.y_center += i[0][1]

        self.x_center /= len(hull)
        self.y_center /= len(hull)

        if x_previous is not None and y_previous is not None and whites_count > minimal_count:
            x_delta = (self.x_center - x_previous)
            y_delta = (self.y_center - y_previous)
        else:
            for s in shapes:
                s.reset_count()
            x_delta = 0
            y_delta = 0

        if whites_count < minimal_count:
            for s in shapes:
                s.reset_count()
            self.x_counter = 0
            self.y_counter = 0
        else:
            self.x_counter += x_delta
            self.y_counter += y_delta

        hull = cv2.convexHull(cnt)

        if whites_count > minimal_count and x_previous is not None and y_previous is not None:
            cv2.drawMarker(drawing, (int(self.x_center), int(self.y_center)), (255, 0, 255))
            cv2.drawMarker(drawing, (int(x_previous), int(y_previous)), (255, 255, 255))
            cv2.drawMarker(img, (int(self.x_center), int(self.y_center)), (255, 0, 255))
            cv2.drawMarker(img, (int(x_previous), int(y_previous)), (255, 255, 255))

        contour_gaussian_cropped = np.matrix(list(map(lambda a: [a[0][0] - x, a[0][1] - y], cnt)))
        self.last_contour = cv2.convexHull(contour_gaussian_cropped)

        if use_hull:
            contour_gaussian_cropped = cv2.convexHull(contour_gaussian_cropped)
            m = max(list(map(lambda x: x[0][1], contour_gaussian_cropped)))
            c = list(filter(lambda y: abs(y[0][1]-m) > 40, contour_gaussian_cropped))
            c = list(map(lambda x: list(x[0]), c))
            c = np.array(c)
            contour_gaussian_cropped = c
            if DEBUG and c.shape[0] != 0:
                cv2.drawContours(img, [contour_gaussian_cropped], 0, (255, 0, 255), 2)

        rets = [x.bestMatch(contour_gaussian_cropped) for x in shapes]
        m_ret = 1
        m_sh = None
        message = None
        for i, sh in zip(rets, shapes):
            if m_ret > i:
                m_ret = i
                m_sh = sh

        if whites_count > minimal_count and m_ret < 0.15:
            m_sh.inc_count()

        if self.x_counter > 400:
            tmp_m_sh = None
            tmp_m_count = -1
            for s in shapes:
                if tmp_m_count < s.count:
                    tmp_m_count = s.count
                    tmp_m_sh = s
            if tmp_m_sh.count > 5:
                message = tmp_m_sh.msg + ' left ' + str(tmp_m_sh.count)

        if self.x_counter < -400:
            tmp_m_sh = None
            tmp_m_count = -1
            for s in shapes:
                if tmp_m_count < s.count:
                    tmp_m_count = s.count
                    tmp_m_sh = s
            if tmp_m_sh.count > 5:
                message = tmp_m_sh.msg + ' right ' + str(tmp_m_sh.count)

        if color_keying:
            cv2.imshow('skin', skin)

        if show_detailed:
            cv2.imshow('fg_mask', fg_mask)
            cv2.imshow('drawing', drawing)

        return cv2.cvtColor(img, cv2.COLOR_BGR2RGB), message
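
The shapes list and its bestMatch method are defined elsewhere. A hedged sketch of what such a class could look like, using cv2.matchShapes (lower value = better match, consistent with the m_ret < 0.15 test above):

import cv2

class Shape:
    """Hypothetical gesture template used by the loop above."""
    def __init__(self, msg, template_contour):
        self.msg = msg
        self.template = template_contour
        self.count = 0

    def bestMatch(self, contour):
        # Hu-moment similarity; 0.0 means identical shapes
        if contour is None or len(contour) < 3:
            return 1.0
        return cv2.matchShapes(self.template, contour,
                               cv2.CONTOURS_MATCH_I1, 0.0)

    def inc_count(self):
        self.count += 1

    def reset_count(self):
        self.count = 0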
Example #10
def draw_corners(img, corners, ids):
    for corner, id in zip(corners, ids):
        x, y = map(int, corner)  # drawMarker/putText need integer coordinates
        cv2.drawMarker(img, (x, y), (255, 0, 0), cv2.MARKER_CROSS, 10, 2)
        cv2.putText(img, str(id), (x, y), cv2.FONT_HERSHEY_PLAIN, 2,
                    (255, 0, 0), 2)
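
A hedged usage sketch (the corner coordinates and ids below are made up for illustration):

import cv2
import numpy as np

img = np.zeros((240, 320, 3), np.uint8)
corners = [(50, 60), (200, 120), (120, 200)]  # hypothetical (x, y) points
ids = [0, 1, 2]
draw_corners(img, corners, ids)
cv2.imshow('corners', img)
cv2.waitKey(0)
cv2.destroyAllWindows()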
Example #11
    def process_video(self):
        global j
        j = 1

        cap = cv2.VideoCapture(self.link.text())

        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        kernel3 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))

        substractor = cv2.createBackgroundSubtractorMOG2(varThreshold=150,
                                                         detectShadows=False)

        self.sayac = 0
        frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # save as .264
        fourcc = cv2.VideoWriter_fourcc('H', '2', '6', '4')
        kayit = cv2.VideoWriter('kayit.264', fourcc, 10,
                                (frame_width, frame_height))

        while True:
            ret, frame = cap.read()
            if not ret:  # end of stream
                break
            self.exit = 1
            mask = substractor.apply(frame)

            mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

            mask = cv2.erode(mask, kernel2, iterations=1)
            mask = cv2.dilate(mask, kernel3, iterations=1)

            contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_NONE)
            # cv2.drawContours(frame, contours, -1, (0,255,0), 3)

            cv2.line(frame, (self.yatay.value(), self.dikey.value()),
                     (self.yatay2.value(), self.dikey2.value()), (0, 255, 0),
                     2)
            m = self.egimofline()

            if 1 >= m >= 0:
                self.detectlinedikey = int(
                    (self.dikey2.value() + self.dikey.value()) / 2)
                for contour in contours:
                    (x, y, w, h) = cv2.boundingRect(contour)
                    a = int(w / 2)
                    b = int(h / 2)
                    if w > 20 and h > 20:
                        cv2.rectangle(frame, (x, y), (x + w, y + h),
                                      (255, 0, 0), 2)
                        centroid = (x + a, y + b)
                        cv2.drawMarker(frame,
                                       centroid, (0, 255, 0),
                                       markerType=cv2.MARKER_CROSS,
                                       markerSize=10)

                        if (self.yatay.value() < x + a < self.yatay2.value() and
                                self.detectlinedikey - 2 < y + b < self.detectlinedikey + 2):
                            self.sayac += 1
                            self.saveJson()
                            print(self.sayac)
            elif m > 1:
                self.detectlineyatay = int(
                    (self.yatay2.value() + self.yatay.value()) / 2)
                for contour in contours:
                    (x, y, w, h) = cv2.boundingRect(contour)
                    a = int(w / 2)
                    b = int(h / 2)
                    if w > 20 and h > 20:
                        cv2.rectangle(frame, (x, y), (x + w, y + h),
                                      (255, 0, 0), 2)
                        centroid = (x + a, y + b)
                        cv2.drawMarker(frame,
                                       centroid, (0, 255, 0),
                                       markerType=cv2.MARKER_CROSS,
                                       markerSize=10)

                        if (self.detectlineyatay - 2 < x + a < self.detectlineyatay + 2 and
                                self.dikey2.value() < y + b < self.dikey.value()):
                            self.sayac += 1
                            self.saveJson()
                            print(self.sayac)
            else:
                break
            """for contour, in zip(contours):
                (x, y, w, h) = cv2.boundingRect(contour)
                a = int(w / 2)
                b = int(h / 2)
                if w > 20 and h > 20 and y + b > 70:
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
                    centroid = (x + a, y + b)
                    cv2.drawMarker(frame, centroid, (0, 255, 0), markerType=50, markerSize=10)

                    if self.yatay.value() < x + a and self.dikey.value() < y + b:
                        self.sayac += 1
                        self.saveJson()
                        print(self.sayac)"""
            cv2.putText(frame, "Vehicle: " + str(self.sayac), (50, 45),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 250, 0), 2)

            self.t = int(self.nmbrofcars.text())

            if self.sayac == self.t and j == 1:
                cv2.imwrite('resim3.jpg', frame)
                self.sendMail()
                j = 0

            kayit.write(frame)

            # write as .bin
            # with open('file1.bin', 'ab') as f:
            #   f.write(frame)
            # self.save264()
            cv2.imshow("Frame", frame)
            cv2.imshow("mask", mask)

            key = cv2.waitKey(25)
            if key == 27:
                break

        cap.release()
        kayit.release()
        cv2.destroyAllWindows()
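
egimofline ("slope of the line" in Turkish) is not shown; a plausible sketch computing the slope of the user-drawn counting line from the four slider values (yatay = horizontal, dikey = vertical):

    def egimofline(self):
        # slope of the counting line; guard against a vertical line
        dx = self.yatay2.value() - self.yatay.value()
        dy = self.dikey2.value() - self.dikey.value()
        if dx == 0:
            return float('inf')
        return abs(dy / dx)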
Example #12
def main():
    im = cv.imread("flag.png")
    gray = cv.cvtColor(im, cv.COLOR_BGR2GRAY)  # imread returns BGR
    _, thresh = cv.threshold(gray, 127, 255, 0)

    # Now find contours using the chain approx none parameter
    # (this 3-value unpacking matches OpenCV 3.x; OpenCV 4.x returns only
    # contours and hierarchy)
    _, contours, _h = cv.findContours(thresh, cv.RETR_TREE,
                                      cv.CHAIN_APPROX_NONE)

    cv.drawContours(im, [contours[0]], -1, (0, 255, 0), 2)
    print("Contours: ", contours)
    print("Contours count: ", len(contours))

    # [Explanation]
    # findContours works so that each separately contoured area of the image
    # gets its own entry: the points for a particular box/shape are stored as
    # a distinct element of the contours list

    # [Working Example] This example works for the flag.png image
    # print(contours[0][0])
    # print(contours[1][0])
    # print(contours[2])

    # Let's determine the moments of the given image using the given contours
    # [Working Example] This example works for the flag.png
    # I have arbitrarily chosen the 0th contour and will work with that here
    star = contours[0]
    # note that moments() returns a dictionary holding m00, m11, and so on,
    # up to third order
    moments = cv.moments(star)

    # [Explanation] I would also like to mention the intuition behind using
    # moments here. For m10: as we go across a particular row of the image,
    # the value of x (the column index) keeps increasing, so an impulse of
    # the function farther from column 0 is assigned a greater weight.
    # What m10 computes, then, is a summation over the image weighted
    # linearly in x, with farther impulses receiving a linearly larger
    # weight; that is exactly why larger values appear for content farther
    # to the right. I'm also wondering whether the contours have to be
    # closed in any way; for that I should probably research how
    # CHAIN_APPROX works.
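    # A quick worked instance of these formulas: for a binary patch with
    # intensity 1 at the four pixels (x, y) = (0,0), (1,0), (0,1), (1,1):
    #   m00 = sum(I)     = 4
    #   m10 = sum(x * I) = 0 + 1 + 0 + 1 = 2
    #   m01 = sum(y * I) = 0 + 0 + 1 + 1 = 2
    # so the centroid (m10/m00, m01/m00) = (0.5, 0.5) is the patch center.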

    # Calculating centroids and displaying area for all contours
    # Calculating the moment for all contours
    all_moments = []
    for c in contours:
        all_moments.append(cv.moments(c))

    print(all_moments)
    mom1 = all_moments[0]
    centroidx = mom1['m10'] / mom1['m00']
    centroidy = mom1['m01'] / mom1['m00']

    cv.drawMarker(im, (int(centroidx), int(centroidy)), (255, 100, 0),
                  cv.MARKER_STAR, 3)

    # Let's also try to bound the star in a bounding box using its contours
    x, y, w, h = cv.boundingRect(contours[0])
    cv.rectangle(im, (x, y), (x + w, y + h), (0, 0, 255), 5)
    box = cv.minAreaRect(contours[0])
    box_p = cv.boxPoints(box)
    box_p = np.intp(box_p)  # integer pixel coordinates (np.int0 is a deprecated alias)

    cv.drawContours(im, [box_p], 0, (0, 255, 255), 2)

    # Let's also label the smallest contour area
    # The area is simply m00

    cv.namedWindow('original', cv.WINDOW_AUTOSIZE)
    cv.moveWindow('original', 0, 0)
    cv.imshow('original', im)
    cv.waitKey(0)
    cv.destroyAllWindows()
    return
Example #13
def image_scanning(imageObject, imageScene, detectionBoxes, heightImageObject,
                   widthImageObject, heightOverlap, widthOverlap):

    heightImageScene, widthImageScene = imageScene.shape[:2]

    heightSteps = int(
        math.floor(heightImageScene / (heightImageObject - heightOverlap)))
    widthSteps = int(
        math.floor(widthImageScene / (widthImageObject - widthOverlap)))

    print('Horizontal Steps: %d' % (widthSteps))
    print('Vertical Steps: %d' % (heightSteps))

    colorMap = np.ones((heightImageScene, widthImageScene), np.uint8) * 255
    colorMap = cv2.cvtColor(colorMap, cv2.COLOR_GRAY2BGR)

    for heightStep in range(0, heightSteps - 1):
        for widthStep in range(0, widthSteps - 1):

            iniHeightPosition = int(
                round((heightImageObject - heightOverlap)) * heightStep)
            iniWidthPosition = int(
                round((widthImageObject - widthOverlap)) * widthStep)

            endHeightPosition = iniHeightPosition + heightImageObject
            endWidthPosition = iniWidthPosition + widthImageObject

            if iniHeightPosition < endHeightPosition - 1 and iniWidthPosition < endWidthPosition - 1:

                subImageFromScene = imageScene[
                    iniHeightPosition:endHeightPosition,
                    iniWidthPosition:endWidthPosition]

                rfactor = 1

                # 'detector' is a module-level feature detector (e.g. ORB or SIFT)
                keyPointsImageObject, descriptorsImageObject = detector.detectAndCompute(
                    imageObject, None)
                keyPointsImageScene, descriptorsImageScene = detector.detectAndCompute(
                    subImageFromScene, None)

                MIN_KEYPOINTS = 12
                if len(keyPointsImageScene) > MIN_KEYPOINTS:

                    # Show object image key points
                    vis = np.zeros((heightImageObject, widthImageObject),
                                   np.uint8)
                    vis[:heightImageObject, :widthImageObject] = imageObject
                    kp_color = (250, 0, 0)

                    for marker in keyPointsImageObject:
                        vis = cv2.drawMarker(vis,
                                             tuple(int(i) for i in marker.pt),
                                             color=kp_color)

                    cv2.imshow('key points', vis)

                    # Do matching and draw resuts for this image
                    match_and_draw(colorMap, detectionBoxes,
                                   keyPointsImageObject, keyPointsImageScene,
                                   descriptorsImageObject,
                                   descriptorsImageScene, iniHeightPosition,
                                   iniWidthPosition)

                cv2.waitKey(10)

    return colorMap
Example #14
                idx = np.argmax(probs)

                prob = probs[idx]
                bbox = candRects[
                    idx]  # take the region with the highest probability of being a phone
            else:
                bbox = None

            if len(candImgs) == 0 or bbox is None:
                print(basefile + ' Not found!')
                cv2.imshow(basefile, tempImg)
            else:
                cx = bbox[0] + bbox[2] / 2.0
                cy = bbox[1] + bbox[3] / 2.0

                cv2.drawMarker(tempImg, (int(cx), int(cy)), (255, 0, 0), 1, 10,
                               2)

                #normalized to the shape
                cx = cx / w
                cy = cy / h

                distance = np.sqrt((cx - x)**2 + (cy - y)**2)
                if distance > 0.05:
                    cv2.imshow(basefile, tempImg)
                    print(basefile, ' too far by ', distance)
    elif isinstance(check_type, list):
        for i in check_type:
            fn = os.path.join(path, i)
            basefile = i

            x = test_set[test_set['imgID'] == i]['x'].values[0]
Example #15
def draw_clusters(image, image_name, radius, boxes, labels_boxes):
    colors = []
    for i in range(len(boxes) + 100):
        random_col = (random.randint(0, 255), random.randint(0, 255),
                      random.randint(0, 255))
        colors.append(random_col)
    red = (0, 0, 255)
    black = (0, 0, 0)
    count = 0
    offset_x = -25
    offset_y = -10
    for idxx, b in enumerate(boxes):
        idx = labels_boxes[idxx]
        col = colors[idx + 1]
        if not WEIGHTED_DISTANCE_CLUSTERING:
            # circle at each of the four box corners
            for corner in (b.x0, b.x1, b.x2, b.x3):
                image = cv2.circle(image,
                                   tuple(corner),
                                   radius,
                                   color=col,
                                   thickness=3)
        else:
            # ellipse at each corner; the y-axis is compressed by the clustering weight
            for corner in (b.x0, b.x1, b.x2, b.x3):
                image = cv2.ellipse(image,
                                    tuple(corner),
                                    (radius, int(radius / Y_WEIGHT)),
                                    angle=0,
                                    startAngle=0,
                                    endAngle=360,
                                    color=col,
                                    thickness=6)

        # image = cv2.putText(image, text=str(labels_boxes[idxx]), org=tuple(b.x0), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.6, color=(36, 255, 12), thickness=2)
        # image = cv2.putText(image, text=str(labels_boxes[idxx]), org=tuple(b.x1), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.6, color=(36, 255, 12), thickness=2)
        # image = cv2.putText(image, text=str(labels_boxes[idxx]), org=tuple(b.x2), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.6, color=(36, 255, 12), thickness=2)
        # image = cv2.putText(image, text=str(labels_boxes[idxx]), org=tuple(b.x3), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.6, color=(36, 255, 12), thickness=2)
        # image = cv2.putText(image, text=str(count), org=tuple([b.x0[0]+offset_x, b.x0[1]+offset_y]), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.6, color=(0, 0, 0), thickness=2)

        # count += 1
        # image = cv2.putText(image, text=str(count), org=tuple([b.x1[0]+offset_x, b.x1[1]+offset_y]), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.6,color=(0, 0, 0), thickness=2)
        # count += 1
        # image = cv2.putText(image, text=str(count), org=tuple([b.x2[0]+offset_x, b.x2[1]+offset_y]), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.6,color=(0, 0, 0), thickness=2)
        # count += 1
        # image = cv2.putText(image, text=str(count), org=tuple([b.x3[0]+offset_x, b.x3[1]+offset_y]), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.6,color=(0, 0, 0), thickness=2)
        # count += 1

    for id_1, box_1 in enumerate(boxes):
        idx = labels_boxes[id_1]
        col = colors[idx + 1]
        pts = np.array([box_1.x0, box_1.x1, box_1.x2, box_1.x3])
        pts = pts.reshape((-1, 1, 2))
        image = cv2.polylines(image, [pts], True, color=col, thickness=7)

        # red cross on each of the four corners
        for corner in (box_1.x0, box_1.x1, box_1.x2, box_1.x3):
            image = cv2.drawMarker(image,
                                   tuple(corner),
                                   color=red,
                                   thickness=9,
                                   markerType=cv2.MARKER_CROSS)

    cv2.imwrite(CLUSTERED_PATH + image_name, image)
Example #16
File: main.py  Project: zooliet/UWTracking
                    if sub_tracker:
                        sub_boundingbox, sub_loc = sub_tracker.update(frame)
                        sub_boundingbox = list(map(int, sub_boundingbox))

                    # the drift threshold (0.25), motion_tracker.interval, and waitKey(x) need tuning
                    if kcf_tracker.peak_value < 0.1: # 0.25:
                        print('[KCF] Disabled: peak value({:.02f}) is too low'.format(kcf_tracker.peak_value))
                        kcf_tracker.enable = False
                        zoom.zoom_to(1)

                    else:
                        kcf_tracker.update_location(boundingbox)
                        # str = "{}x{}({}x{})".format(kcf_tracker.mean_width, kcf_tracker.mean_height, selected_width, selected_height)
                        # util.draw_str(frame_draw, (20, 20), str)
                        cv2.rectangle(frame_draw,(kcf_tracker.x1,kcf_tracker.y1), (kcf_tracker.x2,kcf_tracker.y2), (0,255,0), 1)
                        cv2.drawMarker(frame_draw, tuple(kcf_tracker.center), (0,255,0))
                        request_to_track_flag = True

                        # if something happened...
                        # if test_flag is True:
                        #     test_flag = False
                        if sub_tracker:
                            sub_tracker.update_location(sub_boundingbox)
                            cv2.rectangle(frame_draw,(sub_tracker.x1,sub_tracker.y1), (sub_tracker.x2,sub_tracker.y2), (255,255,0), 1)
                            cv2.drawMarker(frame_draw, tuple(sub_tracker.center), (255,255,0))
                            if (kcf_tracker.mean_area > sub_tracker.mean_area / 0.49) or (kcf_tracker.mean_area < sub_tracker.mean_area / 0.81):
                                resized_window = util.resize_selection({'x1': sub_tracker.x1, 'y1': sub_tracker.y1, 'x2': sub_tracker.x2, 'y2': sub_tracker.y2}, 1/0.8)
                                kcf_tracker.x1 = resized_window['x1']
                                kcf_tracker.y1 = resized_window['y1']
                                kcf_tracker.x2 = resized_window['x2']
                                kcf_tracker.y2 = resized_window['y2']
Example #17
def draw_eyes(img_bgr,
              eyes_landmarks,
              radius,
              offsets_list,
              is_new=True,
              is_show=False,
              save_name=None):
    """
    Draw eye and iris landmarks on the image
    """
    import cv2
    import copy
    import numpy as np

    if is_new:
        img_bgr = copy.deepcopy(img_bgr)

    th = 1
    eye_upscale = 1
    img_bgr = cv2.resize(img_bgr, (0, 0), fx=eye_upscale, fy=eye_upscale)

    for el, r, offsets in zip(eyes_landmarks, radius, offsets_list):
        start_x, start_y, offset_scale = offsets
        real_el = el * offset_scale + [start_x, start_y]
        real_radius = r * offset_scale

        # eye outline (landmarks 0-7)
        cv2.polylines(
            img_bgr,
            [
                np.round(eye_upscale * real_el[0:8]).astype(np.int32).reshape(
                    -1, 1, 2)
            ],
            isClosed=True,
            color=(255, 255, 0),
            thickness=th,
            lineType=cv2.LINE_AA,
        )

        # iris outline (landmarks 8-15)
        cv2.polylines(
            img_bgr,
            [
                np.round(eye_upscale * real_el[8:16]).astype(np.int32).reshape(
                    -1, 1, 2)
            ],
            isClosed=True,
            color=(0, 255, 255),
            thickness=th,
            lineType=cv2.LINE_AA,
        )

        iris_center = real_el[16]
        # landmark 17 is the model's eye center, but it is recomputed below
        # from the two eyelid corners instead
        eye_center = (real_el[0] + real_el[4]) / 2

        # iris center
        cv2.drawMarker(
            img_bgr,
            tuple(np.round(eye_upscale * iris_center).astype(np.int32)),
            color=(255, 0, 255),
            markerType=cv2.MARKER_CROSS,
            markerSize=4,
            thickness=th + 1,
            line_type=cv2.LINE_AA,
        )

        # eyelid (eye) center
        cv2.drawMarker(
            img_bgr,
            tuple(np.round(eye_upscale * eye_center).astype(np.int32)),
            color=(0, 0, 255),
            markerType=cv2.MARKER_CROSS,
            markerSize=4,
            thickness=th + 1,
            line_type=cv2.LINE_AA,
        )

        cv2.circle(img_bgr,
                   center=tuple(np.round(eye_upscale * eye_center).astype(np.int32)),
                   radius=int(round(real_radius)),
                   color=(0, 0, 255))

        if is_show:
            show_img_bgr(img_bgr, save_name=save_name)  # helper defined elsewhere; shows the eyes

    return img_bgr
Example #18
import cv2
import numpy as np

img = cv2.imread("lenna_full.jpg")

center = (img.shape[1] // 2, img.shape[0] // 2)
img = cv2.drawMarker(img, center, color=(255, 0, 255))

cv2.imshow("lenna_full.jpg", img)
cv2.waitKey(0)

cv2.destroyAllWindows()
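
For reference, the full parameter list is drawMarker(img, position, color, markerType, markerSize, thickness, line_type). A short sketch cycling through the built-in marker types:

import cv2
import numpy as np

canvas = np.full((120, 520, 3), 255, np.uint8)
marker_types = [cv2.MARKER_CROSS, cv2.MARKER_TILTED_CROSS, cv2.MARKER_STAR,
                cv2.MARKER_DIAMOND, cv2.MARKER_SQUARE, cv2.MARKER_TRIANGLE_UP,
                cv2.MARKER_TRIANGLE_DOWN]
for k, mt in enumerate(marker_types):
    cv2.drawMarker(canvas, (40 + 70 * k, 60), (255, 0, 255),
                   markerType=mt, markerSize=20, thickness=2)
cv2.imshow('markers', canvas)
cv2.waitKey(0)
cv2.destroyAllWindows()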
Example #19
    def _monitor(self):

        tic = time.time()

        gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)

        self._updateThreshold()
        _, thresholded = cv2.threshold(gray, self.THRESH, 255,
                                       cv2.THRESH_TOZERO)

        # _, contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_NONE)

        contours = contours[:2]

        # if contours.__len__() > 2:
        #     QMessageBox.warning(self, "Thresholding error", "More than 2 projections were found. " + \
        #         "Please increase threshold manually or select a better noise area.")

        cv2.drawContours(self.draw_only_frame, contours, -1, (0, 255, 0), 2)
        self._draw_noiseArea()

        try:
            moments_star_1 = cv2.moments(contours[0])
            moments_star_2 = cv2.moments(contours[1])

        except IndexError:
            print("Only {} were found ! (Must be at least 2)".format(
                len(contours)))

        else:

            try:
                cX_star1 = int(moments_star_1["m10"] / moments_star_1["m00"])
                cY_star1 = int(moments_star_1["m01"] / moments_star_1["m00"])

                cX_star2 = int(moments_star_2["m10"] / moments_star_2["m00"])
                cY_star2 = int(moments_star_2["m01"] / moments_star_2["m00"])

            except ZeroDivisionError:
                return

            if self.enable_seeing.isChecked():
                delta_x = abs(cX_star2 - cX_star1)
                delta_y = abs(cY_star2 - cY_star1)

                self.arr_delta_x.append(delta_x)
                self.arr_delta_y.append(delta_y)

                # self._calcSeeing()
                self._calcSeeing_arcsec()

            cv2.drawMarker(self.draw_only_frame, (cX_star1, cY_star1),
                           color=(0, 0, 255),
                           markerSize=30,
                           thickness=1)
            cv2.drawMarker(self.draw_only_frame, (cX_star2, cY_star2),
                           color=(0, 0, 255),
                           markerSize=30,
                           thickness=1)

        finally:

            self._displayImage()

            threading.Thread(target=self._writeVideoFile, args=(),
                             daemon=True).start()

        toc = time.time()
        elapsed = toc - tic
        try:
            print("FPS max = {}".format(int(1.0 / elapsed)))
        except ZeroDivisionError:
            pass
Example #20
        focal = 1.0
        E, mask = cv2.findEssentialMat(coords2, coords1, focal=focal, pp=(0., 0.), method=cv2.RANSAC, prob=0.999, threshold=1.0)
        # R, t = cv2.recoverPose(E, pcoords2, coords1, focal = 1.0, pp = (0.,0.), mask)
        points, R, t, mask = cv2.recoverPose(E, coords2, coords1)

        t_p = np.dot(t.T, R_pos)
        t_pos = t_pos + t_p.T
        R_pos = np.dot(R, R_pos)

        # print(R_pos)
        # print(t_pos)
        # print("===========================")

        # drawing matched points
        for marker in coords1[:maxMatches]:
            img4 = cv2.drawMarker(img3, tuple(int(i) for i in marker), color=(0, 255, 255))

        th_x = math.asin(R[2][1])
        th_y = math.atan(-R[2][0]/R[2][2])
        th_z = math.atan(-R[0][1]/R[1][1])


        print("\r{0} {1} {2}".format(th_x, th_y, th_z), end="")


        theta = [0.0, th_z]

        r = [0.0, 0.5]

        plt.polar(theta, r)
Example #21
def DebugMatches(filename,
                 width,
                 height,
                 img1,
                 points1,
                 H1,
                 img2,
                 points2,
                 H2,
                 TargetWidth=1024):
    def transformPoints(points, H):
        return cv2.perspectiveTransform(numpy.float32([points]), H)[0]

    def Scale(sx, sy):
        return numpy.array([[float(sx), 0.0, 0.0], [0.0, float(sy), 0.0],
                            [0.0, 0.0, 1.0]])

    def Translate(tx, ty):
        return numpy.array([[1.0, 0.0, float(tx)], [0.0, 1.0,
                                                    float(ty)],
                            [0.0, 0.0, 1.0]])

    bounds1 = transformPoints([(0, 0), (width, 0), (width, height),
                               (0, height)], H1)
    bounds2 = transformPoints([(0, 0), (width, 0), (width, height),
                               (0, height)], H2)

    bounds = [*bounds1, *bounds2]

    border = 10
    x1 = int(min([x for x, y in bounds])) - border
    x2 = int(max([x for x, y in bounds])) + border
    y1 = int(min([y for x, y in bounds])) - border
    y2 = int(max([y for x, y in bounds])) + border

    W = x2 - x1
    H = y2 - y1
    vs = float(TargetWidth) / float(W)
    W = int(W * vs)
    H = int(H * vs)

    H1 = numpy.matmul(numpy.matmul(Scale(vs, vs), Translate(-x1, -y1)), H1)
    H2 = numpy.matmul(numpy.matmul(Scale(vs, vs), Translate(-x1, -y1)), H2)

    # could be that img1 and img2 are at a lower resolution
    Timg1 = numpy.matmul(
        H1, Scale(width / float(img1.shape[1]), height / float(img1.shape[0])))
    Timg2 = numpy.matmul(
        H2, Scale(width / float(img2.shape[1]), height / float(img2.shape[0])))
    # , cv2.INTER_LINEAR, cv2.BORDER_TRANSPARENT, (0, 0, 0, 0))
    blended1 = cv2.warpPerspective(img1, Timg1, (W, H))
    # , cv2.INTER_LINEAR, cv2.BORDER_TRANSPARENT, (0, 0, 0, 0))
    blended2 = cv2.warpPerspective(img2, Timg2, (W, H))

    img = numpy.zeros((H, W, 4), dtype='uint8')
    img[:, :, 3].fill(255)
    img[:, :, 0] = blended1
    img[:, :, 1] = blended2

    red = (0, 0, 255, 100)    # BGRA
    green = (0, 255, 0, 100)
    blue = (255, 0, 0, 100)
    points1 = transformPoints(points1, H1)
    points2 = transformPoints(points2, H2)
    for point1, point2 in zip(points1, points2):
        p1 = tuple(int(v) for v in point1)  # OpenCV needs integer pixel coordinates
        p2 = tuple(int(v) for v in point2)
        cv2.drawMarker(img, p1, red, cv2.MARKER_CROSS, 10, 2)
        cv2.drawMarker(img, p2, green, cv2.MARKER_CROSS, 10, 2)
        cv2.line(img, p1, p2, red, 3)
        cv2.line(img, p1, p2, green, 3)  # the green line overdraws the red one

    borders1 = transformPoints([(0, 0), (width, 0), (width, height),
                                (0, height)], H1)
    borders2 = transformPoints([(0, 0), (width, 0), (width, height),
                                (0, height)], H2)
    for I in range(4):
        b1a = tuple(int(v) for v in borders1[I])
        b1b = tuple(int(v) for v in borders1[(I + 1) % 4])
        b2a = tuple(int(v) for v in borders2[I])
        b2b = tuple(int(v) for v in borders2[(I + 1) % 4])
        cv2.line(img, b1a, b1b, red, 2)
        cv2.line(img, b2a, b2b, green, 2)

    img = cv2.flip(img, 0)
    SaveImage(filename, img)
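DebugMatches needs two images, their matched points, and homographies into a common frame; SaveImage is an external helper this snippet assumes. A hypothetical call with synthetic data (all names below are illustrative):

import numpy
import cv2

img1 = numpy.random.randint(0, 255, (240, 320), numpy.uint8)
img2 = img1.copy()
height, width = img1.shape
points1 = [(50, 60), (200, 80), (160, 200), (80, 150)]
points2 = points1  # pretend the two views align exactly
H1 = numpy.eye(3)  # keep image 1 in place
H2, _ = cv2.findHomography(numpy.float32(points2),
                           numpy.float32(points1), cv2.RANSAC, 5.0)
DebugMatches("matches.png", width, height, img1, points1, H1,
             img2, points2, H2)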
Example #22
0
def particle_tracker(v, file_name):
    # Open output file

    output_name = sys.argv[3] + file_name
    output = open(output_name,"w")

    frameCounter = 0
    stepsize = 1
    # read first frame
    ret, frame = v.read()

    if not ret:
        return

    # detect face in first frame
    c, r, w, h = detect_one_face(frame)

    # write the track point (face-box centre) for the first frame
    pt_x, pt_y = c + w / 2.0, r + h / 2.0
    output.write("%d,%d,%d\n" % (frameCounter, pt_x, pt_y))  # Write as 0,pt_x,pt_y
    frameCounter = frameCounter + 1

    # set the initial tracking window
    track_window = (c,r,w,h)

    # initialize the tracker
    # e.g. kf = cv2.KalmanFilter(4,2,0)
    # or: particles = np.ones((n_particles, 2), int) * initial_pos

    n_particles = 450

    roi_hist = hsv_histogram_for_window(frame, (c,r,w,h)) # this is provided for you
    init_pos = np.array([c + w / 2.0, r + h / 2.0], int)  # Initial position
    particles = np.ones((n_particles, 2), int) * init_pos  # Init particles to init position

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    prob = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

    # print(init_pos.shape)
    # print(particles.shape)
    f0 = particleevaluator(prob, particles.T) * np.ones(n_particles)  # Evaluate appearance model
    weights = np.ones(n_particles) / n_particles  # weights are uniform (at first)
    # print(np.average(particles,axis=0))
    pos=init_pos

    while True:
        stepsize = 30
        ret, frame = v.read()  # read another frame
        if not ret:
            break

        # if you track particles - take the weighted average
        # the Kalman filter already has the tracking point in the state vector

        # hist_bp: obtain using cv2.calcBackProject and the HSV histogram
        # c,r,w,h: obtain using detect_one_face()
        # Particle motion model: uniform step (TODO: find a better motion model)

        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        prob = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

        #moving in the direction of particle with maximum weight
        # x=pos[0]+np.random.uniform(-5, 5)
        # y=pos[1]+np.random.uniform(-5, 5)
        # init_pos=pos
        np.add(particles, np.random.uniform(-stepsize,stepsize, particles.shape), out=particles, casting="unsafe")

        # print(particles.shape)
        # print(np.argmax())
        # clip out-of-bounds particles: determine the frame width/height and clip to that range
        particles = particles.clip(np.zeros(2), np.array((frame.shape[1], frame.shape[0])) - 1).astype(int)

        f = particleevaluator(prob, particles.T)  # Evaluate particles
        weights = np.float32(f.clip(1))  # Weight ~ histogram response
        weights /= np.sum(weights)  # Normalize w
        pos = np.sum(particles.T * weights, axis=1).astype(int)  # expected position: weighted average

        # print(f-f.clip(1))

        if 1. / np.sum(weights ** 2) < n_particles / 2:  # If particle cloud degenerate:
            # print('resampled')
            particles = particles[resample(weights), :]  # Resample particles according to weights
        # resample() function is provided for you

        img2 = cv2.drawMarker(frame, (pos[0], pos[1]), (0, 255, 0), markerType=cv2.MARKER_TILTED_CROSS, markerSize=10)
        cv2.imshow('img',img2)
        cv2.waitKey(60)

        # plt.imshow(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
        # # plt.scatter(particles[1],particles[0], c='b', s=5)
        # plt.pause(0.05)

        # write the result to the output file
        output.write("%d,%d,%d\n" % (frameCounter,pos[0],pos[1])) # Write as frame_index,pt_x,pt_y
        frameCounter = frameCounter + 1

    output.close()
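particle_tracker leans on course-provided helpers that the snippet omits (detect_one_face, hsv_histogram_for_window, resample, particleevaluator). A plausible sketch of the two evaluated every frame, assuming the conventional implementations:

import numpy as np

def particleevaluator(back_proj, particle):
    # back-projection response at each particle; `particle` arrives
    # transposed, so row 0 holds x coordinates and row 1 holds y
    return back_proj[particle[1], particle[0]]

def resample(weights):
    # multinomial resampling: draw n particle indices with
    # probability proportional to the normalized weights
    n = len(weights)
    p = weights / np.sum(weights)
    return np.random.choice(n, size=n, p=p)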
Example #23
0
def learn_image_maker(r1x, r1y, r1_theta, r2x, r2y, m11, m12, m21, m22, m31,
                      m32, m41, m42, mc1, mc2, mc3, mc4, rm1, rm2, rm3, erm1,
                      erm2, erm3):  # r1 = own robot, r2 = enemy robot
    #def learn_image_maker():
    map = map_init()

    #point state
    if m11 == 1:
        map = cv2.line(map, (125, 36), (111, 50), 170, thickness=2)
    elif m11 == -1:
        map = cv2.line(map, (125, 36), (111, 50), 85, thickness=2)

    if m12 == 1:
        map = cv2.line(map, (139, 50), (125, 64), 170, thickness=2)
    elif m12 == -1:
        map = cv2.line(map, (139, 50), (125, 64), 85, thickness=2)

    if m21 == 1:
        map = cv2.line(map, (200, 111), (186, 125), 170, thickness=2)
    elif m21 == -1:
        map = cv2.line(map, (200, 111), (186, 125), 85, thickness=2)
    if m22 == 1:
        map = cv2.line(map, (214, 125), (200, 139), 170, thickness=2)
    elif m22 == -1:
        map = cv2.line(map, (214, 125), (200, 139), 85, thickness=2)

    if m31 == 1:
        map = cv2.line(map, (125, 186), (111, 200), 170, thickness=2)
    elif m31 == -1:
        map = cv2.line(map, (125, 186), (111, 200), 85, thickness=2)
    if m32 == 1:
        map = cv2.line(map, (139, 200), (125, 214), 170, thickness=2)
    elif m32 == -1:
        map = cv2.line(map, (139, 200), (125, 214), 85, thickness=2)

    if m41 == 1:
        map = cv2.line(map, (50, 111), (36, 125), 170, thickness=2)
    elif m41 == -1:
        map = cv2.line(map, (50, 111), (36, 125), 85, thickness=2)
    if m42 == 1:
        map = cv2.line(map, (64, 125), (50, 139), 170, thickness=2)
    elif m42 == -1:
        map = cv2.line(map, (64, 125), (50, 139), 85, thickness=2)

    if mc1 == 1:
        map = cv2.line(map, (125, 100), (100, 125), 170, thickness=2)
    elif mc1 == -1:
        map = cv2.line(map, (125, 100), (100, 125), 85, thickness=2)
    if mc2 == 1:
        map = cv2.line(map, (125, 100), (150, 125), 170, thickness=2)
    elif mc2 == -1:
        map = cv2.line(map, (125, 100), (150, 125), 85, thickness=2)
    if mc3 == 1:
        map = cv2.line(map, (150, 125), (125, 150), 170, thickness=2)
    elif mc3 == -1:
        map = cv2.line(map, (150, 125), (125, 150), 85, thickness=2)
    if mc4 == 1:
        map = cv2.line(map, (125, 150), (100, 125), 170, thickness=2)
    elif mc4 == -1:
        map = cv2.line(map, (125, 150), (100, 125), 85, thickness=2)

    #robot_state
    #robot1
    pts = np.array(
        [[r1x, r1y - 13], [r1x + 13, r1y], [r1x, r1y + 13], [r1x - 13, r1y]],
        np.int32)
    pts = pts.reshape((-1, 1, 2))
    map = cv2.fillPoly(map, [pts], 170)
    #robot rotation
    if r1_theta == 1:
        map = cv2.drawMarker(map, (r1x - 7, r1y - 7),
                             255,
                             markerType=cv2.MARKER_TILTED_CROSS,
                             thickness=1)
        #robot marker
        if rm1 == 1:
            map = cv2.line(map, (r1x, r1y - 13), (r1x + 13, r1y),
                           85,
                           thickness=2)
        if rm2 == 1:
            map = cv2.line(map, (r1x + 13, r1y), (r1x, r1y + 13),
                           85,
                           thickness=2)
        if rm3 == 1:
            map = cv2.line(map, (r1x, r1y + 13), (r1x - 13, r1y),
                           85,
                           thickness=2)

    elif r1_theta == 2:
        map = cv2.drawMarker(map, (r1x + 7, r1y - 7),
                             255,
                             markerType=cv2.MARKER_TILTED_CROSS,
                             thickness=1)
        #robot marker
        if rm1 == 1:
            map = cv2.line(map, (r1x + 13, r1y), (r1x, r1y + 13),
                           85,
                           thickness=2)
        if rm2 == 1:
            map = cv2.line(map, (r1x, r1y + 13), (r1x - 13, r1y),
                           85,
                           thickness=2)
        if rm3 == 1:
            map = cv2.line(map, (r1x, r1y - 13), (r1x - 13, r1y),
                           85,
                           thickness=2)

    elif r1_theta == 3:
        map = cv2.drawMarker(map, (r1x + 7, r1y + 7),
                             255,
                             markerType=cv2.MARKER_TILTED_CROSS,
                             thickness=1)
        #robot marker
        if rm1 == 1:
            map = cv2.line(map, (r1x, r1y + 13), (r1x - 13, r1y),
                           85,
                           thickness=2)
        if rm2 == 1:
            map = cv2.line(map, (r1x, r1y - 13), (r1x - 13, r1y),
                           85,
                           thickness=2)
        if rm3 == 1:
            map = cv2.line(map, (r1x + 13, r1y), (r1x, r1y - 13),
                           85,
                           thickness=2)

    elif r1_theta == 4:
        map = cv2.drawMarker(map, (r1x - 7, r1y + 7),
                             255,
                             markerType=cv2.MARKER_TILTED_CROSS,
                             thickness=1)
        #robot marker
        if rm1 == 1:
            map = cv2.line(map, (r1x, r1y - 13), (r1x - 13, r1y),
                           85,
                           thickness=2)
        if rm2 == 1:
            map = cv2.line(map, (r1x + 13, r1y), (r1x, r1y - 13),
                           85,
                           thickness=2)
        if rm3 == 1:
            map = cv2.line(map, (r1x, r1y + 13), (r1x + 13, r1y),
                           85,
                           thickness=2)

    #robot2
    pts = np.array(
        [[r2x, r2y - 13], [r2x + 13, r2y], [r2x, r2y + 13], [r2x - 13, r2y]],
        np.int32)
    pts = pts.reshape((-1, 1, 2))
    map = cv2.fillPoly(map, [pts], 85)
    map = cv2.drawMarker(map, (r2x + 7, r2y + 7),
                         255,
                         markerType=cv2.MARKER_TILTED_CROSS,
                         thickness=1)
    #robot marker
    if erm1 == 1:
        map = cv2.line(map, (r2x, r2y + 13), (r2x - 13, r2y), 170, thickness=2)
    if erm2 == 1:
        map = cv2.line(map, (r2x, r2y - 13), (r2x - 13, r2y), 170, thickness=2)
    if erm3 == 1:
        map = cv2.line(map, (r2x + 13, r2y), (r2x, r2y - 13), 170, thickness=2)

    #cv2.imwrite('test.png',map)
    return map
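learn_image_maker draws onto whatever map_init() returns, which is not shown here. Given the drawing coordinates used above (up to about x = 214), a plausible sketch is a blank 250x250 grayscale canvas; the exact size is an assumption:

import numpy as np

def map_init():
    # blank single-channel canvas; 250x250 assumed from the coordinates above
    return np.zeros((250, 250), dtype=np.uint8)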
Example #24
0
def kf_tracker(v, file_name):
    # Open output file

    output_name = sys.argv[3] + file_name
    output = open(output_name,"w")

    frameCounter = 0
    stepsize = 1
    # read first frame
    ret, frame = v.read()

    if not ret:
        return

    # detect face in first frame
    c, r, w, h = detect_one_face(frame)

    # write the track point (face-box centre) for the first frame
    pt_x, pt_y = c + w / 2.0, r + h / 2.0
    output.write("%d,%d,%d\n" % (frameCounter, pt_x, pt_y))  # Write as 0,pt_x,pt_y
    frameCounter = frameCounter + 1

    state = np.array([c + w / 2, r + h / 2, 0, 0], dtype=np.float32)  # initial position
    kalman = cv2.KalmanFilter(4, 2)
    kalman.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
    kalman.transitionMatrix = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)
    kalman.processNoiseCov = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32) * 0.03
    kalman.measurementNoiseCov = np.array([[1, 0], [0, 1]], np.float32) * 0.00003
    kalman.errorCovPost = 1e-1 * np.eye(4, 4,dtype=np.float32)
    kalman.statePost = state
    # print(kalman.measurementMatrix)

    while True:
        ret, frame = v.read()  # read another frame
        if not ret:
            break
        c, r, w, h = detect_one_face(frame)

        # if you track particles - take the weighted average
        # the Kalman filter already has the tracking point in the state vector

        # hist_bp: obtain using cv2.calcBackProject and the HSV histogram
        # c,r,w,h: obtain using detect_one_face()
        # Particle motion model: uniform step (TODO: find a better motion model)

        # hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # prob = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
        # retval,track_window=cv2.CamShift(prob,track_window,term_crit)
        # c, r, w, h = track_window

        prediction = kalman.predict()
        pos=prediction[:2]

        # if not((c==0)and(r==0)and(w==0)and(h==0)):  # e.g. face found
        if any([c, r, w, h]):  # e.g. face found
            posterior = kalman.correct(np.array([[np.float32(c+w/2)],[np.float32(r+h/2)]]))
            # print(posterior[:2])
            pos = posterior[:2]
        else:
            pass
            # print('entered kalman',frameCounter)

        # use prediction or posterior as your tracking result

        # pos holds 1-element float arrays from the Kalman state; OpenCV needs ints
        img2 = cv2.drawMarker(frame, (int(pos[0]), int(pos[1])), (0, 255, 0), markerType=cv2.MARKER_TILTED_CROSS, markerSize=10)
        cv2.imshow('img',img2)
        cv2.waitKey(60)

        # write the result to the output file
        output.write("%d,%d,%d\n" % (frameCounter,pos[0],pos[1])) # Write as frame_index,pt_x,pt_y
        frameCounter = frameCounter + 1

    output.close()
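Both trackers read sys.argv[3] as an output prefix, so they are meant to be launched from a small driver. A hypothetical one, assuming a `python tracker.py <method> <video> <output_dir>` convention (the argv layout is a guess):

import sys
import cv2

if __name__ == '__main__':
    v = cv2.VideoCapture(sys.argv[2])
    if sys.argv[1] == 'kalman':
        kf_tracker(v, 'output_kalman.txt')
    else:
        particle_tracker(v, 'output_particle.txt')
    v.release()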
Example #25
0
    def draw_point(self, img, front_pt, back_pt):
        img = cv2.circle(img, front_pt, 8, (125, 0, 125), 2, 0)
        img = cv2.drawMarker(img, back_pt, color=(255, 255, 0), markerType=cv2.MARKER_TILTED_CROSS)
        img = cv2.line(img, front_pt, back_pt, (100, 100, 0), 2)
        # redraw the front-point circle in a new color on top of the first one
        img = cv2.circle(img, front_pt, 8, (255, 255, 0), 2, 0)
        return img
Example #26
0
    def annotate(self):
        """Label all kinds of things in the image.
        """
        if self.search_point is not None:
            p1, p2 = self.get_search_window()

            cv2.rectangle(self.img, p1, p2, color=(255, 255, 255), thickness=1)
        else:
            cv2.rectangle(self.img, (0, 0), (self.width - 1, self.height - 1),
                          color=(255, 255, 255),
                          thickness=1)

        # draw all detected contours to see masking issues
        if DRAW_MINOR_CONTOURS and self.contours is not None:
            cv2.drawContours(self.img, self.contours, -1, (150, 150, 0),
                             THICKNESS_MINOR_CONTOUR)

        # draw largest contour and contour label
        if DRAW_MAJOR_CONTOURS and self.largest_contour is not None:
            cv2.drawContours(self.img, [self.largest_contour], 0, (0, 0, 255),
                             THICKNESS_MAJOR_CONTOUR)

        # Marker on centroid of largest contour
        if self.largest_contour is not None:
            cv2.drawMarker(img=self.img,
                           position=centroid(self.largest_contour),
                           color=(0, 255, 0))

        # Label nodes, highlight node closest to largest contour centroid
        for node_id, node in self.nodes.items():
            color = (255, 0, 0) if node_id == self.active_node else (255, 255,
                                                                     255)
            cv2.circle(self.img, (node['x'], node['y']), MIN_DIST_TO_NODE // 2,
                       color)

        # Draw the detection trail
        points = self.results
        if DRAW_TRAIL and len(points) > 1:
            for p_idx in range(len(points) - 1):
                try:
                    x1, y1 = map(int, points[p_idx])
                    x2, y2 = map(int, points[p_idx + 1])
                except (ValueError, TypeError):
                    pass
                else:
                    cv2.line(self.img, (x1, y1), (x2, y2),
                             color=(0, 255, 0),
                             thickness=2)

        # Draw the Kalman filter predictions trail
        cv2.drawMarker(self.img, position=self.last_kf_pos, color=(0, 0, 255))
        points = self.kf_results
        if DRAW_KF_TRAIL and len(points) > 1:
            for p_idx in range(len(points) - 1):
                try:
                    x1, y1 = map(int, points[p_idx])
                    x2, y2 = map(int, points[p_idx + 1])
                except (ValueError, TypeError):
                    pass
                else:
                    cv2.line(self.img, (x1, y1), (x2, y2),
                             color=(50, 50, 255),
                             thickness=1)
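annotate() calls a centroid() helper that this snippet does not define. A minimal moments-based sketch of what it presumably computes:

import cv2

def centroid(contour):
    # centre of mass of a contour from its spatial moments
    M = cv2.moments(contour)
    if M['m00'] == 0:
        return None
    return (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))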
Example #27
0
    def get_frame(self):
        font = cv2.FONT_HERSHEY_DUPLEX

        # count the video frames
        f = 0
        self.flg = 0

        # thresholds used for shape recognition
        # self.threshold_time = 180
        # self.threshold_new_block = 25
        # self.threshold_block_move = 3
        # self.threshold_area = 500

        # self.detect_red_cx = [num*5 for num in range(self.threshold_time)]
        # self.detect_red_cy = [num*5 for num in range(self.threshold_time)]
        # self.detect_blue_cx = [num*5 for num in range(self.threshold_time)]
        # self.detect_blue_cy = [num*5 for num in range(self.threshold_time)]
        # self.detect_green_cx = [num*5 for num in range(self.threshold_time)]
        # self.detect_green_cy = [num*5 for num in range(self.threshold_time)]
        # self.detect_purple_cx = [num*5 for num in range(self.threshold_time)]
        # self.detect_purple_cy = [num*5 for num in range(self.threshold_time)]
        # self.detect_yellow_cx = [num*5 for num in range(self.threshold_time)]
        # self.detect_yellow_cy = [num*5 for num in range(self.threshold_time)]

        # # range of "red" in HSV color space
        # self.lower_red1 = np.array([0, 65, 0])
        # self.upper_red1 = np.array([30, 255, 255])

        # self.lower_red2 = np.array([122, 65, 0])
        # self.upper_red2 = np.array([180, 255, 255])

        # # range of "blue" in HSV color space
        # self.lower_blue = np.array([93, 90, 0])
        # self.upper_blue = np.array([114, 255, 164])
        # # range of "green" in HSV color space
        # self.lower_green = np.array([62, 90, 0])
        # self.upper_green = np.array([91, 255, 164])
        # # range of "purple" in HSV color space
        # self.lower_purple = np.array([150, 70, 150])
        # self.upper_purple = np.array([160, 176, 255])
        # # range of "yellow" in HSV color space
        # self.lower_yellow = np.array([30, 120, 120])
        # self.upper_yellow = np.array([60, 255, 255])

        # self.flg = 0
        f += 1
        # ---------------------------------------------------- grab the camera frame from self.video ----------------------------------------------------
        ret, self.frame = self.video.read()

        # resize the window
        height = self.frame.shape[0]
        width = self.frame.shape[1]
        self.frame = cv2.resize(self.frame,
                                (int(width * 0.5), int(height * 0.5)))

        #     windowsize = (300, 300)
        #     self.frame = cv2.resize(self.frame, windowsize)
        self.frame = cv2.flip(self.frame, 1)
        # --------------------------------------------------------- frame from the web camera ------------------------------------------------
        self.frame_src = self.frame.copy()

        hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)

        # detect "red" in the webcam frame ----------------------------------------------------
        mask_red1 = cv2.inRange(hsv, self.lower_red1, self.upper_red1)

        mask_red2 = cv2.inRange(hsv, self.lower_red2, self.upper_red2)

        mask_red = mask_red1 + mask_red2

        # blob analysis on the mask image
        # connected-component labeling of the binary image
        label = cv2.connectedComponentsWithStats(mask_red)

        # extract the blob statistics field by field
        n = label[0] - 1
        # label[2] stores each blob's bounding box and size
        data = np.delete(label[2], 0, 0)

        center = np.delete(label[3], 0, 0)

        # keep only detected blobs above a minimum area
        data2 = np.zeros((0, 5), dtype="int32")
        center2 = np.zeros((0, 2), dtype="float64")

        for i in range(n):
            if data[i, 4] > self.threshold_area:
                data2 = np.r_[data2, data[i:i + 1, :]]
                center2 = np.r_[center2, center[i:i + 1, :]]

        if data2.shape[0] != 0:
            for i in range(data2.shape[0]):

                # index of the blob with the largest area
                #max_index = np.argmax(data[:, 4])

                detected_blob = {}

                # collect the blob's statistics
                detected_blob["upper_left"] = (data2[:, 0][i], data2[:, 1][i])  # upper-left corner
                detected_blob["width"] = data2[:, 2][i]  # width
                detected_blob["height"] = data2[:, 3][i]  # height
                detected_blob["area"] = data2[:, 4][i]  # area
                detected_blob["center"] = center2[i]  # center coordinates

                # get the blob's center coordinates
                center_red_x = int(detected_blob["center"][0])
                center_red_y = int(detected_blob["center"][1])

                cv2.putText(self.frame, "bar", (center_red_x, center_red_y),
                            font, 1, (0), 2)
                cv2.drawMarker(self.frame, (center_red_x, center_red_y),
                               (0, 0, 0),
                               markerSize=10,
                               thickness=10)

                # if this position was already recognized in the past
                for k in range(len(list_shape_cx)):
                    if (abs(list_shape_cx[k] - center_red_x) <
                            self.threshold_new_block) and (
                                abs(list_shape_cy[k] - center_red_y) <
                                self.threshold_new_block):
                        #print('already detected red'+str(center_red_x)+', '+str(center_red_y))
                        break
                    else:
                        continue
                else:
                    self.detect_red_cx.append(center_red_x)
                    self.detect_red_cy.append(center_red_y)
                    for i_c in range(self.threshold_time):
                        #print("i_c:",i_c)
                        #print("self.detect_red_cx[-1]:",self.detect_red_cx[-1])
                        if (abs(self.detect_red_cx[-1] -
                                self.detect_red_cx[-(2 + i_c)]) <
                                self.threshold_block_move):
                            if (abs(self.detect_red_cy[-1] -
                                    self.detect_red_cy[-(2 + i_c)])
                                ) < self.threshold_block_move:
                                if i_c == (self.threshold_time - 1):
                                    #print("bar")
                                    list_shape.append(0)
                                    list_shape_cx.append(center_red_x)
                                    list_shape_cy.append(center_red_y)
                                    #cv2.imwrite(path+"/new_object_"+str(f)+".jpg", self.frame)
                                    self.flg = 1
                                else:
                                    continue
                            else:
                                break
                        else:
                            break
                    else:
                        break
                continue

        # detect "blue" in the webcam frame ----------------------------------------------------
        mask_blue = cv2.inRange(hsv, self.lower_blue, self.upper_blue)

        # blob analysis on the mask image
        # connected-component labeling of the binary image
        label = cv2.connectedComponentsWithStats(mask_blue)

        # extract the blob statistics field by field
        n = label[0] - 1
        # label[2] stores each blob's bounding box and size
        data = np.delete(label[2], 0, 0)

        center = np.delete(label[3], 0, 0)

        # keep only detected blobs above a minimum area
        data2 = np.zeros((0, 5), dtype="int32")
        center2 = np.zeros((0, 2), dtype="float64")

        for i in range(n):
            if data[i, 4] > self.threshold_area:
                data2 = np.r_[data2, data[i:i + 1, :]]
                center2 = np.r_[center2, center[i:i + 1, :]]

        if data2.shape[0] != 0:
            for i in range(data2.shape[0]):

                # index of the blob with the largest area
                #max_index = np.argmax(data[:, 4])

                detected_blob = {}

                # collect the blob's statistics
                detected_blob["upper_left"] = (data2[:, 0][i], data2[:, 1][i])  # upper-left corner
                detected_blob["width"] = data2[:, 2][i]  # width
                detected_blob["height"] = data2[:, 3][i]  # height
                detected_blob["area"] = data2[:, 4][i]  # area
                detected_blob["center"] = center2[i]  # center coordinates

                # get the blob's center coordinates
                center_blue_x = int(detected_blob["center"][0])
                center_blue_y = int(detected_blob["center"][1])

                cv2.putText(self.frame, "box", (center_blue_x, center_blue_y),
                            font, 1, (0), 2)
                cv2.drawMarker(self.frame, (center_blue_x, center_blue_y),
                               (0, 0, 0),
                               markerSize=10,
                               thickness=10)

                # if this position was already recognized in the past
                for k in range(len(list_shape_cx)):
                    if (abs(list_shape_cx[k] - center_blue_x) <
                            self.threshold_new_block) and (
                                abs(list_shape_cy[k] - center_blue_y) <
                                self.threshold_new_block):
                        #print('already detected')
                        break
                    else:
                        continue
                else:
                    self.detect_blue_cx.append(center_blue_x)
                    self.detect_blue_cy.append(center_blue_y)
                    for i_c in range(self.threshold_time):
                        #print("i_c",i_c)
                        #print("self.detect_blue_cx[-1]:",self.detect_blue_cx[-1])
                        if (abs(self.detect_blue_cx[-1] -
                                self.detect_blue_cx[-(2 + i_c)]) <
                                self.threshold_block_move):
                            if (abs(self.detect_blue_cy[-1] -
                                    self.detect_blue_cy[-(2 + i_c)]) <
                                    self.threshold_block_move):
                                if i_c == (self.threshold_time - 1):
                                    #print("box")
                                    list_shape.append(5)
                                    list_shape_cx.append(center_blue_x)
                                    list_shape_cy.append(center_blue_y)
                                    #cv2.imwrite(path+"/new_object_"+str(f)+".jpg", self.frame)
                                    self.flg = 1
                                else:
                                    continue
                            else:
                                break
                        else:
                            break
                    else:
                        break
                continue

        # detect "green" in the webcam frame ----------------------------------------------------
        mask_green = cv2.inRange(hsv, self.lower_green, self.upper_green)
        # blob analysis on the mask image
        # connected-component labeling of the binary image
        label = cv2.connectedComponentsWithStats(mask_green)

        # extract the blob statistics field by field
        n = label[0] - 1
        # label[2] stores each blob's bounding box and size
        data = np.delete(label[2], 0, 0)

        center = np.delete(label[3], 0, 0)

        # keep only detected blobs above a minimum area
        data2 = np.zeros((0, 5), dtype="int32")
        center2 = np.zeros((0, 2), dtype="float64")

        for i in range(n):
            if data[i, 4] > self.threshold_area:
                data2 = np.r_[data2, data[i:i + 1, :]]
                center2 = np.r_[center2, center[i:i + 1, :]]

        if data2.shape[0] != 0:
            for i in range(data2.shape[0]):

                # index of the blob with the largest area
                #max_index = np.argmax(data[:, 4])

                detected_blob = {}

                # collect the blob's statistics
                detected_blob["upper_left"] = (data2[:, 0][i], data2[:, 1][i])  # upper-left corner
                detected_blob["width"] = data2[:, 2][i]  # width
                detected_blob["height"] = data2[:, 3][i]  # height
                detected_blob["area"] = data2[:, 4][i]  # area
                detected_blob["center"] = center2[i]  # center coordinates

                # get the blob's center coordinates
                center_green_x = int(detected_blob["center"][0])
                center_green_y = int(detected_blob["center"][1])

                cv2.putText(self.frame, "triangle",
                            (center_green_x, center_green_y), font, 1, (0), 2)
                cv2.drawMarker(self.frame, (center_green_x, center_green_y),
                               (0, 0, 0),
                               markerSize=10,
                               thickness=10)

                # if this position was already recognized in the past
                for k in range(len(list_shape_cx)):
                    if (abs(list_shape_cx[k] - center_green_x) <
                            self.threshold_new_block) and (
                                abs(list_shape_cy[k] - center_green_y) <
                                self.threshold_new_block):
                        #print('already detected green:'+str(center_green_x)+', '+str(center_green_y))
                        break
                    else:
                        continue
                else:
                    self.detect_green_cx.append(center_green_x)
                    self.detect_green_cy.append(center_green_y)
                    for i_c in range(self.threshold_time):
                        #print("i_c:",i_c)
                        #print("self.detect_green_cx[-1]:",self.detect_green_cx[-1])
                        if (abs(self.detect_green_cx[-1] -
                                self.detect_green_cx[-(2 + i_c)]) <
                                self.threshold_block_move):
                            if (abs(self.detect_green_cy[-1] -
                                    self.detect_green_cy[-(2 + i_c)]) <
                                    self.threshold_block_move):
                                if i_c == (self.threshold_time - 1):
                                    #print("triangle")
                                    list_shape.append(3)
                                    list_shape_cx.append(center_green_x)
                                    list_shape_cy.append(center_green_y)
                                    #cv2.imwrite(path+"/new_object_"+str(f)+".jpg", self.frame)
                                    self.flg = 1
                                else:
                                    continue
                            else:
                                break
                        else:
                            break
                    else:
                        break
                continue

        # detect "purple" in the webcam frame ----------------------------------------------------
        mask_purple = cv2.inRange(hsv, self.lower_purple, self.upper_purple)

        # blob analysis on the mask image
        # connected-component labeling of the binary image
        label = cv2.connectedComponentsWithStats(mask_purple)

        # extract the blob statistics field by field
        n = label[0] - 1
        # label[2] stores each blob's bounding box and size
        data = np.delete(label[2], 0, 0)

        center = np.delete(label[3], 0, 0)

        # keep only detected blobs above a minimum area
        data2 = np.zeros((0, 5), dtype="int32")
        center2 = np.zeros((0, 2), dtype="float64")

        for i in range(n):
            if data[i, 4] > self.threshold_area:
                data2 = np.r_[data2, data[i:i + 1, :]]
                center2 = np.r_[center2, center[i:i + 1, :]]

        if data2.shape[0] != 0:
            for i in range(data2.shape[0]):

                # index of the blob with the largest area
                #max_index = np.argmax(data[:, 4])

                detected_blob = {}

                # collect the blob's statistics
                detected_blob["upper_left"] = (data2[:, 0][i], data2[:, 1][i])  # upper-left corner
                detected_blob["width"] = data2[:, 2][i]  # width
                detected_blob["height"] = data2[:, 3][i]  # height
                detected_blob["area"] = data2[:, 4][i]  # area
                detected_blob["center"] = center2[i]  # center coordinates

                # get the blob's center coordinates
                center_purple_x = int(detected_blob["center"][0])
                center_purple_y = int(detected_blob["center"][1])

                cv2.putText(self.frame, "half-circle",
                            (center_purple_x, center_purple_y), font, 1, (0),
                            2)
                cv2.drawMarker(self.frame, (center_purple_x, center_purple_y),
                               (0, 0, 0),
                               markerSize=10,
                               thickness=10)

                # if this position was already recognized in the past
                for k in range(len(list_shape_cx)):
                    if (abs(list_shape_cx[k] - center_purple_x) <
                            self.threshold_new_block) and (
                                abs(list_shape_cy[k] - center_purple_y) <
                                self.threshold_new_block):
                        #print('already detected purple:'+str(center_purple_x)+', '+str(center_purple_y))
                        break
                    else:
                        continue
                else:
                    self.detect_purple_cx.append(center_purple_x)
                    self.detect_purple_cy.append(center_purple_y)
                    for i_c in range(self.threshold_time):
                        #print("i_c:",i_c)
                        #print("self.detect_purple_cx[-1]:",self.detect_purple_cx[-1])
                        if (abs(self.detect_purple_cx[-1] -
                                self.detect_purple_cx[-(2 + i_c)]) <
                                self.threshold_block_move):
                            if (abs(self.detect_purple_cy[-1] -
                                    self.detect_purple_cy[-(2 + i_c)]) <
                                    self.threshold_block_move):
                                if i_c == (self.threshold_time - 1):
                                    #print("half-circle")
                                    list_shape.append(3)
                                    list_shape_cx.append(center_purple_x)
                                    list_shape_cy.append(center_purple_y)
                                    #cv2.imwrite(path+"/new_object_"+str(f)+".jpg", self.frame)
                                    self.flg = 1
                                else:
                                    continue
                            else:
                                break
                        else:
                            break
                    else:
                        break
                continue

        # detect "yellow" in the webcam frame ----------------------------------------------------
        mask_yellow = cv2.inRange(hsv, self.lower_yellow, self.upper_yellow)

        # blob analysis on the mask image
        # connected-component labeling of the binary image
        label = cv2.connectedComponentsWithStats(mask_yellow)

        # extract the blob statistics field by field
        n = label[0] - 1
        # label[2] stores each blob's bounding box and size
        data = np.delete(label[2], 0, 0)

        center = np.delete(label[3], 0, 0)

        # keep only detected blobs above a minimum area
        data2 = np.zeros((0, 5), dtype="int32")
        center2 = np.zeros((0, 2), dtype="float64")

        for i in range(n):
            if data[i, 4] > self.threshold_area:
                data2 = np.r_[data2, data[i:i + 1, :]]
                center2 = np.r_[center2, center[i:i + 1, :]]

        if data2.shape[0] != 0:
            for i in range(data2.shape[0]):

                # index of the blob with the largest area
                #max_index = np.argmax(data[:, 4])

                detected_blob = {}

                # collect the blob's statistics
                detected_blob["upper_left"] = (data2[:, 0][i], data2[:, 1][i])  # upper-left corner
                detected_blob["width"] = data2[:, 2][i]  # width
                detected_blob["height"] = data2[:, 3][i]  # height
                detected_blob["area"] = data2[:, 4][i]  # area
                detected_blob["center"] = center2[i]  # center coordinates

                # get the blob's center coordinates
                center_yellow_x = int(detected_blob["center"][0])
                center_yellow_y = int(detected_blob["center"][1])

                cv2.putText(self.frame, "square",
                            (center_yellow_x, center_yellow_y), font, 1, (0),
                            2)
                cv2.drawMarker(self.frame, (center_yellow_x, center_yellow_y),
                               (0, 0, 0),
                               markerSize=10,
                               thickness=10)

                # if this position was already recognized in the past
                for k in range(len(list_shape_cx)):
                    if (abs(list_shape_cx[k] - center_yellow_x) <
                            self.threshold_new_block) and (
                                abs(list_shape_cy[k] - center_yellow_y) <
                                self.threshold_new_block):
                        #print('already detected yellow:'+str(center_yellow_x)+', '+str(center_yellow_y))
                        break
                    else:
                        continue
                else:
                    self.detect_yellow_cx.append(center_yellow_x)
                    self.detect_yellow_cy.append(center_yellow_y)
                    for i_c in range(self.threshold_time):
                        #print("i_c:",i_c)
                        #print("self.detect_yellow_cx[-1]:",self.detect_yellow_cx[-1])
                        if (abs(self.detect_yellow_cx[-1] -
                                self.detect_yellow_cx[-(2 + i_c)]) <
                                self.threshold_block_move):
                            #print("a")
                            if (abs(self.detect_yellow_cy[-1] -
                                    self.detect_yellow_cy[-(2 + i_c)]) <
                                    self.threshold_block_move):
                                #print("b")
                                if i_c == (self.threshold_time - 1):
                                    #print("square")
                                    list_shape.append(4)
                                    list_shape_cx.append(center_yellow_x)
                                    list_shape_cy.append(center_yellow_y)
                                    #cv2.imwrite(path+"/new_object_"+str(f)+".jpg", self.frame)
                                    self.flg = 1
                                else:
                                    continue
                            else:
                                break
                        else:
                            break
                    else:
                        break
                continue

    #     print('self.frame:'+str(f))
    # cv2.imshow('mask_purple', mask_purple)
    # cv2.imshow('mask_red', mask_red)
    # cv2.imshow('mask_blue', mask_blue)
    # cv2.imshow('mask_green', mask_green)
    # cv2.imshow('mask_yellow', mask_yellow)
    # cv2.imshow('main',self.frame)
    # if cv2.waitKey(1) & 0xFF == ord('q'):
    #     break

    # capture.release()
    # cv2.destroyAllWindows()

        # encode the frame and return data for the HTML page
        ret, jpeg = cv2.imencode('.jpg', self.frame)
        if self.flg == 0:
            shape = 0
            vec = [0, 0]
        else:
            shape = list_shape[-1]
            vec = [list_shape_cx[-1], list_shape_cy[-1]]

        x = np.random.randint(100, 400)

        return jpeg.tobytes(), self.flg, shape, vec, x
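get_frame() hands back a JPEG byte string, which matches the usual Flask multipart streaming pattern for pushing frames to an HTML page. A hedged sketch of that consumer; VideoCamera stands in for whatever class owns get_frame (both route and class names are assumptions):

from flask import Flask, Response

app = Flask(__name__)

def gen(camera):
    # emit each encoded frame as one part of a multipart response
    while True:
        frame_bytes, flg, shape, vec, x = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame_bytes + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(gen(VideoCamera()),  # VideoCamera: hypothetical owner of get_frame()
                    mimetype='multipart/x-mixed-replace; boundary=frame')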
Example #28
0
def ImageProcessor(unprocessed_frames, processed_frames, proc_complete,
                   event_manager, n_calib):
    processing = False
    proc_complete.set()

    # preallocate memory for all arrays used in processing
    img_mean = np.zeros([h, w], dtype=np.float32)
    img_std = np.ones([h, w], dtype=np.float32)

    A = np.zeros([h, w], dtype=np.uint8)
    B = np.zeros([h, w], dtype=np.uint8)
    B_old = np.zeros([h, w], dtype=np.uint8)
    C = np.zeros([h, w], dtype=np.uint8)

    mean_1 = np.zeros([h, w], dtype=np.float32)
    mean_2 = np.zeros([h, w], dtype=np.float32)

    std_1 = np.zeros([h, w], dtype=np.float32)
    std_2 = np.zeros([h, w], dtype=np.float32)
    std_3 = np.zeros([h, w], dtype=np.float32)
    std_4 = np.zeros([h, w], dtype=np.float32)
    std_5 = np.zeros([h, w], dtype=np.float32)
    std_6 = np.zeros([h, w], dtype=np.float32)

    B_1_std = np.zeros([h, w], dtype=np.float32)
    B_1_mean = np.zeros([h, w], dtype=np.float32)
    B_greater = np.zeros([h, w], dtype=np.uint8)
    B_2_mean = np.zeros([h, w], dtype=np.float32)
    B_less = np.zeros([h, w], dtype=np.uint8)

    ########## FOR TESTING ##############
    img_array = []
    C_array = []
    # std_data = np.zeros([h,w],dtype=np.float32)
    # mean_data = np.zeros([h,w],dtype=np.float32)
    save_arr = False
    y_data = np.zeros((h, w))
    img_temp = np.zeros((3, h, w))
    C_temp = np.zeros((3, h, w))
    temp_img = np.zeros((h, w))
    ########## FOR TESTING ##############

    last_n_frame = 0

    p = c.LEARNING_RATE

    time_cumulative = 0

    total_time = 0
    total_frames = 0

    while True:
        time.sleep(1E-4)
        try:
            if event_manager.shutdown.is_set():
                proc_complete.set()
                return
            # get the frames from queue
            n_frame, n_frame_2, frame_buf = unprocessed_frames.get_nowait()

            # y_data is a numpy array hxw with 8 bit greyscale brightness values
            y_data = np.frombuffer(frame_buf, dtype=np.uint8,
                                   count=w * h).reshape(
                                       (h, w)).astype(np.float32)

            # update the background model every c.BACKGROUND_RATE frames
            if n_frame_2 > (last_n_frame + c.BACKGROUND_RATE
                            ) and not event_manager.event_change.is_set():
                last_n_frame = n_frame_2

                # img_mean = (1 - p) * img_mean + p * y_data  # calculate mean
                np.multiply(1 - p, img_mean, out=mean_1)
                np.multiply(p, y_data, out=mean_2)
                np.add(mean_1, mean_2, out=img_mean)

                # img_std = np.sqrt((1 - p) * (img_std ** 2) + p * ((y_data - img_mean) ** 2))  # calculate std deviation
                np.square(img_std, out=std_1)
                np.multiply(1 - p, std_1, out=std_2)
                np.subtract(y_data, img_mean, out=std_3)
                np.square(std_3, out=std_4)
                np.multiply(p, std_4, out=std_5)
                np.add(std_2, std_5, out=std_6)
                np.sqrt(std_6, out=img_std)

            if not proc_complete.is_set() and n_frame > -1:
                mean_data = img_mean
                std_data = img_std

                start = time.time_ns()
                B_old = np.copy(B)
                # B = np.logical_or((y_data > (img_mean + 3*img_std)),
                # (y_data < (img_mean - 3*img_std)))  # new foreground mask
                np.multiply(img_std, 3, out=B_1_std)
                np.add(B_1_std, img_mean, out=B_1_mean)
                B_greater = np.greater(y_data, B_1_mean)
                np.subtract(img_mean, B_1_std, out=B_2_mean)
                B_less = np.less(y_data, B_2_mean)
                B = np.logical_or(B_greater, B_less)

                A = np.invert(
                    np.logical_and(B_old, B)
                )  # difference between prev foreground and new foreground
                C = np.logical_and(
                    A,
                    B)  # different from previous frame and part of new frame

                C = 255 * C.astype(np.uint8)

                C = cv2.filter2D(C, ddepth=-1, kernel=kernel4)
                C[C < c.LPF_THRESH] = 0
                C[C >= c.LPF_THRESH] = 255

                n_features_cv, labels_cv, stats_cv, centroids_cv = cv2.connectedComponentsWithStats(
                    C, connectivity=4)

                label_mask_cv = np.logical_and(
                    stats_cv[:, cv2.CC_STAT_AREA] > 2,
                    stats_cv[:, cv2.CC_STAT_AREA] < 10000)
                ball_candidates = np.concatenate(
                    (stats_cv[label_mask_cv, 2:], centroids_cv[label_mask_cv]),
                    axis=1)

                # sort ball candidates by size and keep the top 100
                ball_candidates = ball_candidates[
                    ball_candidates[:, c.SIZE].argsort()[::-1][:c.N_OBJECTS]]

                processed_frames.put((n_frame, ball_candidates))

                ########## FOR TESTING ##############
                C_temp = cv2.cvtColor(C, cv2.COLOR_GRAY2RGB)
                img_temp = cv2.cvtColor(y_data, cv2.COLOR_GRAY2RGB)
                ball_candidates = ball_candidates.astype(int)
                for ball in ball_candidates:
                    cv2.drawMarker(C_temp, (ball[c.X_COORD], ball[c.Y_COORD]),
                                   (0, 0, 255),
                                   cv2.MARKER_CROSS,
                                   thickness=2,
                                   markerSize=10)
                    cv2.drawMarker(img_temp,
                                   (ball[c.X_COORD], ball[c.Y_COORD]),
                                   (0, 0, 255),
                                   cv2.MARKER_CROSS,
                                   thickness=2,
                                   markerSize=10)

                save_arr = True
                img_array.append((n_frame, y_data))
                C_array.append(C_temp)

                total_time += (time.time_ns() - start)
                total_frames += 1
                ########## FOR TESTING ##############

            elif event_manager.record_stream.is_set() and n_frame > -1:
                if n_frame % n_calib.value == 0:
                    processed_frames.put((n_frame, y_data))

        except queue.Empty:
            if (not event_manager.recording.is_set()
                    and unprocessed_frames.qsize() == 0):  # the recording has finished
                proc_complete.set()  # set the proc_complete event
                ######### FOR TESTING ##############
                if total_frames > 0:
                    print((total_time / total_frames) / 1E6)
                    total_frames = 0
                    total_time = 0
                if img_array is not None and save_arr:
                    for i, img in enumerate(img_array):
                        frame, data = img
                        os.chdir(c.IMG_P)
                        cv2.imwrite(f"{frame:04d}.png", data)
                        cv2.imwrite(f"C{frame:04d}.png", C_array[i])
                    os.chdir(c.DATA_P)
                    np.save('img_mean', mean_data)
                    np.save('img_std', std_data)
                    os.chdir(c.ROOT_P)
                    img_array = []
                    C_array = []
                    save_arr = False
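ImageProcessor references module-level names (h, w, kernel4) and a config module c that the snippet never defines. Illustrative stand-ins consistent with how the code uses them; every value below is an assumption:

import numpy as np

h, w = 480, 640                                # frame height/width
kernel4 = np.ones((4, 4), np.float32) / 16.0   # low-pass filter kernel

class c:  # stand-in for the imported config module
    LEARNING_RATE = 0.05        # background update rate p
    BACKGROUND_RATE = 3         # frames between background updates
    LPF_THRESH = 128            # binarization threshold after filtering
    # a ball-candidate row is [width, height, area, cx, cy]
    SIZE, X_COORD, Y_COORD = 2, 3, 4
    N_OBJECTS = 100             # keep the largest N candidates
    IMG_P = DATA_P = ROOT_P = '.'   # output directories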
Example #29
0
    # OpenCV 3.x returns (image, contours, hierarchy); slicing [-2:] also works on 4.x
    contours, hierarchy = cv2.findContours(maskc, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)[-2:]

    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:1]
    if contours:
        # compute the center of the contour
        M = cv2.moments(contours[0])
        if M["m00"] != 0:
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])

            # draw the contour and center of the shape on the image
            # cv2.drawContours(frame, [c], -1, (0, 255, 0), 2)
            # cv2.circle(frame, (cX, cY), 7, (0, 255, 0), -1)
            # cv2.putText(frame, "center", (cX - 20, cY - 20),
            #             cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
            cv2.drawMarker(frame, (cX, cY), (0, 255, 0),
                           markerSize=200,
                           thickness=20)
    else:
        print("Nothing found")
    cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('frame', 600, 600)
    cv2.namedWindow('maskc', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('maskc', 600, 600)
    cv2.moveWindow('maskc', 600, 0)
    cv2.imshow("frame", frame)
    cv2.imshow("maskc", maskc)
    cv2.waitKey(2)
cv2.destroyAllWindows()
Example #30
0
        def _visualize_output():
            last_frame_index = 0
            last_frame_time = time.time()
            fps_history = []
            all_gaze_histories = []

            if args.fullscreen:
                cv.namedWindow('vis', cv.WND_PROP_FULLSCREEN)
                cv.setWindowProperty('vis', cv.WND_PROP_FULLSCREEN,
                                     cv.WINDOW_FULLSCREEN)

            while True:
                # If no output to visualize, show unannotated frame
                if inferred_stuff_queue.empty():
                    next_frame_index = last_frame_index + 1
                    if next_frame_index in data_source._frames:
                        next_frame = data_source._frames[next_frame_index]
                        if 'faces' in next_frame and len(
                                next_frame['faces']) == 0:
                            if not args.headless:
                                cv.imshow('vis', next_frame['bgr'])
                            if args.record_video:
                                video_out_queue.put_nowait(next_frame_index)
                            last_frame_index = next_frame_index
                    if cv.waitKey(1) & 0xFF == ord('q'):
                        return
                    continue

                # Get output from neural network and visualize
                output = inferred_stuff_queue.get()
                bgr = None
                for j in range(batch_size):
                    frame_index = output['frame_index'][j]
                    if frame_index not in data_source._frames:
                        continue
                    frame = data_source._frames[frame_index]

                    # Decide which landmarks are usable
                    heatmaps_amax = np.amax(output['heatmaps'][j, :].reshape(
                        -1, 18),
                                            axis=0)
                    can_use_eye = np.all(heatmaps_amax > 0.7)
                    can_use_eyelid = np.all(heatmaps_amax[0:8] > 0.75)
                    can_use_iris = np.all(heatmaps_amax[8:16] > 0.8)

                    start_time = time.time()
                    eye_index = output['eye_index'][j]
                    bgr = frame['bgr']
                    eye = frame['eyes'][eye_index]
                    eye_image = eye['image']
                    eye_side = eye['side']
                    eye_landmarks = output['landmarks'][j, :]
                    eye_radius = output['radius'][j][0]
                    if eye_side == 'left':
                        eye_landmarks[:, 0] = eye_image.shape[
                            1] - eye_landmarks[:, 0]
                        eye_image = np.fliplr(eye_image)

                    # Embed eye image and annotate for picture-in-picture
                    eye_upscale = 2
                    eye_image_raw = cv.cvtColor(cv.equalizeHist(eye_image),
                                                cv.COLOR_GRAY2BGR)
                    eye_image_raw = cv.resize(eye_image_raw, (0, 0),
                                              fx=eye_upscale,
                                              fy=eye_upscale)
                    eye_image_annotated = np.copy(eye_image_raw)
                    if can_use_eyelid:
                        cv.polylines(
                            eye_image_annotated,
                            [
                                np.round(
                                    eye_upscale * eye_landmarks[0:8]).astype(
                                        np.int32).reshape(-1, 1, 2)
                            ],
                            isClosed=True,
                            color=(255, 255, 0),
                            thickness=1,
                            lineType=cv.LINE_AA,
                        )
                    if can_use_iris:
                        cv.polylines(
                            eye_image_annotated,
                            [
                                np.round(
                                    eye_upscale * eye_landmarks[8:16]).astype(
                                        np.int32).reshape(-1, 1, 2)
                            ],
                            isClosed=True,
                            color=(0, 255, 255),
                            thickness=1,
                            lineType=cv.LINE_AA,
                        )
                        cv.drawMarker(
                            eye_image_annotated,
                            tuple(
                                np.round(eye_upscale *
                                         eye_landmarks[16, :]).astype(
                                             np.int32)),
                            color=(0, 255, 255),
                            markerType=cv.MARKER_CROSS,
                            markerSize=4,
                            thickness=1,
                            line_type=cv.LINE_AA,
                        )
                    face_index = int(eye_index / 2)
                    eh, ew, _ = eye_image_raw.shape
                    v0 = face_index * 2 * eh
                    v1 = v0 + eh
                    v2 = v1 + eh
                    u0 = 0 if eye_side == 'left' else ew
                    u1 = u0 + ew
                    bgr[v0:v1, u0:u1] = eye_image_raw
                    bgr[v1:v2, u0:u1] = eye_image_annotated

                    # Visualize preprocessing results
                    frame_landmarks = (frame['smoothed_landmarks']
                                       if 'smoothed_landmarks' in frame else
                                       frame['landmarks'])
                    for f, face in enumerate(frame['faces']):
                        try:
                            for landmark in frame_landmarks[f][:-1]:
                                cv.drawMarker(
                                    bgr,
                                    tuple(np.round(landmark).astype(np.int32)),
                                    color=(0, 0, 255),
                                    markerType=cv.MARKER_STAR,
                                    markerSize=2,
                                    thickness=1,
                                    line_type=cv.LINE_AA)
                        except IndexError:
                            print(
                                f"Caught index error on {frame['frame_index']}")
                        cv.rectangle(
                            bgr,
                            tuple(np.round(face[:2]).astype(np.int32)),
                            tuple(
                                np.round(np.add(face[:2],
                                                face[2:])).astype(np.int32)),
                            color=(0, 255, 255),
                            thickness=1,
                            lineType=cv.LINE_AA,
                        )

                    # Transform predictions
                    eye_landmarks = np.concatenate([
                        eye_landmarks,
                        [[
                            eye_landmarks[-1, 0] + eye_radius,
                            eye_landmarks[-1, 1]
                        ]]
                    ])
                    eye_landmarks = np.asmatrix(
                        np.pad(eye_landmarks, ((0, 0), (0, 1)),
                               'constant',
                               constant_values=1.0))
                    eye_landmarks = (
                        eye_landmarks *
                        eye['inv_landmarks_transform_mat'].T)[:, :2]
                    eye_landmarks = np.asarray(eye_landmarks)
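                    # Landmark layout (as used below): 0-7 eyelid contour,
                    # 8-15 iris contour, 16 iris centre, 17 eyeball centre,
                    # 18 the helper point appended above.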
                    eyelid_landmarks = eye_landmarks[0:8, :]
                    iris_landmarks = eye_landmarks[8:16, :]
                    iris_centre = eye_landmarks[16, :]
                    eyeball_centre = eye_landmarks[17, :]
                    eyeball_radius = np.linalg.norm(eye_landmarks[18, :] -
                                                    eye_landmarks[17, :])

                    # Smooth and visualize gaze direction
                    num_total_eyes_in_frame = len(frame['eyes'])
                    if len(all_gaze_histories) != num_total_eyes_in_frame:
                        all_gaze_histories = [
                            list() for _ in range(num_total_eyes_in_frame)
                        ]
                    gaze_history = all_gaze_histories[eye_index]
                    if can_use_eye:
                        # Visualize landmarks
                        cv.drawMarker(  # Eyeball centre
                            bgr,
                            tuple(np.round(eyeball_centre).astype(np.int32)),
                            color=(0, 255, 0),
                            markerType=cv.MARKER_CROSS,
                            markerSize=4,
                            thickness=1,
                            line_type=cv.LINE_AA,
                        )
                        # cv.circle(  # Eyeball outline
                        #     bgr, tuple(np.round(eyeball_centre).astype(np.int32)),
                        #     int(np.round(eyeball_radius)), color=(0, 255, 0),
                        #     thickness=1, lineType=cv.LINE_AA,
                        # )

                        # Draw "gaze"
                        # from models.elg import estimate_gaze_from_landmarks
                        # current_gaze = estimate_gaze_from_landmarks(
                        #     iris_landmarks, iris_centre, eyeball_centre, eyeball_radius)
                        i_x0, i_y0 = iris_centre
                        e_x0, e_y0 = eyeball_centre
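                        # Spherical eyeball model: assuming the iris centre
                        # lies on a sphere of radius eyeball_radius around the
                        # eyeball centre, pitch (theta) and yaw (phi) follow
                        # from the projected offsets; the sign flips account
                        # for the image y-axis pointing down.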
                        theta = -np.arcsin(
                            np.clip((i_y0 - e_y0) / eyeball_radius, -1.0, 1.0))
                        phi = np.arcsin(
                            np.clip((i_x0 - e_x0) /
                                    (eyeball_radius * -np.cos(theta)), -1.0,
                                    1.0))
                        current_gaze = np.array([theta, phi])
                        gaze_history.append(current_gaze)
                        gaze_history_max_len = 10
                        if len(gaze_history) > gaze_history_max_len:
                            # trim in place so the shared list stored in
                            # all_gaze_histories stays bounded (slicing would
                            # only rebind the local name)
                            del gaze_history[:-gaze_history_max_len]
                        util.gaze.draw_gaze(bgr,
                                            iris_centre,
                                            np.mean(gaze_history, axis=0),
                                            length=120.0,
                                            thickness=1)
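                        # util.gaze.draw_gaze is not shown in this excerpt; a
                        # plausible sketch, assuming (pitch, yaw) in radians:
                        #   dx = -length * np.sin(phi)
                        #   dy = -length * np.sin(theta)
                        #   cv.arrowedLine(bgr, start, start + (dx, dy), ...)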
                    else:
                        gaze_history.clear()

                    if can_use_eyelid:
                        cv.polylines(
                            bgr,
                            [
                                np.round(eyelid_landmarks).astype(
                                    np.int32).reshape(-1, 1, 2)
                            ],
                            isClosed=True,
                            color=(255, 255, 0),
                            thickness=1,
                            lineType=cv.LINE_AA,
                        )

                    if can_use_iris:
                        cv.polylines(
                            bgr,
                            [
                                np.round(iris_landmarks).astype(
                                    np.int32).reshape(-1, 1, 2)
                            ],
                            isClosed=True,
                            color=(0, 255, 255),
                            thickness=1,
                            lineType=cv.LINE_AA,
                        )
                        cv.drawMarker(
                            bgr,
                            tuple(np.round(iris_centre).astype(np.int32)),
                            color=(0, 255, 255),
                            markerType=cv.MARKER_CROSS,
                            markerSize=4,
                            thickness=1,
                            line_type=cv.LINE_AA,
                        )

                    dtime = 1e3 * (time.time() - start_time)
                    if 'visualization' not in frame['time']:
                        frame['time']['visualization'] = dtime
                    else:
                        frame['time']['visualization'] += dtime

                    def _dtime(before_id, after_id):
                        return int(1e3 * (frame['time'][after_id] -
                                          frame['time'][before_id]))

                    def _dstr(title, before_id, after_id):
                        return '%s: %dms' % (title, _dtime(
                            before_id, after_id))

                    if eye_index == len(frame['eyes']) - 1:
                        # Calculate timings
                        frame['time']['after_visualization'] = time.time()
                        fps = int(
                            np.round(1.0 / (time.time() - last_frame_time)))
                        fps_history.append(fps)
                        if len(fps_history) > 60:
                            fps_history = fps_history[-60:]
                        fps_str = '%d FPS' % np.mean(fps_history)
                        last_frame_time = time.time()
                        fh, fw, _ = bgr.shape
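                        # Draw the FPS label twice, black beneath white with a
                        # 1 px offset, for a drop shadow that stays legible on
                        # any background.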
                        cv.putText(bgr,
                                   fps_str,
                                   org=(fw - 110, fh - 20),
                                   fontFace=cv.FONT_HERSHEY_DUPLEX,
                                   fontScale=0.8,
                                   color=(0, 0, 0),
                                   thickness=1,
                                   lineType=cv.LINE_AA)
                        cv.putText(bgr,
                                   fps_str,
                                   org=(fw - 111, fh - 21),
                                   fontFace=cv.FONT_HERSHEY_DUPLEX,
                                   fontScale=0.79,
                                   color=(255, 255, 255),
                                   thickness=1,
                                   lineType=cv.LINE_AA)
                        if not args.headless:
                            cv.imshow('vis', bgr)
                        last_frame_index = frame_index

                        # Record frame?
                        if args.record_video:
                            video_out_queue.put_nowait(frame_index)

                        # Quit?
                        if cv.waitKey(1) & 0xFF == ord('q'):
                            return

                        # Print timings
                        if frame_index % 60 == 0:
                            latency = _dtime('before_frame_read',
                                             'after_visualization')
                            processing = _dtime('after_frame_read',
                                                'after_visualization')
                            timing_string = ', '.join([
                                _dstr('read', 'before_frame_read',
                                      'after_frame_read'),
                                _dstr('preproc', 'after_frame_read',
                                      'after_preprocessing'),
                                'infer: %dms' %
                                int(frame['time']['inference']),
                                'vis: %dms' %
                                int(frame['time']['visualization']),
                                'proc: %dms' % processing,
                                'latency: %dms' % latency,
                            ])
                            print('%08d [%s] %s' %
                                  (frame_index, fps_str, timing_string))
Example #31
myMatcher = FeatureMatcher(features1, cornerIndexOrig[0:10],
                           features2, cornerIndexRotated[0:10])
matchingPoints = myMatcher.getMatchingPoints()
print "matching points:", type(matchingPoints), len(matchingPoints)
print "matching points:", matchingPoints[0], len(matchingPoints[0])
print "matching points:", matchingPoints[1], len(matchingPoints[1])

m1 = matchingPoints[0]
m2 = matchingPoints[1]
cornerTest = m1.tolist()
cornerTest2 = m2.tolist()
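# m1 and m2 hold corresponding corner coordinates, apparently stored as
# (row, col) pairs, hence the swap to (x, y) below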

for index in cornerTest:
    x = index[1]
    y = index[0]
    cv2.drawMarker(imageColor, (x, y), (255, 0, 0), markerSize=30)

cv2.namedWindow('image',cv2.WINDOW_NORMAL)
cv2.imshow('image', imageColor)


for index in cornerTest2:
    x = index[1]
    y = index[0]
    cv2.drawMarker(imageRotatedColor, (x, y), (0, 255, 0), markerSize=30)

cv2.namedWindow('image Rotated',cv2.WINDOW_NORMAL)
cv2.imshow('image Rotated', imageRotatedColor)

# test matching
# matchedFeatures = myMatcher.getMatchingFeatures()
Example #32
    "position": [30, 300],
    "initialSize": 100,
    "initialThickness": 5,
    "brigntness": 250,
    "velocity": 4
}
shortestLength2border = min(videoParam["size"][0] - marker["position"][0],
                            marker["position"][0],
                            videoParam["size"][1] - marker["position"][1],
                            marker["position"][1])

for t in range(0, videoParam["time"] * videoParam["fps"]):
    img = np.zeros(videoParam["size"], dtype=np.uint8)
    if t == 0:
        size = marker["initialSize"]
        thic = marker["initialThickness"]

    marker["position"][0] += marker["velocity"]
    if marker["position"][0] >= videoParam["size"][0] - marker["initialSize"]:
        print("Warning: Graph is out of border. Interrupted.")
        break
    cv2.drawMarker(img, tuple(marker["position"]), marker["brightness"],
                   marker["type"], int(size), int(thic))
    outImg = img[verticalCropPoint[0]:verticalCropPoint[1],
                 horizontalCropPoint[0]:horizontalCropPoint[1]]
    out.write(cv2.cvtColor(outImg, cv2.COLOR_GRAY2BGR))
    cv2.imshow("test", outImg)
    cv2.waitKey(10)
out.release()
cv2.destroyAllWindows()
Example #33
imageColor = cv2.imread(imageFileName)
imageGray = cv2.imread(imageFileName, 0)

# get current time before harris
start = cv2.getTickCount()

harrisCorners = HarrisCorner(1, imageGray)
harrisCorners.findCorners(100000)
cornerIndex = harrisCorners.localMaxima(25)
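# HarrisCorner is a custom class not shown in this example; findCorners(100000)
# appears to threshold the Harris response, and localMaxima(25) presumably
# applies non-maximum suppression within 25-pixel windows, returning (row, col)
# indices of the surviving corners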
# get current time after harris
end = cv2.getTickCount()

elapsed = (end - start) / cv2.getTickFrequency()
print "Time taken by Harris:", elapsed, "seconds"


print imageGray.shape, cornerIndex.shape
# print cornerIndex

for index in cornerIndex:
    # corner indices are (row, col); drawMarker expects (x, y)
    x = index[1]
    y = index[0]
    cv2.drawMarker(imageColor, (x, y), (0, 0, 0), markerSize=3)

cv2.namedWindow('image',cv2.WINDOW_NORMAL)
cv2.imshow('image', imageColor)

cv2.waitKey(0)
cv2.destroyAllWindows()
Example #34
def ColorDetector(Color):
    thresholdValueGreen = 50
    thresholdValueRed = 50
    thresholdValueYellow = 50
    thresholdValueBlue = 65
    # Read image
    frame = cv2.imread('/home/ros/catkin_repo/ros/src/grp6_proj/nodes/top1')

    # resize frame
    # frame = cv2.resize(frame, (640, 480))
    # convert to HSV colorspace
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Show image on screen
    cv2.imshow('frame', frame)
    # wait until keypress
    cv2.waitKey()
    
    # find color green
    if Color == 'green':
        # Range for green
        # Hue runs from 0-180 in OpenCV (Photoshop value divided by 2)
        # Saturation: 255 is full color, 0 is white (Photoshop values are in %)
        # Value/brightness: 0-255, where 255 is full brightness and 0 is black
        # (Photoshop values are in %)
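        # e.g. a green hue of 80-150 degrees in Photoshop maps to 40-75 on
        # OpenCV's scale, matching the bounds below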
        green_lower = np.array([40,65,100])
        green_upper = np.array([75,255,255])
        mask_green = cv2.inRange(hsv, green_lower, green_upper)

        exr = cv2.bitwise_and(frame, frame, mask=mask_green)
        cv2.imshow("Frame", exr)

        # exr is still BGR (it was masked from the BGR frame), so convert
        # directly to gray; the original HSV2BGR step misread the channels
        exr = cv2.cvtColor(exr, cv2.COLOR_BGR2GRAY)
        # threshold for when the color counts as present
        thresholded = ((exr > thresholdValueGreen) * 255).astype('uint8')

    
    elif Color == 'red':
        # Range for lower red
        red_lower = np.array([0,150,100])
        red_upper = np.array([10,255,255])
        mask_red1 = cv2.inRange(hsv, red_lower, red_upper)

        # Range for upper red
        red_lower = np.array([170,150,50])
        red_upper = np.array([180,255,255])
        mask_red2 = cv2.inRange(hsv, red_lower, red_upper)

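        # red wraps around hue 0 on OpenCV's 0-180 hue scale, so masks covering
        # both ends of the range are combined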
        mask_red = cv2.bitwise_or(mask_red1, mask_red2)
        exr = cv2.bitwise_and(frame, frame, mask=mask_red)
    
        cv2.imshow("Frame", exr)

        # exr is BGR, so convert directly to gray (the HSV2BGR step was spurious)
        exr = cv2.cvtColor(exr, cv2.COLOR_BGR2GRAY)

        thresholded = ((exr > thresholdValueRed) * 255).astype('uint8')
        
    
    elif Color == 'blue':
        # Range for blue
        blue_lower = np.array([105,50,50])
        blue_upper = np.array([125,255,255])
        mask_blue = cv2.inRange(hsv, blue_lower, blue_upper)

        exr = cv2.bitwise_and(frame, frame, mask=mask_blue)
        cv2.imshow("Frame", exr)

        # exr is BGR, so convert directly to gray (the HSV2BGR step was spurious)
        exr = cv2.cvtColor(exr, cv2.COLOR_BGR2GRAY)

        thresholded = ((exr > thresholdValueBlue) * 255).astype('uint8')
    
    elif Color == 'yellow':
        # Range for yellow
        yellow_lower = np.array([25,80,20])
        yellow_upper = np.array([35,255,255])
        mask_yellow = cv2.inRange(hsv, yellow_lower, yellow_upper)

        exr = cv2.bitwise_and(frame, frame, mask=mask_yellow)
        cv2.imshow("Frame", exr)

        # exr is BGR, so convert directly to gray (the HSV2BGR step was spurious)
        exr = cv2.cvtColor(exr, cv2.COLOR_BGR2GRAY)

        thresholded = ((exr > thresholdValueYellow) * 255).astype('uint8')
    else:
        print('***Please choose a valid color***')
        return  # thresholded would be undefined past this point

    
    # show those areas
    #cv2.imshow('exrWithThreshold', thresholded)
    cv2.waitKey()

    # this code comes from the course material - written by: Mads Dyrmann and Henrik Skov Midtiby
    # Find connected components
    # (three return values is the OpenCV 3.x signature; OpenCV 4.x returns two)
    _, contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    
    # loop through all components, one at a time
    for cnt in contours:
        # What is the area of the component?
        areal = cv2.contourArea(cnt)

        # Do something if the area is greater than 1.
        if areal > 1:
            # get the center of mass
            M = cv2.moments(cnt)
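            # centroid from the spatial moments: cx = m10/m00, cy = m01/m00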
            cx = int(M['m10']/M['m00'])
            cy = int(M['m01']/M['m00'])
            #print(cx, cy)
            #coordinates = "x: %s, y: %d" % (cx,cy)

            # alternative:
            # cx = sum(cnt[:,0][:,0]) / len(cnt[:,0][:,0])
            # cy = sum(cnt[:,0][:,1]) / len(cnt[:,0][:,1])

            # Print only the coordinates that fall within our limits (the table)
            A1 = (cy>34 and cy<310) and (cx>1 and cx<237)
            A2 = (cy>34 and cy<310) and (cx>381 and cx<600)
            A3 = (cy>34 and cy<233) and (cx>237 and cx<381)
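            # A1, A2 and A3 are three pixel-coordinate rectangles that together
            # cover the table area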
            if (A1 or A2 or A3):
                print (cx)
                print (cy)
            
            #print ('x:', cx,'y:', cy)
            
            # Draw a crosshair on the image to mark the element.
            # <alter these lines>
            frame = cv2.drawMarker(frame, (cx, cy), (255, 0, 255), markerType=cv2.MARKER_CROSS, thickness=2)
            # </alter these lines>
    # show the image with crosses drawn on it
    cv2.imshow('frame annotated', frame)

    cv2.waitKey(0)   
    
    return