import numpy as np
from skimage.transform import ProjectiveTransform


def transform_bounding_parallelipiped_to_bounding_square(
        width, height, corner_points):
    # Estimate the projective transform that maps the quadrilateral's four
    # corners onto the unit square. The last point is dropped (the input
    # appears to be a closed polygon whose last point repeats the first).
    t = ProjectiveTransform()
    src = corner_points[0:-1]
    dst = np.asarray([[0, 0], [0, 1], [1, 1], [1, 0]])
    if not t.estimate(src, dst):
        raise RuntimeError("ProjectiveTransform.estimate failed")
    # Build an integer pixel grid for the output (bounding square) image.
    H = np.ceil(height).astype(int)
    W = np.ceil(width).astype(int)
    x2, y2 = np.mgrid[:H, :W]
    crds = np.hstack((x2.reshape(-1, 1), y2.reshape(-1, 1))).astype(int)
    # Normalize the output coordinates into [0, 1] so they live in
    # unit-square space, then map them back into the source image with the
    # inverse transform. The columns are swapped to match the source image's
    # index order and clamped to 1023, the last valid index of a
    # 1024-pixel source image.
    crds1 = crds / np.array([H, W])
    scrds = np.minimum(np.round(t.inverse(crds1)[:, ::-1]).astype(int), 1023)
    return H, W, scrds, crds
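
A minimal usage sketch (the 1024x1024 source image and the quadrilateral are hypothetical; `scrds`/`crds` act as a lookup table for a nearest-neighbor warp):

# Warp the region inside a quadrilateral to an upright 200x300 image.
img = np.random.rand(1024, 1024)                       # placeholder source image
quad = np.array([[100, 120], [90, 620], [700, 640],    # closed polygon: the
                 [680, 110], [100, 120]], dtype=float)  # last point repeats the first
H, W, scrds, crds = transform_bounding_parallelipiped_to_bounding_square(
    300, 200, quad)
out = np.zeros((H, W))
out[crds[:, 0], crds[:, 1]] = img[scrds[:, 0], scrds[:, 1]]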
Example #2
import pickle

import numpy as np
from skimage.transform import ProjectiveTransform, warp
# (the import of the project-local AddOffset helper is not shown in the
# original snippet)


class Homography:
    def __init__(self, config):
        '''
        Initialization for the registration class.

        Parameters
        -------------
        config: Config
            configuration class for the traffic intersection
        '''

        self.config = config

        # Four corners of the intersection, hard-coded in camera space
        self.corners = self.config.street_corners
        # Four corners of the intersection, hard-coded in transformed space
        self.st_corners = self.config.simulator_corners

        # Projective transform for going from the camera to the simulator coordinate frame
        self.tf_mat = ProjectiveTransform()

        self.sc, self.cc = self.load_homography_data()
        self.tf_mat.estimate(self.sc, self.cc)

        self.af = AddOffset()

        #self.tf_mat.estimate(cc, sc,4)

    def load_homography_data(self):
        # Pickle files must be opened in binary mode.
        sim_corners = pickle.load(open('homography/simulator.p', 'rb'))
        cam_corners = pickle.load(open('homography/camera.p', 'rb'))

        return np.array(sim_corners), np.array(cam_corners)

    def determine_lane(self, point):
        '''
        Determines the lane the current trajectory is on:
        both the index of the lane and the side of the road.

        Parameters
        --------------
        point: np.array([x,y])
            The current pose of the car in x,y space

        Returns
        -------------
        dict containing the current lane index and road side, or None if
        the point is not inside any lane
        '''

        for i, lane in enumerate(self.config.lanes):
            if lane.contains_point(point):
                side = lane.side_of_road(point)
                return {'lane_index': i, 'road_side': side}

        return None

    def apply_homography_on_img(self, img):
        # Warp the camera image into the simulator frame
        # (800x800 output; order=0 is nearest-neighbor interpolation).
        img_warped = warp(img, self.tf_mat, output_shape=(800, 800), order=0)
        # cv2.imshow('Test Homography', img_warped)
        # cv2.waitKey(30)

        return img_warped

    def is_near_edge(self, y):
        # A point counts as near the edge when it lies within 15 pixels
        # of the image border at self.config.img_dim[1].
        dist = np.abs(self.config.img_dim[1] - y)
        return dist < 15

    def transform_trajectory(self, trajectory):
        '''
        Takes in a trajectory and converts it to the simulator's frame of
        reference; additionally identifies the current lane of each object.

        Parameters
        ---------------
        trajectory: a list of frames
            Each frame is a list of state tuples, one per object, for a
            single timestep

        Returns
        ---------------
        A list of frames, where each frame is a list of state dictionaries
        for a single timestep
        '''

        tf_traj = []
        for frame in trajectory:

            new_frame = []

            for obj_dict in frame:
                # Scale normalized detector coordinates up to pixels.
                x = self.config.img_dim[0] * obj_dict['x']
                y = self.config.img_dim[1] * obj_dict['y']

                cam_pose = np.array([x, y])

                offset_pose = self.af.add_offset(cam_pose)

                #offset_pose = cam_pose

                # Map the offset-corrected camera pose into the simulator frame.
                pose = self.tf_mat.inverse(offset_pose)[0]

                new_obj_dict = {
                    'pose': pose,
                    'class_label': obj_dict['cls_label'],
                    'timestep': obj_dict['t'],
                    'lane': self.determine_lane(pose),
                    'is_initial_state': obj_dict['is_initial_state'],
                    'cam_pose': cam_pose,
                    'is_near_edge': self.is_near_edge(y)
                }

                new_frame.append(new_obj_dict)

            if len(new_frame) > 0:
                tf_traj.append(new_frame)

        return tf_traj
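
A minimal usage sketch (everything here is hypothetical: `Config` is assumed to expose `street_corners`, `simulator_corners`, `img_dim`, and `lanes` exactly as the class above accesses them):

config = Config()
homography = Homography(config)

# One frame with a single detected object, in the state format the
# class expects.
frame = [{'x': 0.42, 'y': 0.77, 'cls_label': 'car', 't': 0,
          'is_initial_state': True}]
sim_trajectory = homography.transform_trajectory([frame])
print(sim_trajectory[0][0]['pose'], sim_trajectory[0][0]['lane'])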
Example #3

def click_event(event, x, y, flags, param):
    # NOTE: the top of this snippet is truncated in the original listing;
    # `img`, `result`, `four_point`, `line_for_correct`, `dst`, and the
    # ProjectiveTransform `t` are defined (and `t` estimated) further up.
    # The guard below is an assumed reconstruction: the corrected board
    # lines are drawn once all four corner points have been collected.
    if len(four_point) == 4:
        corrected_line = t(line_for_correct).astype(int)
        cv.line(result, (corrected_line[0][0], corrected_line[0][1]),
                (corrected_line[5][0], corrected_line[5][1]), (255, 0, 0), 3)
        cv.line(result, (corrected_line[1][0], corrected_line[1][1]),
                (corrected_line[4][0], corrected_line[4][1]), (255, 0, 0), 3)
        cv.line(result, (corrected_line[2][0], corrected_line[2][1]),
                (corrected_line[7][0], corrected_line[7][1]), (255, 0, 0), 3)
        cv.line(result, (corrected_line[3][0], corrected_line[3][1]),
                (corrected_line[6][0], corrected_line[6][1]), (255, 0, 0), 3)

    if event == cv.EVENT_LBUTTONDOWN:
        print(x, ' ', y)
        if len(four_point) < 4:
            # Still collecting the four source corner points.
            four_point.append([x, y])
        else:
            # Once the corners are fixed, project further clicks into the
            # corrected image and mark them there.
            data_local = t([x, y])
            cv.circle(result, (int(data_local[0][0]), int(data_local[0][1])),
                      3, (255, 231, 0), -1)
        cv.circle(img, (x, y), 3, (255, 255, 255), -1)

    cv.imshow('image', img)
    cv.imshow('result', result)


# The 'image' window must already exist (created in the truncated part of
# this snippet) before a mouse callback can be attached to it.
cv.setMouseCallback('image', click_event)

cv.waitKey(0)
print(dst)
print(t.inverse(dst))
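
For reference, the truncated setup above would estimate `t` along these lines (a sketch only; the destination rectangle size and the example click coordinates are assumptions):

import numpy as np
from skimage.transform import ProjectiveTransform

# Map the four clicked source corners onto an upright rectangle; the
# 400x400 target size and the click coordinates are made up for the sketch.
four_point = [[120, 95], [530, 88], [545, 470], [110, 480]]
src = np.array(four_point, dtype=float)
dst = np.array([[0, 0], [400, 0], [400, 400], [0, 400]], dtype=float)
t = ProjectiveTransform()
if not t.estimate(src, dst):
    raise RuntimeError("estimate failed")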
Example #4
            # filter out weak detections by requiring the confidence to be
            # greater than the minimum confidence
            if confidence > args["confidence"]:
                # extract the index of the class label from the `detections`,
                # then compute the (x, y)-coordinates of the bounding box for
                # the object
                idx = int(detections[0, 0, i, 1])
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                if CLASSES[idx] != "person":
                    continue

                # The projective transform gives a location relative to cell
                # (5, 11) of the layout grid; each cell spans 200 units.
                relativeLoc = tuple(
                    projectiveTransform.inverse(
                        np.array([[(startX + endX) / 2,
                                   endY - 0.05 * (endY - startY)]]))[0])
                absoluteLoc = (5 + int(relativeLoc[0] // 200),
                               11 + int(relativeLoc[1] // 200))
                label = "{}: {:.2f}%, at {}".format(CLASSES[idx],
                                                    confidence * 100,
                                                    absoluteLoc)
                print("[INFO] {}".format(label))
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                              (0, 0, 255), 2)
                y = startY - 15 if startY - 15 > 15 else startY + 15
                cv2.putText(frame, label, (startX, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

        if args["measure_times"]:
            times.append(time.time() - start1)
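
For context, the `projectiveTransform` used above would be estimated from four floor landmarks; a sketch under assumed coordinates (all points are illustrative, with one layout grid cell spanning 200 units as in the snippet):

import numpy as np
from skimage.transform import ProjectiveTransform

# Estimate the layout -> camera transform from four floor landmarks, so
# that .inverse() maps camera pixels into layout units (hypothetical
# coordinates; one layout grid cell spans 200 units).
layout_pts = np.array([[0, 0], [600, 0], [600, 400], [0, 400]], float)
camera_pts = np.array([[312, 418], [905, 430],
                       [1180, 705], [95, 690]], float)
projectiveTransform = ProjectiveTransform()
projectiveTransform.estimate(layout_pts, camera_pts)

# A detection's foot point in camera space then falls into a layout cell:
foot = np.array([[640.0, 560.0]])
rel = projectiveTransform.inverse(foot)[0]
cell = (5 + int(rel[0] // 200), 11 + int(rel[1] // 200))
print(cell)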