Example #1
    def rescale(self, image, intrinsic):
        """resize the image and intrinsic given a relative scale
        """

        intrinsic_out = uts.intrinsic_vec_to_mat(intrinsic, self.image_size)
        hs, ws = self.image_size
        image_out = cv2.resize(image.copy(), (ws, hs))

        return image_out, intrinsic_out
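A minimal usage sketch of this resizing step, assuming the intrinsic vector is stored as [fx, fy, cx, cy] normalized by the image size (which the scaling by image_size inside uts.intrinsic_vec_to_mat suggests). The helper is re-implemented here only for illustration; the names and numbers are hypothetical:

import numpy as np
import cv2

def intrinsic_vec_to_mat(intrinsic, image_size):
    # stand-in for uts.intrinsic_vec_to_mat: build a 3x3 K matrix from a
    # normalized [fx, fy, cx, cy] vector, scaled to (height, width) = image_size
    hs, ws = image_size
    fx, fy, cx, cy = intrinsic
    return np.array([[fx * ws, 0.,      cx * ws],
                     [0.,      fy * hs, cy * hs],
                     [0.,      0.,      1.]])

image = np.zeros((2710, 3384, 3), np.uint8)        # placeholder full-resolution frame
image_size = (1355, 1692)                          # (height, width) target
hs, ws = image_size
image_out = cv2.resize(image, (ws, hs))            # note: cv2.resize takes (width, height)
K = intrinsic_vec_to_mat([0.68, 0.85, 0.50, 0.50], image_size)   # placeholder intrinsics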
Example #2
    def showAnn(self, image_name, settings, save_dir, alpha=0.8):
        """Show the annotation of a pose file in an image
        Input:
            image_name: the name of image
        Output:
            depth: a rendered depth map of each car
            masks: an instance mask of the label
            image_vis: an image show the overlap of car model and image
        """

        car_pose_file = '%s/%s.json' % (self._data_config['pose_dir'],
                                        image_name)
        with open(car_pose_file) as f:
            car_poses = json.load(f)
        image_file = '%s/%s.jpg' % (self._data_config['image_dir'], image_name)
        image = cv2.imread(image_file, cv2.IMREAD_UNCHANGED)[:, :, ::-1]  # BGR -> RGB

        #intrinsic = self.dataset.get_intrinsic(image_name)
        ### we use only camera5 intrinsics
        intrinsic = self.dataset.get_intrinsic("Camera_5")
        self.intrinsic = uts.intrinsic_vec_to_mat(intrinsic)

        merged_image = image.copy()
        mask_all = np.zeros(image.shape)
        for i, car_pose in enumerate(car_poses):
            car_name = car_models.car_id2name[car_pose['car_id']].name
            mask = self.render_car_cv2(car_pose['pose'], car_name, image)
            mask_all += mask

        # normalize to 0-255; the epsilon guards against images without any car
        mask_all = mask_all * 255 / (mask_all.max() + np.spacing(1))
        cv2.addWeighted(image.astype(np.uint8), 1.0, mask_all.astype(np.uint8),
                        alpha, 0, merged_image)

        # Save figure
        plt.close('all')
        fig = plt.figure(frameon=False)
        # size the canvas so that the saved png matches the image resolution at dpi=10
        fig.set_size_inches(image.shape[1] / 10., image.shape[0] / 10.)
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        fig.add_axes(ax)
        ax.imshow(merged_image)

        save_set_dir = os.path.join(save_dir, settings)
        if not os.path.exists(save_set_dir):
            os.makedirs(save_set_dir)
        fig.savefig(os.path.join(save_set_dir, image_name + '.png'),
                    dpi=10)

        return image
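The visualization boils down to an alpha blend of the accumulated car masks over the photo via cv2.addWeighted. A self-contained sketch of just that blending step on synthetic data (the file name and values are illustrative only):

import cv2
import numpy as np

image = np.full((480, 640, 3), 80, np.uint8)          # stand-in for the RGB photo
mask_all = np.zeros((480, 640, 3), np.float64)
mask_all[100:300, 200:400, 1] = 3.0                   # pretend several rendered car masks were summed here

# normalize the summed masks to 0-255, then blend: out = 1.0 * image + alpha * mask
mask_all = mask_all * 255 / (mask_all.max() + np.spacing(1))
merged = cv2.addWeighted(image, 1.0, mask_all.astype(np.uint8), 0.8, 0)

# the pipeline above works in RGB, so convert back to BGR before handing to OpenCV I/O
cv2.imwrite('overlay.png', cv2.cvtColor(merged, cv2.COLOR_RGB2BGR))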
Example #3
    def convert(self, pose_file_in, pose_file_out):
        """ Convert the raw labelled file to required json format
        Input:
            file_name: str filename
        """
        car_poses = self.read_car_pose(pose_file_in)
        car_num = len(car_poses)
        MAX_DEPTH = self.visualizer.MAX_DEPTH
        image_size = self.visualizer.image_size
        intrinsic = self.visualizer.dataset.get_intrinsic(pose_file_in)
        self.visualizer.intrinsic = uts.intrinsic_vec_to_mat(
            intrinsic, image_size)
        self.depth = MAX_DEPTH * np.ones(image_size)
        self.mask = np.zeros(self.depth.shape)
        vis_rate = np.zeros(car_num)

        for i, car_pose in enumerate(car_poses):
            car_name = car_models.car_id2name[car_pose['car_id']].name
            depth, mask = self.visualizer.render_car(car_pose['pose'],
                                                     car_name)
            self.mask, self.depth = self.visualizer.merge_inst(
                depth, i + 1, self.mask, self.depth)
            # fraction of this car's rendered pixels that survive occlusion
            vis_rate[i] = np.float32(np.sum(self.mask == (i + 1))) / (
                np.float32(np.sum(mask)) + np.spacing(1))

        keep_idx = []
        for i, car_pose in enumerate(car_poses):
            area = np.round(
                np.float32(np.sum(self.mask == (i + 1))) /
                (self.visualizer.scale**2))
            car_pose['pose'] = car_pose['pose'].tolist()
            car_pose['area'] = int(area)
            car_pose['visible_rate'] = float(vis_rate[i])
            # keep only cars whose rendered area is larger than one pixel
            if area > 1:
                keep_idx.append(i)

        car_poses = [car_poses[idx] for idx in keep_idx]
        with open(pose_file_out, 'w') as f:
            json.dump(car_poses,
                      f,
                      sort_keys=True,
                      indent=4,
                      ensure_ascii=False)
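merge_inst is not shown in this snippet; a plausible minimal sketch of such a merge, assuming it implements the usual z-buffer rule (each instance claims the pixels where its rendered depth is the closest seen so far), which is what the call pattern above suggests. Names and the MAX_DEPTH sentinel are hypothetical:

import numpy as np

MAX_DEPTH = 1e4   # sentinel for "no geometry rendered at this pixel"

def merge_inst(depth, inst_id, total_mask, total_depth):
    # hypothetical re-implementation: pixels covered by this instance...
    rendered = depth < MAX_DEPTH
    # ...that are also closer than anything merged before win the instance label
    in_front = rendered & (depth < total_depth)
    total_mask = np.where(in_front, inst_id, total_mask)
    total_depth = np.where(in_front, depth, total_depth)
    return total_mask, total_depth

zbuf = np.full((4, 4), MAX_DEPTH)
mask = np.zeros((4, 4), dtype=int)
car_depth = np.full((4, 4), MAX_DEPTH)
car_depth[:2, :2] = 5.0                               # this car covers the top-left corner
mask, zbuf = merge_inst(car_depth, 1, mask, zbuf)

With this rule, np.sum(total_mask == i + 1) counts only the pixels of car i that are not occluded by a nearer car, which is what the visible_rate ratio above relies on.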
Example #4
    def showAnn(self, image_name, settings, save_dir, alpha=0.8):
        """Show the annotation of a pose file in an image
        Input:
            image_name: the name of image
        Output:
            depth: a rendered depth map of each car
            masks: an instance mask of the label
            image_vis: an image show the overlap of car model and image
        """

        car_pose_file = '%s/%s.json' % (self._data_config['pose_dir'], image_name)
        with open(car_pose_file) as f:
            car_poses = json.load(f)
        image_file = '%s/%s.jpg' % (self._data_config['image_dir'], image_name)
        image = cv2.imread(image_file, cv2.IMREAD_UNCHANGED)  # keep BGR for cv2.imwrite below

        #intrinsic = self.dataset.get_intrinsic(image_name)
        ### we use only camera5 intrinsics
        intrinsic = self.dataset.get_intrinsic("Camera_5")
        self.intrinsic = uts.intrinsic_vec_to_mat(intrinsic)

        merged_image = image.copy()
        mask_all = np.zeros(image.shape)
        for i, car_pose in enumerate(car_poses):
            car_name = car_models.car_id2name[car_pose['car_id']].name
            mask = self.render_car_cv2(car_pose['pose'], car_name, image)
            mask_all += mask

        # normalize to 0-255; the epsilon guards against images without any car
        mask_all = mask_all * 255 / (mask_all.max() + np.spacing(1))
        cv2.addWeighted(image.astype(np.uint8), 1.0, mask_all.astype(np.uint8), alpha, 0, merged_image)

        output_path = os.path.join(save_dir, settings, image_name + '.png')
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        cv2.imwrite(output_path, merged_image)

        return image
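Unlike Example #2, this variant never flips the channels, so the image stays in OpenCV's native BGR order and can be written back with cv2.imwrite directly. A short illustrative sketch of the two conventions (the file names are hypothetical):

import cv2
import numpy as np

bgr = cv2.imread('frame.jpg', cv2.IMREAD_UNCHANGED)        # OpenCV reads BGR
rgb = bgr[:, :, ::-1]                                       # RGB view, as used in Examples #2 and #5 for matplotlib

cv2.imwrite('out.png', bgr)                                 # correct colors: imwrite expects BGR
cv2.imwrite('out_swapped.png', np.ascontiguousarray(rgb))   # saving the RGB view swaps red and blue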
Example #5
    def findTrans(self, image_name):
        """Show the annotation of a pose file in an image
        Input:
            image_name: the name of image
        Output:
            depth: a rendered depth map of each car
            masks: an instance mask of the label
            image_vis: an image show the overlap of car model and image
        """

        car_pose_file = '%s/%s.json' % (self._data_config['pose_dir'], image_name)
        with open(car_pose_file) as f:
            car_poses = json.load(f)
        image_file = '%s/%s.jpg' % (self._data_config['image_dir'], image_name)
        image = cv2.imread(image_file, cv2.IMREAD_UNCHANGED)[:, :, ::-1]  # BGR -> RGB

        #intrinsic = self.dataset.get_intrinsic(image_name)
        ### we use only camera5 intrinsics
        intrinsic = self.dataset.get_intrinsic("Camera_5")
        self.intrinsic = uts.intrinsic_vec_to_mat(intrinsic)

        merged_image = image.copy()

        dis_trans_all = []
        for car_pose in car_poses:
            car_name = car_models.car_id2name[car_pose['car_id']].name

            car = self.car_models[car_name]
            pose = np.array(car_pose['pose'])
            # project 3D points to 2d image plane
            rmat = uts.euler_angles_to_rotation_matrix(pose[:3])

            x_y_z_R = np.matmul(rmat, np.transpose(np.float32(car['vertices'])))
            x_y_z = x_y_z_R + pose[3:][:, None]
            x2 = x_y_z[0]/x_y_z[2]
            y2 = x_y_z[1]/x_y_z[2]
            u = intrinsic[0] * x2 + intrinsic[2]
            v = intrinsic[1] * y2 + intrinsic[3]

            # unpack the intrinsic vector [fx, fy, cx, cy]
            fx = intrinsic[0]
            fy = intrinsic[1]
            cx = intrinsic[2]
            cy = intrinsic[3]

            xc = ((u.max() + u.min())/2 - cx) / fx
            yc = ((v.max() + v.min())/2 - cy) / fy
            ymin = (v.min() - cy) / fy
            ymax = (v.max() - cy) / fy
            Rymin = x_y_z_R[1, :].min()
            Rymax = x_y_z_R[1, :].max()

            Rxc = x_y_z_R[0, :].mean()
            Ryc = x_y_z_R[1, :].mean()
            Rzc = x_y_z_R[2, :].mean()

            # Rxc = 0
            # Ryc = 0
            # Rzc = 0
            # Rxc = (x_y_z_R[0, :].max() + x_y_z_R[0, :].min())/2
            # Ryc = (x_y_z_R[1, :].max() + x_y_z_R[1, :].min())/2
            # Rzc = (x_y_z_R[2, :].max() + x_y_z_R[2, :].min())/2
            # use the bottom of the box: the car's highest point tends to sit
            # near the center of the silhouette, so the top edge is less reliable
            #zc = (Ryc - Rymin) / (yc - ymin)
            zc = (Ryc - Rymax) / (yc - ymax)

            xt = zc * xc - Rxc
            yt = zc * yc - Ryc
            zt = zc - Rzc
            pred_pose = np.array([xt, yt, zt])
            dis_trans = np.linalg.norm(pred_pose - pose[3:])

            # pose_pred_all = np.concatenate([car_pose['pose'][:3], pred_pose])
            # mask = self.render_car_cv2(pose_pred_all, car_name, image)
            # cv2.addWeighted(image.astype(np.uint8), 1.0, mask.astype(np.uint8), 0.5, 0, merged_image)
            # plt.imshow(merged_image)

            print(dis_trans)
            dis_trans_all.append(dis_trans)

        return dis_trans_all

        # unreachable experimental code, kept for reference: alternative
        # translation estimates from the full bounding-box extents
        if False:
            xmin = (u.min() - cx) / fx
            xmax = (u.max() - cx) / fx
            ymin = (v.min() - cy) / fy
            ymax = (v.max() - cy) / fy

            Rxmin = x_y_z_R[0, :].min()
            Rxmax = x_y_z_R[0, :].max()
            Rymin = x_y_z_R[1, :].min()
            Rymax = x_y_z_R[1, :].max()
            Rzmin = x_y_z_R[2, :].min()
            Rzmax = x_y_z_R[2, :].max()

            # z1 = (Rxmax - Rxmin) / (xmax - xmin)
            # z2 = (Rymax - Rymin) / (ymax - ymin)
            #xt = (xmax*xmin) /(ymax*xmin-ymin*xmax) * (ymin*Rxmin/xmin - ymax*Rxmax/ymin - Rymin)
            xt = (Rxmax * xmin - Rxmin * xmax) / (xmax-xmin)
            yt = (Rymax * ymin - Rymin * ymax) / (ymax-ymin)

            ztxmin = (xt + Rxmin) /xmin - Rzmin
            ztxmax = (xt + Rxmax) / xmax - Rzmin
            ztymin = (yt + Rymin) / ymin - Rzmin
            ztymax = (yt + Rymax) / ymax - Rzmin

            pred_pose = np.array([xt, yt, ztymin])
            dis_trans = np.linalg.norm(pred_pose - pose[3:])

            pred_pose = np.array([xt, yt, ztxmin])
            dis_trans = np.linalg.norm(pred_pose - pose[3:])
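The recovery above rests on a similar-triangles approximation: if every vertex sits at roughly the same depth zc, its normalized image coordinate is y ≈ (Ry + yt) / zc, so differencing the box bottom against the box center cancels yt and gives zc ≈ (Ryc - Rymax) / (yc - ymax), after which xt, yt, zt follow by back-substitution. A small self-contained numeric check of that relation on a synthetic point cloud (all values hypothetical; the error grows as the object gets closer to the camera):

import numpy as np

rng = np.random.default_rng(0)
x_y_z_R = rng.uniform(-0.5, 0.5, size=(3, 500))       # rotated model vertices
t_true = np.array([2.0, 1.5, 30.0])                    # ground-truth translation

cam = x_y_z_R + t_true[:, None]                        # camera-frame points
x2, y2 = cam[0] / cam[2], cam[1] / cam[2]              # normalized image coordinates

xc = (x2.max() + x2.min()) / 2                         # 2D bounding-box center
yc = (y2.max() + y2.min()) / 2
Rxc, Ryc, Rzc = x_y_z_R.mean(axis=1)
Rymax = x_y_z_R[1].max()

zc = (Ryc - Rymax) / (yc - y2.max())                   # depth from the vertical extent
t_est = np.array([zc * xc - Rxc, zc * yc - Ryc, zc - Rzc])
print(t_est, t_true)                                    # t_est roughly matches t_true for distant objects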