Example #1
    def evaluate_uncertainty(self,
                             mean_pts2d,
                             covar,
                             pose_targets,
                             class_type,
                             intri_type='blender',
                             vote_type=VotingType.BB8,
                             intri_matrix=None):
        points_3d = VotingType.get_pts_3d(vote_type, class_type)

        begin = time.time()
        # Convert each 2x2 keypoint covariance into PnP weights (inverse of its matrix square root)
        cov_invs = []
        for vi in range(covar.shape[0]):
            if covar[vi, 0, 0] < 1e-6 or np.sum(np.isnan(covar)[vi]) > 0:
                cov_invs.append(np.zeros([2, 2]).astype(np.float32))
                continue

            cov_inv = np.linalg.inv(scipy.linalg.sqrtm(covar[vi]))
            cov_invs.append(cov_inv)
        cov_invs = np.asarray(cov_invs)  # pn,2,2
        weights = cov_invs.reshape([-1, 4])  # pn,4
        weights = weights[:, (0, 1, 3)]  # keep (xx, xy, yy); the matrix is symmetric

        if intri_type == 'use_intrinsic' and intri_matrix is not None:
            K = intri_matrix
        else:
            K = self.projector.intrinsic_matrix[intri_type]

        pose_pred = uncertainty_pnp(mean_pts2d, weights, points_3d, K)
        model = self.linemod_db.get_ply_model(class_type)
        diameter = self.linemod_db.get_diameter(class_type)
        self.uncertainty_pnp_cost.append(time.time() - begin)

        if class_type in ['eggbox', 'glue']:
            self.add_metric_sym(pose_pred, pose_targets, model, diameter)
        else:
            self.add_metric(pose_pred, pose_targets, model, diameter)

        self.projection_2d(pose_pred, pose_targets, model, K)
        self.cm_degree_5_metric(pose_pred, pose_targets)

        return pose_pred
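
For reference, the weighting step inside evaluate_uncertainty can be isolated into a small standalone helper. The sketch below is a minimal reconstruction of that step, assuming only numpy and scipy; the name covariances_to_weights is chosen here for illustration and is not part of the original code. Each 2x2 keypoint covariance is mapped to the inverse of its matrix square root, and only the (xx, xy, yy) entries are kept because the matrix is symmetric.

import numpy as np
import scipy.linalg


def covariances_to_weights(covar):
    """Convert per-keypoint 2x2 covariances (pn,2,2) into (pn,3) PnP weights."""
    cov_invs = []
    for vi in range(covar.shape[0]):
        # Degenerate or NaN covariances get zero weight.
        if covar[vi, 0, 0] < 1e-6 or np.any(np.isnan(covar[vi])):
            cov_invs.append(np.zeros([2, 2], dtype=np.float32))
            continue
        cov_invs.append(np.linalg.inv(scipy.linalg.sqrtm(covar[vi])))
    cov_invs = np.asarray(cov_invs)      # pn,2,2
    weights = cov_invs.reshape([-1, 4])  # pn,4
    return weights[:, (0, 1, 3)]         # pn,3 -> (xx, xy, yy)


# An isotropic covariance with variance 4 yields weights (0.5, 0, 0.5).
print(covariances_to_weights(np.array([4.0 * np.eye(2)])))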
Example #2
	def __update_ax__(self, ax, index):
		index = index % self.len

		# Clear axes
		ax.clear()
		ax.set_title("Viewpoint {}".format(index))

		# Load image
		imgPath = os.path.join(self.imgDir, str(index+1).zfill(self.formats['rgbNLeadingZeros'])+'.'+self.formats['rgbFormat'])
		img = self.read_img_np(imgPath)
		height, width, _ = img.shape
		ax.imshow(img)

		# Scatter points and project poses
		x = self.points[index]
		if x is not None:
			nInstances = x.shape[0]
			colorCycler = pltColorCycler()
			for iInstance in range(nInstances):
				color = next(colorCycler)
				ax.scatter(x[iInstance,:,0], x[iInstance,:,1], c=color, edgecolors='black')
				covar = self.covariance[index][iInstance]
				if covar is not None:
					weights = covar_to_weight(covar)
					# Uncertainty-weighted PnP, with plain PnP for comparison
					pose = uncertainty_pnp(x[iInstance], weights, self.keypoints3D, self.K)
					poseBajs = pnp(self.keypoints3D, x[iInstance], self.K)
					bbProj = project(self.bbCorners.T, self.K@pose)
					bbProjBajs = project(self.bbCorners.T, self.K@poseBajs)
					#ax.scatter(bbProj[:,0], bbProj[:,1], marker='*', c=color)
					ax.plot(bbProj[self.cornerOrder, 0], bbProj[self.cornerOrder, 1], c=color)
					ax.plot(bbProjBajs[self.cornerOrder, 0], bbProjBajs[self.cornerOrder, 1], c='brown')
					for iKeypoint in range(covar.shape[0]):
						thisCov = covar[iKeypoint]
						(eigVal, eigVec) = np.linalg.eig(thisCov)
						for iDir in range(2):
							ax.arrow(x[iInstance,iKeypoint,0], x[iInstance,iKeypoint,1], np.sqrt(eigVal[iDir])*eigVec[0,iDir], np.sqrt(eigVal[iDir])*eigVec[1,iDir])

		ax.set_xlim(0, width)
		ax.set_ylim(height, 0)
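
The arrows drawn at the end of __update_ax__ visualise one standard deviation of each keypoint's uncertainty along the eigenvectors of its 2x2 covariance. A minimal self-contained sketch of that idea, assuming only numpy and matplotlib, with a made-up keypoint and covariance for illustration:

import numpy as np
import matplotlib.pyplot as plt

keypoint = np.array([120.0, 80.0])          # hypothetical 2D keypoint (pixels)
covar = np.array([[9.0, 3.0], [3.0, 4.0]])  # hypothetical 2x2 covariance

fig, ax = plt.subplots()
ax.scatter(keypoint[0], keypoint[1], c='r', edgecolors='black')
eigVal, eigVec = np.linalg.eig(covar)
for iDir in range(2):
    # One arrow per eigenvector, scaled to one standard deviation.
    ax.arrow(keypoint[0], keypoint[1],
             np.sqrt(eigVal[iDir]) * eigVec[0, iDir],
             np.sqrt(eigVal[iDir]) * eigVec[1, iDir])
ax.invert_yaxis()  # image coordinates: y grows downwards, as in the example above
plt.show()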
Example #3
def demo():
    net = Resnet18_8s(ver_dim=vote_num * 2, seg_dim=2)
    net = NetWrapper(net).cuda()
    net = DataParallel(net)

    optimizer = optim.Adam(net.parameters(), lr=train_cfg['lr'])
    model_dir = os.path.join(cfg.MODEL_DIR, 'cat_demo')
    load_model(net.module.net, optimizer, model_dir, args.load_epoch)
    data, points_3d, bb8_3d = read_data()
    image, mask, vertex, vertex_weights, pose, corner_target = [
        d.unsqueeze(0).cuda() for d in data
    ]

    # Run the net
    seg_pred, vertex_pred, loss_seg, loss_vertex, precision, recall = net(
        image, mask, vertex, vertex_weights)

    print('vertex_pred.shape')
    print(vertex_pred.shape)
    print(' ')

    print('vertex_pred[0]')
    print(vertex_pred[0])
    print(' ')

    # Various visualizations
    #visualize_vertex_field(vertex_pred,vertex_weights, keypointIdx=3)
    print(seg_pred.shape, mask.shape)
    visualize_mask(np.squeeze(seg_pred.cpu().detach().numpy()),
                   mask.cpu().detach().numpy())
    rgb = Image.open('data/demo/cat.jpg')
    img = np.array(rgb)
    #visualize_overlap_mask(img, np.squeeze(seg_pred.cpu().detach().numpy()), None)

    # Run the ransac voting
    eval_net = DataParallel(EvalWrapper2().cuda())
    #corner_pred = eval_net(seg_pred, vertex_pred).cpu().detach().numpy()[0]
    corner_pred, covar = [
        x.cpu().detach().numpy()[0] for x in eval_net(seg_pred, vertex_pred)
    ]
    print('Keypoint predictions:')
    print(corner_pred)
    print(' ')
    print('covar: ', covar)
    print(' ')
    camera_matrix = np.array([[572.4114, 0., 325.2611],
                              [0., 573.57043, 242.04899],
                              [0., 0., 1.]])

    # Fit pose to points
    #pose_pred = pnp(points_3d, corner_pred, camera_matrix)
    #evaluator = Evaluator()
    #pose_pred = evaluator.evaluate_uncertainty(corner_pred,covar,pose,'cat',intri_matrix=camera_matrix)

    def getWeights(covar):
        cov_invs = []
        for vi in range(covar.shape[0]):  # For every keypoint
            if covar[vi, 0, 0] < 1e-6 or np.sum(np.isnan(covar)[vi]) > 0:
                cov_invs.append(np.zeros([2, 2]).astype(np.float32))
                continue
            cov_inv = np.linalg.inv(scipy.linalg.sqrtm(covar[vi]))
            cov_invs.append(cov_inv)
        cov_invs = np.asarray(cov_invs)  # pn,2,2
        weights = cov_invs.reshape([-1, 4])
        weights = weights[:, (0, 1, 3)]
        return weights

    weights = getWeights(covar)
    pose_pred = uncertainty_pnp(corner_pred, weights, points_3d, camera_matrix)

    print('Predicted pose: \n', pose_pred)
    print('Ground truth pose: \n', pose[0].detach().cpu().numpy())
    print(' ')

    projector = Projector()
    bb8_2d_pred = projector.project(bb8_3d, pose_pred, 'linemod')
    bb8_2d_gt = projector.project(bb8_3d, pose[0].detach().cpu().numpy(),
                                  'linemod')
    image = imagenet_to_uint8(image.detach().cpu().numpy())[0]
    visualize_points(image[None, ...],
                     corner_target.detach().cpu().numpy(),
                     pts_pred=corner_pred[None, :, :])
    visualize_bounding_box(image[None, ...], bb8_2d_pred[None, None, ...],
                           bb8_2d_gt[None, None, ...])
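
All three examples treat the estimated pose as a 3x4 [R|t] matrix and obtain 2D points by projecting homogeneous 3D points through K @ pose (see the project calls above). Below is a minimal numpy sketch of that projection, using a made-up pose and the LINEMOD-style intrinsics from the demo; project_points is an illustrative name, not the repository's project function.

import numpy as np

camera_matrix = np.array([[572.4114, 0., 325.2611],
                          [0., 573.57043, 242.04899],
                          [0., 0., 1.]])
pose = np.hstack([np.eye(3), [[0.0], [0.0], [1.0]]])  # hypothetical [R|t], object 1 unit in front of the camera


def project_points(points_3d, proj_matrix):
    """Project (n,3) 3D points with a (3,4) matrix K @ [R|t] and dehomogenise."""
    pts_h = np.hstack([points_3d, np.ones((points_3d.shape[0], 1))])
    pts_2d = pts_h @ proj_matrix.T
    return pts_2d[:, :2] / pts_2d[:, 2:]


# The object origin projects to the principal point (325.26, 242.05).
print(project_points(np.array([[0.0, 0.0, 0.0]]), camera_matrix @ pose))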