Example #1
def demo():
    net = Resnet18_8s(ver_dim=vote_num * 2, seg_dim=2)
    net = NetWrapper(net).cuda()
    net = DataParallel(net)

    optimizer = optim.Adam(net.parameters(), lr=train_cfg['lr'])
    model_dir = os.path.join(cfg.MODEL_DIR, "switch_linemod_train")
    load_model(net.module.net, optimizer, model_dir, -1)

    image, points_3d, bb8_3d = read_data()
    image = image[None, ...]
    seg_pred, vertex_pred = net(image)

    # visualize_mask(mask)
    # visualize_vertex(vertex, vertex_weights)
    # visualize_hypothesis(image, seg_pred, vertex_pred, corner_target)
    # visualize_voting_ellipse(image, seg_pred, vertex_pred, corner_target)

    eval_net = DataParallel(EvalWrapper().cuda())
    corner_pred = eval_net(seg_pred, vertex_pred).cpu().detach().numpy()[0]
    camera_matrix = np.array([[572.4114, 0., 325.2611],
                              [0., 573.57043, 242.04899], [0., 0., 1.]])
    pose_pred = pnp(points_3d, corner_pred, camera_matrix)

    projector = Projector()
    bb8_2d_pred = projector.project(bb8_3d, pose_pred, 'linemod')
    print(bb8_2d_pred)
    image = imagenet_to_uint8(image.detach().cpu().numpy())[0]
    visualize_bounding_box(image[None, ...], bb8_2d_pred[None, None, ...])
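Throughout these examples, pnp(points_3d, corner_pred, camera_matrix) recovers the 6-DoF pose from the 2D/3D keypoint correspondences. A minimal sketch of such a helper, assuming it wraps OpenCV's solvePnP (the project's own implementation may differ in solver choice and refinement):

import cv2
import numpy as np

def pnp_sketch(points_3d, points_2d, camera_matrix):
    # Solve the Perspective-n-Point problem; lens distortion is assumed to be zero here.
    dist_coeffs = np.zeros((4, 1))
    _, rvec, tvec = cv2.solvePnP(points_3d.astype(np.float64),
                                 points_2d.astype(np.float64),
                                 camera_matrix, dist_coeffs,
                                 flags=cv2.SOLVEPNP_EPNP)
    rot, _ = cv2.Rodrigues(rvec)                # rotation vector -> 3x3 matrix
    return np.concatenate([rot, tvec], axis=1)  # 3x4 pose matrix [R | t]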
Example #2
def demo():
    net = Resnet18_8s(ver_dim=vote_num * 2, seg_dim=2)
    net = NetWrapper(net).cuda()
    net = DataParallel(net)

    optimizer = optim.Adam(net.parameters(), lr=train_cfg['lr'])
    model_dir = os.path.join(cfg.MODEL_DIR, "cat_demo")
    load_model(net.module.net, optimizer, model_dir, args.load_epoch)
    data, points_3d, bb8_3d = read_data()
    image, mask, vertex, vertex_weights, pose, corner_target = [
        d.unsqueeze(0).cuda() for d in data
    ]
    seg_pred, vertex_pred, loss_seg, loss_vertex, precision, recall = net(
        image, mask, vertex, vertex_weights)

    eval_net = DataParallel(EvalWrapper().cuda())
    corner_pred = eval_net(seg_pred, vertex_pred).cpu().detach().numpy()[0]
    camera_matrix = np.array([[572.4114, 0., 325.2611],
                              [0., 573.57043, 242.04899], [0., 0., 1.]])
    pose_pred = pnp(points_3d, corner_pred, camera_matrix)

    projector = Projector()
    bb8_2d_pred = projector.project(bb8_3d, pose_pred, 'linemod')
    bb8_2d_gt = projector.project(bb8_3d, pose[0].detach().cpu().numpy(),
                                  'linemod')
    image = imagenet_to_uint8(image.detach().cpu().numpy())[0]
    visualize_bounding_box(image[None, ...], bb8_2d_pred[None, None, ...],
                           bb8_2d_gt[None, None, ...])
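Projector.project applies the 3x4 pose and the dataset's intrinsics to the 3D box corners. A minimal numpy sketch of that projection, assuming the 'linemod' argument simply selects the LINEMOD camera matrix shown above:

import numpy as np

def project_sketch(pts_3d, pose, K):
    # Rigid transform into the camera frame, then pinhole projection with K.
    pts_cam = pts_3d @ pose[:, :3].T + pose[:, 3]
    pts_img = pts_cam @ K.T
    return pts_img[:, :2] / pts_img[:, 2:]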
Example #3
def demo():
    net = Resnet18_8s(ver_dim=vote_num * 2, seg_dim=2)
    net = NetWrapper(net).cuda()
    net = DataParallel(net)

    optimizer = optim.Adam(net.parameters(), lr=train_cfg['lr'])
    model_dir = os.path.join(cfg.MODEL_DIR, "cat_linemod_train")  #cat_demo

    load_model(net.module.net, optimizer, model_dir, args.load_epoch)

    data, points_3d, bb8_3d = read_data()

    image, mask, vertex, vertex_weights, pose, corner_target = [
        d.unsqueeze(0).cuda() for d in data
    ]

    seg_pred, vertex_pred, loss_seg, loss_vertex, precision, recall = net(
        image, mask, vertex, vertex_weights)

    eval_net = DataParallel(EvalWrapper().cuda())

    # Vector-direction map and semantic segmentation map, then RANSAC computes the
    # keypoints; once the vector-direction map is accurate, the keypoints are accurate too.
    corner_pred = eval_net(seg_pred, vertex_pred).cpu().detach().numpy()[0]

    camera_matrix = np.array([[572.4114, 0., 325.2611],
                              [0., 573.57043, 242.04899], [0., 0., 1.]])

    pose_pred = pnp(points_3d, corner_pred, camera_matrix)

    projector = Projector()
    bb8_2d_pred = projector.project(bb8_3d, pose_pred, 'linemod')

    bb8_2d_gt = projector.project(bb8_3d, pose[0].detach().cpu().numpy(),
                                  'linemod')

    image = imagenet_to_uint8(image.detach().cpu().numpy())[0]

    print("loss_seg:{} , loss_vertex:{} , precision:{},recall:{},  ".format(
        loss_seg, loss_vertex, precision, recall))
    #399.pth
    #loss_seg:tensor([0.0015], device='cuda:0', grad_fn=<MeanBackward0>) , loss_vertex:tensor([0.0016], device='cuda:0', grad_fn=<DivBackward1>) ,
    #precision:tensor([0.9434], device='cuda:0'),recall:tensor([0.9677], device='cuda:0'),
    #199.pth
    # loss_seg:tensor([0.0015], device='cuda:0', grad_fn=<MeanBackward0>) , loss_vertex:tensor([0.0016], device='cuda:0', grad_fn=<DivBackward1>) ,
    # precision:tensor([0.9583], device='cuda:0'),recall:tensor([0.9524], device='cuda:0'),
    error = np.abs(bb8_2d_pred - bb8_2d_gt)
    err = error.reshape(-1)

    print("reproject sum_error:{} ".format(np.sum(err)))
    ## 199  reproject sum_error:13.385891544820552
    ## 399  reproject sum_error:12.718721049803733

    ## Comparing the checkpoints, there is an improvement; by the precision/recall
    ## definitions, precision goes down while recall goes up.
    visualize_bounding_box(image[None, ...], bb8_2d_pred[None, None, ...],
                           bb8_2d_gt[None, None, ...])
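For scale, the two summed reprojection errors recorded above cover the eight projected box corners, two image coordinates each (16 values), so the per-coordinate mean error is roughly:

print(13.385891544820552 / 16)  # ~0.84 px with 199.pth
print(12.718721049803733 / 16)  # ~0.79 px with 399.pth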
Example #4
def demo():
    net = Resnet18_8s(ver_dim=vote_num * 2, seg_dim=2)
    net = NetWrapper(net).cuda()
    net = DataParallel(net)

    optimizer = optim.Adam(net.parameters(), lr=train_cfg['lr'])
    model_dir = os.path.join(cfg.MODEL_DIR, "cat_demo")
    load_model(net.module.net, optimizer, model_dir, -1)
    data, points_3d, bb8_3d = read_data()
    #print("BB8_3D: ",bb8_3d)
    image, mask, vertex, vertex_weights, pose, corner_target = [
        d.unsqueeze(0).cuda() for d in data
    ]
    seg_pred, vertex_pred, loss_seg, loss_vertex, precision, recall = net(
        image, mask, vertex, vertex_weights)
    seg_mask = torch.argmax(seg_pred, 1)

    print("seg_mask", seg_mask, type(seg_mask), seg_mask.shape, seg_mask[0])

    visualize_mask(seg_mask)
    visualize_mask(mask)
    #visualize_vertex(vertex, vertex_weights)
    #visualize_hypothesis(image, seg_pred, vertex_pred, corner_target)
    visualize_voting_ellipse(image, seg_pred, vertex_pred, corner_target)

    eval_net = DataParallel(EvalWrapper().cuda())
    corner_pred = eval_net(seg_pred, vertex_pred).cpu().detach().numpy()[0]
    print("Corner Predictions: ", corner_pred)

    camera_matrix = np.array([[572.4114, 0., 325.2611],
                              [0., 573.57043, 242.04899], [0., 0., 1.]])
    pose_pred = pnp(points_3d, corner_pred, camera_matrix)

    projector = Projector()
    bb8_2d_pred = projector.project(bb8_3d, pose_pred, 'linemod')
    print("Pose prediction :\n", pose_pred)
    print("GT pose: \n", pose[0].detach().cpu().numpy())

    bb8_2d_gt = projector.project(bb8_3d, pose[0].detach().cpu().numpy(),
                                  'linemod')
    print(bb8_2d_gt)

    image = imagenet_to_uint8(image.detach().cpu().numpy())[0]
    visualize_bounding_box(image[None, ...], bb8_2d_pred[None, None, ...],
                           bb8_2d_gt[None, None, ...])
Example #5
	def __update_ax__(self, ax, index):
		index = index % self.len

		# Clear axes
		ax.clear()
		ax.set_title("Viewpoint {}".format(index))

		# Load image
		imgPath = os.path.join(self.imgDir, str(index+1).zfill(self.formats['rgbNLeadingZeros'])+'.'+self.formats['rgbFormat'])
		img = self.read_img_np(imgPath)
		height, width, _ = img.shape
		ax.imshow(img)

		# Scatter points and project poses
		x = self.points[index]
		if x is not None:
			nInstances = x.shape[0]
			colorCycler = pltColorCycler()
			for iInstance in range(nInstances):
				color = next(colorCycler)
				ax.scatter(x[iInstance,:,0], x[iInstance,:,1], c=color, edgecolors='black')
				covar = self.covariance[index][iInstance]
				if covar is not None:
					print(covar)
					weights = covar_to_weight(covar)
					pose = uncertainty_pnp(x[iInstance], weights, self.keypoints3D, self.K)
					poseBajs = pnp(self.keypoints3D, x[iInstance], self.K)
					print(pose)
					bbProj = project(self.bbCorners.T, self.K@pose)
					bbProjBajs = project(self.bbCorners.T, self.K@poseBajs)
					#ax.scatter(bbProj[:,0], bbProj[:,1], marker='*', c=color)
					ax.plot(bbProj[self.cornerOrder, 0], bbProj[self.cornerOrder, 1], c=color)
					ax.plot(bbProjBajs[self.cornerOrder, 0], bbProjBajs[self.cornerOrder, 1], c='brown')
					for iKeypoint in range(covar.shape[0]):
						thisCov = covar[iKeypoint]
						(eigVal, eigVec) = np.linalg.eig(thisCov)
						for iDir in range(2):
							ax.arrow(x[iInstance,iKeypoint,0], x[iInstance,iKeypoint,1], np.sqrt(eigVal[iDir])*eigVec[0,iDir], np.sqrt(eigVal[iDir])*eigVec[1,iDir])

		ax.set_xlim(0, width)
		ax.set_ylim(height, 0)
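In Example #5, covar_to_weight turns each keypoint's 2x2 covariance into the weight format expected by uncertainty_pnp. A hedged numpy sketch, assuming the three weight columns are the packed upper triangle [w00, w01, w11] of the inverse covariance (the same layout wgt2d uses in the next example); the real helper may normalize or regularize differently:

import numpy as np

def covar_to_weight_sketch(covar):
    # covar: (nKeypoints, 2, 2) covariance per 2D keypoint.
    weights = np.zeros((covar.shape[0], 3), dtype=np.float32)
    for i, cov in enumerate(covar):
        info = np.linalg.inv(cov)  # information matrix: large spread -> small weight
        weights[i] = (info[0, 0], info[0, 1], info[1, 1])
    return weights / weights.max()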
Example #6
        rgb, mask, vertex, vertex_weight, pose, gt_corners = data
        pts2d=gt_corners[0].numpy()[:,:2].astype(np.float32)

        pts3d=modeldb.get_extreme_3d('duck')
        pts3d=np.concatenate([pts3d,modeldb.get_centers_3d('duck')[None,:]],0).astype(np.float32)
        wgt2d=np.zeros([pts2d.shape[0],3]).astype(np.float32)
        wgt2d[:,(0,2)]=1.0

        for k in range(pts2d.shape[0]):
            if np.random.random()<0.5:
                scale = np.random.uniform(1, 8)
            else:
                scale = np.random.uniform(32, 48)
            pts2d[k]+=np.random.normal(0,scale,2)
            wgt2d[k,(0,2)]=1/scale
        wgt2d/=wgt2d.max()

        pose_pred=uncertainty_pnp(pts2d,wgt2d,pts3d,camera_matrix)
        pose_pred2=pnp(pts3d,pts2d,camera_matrix)

        pts2d1,_=pts_to_img_pts(pts3d,pose_pred[:,:3],pose_pred[:,3],camera_matrix)
        pts2d2,_=pts_to_img_pts(pts3d,pose_pred2[:,:3],pose_pred2[:,3],camera_matrix)

        residual1=np.mean(np.abs(pts2d1-pts2d))
        residual2=np.mean(np.abs(pts2d2-pts2d))

        print(residual1,residual2)
        pose=pose.numpy()
        print(np.mean(np.abs(pose-pose_pred)))
        print(np.mean(np.abs(pose-pose_pred2)))
Example #7
File: demo.py Project: leeshd/pvnet
    seg_pred, vertex_pred, loss_seg, loss_vertex, precision, recall = net(image, mask, vertex, vertex_weights)
    eval_net = DataParallel(EvalWrapper().cuda())
    corner_pred = eval_net(seg_pred, vertex_pred).cpu().detach().numpy()[0]
    camera_matrix = np.array([[572.4114, 0., 325.2611],
                              [0., 573.57043, 242.04899],
                              [0., 0., 1.]])
    pose_pred = pnp(points_3d, corner_pred, camera_matrix)

    projector = Projector()
    bb8_2d_pred = projector.project(bb8_3d, pose_pred, 'linemod')
    bb8_2d_gt = projector.project(bb8_3d, pose[0].detach().cpu().numpy(), 'linemod')
    image = imagenet_to_uint8(image.detach().cpu().numpy())[0]
    visualize_bounding_box(image[None, ...], bb8_2d_pred[None, None, ...], bb8_2d_gt[None, None, ...])


if __name__ == "__main__":
    demo()
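All of these demos restore weights with load_model(net.module.net, optimizer, model_dir, epoch), where epoch is -1 for the latest checkpoint or an explicit epoch such as the 199.pth / 399.pth files mentioned above. A minimal sketch of such a loader, assuming <epoch>.pth checkpoint files holding the network and optimizer state (the state-dict key names here are an assumption):

import os
import torch

def load_model_sketch(net, optimizer, model_dir, epoch=-1):
    # Collect the numeric checkpoint epochs present in model_dir.
    epochs = sorted(int(f.split('.')[0]) for f in os.listdir(model_dir)
                    if f.endswith('.pth'))
    if not epochs:
        return 0                              # nothing to restore
    epoch = epochs[-1] if epoch == -1 else epoch
    state = torch.load(os.path.join(model_dir, '{}.pth'.format(epoch)))
    net.load_state_dict(state['net'])         # assumed key names
    optimizer.load_state_dict(state['optim'])
    return epoch + 1                          # epoch to resume from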
Example #8
def demo(idx):
    net = Resnet18_8s(ver_dim=vote_num * 2, seg_dim=2)
    net = NetWrapper(net).cuda()
    net = DataParallel(net)

    optimizer = optim.Adam(net.parameters(), lr=train_cfg['lr'])
    model_dir = os.path.join(cfg.MODEL_DIR, "intake_demo")
    load_model(net.module.net, optimizer, model_dir, -1)
    data, points_3d, bb8_3d = read_data(idx)
    #print("BB8_3D: ",bb8_3d)
    image, mask, vertex, vertex_weights, pose, corner_target = [
        d.unsqueeze(0).cuda() for d in data
    ]
    seg_pred, vertex_pred, loss_seg, loss_vertex, precision, recall = net(
        image, mask, vertex, vertex_weights)
    seg_mask = torch.argmax(seg_pred, 1)

    visualize_mask(seg_mask)
    visualize_mask(mask)
    #visualize_vertex(vertex, vertex_weights)
    #visualize_hypothesis(image, seg_pred, vertex_pred, corner_target)
    #visualize_voting_ellipse(image, seg_pred, vertex_pred, corner_target)
    #############
    eval_net = DataParallel(EvalWrapper().cuda())
    uncertain_eval_net = DataParallel(UncertaintyEvalWrapper().cuda())
    corner_pred = eval_net(seg_pred, vertex_pred).cpu().detach().numpy()[0]
    net.eval()
    loss_seg, loss_vertex, precision, recall = [
        torch.mean(val) for val in (loss_seg, loss_vertex, precision, recall)
    ]
    print("LOSS SEG :", loss_seg, "\nLOSS VERTEX : ", loss_vertex,
          "\nPRECISION :", precision, '\nRECALL :', recall)

    ###############
    #print("Corner Predictions: ",corner_pred)
    camera_matrix = np.array([[700, 0., 320.], [0., 700, 240.], [0., 0., 1.]])
    pose_pred = pnp(points_3d, corner_pred, camera_matrix)
    projector = Projector()
    print("Pose prediction :\n", pose_pred)
    pose_gt = pose[0].detach().cpu().numpy()
    print("GT Pose :\n", pose[0].detach().cpu().numpy())
    # Translation error: Euclidean distance between predicted and GT translations.
    import math as m
    if pose_pred[2][3] < 0:
        print('NB!')  # predicted object lies behind the camera
    s = 0
    for i in range(3):
        s += (pose_pred[i][3] - pose_gt[i][3])**2
    s = m.sqrt(s)
    print("--->",
          loss_seg.detach().cpu().numpy(),
          loss_vertex.detach().cpu().numpy(),
          precision.detach().cpu().numpy(),
          recall.detach().cpu().numpy(), s)
    bb8_2d_pred = projector.project(bb8_3d, pose_pred, 'blender')
    bb8_2d_gt = projector.project(bb8_3d, pose[0].detach().cpu().numpy(),
                                  'blender')
    #print(bb8_2d_gt)

    image = imagenet_to_uint8(image.detach().cpu().numpy())[0]
    visualize_bounding_box(image[None, ...], bb8_2d_pred[None, None, ...],
                           bb8_2d_gt[None, None, ...])
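Example #8 only measures the translation error s; a complementary check (not part of the original demo) is the geodesic rotation error between the predicted and ground-truth rotation blocks of the 3x4 poses:

import math
import numpy as np

def rotation_error_deg(pose_pred, pose_gt):
    # Angle of R_pred^T * R_gt, i.e. how far the predicted rotation is from the GT one.
    r_rel = pose_pred[:, :3].T @ pose_gt[:, :3]
    cos_angle = np.clip((np.trace(r_rel) - 1.0) / 2.0, -1.0, 1.0)
    return math.degrees(math.acos(cos_angle))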