Example #1
def test_axis_angle_from_matrix2(self, q):
    m = quat2mat(q)
    axis_reference, angle_reference = mat2axangle(m)
    assume(angle_reference < np.pi - 0.001)
    assume(angle_reference > -np.pi + 0.001)
    axis, angle = spw.diffable_axis_angle_from_matrix_stable(m)
    self.assertGreaterEqual(angle, -1.e-10)
    my_m = spw.to_numpy(angle_axis2mat(angle, axis))
    angle_diff = mat2axangle(m.T.dot(my_m))[1]
    self.assertAlmostEqual(angle_diff, 0.0, places=6)
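
For reference, the round trip this test exercises can be reproduced with transforms3d alone (the spw/quat2mat/angle_axis2mat helpers above are project-specific and not assumed here):

import numpy as np
from transforms3d.axangles import axangle2mat, mat2axangle

R = axangle2mat([0.0, 0.0, 1.0], 0.3)  # known rotation: 0.3 rad about z
axis, angle = mat2axangle(R)           # recover the axis-angle pair
assert np.allclose(axangle2mat(axis, angle), R)  # round trip holds to float precision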
Example #2
def parse_rots_by_len(DATA_ROOT, length):

    all_quats = np.zeros(shape=(length, 4), dtype=np.float32)
    all_rots = np.zeros(shape=(length, 3, 3), dtype=np.float32)
    all_axang = np.zeros(shape=(length, 3))
    print(all_rots.shape)
    for i in range(length):
        #file_name ='./driller/data/rot' + str(i) + '.rot'
        #file_name = '\\driller\\data\\rot' + str(i) + '.rot'
        p0 = os.path.abspath(DATA_ROOT)
        file_name = os.path.join(p0, 'driller', 'driller', 'data',
                                 'rot' + str(i) + '.rot')
        # print(file_name)

        p = Path(file_name)

        with p.open() as f:
            j = 0
            for line in f:
                line = line.split()

                if len(line) == 2:
                    # two tokens: presumably a size-header line (e.g. "3 3"); skip it
                    continue
                else:
                    all_rots[i, j, :] = line  # three floats: one row of the rotation matrix
                    j = j + 1

        all_quats[i, :] = mat2quat(all_rots[i, :, :])
        direc, angle = mat2axangle(all_rots[i, :, :])
        all_axang[i, :] = direc * angle

    return all_rots, all_quats, all_axang
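
A hedged usage sketch: the two-token skip above suggests each .rot file begins with a size-header line before the three matrix rows; that layout is inferred from the code, not documented here, and the path below is hypothetical.

rots, quats, axangs = parse_rots_by_len('/path/to/dataset', length=10)
print(rots.shape, quats.shape, axangs.shape)  # (10, 3, 3) (10, 4) (10, 3)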
Example #3
def test_mat2axangle_thresh():
    # Test precision threshold to mat2axangle
    axis, angle = mat2axangle(np.eye(3))
    assert_almost_equal(axis, [0, 0, 1])
    assert_almost_equal(angle, 0)
    offset = 1e-6
    mat = np.diag([1 + offset] * 3)
    axis, angle = mat2axangle(mat)
    assert_almost_equal(axis, [0, 0, 1])
    assert_almost_equal(angle, 0)
    offset = 1e-4
    mat = np.diag([1 + offset] * 3)
    assert_raises(ValueError, mat2axangle, mat)
    axis, angle = mat2axangle(mat, 1e-4)
    assert_almost_equal(axis, [0, 0, 1])
    assert_almost_equal(angle, 0)
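
The bare 1e-4 passed above is mat2axangle's unit_thresh parameter: how far the matrix may drift from a proper rotation before a ValueError is raised (the default is 1e-5). A minimal sketch of loosening it for a slightly denormalized matrix:

import numpy as np
from transforms3d.axangles import mat2axangle

noisy = np.diag([1 + 1e-4] * 3)  # not quite orthonormal
axis, angle = mat2axangle(noisy, unit_thresh=1e-4)  # the default 1e-5 would raise ValueError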
Example #4
def test_angle_axis_imps():
    for x, y, z in euler_tuples:
        M = ttb.euler2mat(z, y, x)
        q = tq.mat2quat(M)
        vec, theta = tq.quat2axangle(q)
        M1 = axangle2mat(vec, theta)
        M2 = axangle2aff(vec, theta)[:3, :3]
        assert_array_almost_equal(M1, M2)
        v1, t1 = mat2axangle(M1)
        M3 = axangle2mat(v1, t1)
        assert_array_almost_equal(M, M3)
        A = np.eye(4)
        A[:3, :3] = M
        v2, t2, point = aff2axangle(A)
        M4 = axangle2mat(v2, t2)
        assert_array_almost_equal(M, M4)
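
Unlike mat2axangle, the affine variant aff2axangle returns a third element: a point on the rotation axis, which matters once the 4x4 transform includes translation. A minimal sketch:

import numpy as np
from transforms3d.axangles import axangle2aff, aff2axangle

A = axangle2aff([0.0, 1.0, 0.0], 0.5, point=[1.0, 2.0, 3.0])  # rotation about an offset axis
axis, angle, point = aff2axangle(A)  # point lies on that axis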
Example #5
    est_pose = np.random.rand(3, 4)
    est_pose[:, :3] = est_rot
    gt_pose = np.random.rand(3, 4)
    gt_pose[:, :3] = gt_rot
    rd_ori = re(est_rot, gt_rot)

    t = time.perf_counter()
    for i in range(3000):
        closest_rot = get_closest_rot(est_rot, gt_rot, sym_info)
    print(("calculate closest rot {}s".format((time.perf_counter() - t) / 3000)))
    closest_pose = np.copy(gt_pose)
    closest_pose[:, :3] = closest_rot

    rd_closest = re(est_rot, closest_pose[:, :3])
    print(("rot_est: {}, rot_gt: {}, closest rot_gt: {}".format(
        mat2axangle(est_rot), mat2axangle(gt_rot), mat2axangle(closest_rot))))
    print(("original rot dist: {}, closest rot dist: {}".format(rd_ori, rd_closest)))

    est_img, _ = renderer.render(obj_id, est_rot, trans)
    gt_img, _ = renderer.render(obj_id, gt_rot, trans)
    closest_img, _ = renderer.render(obj_id, closest_rot, trans)
    show_imgs = [est_img[:, :, [2, 1, 0]], gt_img[:, :, [2, 1, 0]], closest_img[:, :, [2, 1, 0]]]
    show_titles = ["est", "gt_ori", "gt_closest"]
    grid_show(show_imgs, show_titles, row=1, col=3)

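
The re helper above is this repo's rotation-error metric; a common definition (an assumption here, not shown in the snippet) is the geodesic angle of the relative rotation, which mat2axangle extracts directly:

import numpy as np
from transforms3d.axangles import mat2axangle

def rot_error_deg(R_est, R_gt):
    # angle of the relative rotation R_est @ R_gt.T, reported in degrees
    _, angle = mat2axangle(R_est @ R_gt.T, unit_thresh=1e-2)
    return np.rad2deg(abs(angle))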
Example #6
def main():
	global n_iter
	args = parser.parse_args()

	# Data loading code
	normalize = custom_transforms.Normalize(mean=[0.5, 0.5, 0.5],
	                                        std=[0.5, 0.5, 0.5])
	train_transform = custom_transforms.Compose([
		# custom_transforms.RandomScaleCrop(),
		custom_transforms.ArrayToTensor(),
		normalize
	])

	print("=> fetching scenes in '{}'".format(args.data))
	train_set = SequenceFolder(
		args.data,
		transform=train_transform,
		seed=args.seed,
		ttype=args.ttype,
		add_geo=args.geo,
		depth_source=args.depth_init,
		sequence_length=args.sequence_length,
		gt_source='g',
		std=args.std_tr,
		pose_init=args.pose_init,
		dataset="",
		get_path=True
	)

	print('{} samples found in {} train scenes'.format(len(train_set), len(train_set.scenes)))
	val_loader = torch.utils.data.DataLoader(
		train_set, batch_size=args.batch_size, shuffle=False,
		num_workers=args.workers, pin_memory=True)

	# create model
	print("=> creating model")
	pose_net = PoseNet(args.nlabel, args.std_tr, args.std_rot, add_geo_cost=args.geo, depth_augment=False).cuda()

	if args.pretrained_dps:
		# freeze feature extra layers
		# for param in pose_net.feature_extraction.parameters():
		#     param.requires_grad = False

		print("=> using pre-trained weights for DPSNet")
		model_dict = pose_net.state_dict()
		weights = torch.load(args.pretrained_dps)['state_dict']
		pretrained_dict = {k: v for k, v in weights.items() if
		                   k in model_dict and weights[k].shape == model_dict[k].shape}

		model_dict.update(pretrained_dict)

		pose_net.load_state_dict(model_dict)

	else:
		pose_net.init_weights()

	cudnn.benchmark = True
	pose_net = torch.nn.DataParallel(pose_net)

	data_time = AverageMeter()

	pose_net.eval()
	end = time.time()

	errors = np.zeros((2, 2, int(np.ceil(len(val_loader)))), np.float32)
	with torch.no_grad():
		for i, (tgt_img, ref_imgs, ref_poses, intrinsics, intrinsics_inv, tgt_depth, ref_depths,
		        ref_noise_poses, initial_pose, tgt_path, ref_paths) in enumerate(val_loader):

			data_time.update(time.time() - end)
			tgt_img_var = Variable(tgt_img.cuda())
			ref_imgs_var = [Variable(img.cuda()) for img in ref_imgs]
			ref_poses_var = [Variable(pose.cuda()) for pose in ref_poses]
			ref_noise_poses_var = [Variable(pose.cuda()) for pose in ref_noise_poses]
			initial_pose_var = Variable(initial_pose.cuda())

			ref_depths_var = [Variable(dep.cuda()) for dep in ref_depths]
			intrinsics_var = Variable(intrinsics.cuda())
			intrinsics_inv_var = Variable(intrinsics_inv.cuda())
			tgt_depth_var = Variable(tgt_depth.cuda())

			pose = torch.cat(ref_poses_var, 1)

			noise_pose = torch.cat(ref_noise_poses_var, 1)

			pose_norm = torch.norm(noise_pose[:, :, :3, 3], dim=-1, keepdim=True)  # b * n* 1

			p_angle, p_trans, rot_c, trans_c = pose_net(tgt_img_var, ref_imgs_var, initial_pose_var, noise_pose,
			                                            intrinsics_var,
			                                            intrinsics_inv_var,
			                                            tgt_depth_var,
			                                            ref_depths_var, trans_norm=pose_norm)

			batch_size = p_angle.shape[0]
			p_angle_v = torch.sum(F.softmax(p_angle, dim=1).view(batch_size, -1, 1) * rot_c, dim=1)
			p_trans_v = torch.sum(F.softmax(p_trans, dim=1).view(batch_size, -1, 1) * trans_c, dim=1)
			p_matrix = Variable(torch.zeros((batch_size, 4, 4)).float()).cuda()
			p_matrix[:, 3, 3] = 1
			p_matrix[:, :3, :] = torch.cat([angle2matrix(p_angle_v), p_trans_v.unsqueeze(-1)], dim=-1)  # 2*3*4

			p_rel_pose = torch.ones_like(noise_pose)
			for bat in range(batch_size):
				path = tgt_path[bat]
				dirname = Path.dirname(path)

				orig_poses = np.genfromtxt(Path.join(dirname, args.pose_init + "_poses.txt"))
				for j in range(len(ref_imgs)):
					p_rel_pose[:, j] = torch.matmul(noise_pose[:, j], inv(p_matrix))

					seq_num = int(Path.basename(ref_paths[bat][j])[:-4])
					orig_poses[seq_num] = p_rel_pose[bat, j, :3, :].data.cpu().numpy().reshape(12, )

					p_aa = mat2axangle(p_rel_pose[bat, j, :3, :3].data.cpu().numpy())
					gt_aa = mat2axangle(pose[bat, j, :3, :3].data.cpu().numpy(), unit_thresh=1e-2)

					n_aa = mat2axangle(noise_pose[bat, j, :3, :3].data.cpu().numpy(), unit_thresh=1e-2)
					p_t = p_rel_pose[bat, j, :3, 3].data.cpu().numpy()
					gt_t = pose[bat, j, :3, 3].data.cpu().numpy()
					n_t = noise_pose[bat, j, :3, 3].data.cpu().numpy()
					p_aa = p_aa[0] * p_aa[1]
					n_aa = n_aa[0] * n_aa[1]
					gt_aa = gt_aa[0] * gt_aa[1]
					error = compute_motion_errors(np.concatenate([n_aa, n_t]), np.concatenate([gt_aa, gt_t]), True)
					error_p = compute_motion_errors(np.concatenate([p_aa, p_t]), np.concatenate([gt_aa, gt_t]), True)
					print("%d n r%.6f, t%.6f" % (i, error[0], error[2]))
					print("%d p r%.6f, t%.6f" % (i, error_p[0], error_p[2]))
					errors[0, 0, i] += error[0]
					errors[0, 1, i] += error[2]
					errors[1, 0, i] += error_p[0]
					errors[1, 1, i] += error_p[2]
				errors[:, :, i] /= len(ref_imgs)
				if args.save and not Path.exists(Path.join(dirname, args.save + "_poses.txt")):
					np.savetxt(Path.join(dirname, args.save + "_poses.txt"), orig_poses)

		mean_error = errors.mean(2)
		error_names = ['rot', 'trans']
		print("%s Results : " % args.pose_init)
		print("{:>10}, {:>10}".format(*error_names))
		print("{:10.4f}, {:10.4f}".format(*mean_error[0]))

		print("new Results : ")
		print("{:>10}, {:>10}".format(*error_names))
		print("{:10.4f}, {:10.4f}".format(*mean_error[1]))
Example #7
def SO3log(R):
    """SO(3) log map: rotation matrix -> rotation vector (axis * angle)."""
    ax, a = mat2axangle(R)
    return ax * a
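
The inverse (exp) map goes back through axangle2mat; a round-trip sketch, with SO3exp a hypothetical counterpart to the function above:

import numpy as np
from transforms3d.axangles import axangle2mat

def SO3exp(w):
    # rotation vector -> rotation matrix; the zero vector maps to identity
    theta = np.linalg.norm(w)
    if theta < 1e-12:
        return np.eye(3)
    return axangle2mat(w / theta, theta)

R = axangle2mat([1.0, 0.0, 0.0], 0.7)
assert np.allclose(SO3exp(SO3log(R)), R)  # log/exp round trip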
Example #8
def rotationToExpCoords(R):
    axis, angle = axangles.mat2axangle(R)
    expCoords = axis * angle
    return expCoords
Example #9
def calc_rotation_error_rad(r_mat_1: np.ndarray, r_mat_2: np.ndarray) -> float:
    r_mat_diff = r_mat_2 @ r_mat_1.T
    _, angle = mat2axangle(r_mat_diff)
    return np.abs(angle)
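
Because mat2axangle may return the angle with either sign (paired with the opposite axis), the np.abs keeps the error in [0, pi]. An equivalent trace form for cross-checking (a sketch, assuming proper rotation matrices):

import numpy as np

def calc_rotation_error_rad_trace(r_mat_1, r_mat_2):
    # same geodesic angle via trace(R) = 1 + 2*cos(theta)
    cos_theta = (np.trace(r_mat_2 @ r_mat_1.T) - 1.0) / 2.0
    return float(np.arccos(np.clip(cos_theta, -1.0, 1.0)))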
Example #10
def mat2rvec(mat):
    """Convert rotation matrix to rotation vector."""
    axis, angle = mat2axangle(mat, unit_thresh=1e-05)
    return axis * angle


def compute_calibration_parameters(setup):
    calibration_data_robot = np.loadtxt(open("applications/calibration/robotTableCalibrationData" + setup + ".csv", "rb"),
                                        delimiter=",", skiprows=1)
    points_on_table_robot = np.loadtxt(open("applications/calibration/pointsOnTableRobot" + setup + "_Robotiq.csv", "rb"), delimiter=",",
                                       skiprows=0)

    posRobot = np.empty((3, calibration_data_robot.shape[0]))

    # print posRobot

    # Coordinate transformation of calibration tool
    # (here assumed to only be translation)
    # T_TCP_Tool = [0, 0, 0.012]
    if setup == "A":
        #rot_tool = TransformableFrame([0, 0, 0, 0.00310609, -0.00032931, 0.00941169])
        #rot_tool = TransformableFrame([0, 0, 0, 0.0033114369*2, -0.000386604156*2, 0.0093046936*2]) * TransformableFrame([0, 0, 0, -0.0075, 0, 0])
        rot_tool = TransformableFrame([0, 0, 0, -0.00245736, -0.0006192, 0.01119357])
        T_TCP_Tool = [0, 0, 0.197]  # For Robot A
        T_TCP_Tool = [0, 0, 0.215]  # For Robot A
        T_TCP_Tool = [0, 0, 0.1801]  # For Robot A
        T_TCP_Tool = rot_tool.inv() * TransformableFrame([0, 0, 0.1796, 0, 0, 0])
    elif setup == "B":
        #T_TCP_Tool = TransformableFrame([0.1505, -0.0515, 0.0285, 0, np.pi/2, 0]) # For Robot B Screwdriver
        #T_TCP_Tool = TransformableFrame([0, 0, 0.26282, 0, 0, 3.14])  # For Robot B Robotiq setup A
        T_TCP_Tool = TransformableFrame([-0.00022, 0.00063, 0.26128, 0, 0, 3.14])  # For Robot B Robotiq setup B


    #print(T_TCP_Tool)
    #print(rot_tool * T_TCP_Tool)
    #print("T_TCP_Tool Weiss: ", rot_tool.inv() * TransformableFrame([0, 0, 0.197, 0, 0, 0]))

    # start = height of data - 1
    # end = -1, because it stops when this is reached
    # step = -1 to count down
    print("for")
    for i in range(calibration_data_robot.shape[0] - 1, -1, -1):
        # print i, calibration_data_robot[i]
        # 0.315091558821 mm and a standard deviation of 0.0771040428194 mm
        posRobot[:, i] = (TransformableFrame(calibration_data_robot[i]) * T_TCP_Tool)[0:3]

    T_World_Robot = findTransformation(posRobot, np.transpose(points_on_table_robot[:, 0:3]))

    # print T_World_Robot

    # Verify transformation
    pWorld = np.transpose(
        np.linalg.multi_dot([
            np.eye(3, 4),
            T_World_Robot,
            np.concatenate((posRobot, np.ones((1, calibration_data_robot.shape[0]))), axis=0)
        ])
    )

    eEst = np.zeros(pWorld.shape[0])
    eRotEst = np.zeros(pWorld.shape[0])
    avgRot = np.zeros(3)

    for i in range(calibration_data_robot.shape[0] - 1, -1, -1):
        out = axangles.mat2axangle(T_World_Robot[0:3, 0:3])  # rotm2axang(T_World_Robot[0:3, 0:3])
        T_World_Robot_pose = np.append(T_World_Robot[0:3, 3], out[0] * out[1]).tolist()
        T_World_TCP_ref = TransformableFrame(points_on_table_robot[i].tolist())
        T_World_TCP_act = TransformableFrame(T_World_Robot_pose) * TransformableFrame(calibration_data_robot[i].tolist()) * T_TCP_Tool
        T_ref_act = T_World_TCP_ref.inv()*T_World_TCP_act
        eEst[i] = np.linalg.norm(T_ref_act[0:3])
        print(T_ref_act[0:3])
        eRotEst[i] = np.linalg.norm(T_ref_act[3:6])
        avgRot += T_ref_act[3:6]

    avgRot /= calibration_data_robot.shape[0]

    #for i in range(pWorld.shape[0] - 1, -1, -1):
    #    eEst[i] = np.linalg.norm(pWorld[i, :] - points_on_table_robot[i, 0:3])

    print("The calibration of the World to Robot has a mean error of", np.mean(eEst) * 1e3, \
          "mm and a standard deviation of", np.std(eEst) * 1e3, "mm")
    print("The calibration of the World to Robot has a rotational mean error of", np.mean(eRotEst), \
          "rad and a standard deviation of", np.std(eRotEst), "rad")
    print("Average rotation: ", avgRot)

    # Convert Transformation to axis angle coordinates
    out = axangles.mat2axangle(T_World_Robot[0:3, 0:3])  # rotm2axang(T_World_Robot[0:3, 0:3])
    # out = np.concatenate(out[0], out[1])
    out4 = np.append(out[0], out[1])

    print("\nT_World_Robot")
    print(T_World_Robot)

    print("\nT_World_Robot in axis angle coordinates")
    print(out4)

    print("\nT_World_Robot position")
    print(T_World_Robot[0:3, 3])

    print("")
    print("T_World_Robot pose")
    print(np.append(T_World_Robot[0:3, 3], out[0] * out[1]).tolist())

    return out
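
A quick self-check of mat2rvec from the top of this example (a sketch using transforms3d only):

import numpy as np
from transforms3d.axangles import axangle2mat

R = axangle2mat([0.0, 0.0, 1.0], np.pi / 4)
assert np.allclose(mat2rvec(R), [0.0, 0.0, np.pi / 4])  # axis * angle packing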