Example #1
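# These examples share a common set of imports. The snippets below assume
# something like the following; bayesplane, plane, fuse, lm, and rtrans are
# modules local to the original project (not on PyPI), and tf here is the
# `transformations` module, not TensorFlow. square_size, tag_radius, mtx, and
# dist are module-level calibration constants assumed by run_experiment.
import math

import numpy as np
import cv2
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection
import transformations as tf

import bayesplane
import plane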
def sample_rgb_plane():
	fx = 529.29
	fy = 531.28
	px = 466.96
	py = 273.26
	I = np.array([fx, 0, px, 0, fy, py, 0, 0, 1]).reshape(3, 3)
	x_r = 0.970358818444
	y_r = 0.105224751742
	z_r = 0.145592452085
	w_r = 0.161661229068
	M = tf.quaternion_matrix([w_r, x_r, y_r, z_r])
	x_t = 0.290623142918
	y_t = -0.0266687975686
	z_t = 1.20030737138
	M[0, 3] = x_t
	M[1, 3] = y_t
	M[2, 3] = z_t
	M_d = np.delete(M, 3, 0)
	# print "Extrinsics"
	# print M # pose extrinsics
	origin = np.array([0, 0, 0, 1])  # homogeneous origin of the tag frame
	C = np.dot(I, M_d)
	coord = np.dot(C, origin)
	x_coord = coord[0] / coord[2]
	y_coord = coord[1] / coord[2]
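	# (x_coord, y_coord) is the tag origin projected through the pinhole
	# model C = K [R|t]; the divide by coord[2] is the homogeneous normalize.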
	x_samples = np.linspace(-0.024, 0.024, num=10)
	y_samples = np.linspace(-0.024, 0.024, num=10)
	sample_points = []
	for i in x_samples:
		for j in y_samples:
			sample_points.append([i,j,0,1])
	sample_points = np.transpose(np.array(sample_points))
	sample_points_viz = np.dot(C, sample_points)
	sample_rgb = np.transpose(np.dot(M_d, sample_points))
	cov = np.asarray([0.9] * sample_rgb.shape[0])
	rgb_plane_est = bayesplane.fit_plane_bayes(sample_rgb, cov)
	return rgb_plane_est, sample_rgb
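# A usage sketch for sample_rgb_plane (names follow this file's conventions):
# the returned estimate exposes a .mean plane and a .cov posterior covariance,
# as used elsewhere in these examples.
def demo_sample_rgb_plane():
    rgb_plane_est, sample_rgb = sample_rgb_plane()
    v = rgb_plane_est.mean.vectorize()  # plane parameters [a, b, c, d]
    print "RGB plane normal:"
    print v[0:3]
    print "RGB plane offset:"
    print v[3]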
def run_experiment(data_index):
    print("----------- Begin Exp:" + str(data_index))
    rgb_image_path = "../data/iros_data2/rgb_frame%04d.png" % data_index
    depth_image_path = "../data/iros_data2/depth_frame%04d.png" % data_index
    info_file_path = "../data/iros_data2/apriltag_info_%04d.txt" % data_index
    rgb_image = cv2.imread(rgb_image_path)
    depth_image = cv2.imread(depth_image_path, cv2.IMREAD_ANYDEPTH)
    info_file = info_file_path

    # Getting Apriltag groundtruth information from the chessboard
    info_array = info_parser(info_file)
    corner1 = [float(info_array[1]), float(info_array[2])]
    corner2 = [float(info_array[4]), float(info_array[5])]
    corner3 = [float(info_array[7]), float(info_array[8])]
    corner4 = [float(info_array[10]), float(info_array[11])]

    px = float(info_array[14])
    py = float(info_array[15])
    pz = float(info_array[16])
    rx = float(info_array[17])
    ry = float(info_array[18])
    rz = float(info_array[19])
    rw = float(info_array[20])

    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
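    # objp: the 4x3 grid of chessboard inner-corner coordinates in the board
    # frame, scaled below by the physical square size (square_size is a
    # module-level constant in the original project).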
    objp = np.zeros((3 * 4, 3), np.float32)
    objp[:, :2] = np.mgrid[0:4, 0:3].T.reshape(-1, 2)
    objp = objp * square_size
    axis = np.float32([[0.08, 0, 0], [0, 0.08, 0], [0, 0, -0.08]]).reshape(-1, 3)

    gray = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (4, 3), None)
    print ret
    if ret:
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        rvecs, tvecs, inliers = cv2.solvePnPRansac(objp, corners2, mtx, dist)
        imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)
        rgb_image = draw(rgb_image, corners2, imgpts)
        board_h = cv2hom(rvecs, tvecs)  # The pose of the board

        aptag_h = np.eye(4)
        aptag_offset = np.array([0.265, -0.003, 0]).reshape(3, 1)  # Board measurements
        aptag_h[0:3, 3] = aptag_offset.reshape(3)
        aptag_h[0:3, 0:3] = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3)  # Orientation offset
        groundtruth_h = np.dot(board_h, aptag_h)  # The pose of the tag measured from the board
        print "Groundtruth H:"
        print groundtruth_h
        gt_rvec, _ = cv2.Rodrigues(groundtruth_h[0:3, 0:3])
        gt_tvec = groundtruth_h[0:3, 3].reshape(3, 1)

        exp_h = tf.quaternion_matrix([rw, rx, ry, rz])
        exp_rvec, _ = cv2.Rodrigues(exp_h[0:3, 0:3])
        exp_tvec = np.array([px, py, pz]).reshape(3)
        exp_h[0:3, 3] = exp_tvec  # The pose of the tag measured from the ROS node
        print "Experimental H:"
        print exp_h

        angle_diff = quatAngleDiff(groundtruth_h[0:3, 0:3], exp_h[0:3, 0:3])
        print angle_diff

        origin_point = np.float32([[0, 0, 0]])
        origin_pt, _ = cv2.projectPoints(origin_point, exp_rvec, exp_tvec, mtx, dist)
        origin_pt2, _ = cv2.projectPoints(origin_point, gt_rvec, gt_tvec, mtx, dist)
        print origin_pt
        print origin_pt2
        x = origin_pt[0][0][0]
        y = origin_pt[0][0][1]
        x2 = origin_pt2[0][0][0]
        y2 = origin_pt2[0][0][1]
        cv2.circle(rgb_image, (int(x), int(y)), 6, (0, 0, 255), -1)    # experimental origin
        cv2.circle(rgb_image, (int(x2), int(y2)), 2, (0, 255, 0), -1)  # groundtruth origin
        cv2.namedWindow('img', 1)
        imgpts2, _ = cv2.projectPoints(axis, exp_rvec, exp_tvec, mtx, dist)
        # rgb_image = draw_origin(rgb_image, origin_pt, imgpts2)
        # cv2.imshow('img', rgb_image)
        # cv2.waitKey(0)
    else:
        print("Cannot localize using Chessboard")
        return False
    #### Experiment
    print corner1
    im_pt1 = corner1
    im_pt2 = corner2
    im_pt3 = corner3
    im_pt4 = corner4

    im_pts = im_pt1 + im_pt2 + im_pt3 + im_pt4
    image_pts = np.array(im_pts).reshape(4, 2)
    ob_pt1 = [-tag_radius, -tag_radius, 0.0]
    ob_pt2 = [tag_radius, -tag_radius, 0.0]
    ob_pt3 = [tag_radius, tag_radius, 0.0]
    ob_pt4 = [-tag_radius, tag_radius, 0.0]
    ob_pts = ob_pt1 + ob_pt2 + ob_pt3 + ob_pt4
    object_pts = np.array(ob_pts).reshape(4, 3)

    retval, cv2rvec, cv2tvec = cv2.solvePnP(object_pts, image_pts, mtx, dist,
                                            flags=cv2.ITERATIVE)

    ################# Baseline ###########################
    print "----------- Basic Test ----------------"
    print("Baseline rvec:")
    print cv2rvec
    print("Baseline tvec:")
    print cv2tvec

    nrvec, ntvec = fuse.solvePnP_RGBD(rgb_image, depth_image, object_pts,
                                      image_pts, mtx, dist, 0)
    nrvec2, ntvec2 = fuse.solvePnP_D(rgb_image, depth_image, object_pts,
                                     image_pts, mtx, dist, 0)

    print("Tvec RGBD:")
    print ntvec

    print("Tvec D:")
    print ntvec2

    print("GroundTruth:")
    print exp_tvec
    test_tvec = exp_tvec.reshape(3, 1)

    rot_difference1 = lm.quatAngleDiff(nrvec, gt_rvec)
    print("RGBD Rotational Difference:")
    print rot_difference1

    trans_difference1 = np.linalg.norm(ntvec - test_tvec)
    print("RGBD Translation Difference:")
    print trans_difference1

    rot_difference2 = lm.quatAngleDiff(nrvec2, gt_rvec)
    print("Depth Rotational Difference:")
    print rot_difference2

    trans_difference2 = np.linalg.norm(ntvec2 - test_tvec)
    print("Depth Translation Difference:")
    print trans_difference2

    return rot_difference1, rot_difference2
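# cv2hom is used above but not shown in this example; a minimal sketch,
# assuming it simply packs an OpenCV rvec/tvec pair into a 4x4 homogeneous
# transform:
def cv2hom(rvec, tvec):
    H = np.eye(4)
    H[0:3, 0:3] = cv2.Rodrigues(rvec)[0]  # rotation vector -> 3x3 matrix
    H[0:3, 3] = np.asarray(tvec).reshape(3)
    return H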
def main(args):
    # Declare Test Variables
    # Camera Intrinsics
    fx = 529.29
    fy = 531.28
    px = 466.96
    py = 273.26
    I = np.array([fx, 0, px, 0, fy, py, 0, 0, 1]).reshape(3, 3)

    x_start = 459
    x_end = 490
    y_start = 288
    y_end = 314
    rgb_image = cv2.imread("../data/rawData/rgb.jpg")
    depth_image = cv2.imread("../data/rawData/depth.jpg", cv2.IMREAD_ANYDEPTH)
    april_tag_rgb = rgb_image[y_start:y_end, x_start:x_end]
    april_tag_depth = depth_image[y_start:y_end, x_start:x_end]
    # cv2.imshow('april_tag', april_tag_rgb)
    # cv2.waitKey(0)
    all_pts = []
    print april_tag_depth
    for i in range(x_start, x_end):
        for j in range(y_start, y_end):
            depth = depth_image[j, i] / 1000.0  # NumPy images are indexed [row, col]
            if depth != 0:
                x = (i - px) * depth / fx
                y = (j - py) * depth / fy
                all_pts.append([x, y, depth])
    sample_cov = 0.05
    samples = np.array(all_pts)
    print "samples Depth"
    print samples
    cov = np.asarray([sample_cov] * samples.shape[0])
    depth_plane_est = bayesplane.fit_plane_bayes(samples, cov)

    # For now hard code the test data x y values
    # Generate homogenous matrix for pose
    x_r = 0.970411
    y_r = 0.013975
    z_r = -0.0396695
    w_r = -0.23777
    M = tf.quaternion_matrix([w_r, x_r, y_r, z_r])
    x_t = 0.0122108
    y_t = 0.045555
    z_t = 0.831281
    M[0, 3] = x_t
    M[1, 3] = y_t
    M[2, 3] = z_t
    M = np.delete(M, 3, 0)
    print "M"
    print M  # pose extrinsics
    origin = np.array([0, 0, 0, 1])  # homogeneous origin of the tag frame
    C = np.dot(I, M)
    coord = np.dot(C, origin)
    x_coord = coord[0] / coord[2]
    y_coord = coord[1] / coord[2]
    cv2.circle(rgb_image, (int(x_coord), int(y_coord)), 3, (255, 0, 0))
    cv2.imshow('april_tag', rgb_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    x_samples = np.linspace(-0.02, 0.02, num=10)
    y_samples = np.linspace(-0.02, 0.02, num=10)
    sample_points = []
    for i in x_samples:
        for j in y_samples:
            sample_points.append([i, j, 0, 1])
    sample_points = np.transpose(np.array(sample_points))
    sample_points_viz = np.dot(C, sample_points)
    sample_points = np.transpose(np.dot(M, sample_points))
    for i in range(0, 50):
        x_coord = sample_points_viz[0, i] / sample_points_viz[2, i]
        y_coord = sample_points_viz[1, i] / sample_points_viz[2, i]
        cv2.circle(rgb_image, (int(x_coord), int(y_coord)), 3, (255, 0, 0))
    cv2.imshow('april_tag', rgb_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    print "Sample_points RGB"
    print sample_points[1:3, :]
    cov = np.asarray([sample_cov] * sample_points.shape[0])
    rgb_plane_est = bayesplane.fit_plane_bayes(sample_points, cov)
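# info_parser is also assumed rather than shown; a hypothetical reader that
# makes the token indices used in run_experiment line up would be:
def info_parser(path):
    # Split the AprilTag info file into whitespace-separated tokens; the
    # corner fields occupy tokens 1-11 and the pose fields tokens 14-20.
    with open(path) as f:
        return f.read().split()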
Example #4
def run_experiment(data_index):
    print("----------- Begin Exp:" + str(data_index))
    rgb_image_path = "../data/iros_data4/rgb_frame%04d.png" % data_index
    depth_image_path = "../data/iros_data4/depth_frame%04d.png" % data_index
    info_file_path = "../data/iros_data4/apriltag_info_%04d.txt" % data_index
    rgb_image = cv2.imread(rgb_image_path)
    depth_image = cv2.imread(depth_image_path, cv2.IMREAD_ANYDEPTH)
    info_file = info_file_path

    # Getting Apriltag groundtruth information from the chessboard
    info_array = info_parser(info_file)
    corner1 = [float(info_array[1]), float(info_array[2])]
    corner2 = [float(info_array[4]), float(info_array[5])]
    corner3 = [float(info_array[7]), float(info_array[8])]
    corner4 = [float(info_array[10]), float(info_array[11])]

    px = float(info_array[14])
    py = float(info_array[15])
    pz = float(info_array[16])
    rx = float(info_array[17])
    ry = float(info_array[18])
    rz = float(info_array[19])
    rw = float(info_array[20])

    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    objp = np.zeros((3 * 4, 3), np.float32)
    objp[:, :2] = np.mgrid[0:4, 0:3].T.reshape(-1, 2)
    objp = objp * square_size
    axis = np.float32([[0.08, 0, 0], [0, 0.08, 0], [0, 0, -0.08]]).reshape(-1, 3)

    gray = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (4, 3), None)
    print ret
    if ret:
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        rvecs, tvecs, inliers = cv2.solvePnPRansac(objp, corners2, mtx, dist)
        imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist)
        rgb_image = draw(rgb_image, corners2, imgpts)
        board_h = cv2hom(rvecs, tvecs)  # The pose of the board

        aptag_h = np.eye(4)
        aptag_offset = np.array([0.265, -0.003, 0]).reshape(3, 1)  # Board measurements
        aptag_h[0:3, 3] = aptag_offset.reshape(3)
        aptag_h[0:3, 0:3] = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3)  # Orientation offset
        groundtruth_h = np.dot(board_h, aptag_h)  # The pose of the tag measured from the board
        print "Groundtruth H:"
        print groundtruth_h
        gt_rvec, _ = cv2.Rodrigues(groundtruth_h[0:3, 0:3])
        gt_tvec = groundtruth_h[0:3, 3].reshape(3, 1)

        exp_h = tf.quaternion_matrix([rw, rx, ry, rz])
        exp_rvec, _ = cv2.Rodrigues(exp_h[0:3, 0:3])
        exp_tvec = np.array([px, py, pz]).reshape(3)
        exp_h[0:3, 3] = exp_tvec  # The pose of the tag measured from the ROS node
        print "Experimental H:"
        print exp_h

        angle_diff = quatAngleDiff(groundtruth_h[0:3, 0:3], exp_h[0:3, 0:3])
        print angle_diff

        origin_point = np.float32([[0, 0, 0]])
        origin_pt, _ = cv2.projectPoints(origin_point, gt_rvec, gt_tvec, mtx, dist)
        x = origin_pt[0][0][0]
        y = origin_pt[0][0][1]
        cv2.circle(rgb_image, (int(x), int(y)), 2, (0, 0, 255), -1)

        # cv2.namedWindow('img', 1)
        # imgpts2, _ = cv2.projectPoints(axis, exp_rvec, exp_tvec, mtx, dist)
        # rgb_image = draw_origin(rgb_image, origin_pt, imgpts2)
        # cv2.imshow('img', rgb_image)
        # cv2.waitKey(0)
    else:
        print("Cannot localize using Chessboard")
        return False
    #### Experiment
    print corner1
    im_pt1 = corner1
    im_pt2 = corner2
    im_pt3 = corner3
    im_pt4 = corner4

    im_pts = im_pt1 + im_pt2 + im_pt3 + im_pt4
    image_pts = np.array(im_pts).reshape(4, 2)
    ob_pt1 = [-tag_radius, -tag_radius, 0.0]
    ob_pt2 = [tag_radius, -tag_radius, 0.0]
    ob_pt3 = [tag_radius, tag_radius, 0.0]
    ob_pt4 = [-tag_radius, tag_radius, 0.0]
    ob_pts = ob_pt1 + ob_pt2 + ob_pt3 + ob_pt4
    object_pts = np.array(ob_pts).reshape(4, 3)

    retval, cv2rvec, cv2tvec = cv2.solvePnP(object_pts, image_pts, mtx, dist,
                                            flags=cv2.ITERATIVE)

    ################# Baseline ###########################
    print "----------- Basic Test ----------------"
    print("Baseline rvec:")
    print cv2rvec
    print("Baseline tvec:")
    print cv2tvec

    nrvec, ntvec = fuse.solvePnP_RGBD(rgb_image, depth_image, object_pts,
                                      image_pts, mtx, dist, 0)
    print("Test rvec:")
    print nrvec
    print("Test tvec:")
    print ntvec

    rot_difference = lm.quatAngleDiff(cv2rvec, gt_rvec)
    print("Baseline Rotational Difference:")
    print rot_difference

    trans_difference = np.linalg.norm(cv2tvec - gt_tvec)
    print("Baseline Translation Difference:")
    print trans_difference

    rot_difference = lm.quatAngleDiff(nrvec, gt_rvec)
    print("RGBD Rotational Difference:")
    print rot_difference

    trans_difference = np.linalg.norm(ntvec - gt_tvec)
    print("RGBD Translation Difference:")
    print trans_difference

    ################ Experiment 1 ########################
    print "----------- 0.5 Noise ----------------"
    baseline_diff = []
    test_diff = []
    sample_size = 1000
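    # Monte Carlo run: perturb the four detected tag corners with Gaussian
    # pixel noise, then score plain solvePnP against the depth-fused solver.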
    for k in range(sample_size):
        normal_noise = np.random.normal(0, 1, 8).reshape(4, 2)
        modified_img_pts = image_pts + normal_noise
        retval, cv2rvec, cv2tvec = cv2.solvePnP(object_pts, modified_img_pts,
                                                mtx, dist, flags=cv2.ITERATIVE)
        baseline_rvec_difference = lm.quatAngleDiff(cv2rvec, gt_rvec)
        baseline_tvec_difference = np.linalg.norm(cv2tvec - gt_tvec)
        baseline_diff.append([baseline_rvec_difference, baseline_tvec_difference])
        nrvec, ntvec = fuse.solvePnP_RGBD(rgb_image, depth_image, object_pts,
                                          modified_img_pts, mtx, dist, 0)
        test_rvec_difference = lm.quatAngleDiff(nrvec, gt_rvec)
        test_tvec_difference = np.linalg.norm(ntvec - gt_tvec)
        test_diff.append([test_rvec_difference, test_tvec_difference])

    bins_define = np.arange(0, 180, 1)
    baseline_diff = np.array(baseline_diff)
    baseline_rot = baseline_diff[:, 0]
    test_diff = np.array(test_diff)
    test_rot = test_diff[:, 0]
    error = np.sum(baseline_rot > 30) / float(len(baseline_rot))
    print "Fraction of baseline rotation errors above 30 degrees:"
    print error
    # f, axarr = plt.subplots(2, sharex=True, sharey=True)
    # axarr[0].hist(baseline_rot, bins_define, normed = 1, facecolor='red', alpha=0.75)
    # plt.xlabel('Rotation Error')
    # plt.ylabel('Frequnecy')
    # axarr[0].axis([0, 180, 0, 0.15])
    # axarr[0].grid(True)

    # axarr[1].hist(test_rot, bins_define, normed = 1, facecolor='blue', alpha=0.75)
    # axarr[1].axis([0, 180, 0, 0.15])
    # axarr[1].grid(True)
    # savepath = ("../data/iros_results/result_%04d.png" % (data_index, ))
    # f.savefig(savepath)
    return error
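# quatAngleDiff (and lm.quatAngleDiff) is assumed above; a sketch that accepts
# either 3x3 rotation matrices or Rodrigues vectors and returns the relative
# rotation angle in degrees:
def quatAngleDiff(r1, r2):
    def to_mat(r):
        r = np.asarray(r, dtype=np.float64)
        return r if r.shape == (3, 3) else cv2.Rodrigues(r)[0]
    R = np.dot(to_mat(r1), to_mat(r2).T)  # relative rotation
    cos_theta = np.clip((np.trace(R) - 1.0) / 2.0, -1.0, 1.0)
    return np.degrees(np.arccos(cos_theta))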
	img = draw(img, corners, imgpts)
	board_h = cv2hom(rvecs, tvecs) # The pose of the board


	aptag_h = np.eye(4)
	aptag_offset = np.array([0.305, -0.006, 0]).reshape(3, 1)  # Board measurements
	aptag_h[0:3, 3] = aptag_offset.reshape(3)
	aptag_h[0:3, 0:3] = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3)  # Orientation offset
	groundtruth_h = np.dot(board_h, aptag_h)  # The pose of the tag measured from the board
	print "groundtruth h"
	print groundtruth_h
	groundtruth_rvec, _ = cv2.Rodrigues(groundtruth_h[0:3, 0:3])
	groundtruth_tvec = groundtruth_h[0:3, 3].reshape(3, 1)
	

	exp_h = tf.quaternion_matrix([0.116816084143, 0.991342961954, 0.00679171280228, -0.0595567536713])
	exp_rvec, _ = cv2.Rodrigues(exp_h[0:3, 0:3])
	exp_tvec = np.array([0.0343233400823, -0.00234641109975, 0.918781027116]).reshape(3)
	exp_h[0:3, 3] = exp_tvec  # The pose of the tag measured from the ROS node
	print exp_h
	angle_diff = quatAngleDiff(groundtruth_h[0:3, 0:3], exp_h[0:3, 0:3])
	print angle_diff

	origin_point = np.float32([[0,0,0]])
	origin_pt, _ = cv2.projectPoints(origin_point, groundtruth_rvec, groundtruth_tvec, mtx, dist)
	x = origin_pt[0][0][0]
	y = origin_pt[0][0][1]
	cv2.circle(img, (int(x), int(y)), 5, (0, 0, 255), -1)

	imgpts2, _ = cv2.projectPoints(axis, exp_rvec, exp_tvec, mtx, dist)
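# The draw helper called after findChessboardCorners is not shown either; a
# minimal sketch in the style of the OpenCV pose-estimation tutorial, drawing
# the projected 3D axes from the first detected corner:
def draw(img, corners, imgpts):
    corner = tuple(corners[0].ravel().astype(int))
    cv2.line(img, corner, tuple(imgpts[0].ravel().astype(int)), (255, 0, 0), 5)  # X axis
    cv2.line(img, corner, tuple(imgpts[1].ravel().astype(int)), (0, 255, 0), 5)  # Y axis
    cv2.line(img, corner, tuple(imgpts[2].ravel().astype(int)), (0, 0, 255), 5)  # Z axis
    return img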
Example #6
def main(args):
	# Declare Test Variables
	# Camera Intrinsics
	tag_size = 0.0480000004172
	tag_radius = tag_size / 2.0
	fx = 529.29
	fy = 531.28
	px = 466.96
	py = 273.26
	I = np.array([fx, 0, px, 0, fy, py, 0, 0, 1]).reshape(3, 3)
	D = np.zeros((5,1))
	im_pt1 = [584.5, 268.5]
	im_pt2 = [603.5, 274.5]
	im_pt3 = [604.5, 254.5]
	im_pt4 = [585.5, 249.5]  # 586.5 bad, 585.5 good
	im_pts = im_pt1 + im_pt2 + im_pt3 + im_pt4
	image_pts = np.array(im_pts).reshape(4,2)
	ob_pt1 = [-tag_radius, -tag_radius, 0.0]
	ob_pt2 = [ tag_radius, -tag_radius, 0.0]
	ob_pt3 = [ tag_radius,  tag_radius, 0.0]
	ob_pt4 = [-tag_radius,  tag_radius, 0.0]
	ob_pts = ob_pt1 + ob_pt2 + ob_pt3 + ob_pt4
	object_pts = np.array(ob_pts).reshape(4,3)
	# print_att("object_pts", object_pts)
	# print_att("image_pts", image_pts)
	retval, rvec, tvec = cv2.solvePnP(object_pts, image_pts, I, D, flags=cv2.ITERATIVE)
	print "cv2 rvec:"
	print rvec
	print "cv2 tvec:"
	print tvec
	cv2rvec = rvec
	rotM = cv2.Rodrigues(rvec)[0]
	camera_extrinsics = np.eye(4)
	camera_extrinsics[0:3, 0:3] = rotM
	camera_extrinsics[0:3, 3:4] = tvec
	cv2H = camera_extrinsics
	# print "Extrinsics using rgb corner: "
	# print camera_extrinsics

	## Init the plots
	fig = plt.figure()
	ax = fig.add_subplot(111, projection='3d')
	ax.set_xlabel('X Label')
	ax.set_ylabel('Y Label')
	ax.set_zlabel('Z Label')

	## plot depth plane and normals
	depth_plane_est, samples_depth = sample_depth_plane()
	depth_normal = depth_plane_est.mean.vectorize()[0:3]
	ax = plot_samples(samples_depth, ax, 'g')
	depth_center = samples_depth[75, :]
	depth_center = [depth_center[0], depth_center[1], depth_center[2]]
	start = [x + y for x, y in zip([0,0,0], depth_normal)] 
	end = [depth_normal[0], depth_normal[1], depth_normal[2]]
	depth_normal_vec = start+end
	# print_att("depth_normal_vec", depth_normal_vec)
	# depthplane = depth_plane_est.mean.plot(center=np.array(depth_center), scale= 1.5, color='g', ax=ax)
	# ax = plot_vector(depth_normal_vec, ax)

	# Sample rgb plane and normals
	rgb_plane_est, samples_rgb = sample_rgb_plane()
	rgb_normal = rgb_plane_est.mean.vectorize()[0:3]
	rgb_normal = [rgb_normal[0], rgb_normal[1], rgb_normal[2]]
	ax = plot_samples(samples_rgb, ax, 'r')
	# depth_center = samples_rgb[75, :]
	# depth_center = [depth_center[0], depth_center[1], depth_center[2]]
	start = [x + y for x, y in zip([0,0,0], rgb_normal)] 
	end = [rgb_normal[0], rgb_normal[1], rgb_normal[2]]
	rgb_normal_vec = start+end
	# print_att("rgb_normal_vec", rgb_normal_vec)
	# rgb_plane = rgb_plane_est.mean.plot(center=np.array([0,0,0]), scale= 1.5, color='r', ax=ax)
	# ax = plot_vector(rgb_normal_vec, ax)
	# For now hard code the test data x y values
	# Generate homogenous matrix for pose from the message
	x_r = 0.970358818444
	y_r = 0.105224751742
	z_r = 0.145592452085
	w_r = 0.161661229068
	M = tf.quaternion_matrix([w_r, x_r, y_r, z_r])
	x_t = 0.290623142918
	y_t = -0.0266687975686
	z_t = 1.20030737138
	M[0, 3] = x_t
	M[1, 3] = y_t
	M[2, 3] = z_t
	M_d = np.delete(M, 3, 0)
	# print "Extrinsics"
	# print M # pose extrinsics
	
	# Calculating the new pose based on the depth
	init_vector = [0,0,10]
	normal_z = [0,0,0,0,0,1]
	# ax = plot_vector(normal_z, ax)
	testPoint1 = generate_depth_correspondence(im_pt1, depth_plane_est) 
	testPoint2 = generate_depth_correspondence(im_pt2, depth_plane_est)
	testPoint3 = generate_depth_correspondence(im_pt3, depth_plane_est) 
	testPoint4 = generate_depth_correspondence(im_pt4, depth_plane_est)
	print_att("TEST POINT1", testPoint1)
	print_att("TEST POINT2", testPoint2)
	print_att("TEST POINT3", testPoint3)
	print_att("TEST POINT4", testPoint4)
	test_ob_point1 = np.array([-tag_radius, -tag_radius, 0.0, 1.0])
	test_ob_point2 = np.array([ tag_radius, -tag_radius, 0.0, 1.0])
	test_ob_point3 = np.array([ tag_radius,  tag_radius, 0.0, 1.0])
	test_ob_point4 = np.array([-tag_radius,  tag_radius, 0.0, 1.0])
	# result_pt1 = np.dot(cv2H, test_ob_point1.reshape(4,1))
	# result_pt2 = np.dot(cv2H, test_ob_point2.reshape(4,1))
	# result_pt3 = np.dot(cv2H, test_ob_point3.reshape(4,1))
	# result_pt4 = np.dot(cv2H, test_ob_point4.reshape(4,1))
	# print_att("FROM PIXEL1", result_pt1.reshape(1,4))
	# print_att("FROM PIXEL2", result_pt2.reshape(1,4))
	# print_att("FROM PIXEL3", result_pt3.reshape(1,4))
	# print_att("FROM PIXEL4", result_pt4.reshape(1,4))
	test_pts = testPoint1 + testPoint2 + testPoint3 + testPoint4
	depth_points = np.array(test_pts).reshape(4,3)
	tag_pts = np.hstack((test_ob_point1, test_ob_point2, test_ob_point3, test_ob_point4)).reshape(4, 4)
	# print depth_points
	# print tag_pts
	print_att('object_pts', object_pts)
	print_att('depth_pts', depth_points)
	Rdepth, t_depth = rtrans.rigid_transform_3D(object_pts, depth_points)
	print "Rdepth:"
	print Rdepth
	print "t_depth:"
	print t_depth
	depthH = np.eye(4)
	depthH[0:3, 0:3] = Rdepth
	depthH[0:3, 3:4] = t_depth.reshape(3,1)
	print depthH
	result_pt1 = np.dot(depthH, test_ob_point1.reshape(4,1))
	result_pt2 = np.dot(depthH, test_ob_point2.reshape(4,1))
	result_pt3 = np.dot(depthH, test_ob_point3.reshape(4,1))
	result_pt4 = np.dot(depthH, test_ob_point4.reshape(4,1))
	print_att("same as test1", result_pt1.reshape(1,4))
	print_att("same as test2", result_pt2.reshape(1,4))
	print_att("same as test3", result_pt3.reshape(1,4))
	print_att("same as test4", result_pt4.reshape(1,4))
	

	# rvec_init, R = normal_transfomation(init_vector, depth_normal)
	rvec_init, jacob = cv2.Rodrigues(Rdepth)
	tvec_init = t_depth.reshape(3,1)

	# rotated_vector = np.dot(R, np.array(init_vector).reshape(3,1))
	# plot_norm2 = [rotated_vector[0,0], rotated_vector[1,0], rotated_vector[2,0]]
	# ax = plot_vector(plot_norm2 + plot_norm2, ax)
	# tvec_init = np.array(depth_center).reshape(3,1)
	# print_att("rvec_init", rvec_init.reshape(1, 3))
	# print_att("tvec_init", tvec_init.reshape(1, 3))
	nrvec, ntvec = lm.PnPMin(rvec_init, tvec_init, object_pts, image_pts, I, D)
	print_att("new rvec:", nrvec) 
	print_att("new tvec:", ntvec)
	# for x in range(0, 100):
	# 	tvec_init = np.array(depth_center).reshape(3,1)
	# 	rvec_init = np.random.rand(3,1)
	# 	print rvec_init
	# 	rvec, tvec, inliners = cv2.solvePnPRansac(object_pts, image_pts, I, D, rvec=rvec_init, tvec=tvec_init, flags=cv2.ITERATIVE)
	# 	rotM = cv2.Rodrigues(rvec)[0]

	# 	camera_extrinsics = np.eye(4)

	# 	camera_extrinsics[0:3, 0:3] = rotM
	# 	camera_extrinsics[0:3, 3:4] = tvec
	# 	print "extrinsics_depth_corrected: "
	# 	print camera_extrinsics	


	groundtruth = np.array([[0.7279743, -0.06218355, -0.68277861],
		[0.26747103, -0.89120857, 0.3663421],
		[-0.6312786, -0.44931113, -0.63214464]])
	gt_vec, jacob = cv2.Rodrigues(groundtruth)
	cv2error = lm.quatAngleDiff(gt_vec, cv2rvec)
	nerror = lm.quatAngleDiff(gt_vec, nrvec)
	print_att("cv2 error:", cv2error)
	print_att("recompute error:", nerror)
	## rotating the norm purely based off of the rotation matrix
	rotM = cv2.Rodrigues(nrvec)[0]
	init_norm = np.array([0, 0, 1]).reshape(3, 1)
	rotated_norm = np.dot(rotM, init_norm)
	# print_att("rotated_norm", rotated_norm)
	plot_norm = [rotated_norm[0,0], rotated_norm[1,0], rotated_norm[2,0]]

	# ax = plot_vector(plot_norm + plot_norm, ax)

	rotM2 = cv2.Rodrigues(cv2rvec)[0]
	# init_norm = [0,0,-1]
	# init_norm = np.array(init_norm).reshape(3,1)
	# rotM2 = np.eye(3)
	rotated_norm2 = np.dot(rotM2, init_norm)
	# print_att("rotated_norm", rotated_norm)
	plot_norm2 = [rotated_norm2[0,0], rotated_norm2[1,0], rotated_norm2[2,0]]
	# ax = plot_vector(plot_norm2 + plot_norm2, ax)
	# print rotM
	# print rotM2

	plt.show()
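# generate_depth_correspondence is assumed above; a hypothetical sketch that
# back-projects a pixel onto the estimated depth plane by intersecting the
# camera ray with it. The intrinsics default to the ones declared in main,
# and the plane is taken to satisfy n . p = d, the convention used elsewhere
# in this file.
def generate_depth_correspondence(pixel, plane_est,
                                  fx=529.29, fy=531.28, px=466.96, py=273.26):
    ray = np.array([(pixel[0] - px) / fx, (pixel[1] - py) / fy, 1.0])
    v = plane_est.mean.vectorize()  # plane parameters [a, b, c, d]
    n, d = v[0:3], v[3]
    t = d / np.dot(n, ray)          # ray parameter at the plane intersection
    p = t * ray
    return [p[0], p[1], p[2]]       # a list, so corner results can be concatenated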
def main(args):
    # Declare Test Variables
    # Camera Intrinsics
    fx = 529.29
    fy = 531.28
    px = 466.96
    py = 273.26
    I = np.array([fx, 0, px, 0, fy, py, 0, 0, 1]).reshape(3, 3)

    x_start = 544
    x_end = 557
    y_start = 207
    y_end = 224
    rgb_image = cv2.imread("../data/iros_data2/rgb_frame0000.png")
    depth_image = cv2.imread("../data/iros_data2/depth_frame0000.png",
                             cv2.IMREAD_ANYDEPTH)
    april_tag_rgb = rgb_image[y_start:y_end, x_start:x_end]
    april_tag_depth = depth_image[y_start:y_end, x_start:x_end]
    cv2.imshow('april_tag', april_tag_rgb)
    cv2.waitKey(0)
    all_pts = []
    for i in range(x_start, x_end):
        for j in range(y_start, y_end):
            depth = depth_image[j, i] / 1000.0
            if depth != 0:
                x = (i - px) * depth / fx
                y = (j - py) * depth / fy
                all_pts.append([x, y, depth])
    sample_cov = 0.9
    samples_depth = np.array(all_pts)
    print "Sample points from the depth sensor"
    print samples_depth[0:5, :]
    cov = np.asarray([sample_cov] * samples_depth.shape[0])
    depth_plane_est = bayesplane.fit_plane_bayes(samples_depth, cov)

    # For now hard code the test data x y values
    # Generate homogenous matrix for pose
    x_r = 0.885966679653
    y_r = -0.0187847792584
    z_r = -0.459534989083
    w_r = 0.0594791427379
    M = tf.quaternion_matrix([w_r, x_r, y_r, z_r])
    x_t = 0.201772100798
    y_t = -0.139835385971
    z_t = 1.27532936921
    M[0, 3] = x_t
    M[1, 3] = y_t
    M[2, 3] = z_t
    M_d = np.delete(M, 3, 0)
    print "Extrinsics"
    print M  # pose extrinsics
    origin = np.array([0, 0, 0, 1])  # homogeneous origin of the tag frame
    C = np.dot(I, M_d)
    coord = np.dot(C, origin)
    x_coord = coord[0] / coord[2]
    y_coord = coord[1] / coord[2]
    x_samples = np.linspace(-0.1, 0.1, num=10)
    y_samples = np.linspace(-0.1, 0.1, num=10)
    sample_points = []
    for i in x_samples:
        for j in y_samples:
            sample_points.append([i, j, 0, 1])
    sample_points = np.transpose(np.array(sample_points))
    sample_points_viz = np.dot(C, sample_points)
    sample_rgb = np.transpose(np.dot(M_d, sample_points))
    # for i in range(0, 100):
    # 	x_coord = sample_points_viz[0, i] / sample_points_viz[2, i]
    # 	y_coord = sample_points_viz[1, i] / sample_points_viz[2, i]
    # 	cv2.circle(rgb_image, (int(x_coord), int(y_coord)), 5 - int(math.pow(8 * (sample_points_viz[2, i] - 1), 2)), (255, 0,0))
    cv2.imshow('april_tag', rgb_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    print "Sample points from the RGB sensor"
    print sample_rgb[0:5, :]
    cov = np.asarray([0.9] * sample_rgb.shape[0])
    rgb_plane_est = bayesplane.fit_plane_bayes(sample_rgb, cov)

    rgb_center = sample_rgb[50, :]
    depth_center = samples_depth[50, :]
    scale = 0.01
    ## Plotting for visual effects
    print "rgb_plane_est cov: "
    print rgb_plane_est.cov
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.set_xlabel('X Label')
    ax.set_ylabel('Y Label')
    ax.set_zlabel('Z Label')
    # ax.scatter(sample_rgb[:, 0], sample_rgb[:, 1], sample_rgb[:, 2], c='b')
    ax.scatter(samples_depth[:, 0], samples_depth[:, 1], samples_depth[:, 2], c='g')
    # rgbplane = rgb_plane_est.mean.plot(center=np.array(rgb_center), scale= scale, color='b', ax=ax)
    # depthplane = depth_plane_est.mean.plot(center=np.array(depth_center), scale= scale, color='g', ax=ax)
    #plt.show()

    ## Kalman Update stage
    mean_rgb = rgb_plane_est.mean.vectorize()[:, np.newaxis].T
    mean_depth = depth_plane_est.mean.vectorize()[:, np.newaxis].T
    #cov_rgb = rgb_plane_est.cov
    #cov_depth = depth_plane_est.cov
    print "cov_depth: "
    print depth_plane_est.cov
    print "cov_rgb: "
    print rgb_plane_est.cov
    cov_rgb = np.eye(4)
    cov_depth = np.eye(4)
    cov_rgb_sq = np.dot(cov_rgb.T, cov_rgb)
    cov_depth_sq = np.dot(cov_depth.T, cov_depth)
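    # Weighted combination of the two plane estimates (a Kalman-style update);
    # with the identity placeholders above it reduces to the simple average
    # computed further down.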
    mean_fused = np.dot(np.dot(mean_rgb, cov_rgb_sq) + np.dot(mean_depth, cov_depth_sq),
                        np.linalg.inv(cov_rgb_sq + cov_depth_sq))
    mean_fused = mean_fused.flatten()
    fuse_plane = plane.Plane(mean_fused[0:3], mean_fused[3])
    # fuse_plane_plot = fuse_plane.plot(center=np.array([0.26, -0.03, 1.16]), scale= scale, color='b', ax=ax)
    average_mean = (rgb_plane_est.mean.vectorize() +
                    depth_plane_est.mean.vectorize()) / 2
    average_plane = plane.Plane(average_mean[0:3], average_mean[3])
    # average_plane_plot = average_plane.plot(center=np.array([0.26, -0.03, 1.16]), scale= scale, color='r', ax=ax)
    print "mean_rgb: "
    print mean_rgb
    print "mean_depth: "
    print mean_depth
    print "mean_fused: "
    print mean_fused / np.linalg.norm(mean_fused)
    print average_mean / np.linalg.norm(average_mean)

    vector_rgb = rgb_plane_est.mean.vectorize()[0:3]
    vector_depth = depth_plane_est.mean.vectorize()[0:3]
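    # Build the rotation aligning the RGB normal onto the depth normal via the
    # Rodrigues formula R = I + [v]x + [v]x^2 (1 - c) / s^2, where
    # v = n_rgb x n_depth, s = |v|, and c = n_rgb . n_depth.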
    vector_cross = np.cross(vector_rgb, vector_depth)
    vector_sin = np.linalg.norm(vector_cross)
    vector_cos = np.dot(vector_rgb, vector_depth)
    vector_skew = np.array([[0, -vector_cross[2], vector_cross[1]],
                            [vector_cross[2], 0, -vector_cross[0]],
                            [-vector_cross[1], vector_cross[0], 0]])
    vector_eye = np.eye(3)
    R = (vector_eye + vector_skew + np.linalg.matrix_power(vector_skew, 2) *
         (1 - vector_cos) / (vector_sin * vector_sin))
    print R
    mean_rgb_rotated = rgb_plane_est.mean.vectorize()[0:3, np.newaxis]
    mean_rgb_rotated = np.dot(R, mean_rgb_rotated)
    mean_rgb_rotated_r = mean_rgb_rotated.flatten()
    mean_rgb_rotated_d = np.dot(mean_rgb_rotated_r, rgb_center)
    print np.append(mean_rgb_rotated_r, mean_rgb_rotated_d)
    plane_rotated = plane.Plane(mean_rgb_rotated_r, mean_rgb_rotated_d)
    # plane_rotated_plot = plane_rotated.plot(center=np.array(rgb_center), scale= scale, color='r', ax=ax)

    rotate_mat = np.eye(4)
    rotate_mat[0:3, 0:3] = R
    sub_center = np.eye(4)
    sub_center[0:3, 3] = -1 * rgb_center.T
    add_center = np.eye(4)
    add_center[0:3, 3] = rgb_center.T
    post_rotate = np.dot(add_center, np.dot(rotate_mat, sub_center))
    print "post rotate"
    print post_rotate
    M_r = np.dot(post_rotate, M)

    Mr_d = np.delete(M_r, 3, 0)
    C_r = np.dot(I, Mr_d)
    sample_points_viz_rotate = np.dot(C_r, sample_points)
    sample_rgb_rotate = np.transpose(np.dot(Mr_d, sample_points))
    # ax.scatter(sample_rgb_rotate[:, 0], sample_rgb_rotate[:, 1], sample_rgb_rotate[:, 2], c='r')
    plt.show()
    for i in range(0, 100):
        x_coord = sample_points_viz_rotate[0, i] / sample_points_viz_rotate[2, i]
        y_coord = sample_points_viz_rotate[1, i] / sample_points_viz_rotate[2, i]
        radius = 5 - int(math.pow(8 * (sample_points_viz_rotate[2, i] - 1), 2))
        cv2.circle(rgb_image, (int(x_coord), int(y_coord)), radius, (0, 255, 0))
    cv2.imshow('april_tag', rgb_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
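# rtrans.rigid_transform_3D is assumed in Example #6; a sketch of the standard
# SVD (Kabsch) solution for the R, t that best maps one N x 3 point set onto
# another:
def rigid_transform_3D(A, B):
    A = np.asarray(A, dtype=np.float64)
    B = np.asarray(B, dtype=np.float64)
    cA, cB = A.mean(axis=0), B.mean(axis=0)
    H = np.dot((A - cA).T, (B - cB))  # 3x3 cross-covariance
    U, S, Vt = np.linalg.svd(H)
    R = np.dot(Vt.T, U.T)
    if np.linalg.det(R) < 0:          # correct an improper (reflected) solution
        Vt[2, :] *= -1
        R = np.dot(Vt.T, U.T)
    t = cB - np.dot(R, cA)
    return R, t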