コード例 #1
0
    def draw_head_pose(self, neck_H_base, cam_H_neck, ns, color):
        """Visualize the head (neck) and camera poses in the base frame.

        Draws an axis triad at the neck pose and at the camera pose, plus a
        line connecting the neck origin to the camera origin.

        Args:
            neck_H_base: 4x4 homogeneous transform, base frame -> neck frame.
            cam_H_neck: 4x4 homogeneous transform, neck frame -> camera frame.
            ns: marker namespace string for the visualizer.
            color: RGBA tuple for the neck-to-camera line.
        """
        base_H_neck = linalg.inv(neck_H_base)

        # Axis triad at the neck pose.
        self.draw_transform(base_H_neck, scale=0.1, ns=ns, idnum=0)

        # Compose base->camera (cam_H_neck * neck_H_base, inverted) and draw
        # a smaller triad there.
        cam_H_base = np.dot(cam_H_neck, neck_H_base)
        base_H_cam = linalg.inv(cam_H_base)
        self.draw_transform(base_H_cam, scale=0.04, ns=ns, idnum=3)

        # Draw a line from the neck origin to the camera origin.
        b_neck_origin = geom.transform_points(np.zeros(3), base_H_neck)
        b_cam_origin = geom.transform_points(np.zeros(3), base_H_cam)
        m = markers.line_list('/BASE', ns, 7, 0.002, color, [b_neck_origin, b_cam_origin])
        self.viz.add(m)
        self.viz.update()
コード例 #2
0
 def __init__(self, tf_target_points, base_H_target, cam_H_neck, frame):
     """Set up a per-frame calibration optimization problem.

     Args:
         tf_target_points: (N, 3) fiducial marker points in the target frame.
         base_H_target: 4x4 homogeneous transform, target frame -> base frame.
         cam_H_neck: 4x4 homogeneous transform, neck frame -> camera frame.
         frame: observation frame providing neck_H_base and visible_markers.
     """
     self.tf_target_points = tf_target_points
     self.base_H_target = base_H_target
     self.cam_H_neck = cam_H_neck
     self.frame = frame
     # Target points expressed in the base frame (fixed during optimization).
     self.bf_target_points = geom.transform_points(self.tf_target_points, self.base_H_target)
     self.pm = FCParametrizer()
     # Error history appended to by calc_error(); initialized here so the
     # first call does not raise AttributeError.
     self.errors = []
コード例 #3
0
    def draw_observed_points(self, frame, cam_H_neck, ns, color, cam_mark_lines=False):
        """Draw the marker points a frame's camera observed, in the base frame.

        When cam_mark_lines is set, also draws a line from the camera origin
        to each observed point.
        """
        # Collect the camera-frame observations into a single array.
        cf_points = np.array([cf_p for (marker_i, cf_p) in frame.visible_markers])

        # base <- neck <- camera
        base_H_cam = np.dot(linalg.inv(frame.neck_H_base), linalg.inv(cam_H_neck))
        bf_points = geom.transform_points(cf_points, base_H_cam)
        self.viz.add(markers.points('/BASE', ns, 0, 0.003, color, bf_points))

        if cam_mark_lines:
            # One line segment per observed point, all rooted at the camera.
            bf_cam_origin = geom.transform_points(np.zeros(3), base_H_cam)
            line_points = []
            for bf_p in bf_points:
                line_points.extend([bf_cam_origin, bf_p])
            self.viz.add(markers.line_list('/BASE', ns, 7, 0.001, color, line_points))

        self.viz.update()
コード例 #4
0
 def draw_transform(self, H, scale, ns, idnum):
     """Draw an axis triad for the homogeneous transform H in the base frame.

     The triad origin is H applied to the frame origin; the arrows are the
     transformed x/y/z unit vectors, drawn with length `scale`.
     """
     b_origin = geom.transform_points(np.array((0., 0., 0.)), H)
     unit_vectors = ((1., 0., 0.), (0., 1., 0.), (0., 0., 1.))
     b_ivec, b_jvec, b_kvec = [
         geom.transform_vectors(np.array(v), H) for v in unit_vectors]
     axis_markers = markers.axes(
         '/BASE', ns, idnum, b_origin, b_ivec, b_jvec, b_kvec, scale, 0.05*scale)
     for axis_marker in axis_markers:
         self.viz.add(axis_marker)
     self.viz.update()
コード例 #5
0
    def calc_error(self, params):
        """Mean base-frame error for a candidate corrected-camera transform.

        Args:
            params: parameter vector decoded into a 4x4 transform by
                self.pm.params_to_matrix().

        Returns:
            The average Euclidean distance between each visible marker's
            camera-derived base-frame position and its target-derived
            base-frame position. The value is also appended to self.errors.
        """
        cam_H_ccam = self.pm.params_to_matrix(params)
        neck_H_ccam = np.dot(linalg.inv(self.cam_H_neck), cam_H_ccam)
        # base <- neck <- corrected camera
        base_H_ccam = np.dot(linalg.inv(self.frame.neck_H_base), neck_H_ccam)

        err = 0.0
        num_corr = 0
        for m_i, cf_p in self.frame.visible_markers:
            # Position of marker in base frame as seen by the camera.
            bf_p = geom.transform_points(cf_p, base_H_ccam)

            # Position of marker in base frame based on target position.
            bf_p_target = self.bf_target_points[m_i]

            err += linalg.norm(bf_p - bf_p_target)
            num_corr += 1

        # Guard against a frame with no visible markers: report an infinite
        # error instead of raising ZeroDivisionError.
        if num_corr == 0:
            err = float('inf')
        else:
            err /= num_corr
        self.errors.append(err)
        return err
コード例 #6
0
 def draw_target(self, base_H_target, ns='target_points', color=(0.0, 0.0, 1.0, 1.0)):
     """Draw the fiducial target's points in the base frame.

     Args:
         base_H_target: 4x4 homogeneous transform, target frame -> base frame.
         ns: marker namespace (default 'target_points').
         color: RGBA tuple (default opaque blue).
     """
     target_points_in_base = geom.transform_points(self.tf_target_points, base_H_target)
     point_marker = markers.points('/BASE', ns, 0, 0.007, color, target_points_in_base)
     self.viz.add(point_marker)
     self.viz.update()
コード例 #7
0
params = FCParams()
rospy.init_node('FiducialCal')
rospy.sleep(1.0)
fcviz = FCViz('/FiducialCal/markers', params.tf_target_points)

# for our own purposes, save the estimated transform in a shelf
cache_dir = roslib.packages.get_pkg_subdir('arm_fiducial_cal', 'cache')
s = store.Store(cache_dir)
frames = s['frames']
est_base_H_target = s['est_base_H_target']
est_cam_H_neck = s['est_cam_H_neck']


upright_frames = []
for f_i, f in enumerate(frames):
    bf_head_origin  = geom.transform_points(np.zeros(3), linalg.inv(f.neck_H_base))

    # hack to filter out poses where the lower pan-tilt isn't straight up
    if bf_head_origin[2] > 1.7:
        print 'Skipping frame %d (pan-tilt not in upright pose)' % f_i
        upright_frames.append(f)

opt = FCOptimizer(frames, params.tf_target_points, est_base_H_target, est_cam_H_neck, fix_target_transform=True,
                  W=np.diag([1.0, 1.0, 10.0]))
new_est_base_H_target, new_est_cam_H_neck = opt.optimize()

opt.print_stats()

print np.dot(linalg.inv(new_est_cam_H_neck), est_cam_H_neck)

true_color = (1.0, 0.0, 1.0, 1.0)
コード例 #8
0
# use the camera to neck transform from the last frame (should be the same for all frames though)
initial_cam_H_neck = linalg.inv(ros_util.transform_to_matrix(msg.cam_transform_head))

# run the optimization
for f_i in range(len(frames)):
    markers_i = set([m_k for (m_k, p) in frames[f_i].visible_markers])
    print "Markers visible in frame %d:" % f_i, sorted(markers_i)

print ""
print "=========================="
print "Optimizing..."

opt = FCOptimizer(frames, params.tf_target_points, params.initial_base_H_target, initial_cam_H_neck)
est_base_H_target, est_cam_H_neck = opt.optimize()

est_bf_target_points = geom.transform_points(params.tf_target_points, est_base_H_target)
print "Target average z=%f" % est_bf_target_points[:, 2].mean()

# print np.dot(linalg.inv(initial_cam_H_neck), est_cam_H_neck)

print ""
print "=========================="
print "Results:"

opt.print_stats()

plt.plot(opt.errors)
plt.show()

true_color = (1.0, 0.0, 1.0, 1.0)
initial_color = (0.0, 1.0, 1.0, 1.0)
コード例 #9
0
    # Second optimization pass, seeded from the first pass's estimates.
    est_base_H_target, est_cam_H_neck = opt.optimize()

    # assuming the base to target transform we just computed is correct, optimize
    # the top-of-pan-tilt to left bumblebee transform
    print ''
    print 'Optimizing camera to top-of-pan-tilt transform'
    opt = FCOptimizer(frames, tf_target_points, est_base_H_target, est_cam_H_neck, opt_frame='cam')
    est_base_H_target, est_cam_H_neck = opt.optimize()

print ''
print '===================================================================='
print 'Calibration Results:'
print '===================================================================='

# print some statistics about the optimization
est_bf_target_points = geom.transform_points(params.tf_target_points, est_base_H_target)
average_table_z = est_bf_target_points[:,2].mean()
print 'Target average z=%f' % average_table_z
opt.print_stats()

# write out the resulting cal file to the urdf
urdf_path = os.path.join(
    roslib.packages.get_pkg_dir("arm_robot_model"), "models/sensorsValues.urdf.xacro")
print 'Cal Successful! Writing fixed offset result to URDF to %s' % urdf_path
# NOTE(review): est_cam_H_neck is inverted here, presumably because the URDF
# stores the neck->camera direction — confirm against the URDF convention.
bb_left_neck_tf = ros_util.matrix_to_transform(linalg.inv(est_cam_H_neck))
update_sensors_values_urdf(urdf_path, bb_left_neck_tf, None, None, None)

# write out the fixed offset correction
fixed_bb_offset_path = os.path.join(roslib.packages.get_pkg_dir("arm_fiducial_cal"), "calib/fixed_offset.txt")
print 'Cal Successful! Writing fixed offset result to %s' % fixed_bb_offset_path
print 'Notation is x y z qw qx qy qz.'
コード例 #10
0
# Marker colors: yellow = uncorrected estimate, green = GP-corrected.
estimated_color = (1.0, 1.0, 0.0, 1.0)
gp_color = (0.0, 1.0, 0.0, 1.0)

params = FCParams()
rospy.init_node('FiducialCal')
rospy.sleep(1.0)
fcviz = FCViz('/FiducialCal/markers', params.tf_target_points)

fcviz.draw_target(est_base_H_target, 'estimated_target_pose', true_color)
for frame_i, frame in enumerate(upright_frames):
    # Estimated base->camera transform for this frame, and the
    # GP-corrected version of the same transform.
    base_H_cam = linalg.inv(np.dot(est_cam_H_neck, frame.neck_H_base))
    base_H_ccam = correcter.get_corrected_transform(base_H_cam)

    # Camera-frame marker observations for this frame.
    cf_marker_points = np.array([obs for (_, obs) in frame.visible_markers])

    # Project the observations into the base frame with both transforms and
    # publish a point marker set for each.
    uncorrected_bf = geom.transform_points(cf_marker_points, base_H_cam)
    fcviz.viz.add(markers.points(
        '/BASE', 'points_%d' % frame_i, 0, 0.003, estimated_color, uncorrected_bf))

    corrected_bf = geom.transform_points(cf_marker_points, base_H_ccam)
    fcviz.viz.add(markers.points(
        '/BASE', 'corrected_points_%d' % frame_i, 0, 0.003, gp_color, corrected_bf))
    fcviz.update()

# Keep republishing the markers until the node is shut down.
while not rospy.is_shutdown():
    fcviz.update()
    rospy.sleep(0.1)