Example #1
    def draw_observed_points(self, frame, cam_H_neck, ns, color, cam_mark_lines=False):
        # camera-frame positions of every marker visible in this frame
        cf_points = np.array([cf_p for (marker_i, cf_p) in frame.visible_markers])
        # base_H_cam = base_H_neck * neck_H_cam = inv(neck_H_base) * inv(cam_H_neck)
        base_H_cam = np.dot(linalg.inv(frame.neck_H_base), linalg.inv(cam_H_neck))
        # express the observed points in the base frame and publish them as point markers
        bf_points = geom.transform_points(cf_points, base_H_cam)
        m = markers.points('/BASE', ns, 0, 0.003, color, bf_points)
        self.viz.add(m)

        # draw lines from the camera to the points it sees
        if cam_mark_lines:
            bf_cam_origin = geom.transform_points(np.zeros(3), base_H_cam)
            line_points = []
            for bf_p in bf_points:
                line_points.append(bf_cam_origin)
                line_points.append(bf_p)
            m = markers.line_list('/BASE', ns, 7, 0.001, color, line_points)
            self.viz.add(m)
        
        self.viz.update()
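
The frame-naming convention here reads a_H_b as the 4x4 homogeneous transform that maps points expressed in frame b into frame a, so inv(neck_H_base) is base_H_neck and the product above chains camera -> neck -> base. A minimal sketch of what geom.transform_points is assumed to do under that convention (the project's actual geom helper may have a different implementation):

import numpy as np

def transform_points(points, a_H_b):
    # Apply the 4x4 homogeneous transform a_H_b to points expressed in frame b
    # (a single (3,) point or an (N, 3) array) and return them in frame a.
    pts = np.atleast_2d(points)                            # (N, 3)
    homog = np.hstack([pts, np.ones((pts.shape[0], 1))])   # (N, 4) homogeneous coords
    transformed = np.dot(a_H_b, homog.T).T                 # (N, 4) in frame a
    return transformed[:, :3].reshape(np.shape(points))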
Example #2
    def draw_target(self, base_H_target, ns='target_points', color=(0.0, 0.0, 1.0, 1.0)):
        # express the target-frame points in the base frame and publish them as point markers
        bf_target_points = geom.transform_points(self.tf_target_points, base_H_target)
        m = markers.points('/BASE', ns, 0, 0.007, color, bf_target_points)
        self.viz.add(m)
        self.viz.update()
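
For context, a hypothetical call site: base_H_target is the 4x4 homogeneous pose of the calibration target in the base frame. The rotation and translation values below are made up for illustration, and fcviz stands in for an instance of the class that defines draw_target:

import numpy as np

# hypothetical target pose: rotated 90 degrees about Z, 1 m forward and 0.5 m up
theta = np.pi / 2.0
base_H_target = np.array([
    [np.cos(theta), -np.sin(theta), 0.0, 1.0],
    [np.sin(theta),  np.cos(theta), 0.0, 0.0],
    [0.0,            0.0,           1.0, 0.5],
    [0.0,            0.0,           0.0, 1.0],
])

fcviz.draw_target(base_H_target, ns='target_points', color=(0.0, 0.0, 1.0, 1.0))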
Example #3
            spread = dist_from_line
    if spread < 0.1:
        print 'Skipping frame %d (spread is only %f)' % (f_i, spread)
        continue  # markers too tightly clustered to constrain the estimate; skip this frame

    # GP training input for this frame: the head joint angles
    head_joint_angles = np.array(f.joint_states.position)
    gp_training_inputs.append(head_joint_angles)

    # per-frame optimization: the estimated correction parameters become the GP training output
    opt = FCSingleFrameOpt(params.tf_target_points, est_base_H_target, est_cam_H_neck, f)
    est_params = opt.optimize()
    gp_training_outputs.append(est_params)

    # project points using the corrected transform and draw both the estimated and corrected sets
    cam_H_ccam = pm.params_to_matrix(est_params)
    base_H_ccam = np.dot(base_H_cam, cam_H_ccam)
    m = markers.points('/BASE', 'points_%d' % f_i, 0, 0.003, estimated_color, bf_marker_points)
    fcviz.viz.add(m)
    
    corrected_bf_marker_points = geom.transform_points(cf_marker_points, base_H_ccam)
    m = markers.points('/BASE', 'corrected_points_%d' % f_i, 0, 0.003, corrected_color, corrected_bf_marker_points)
    fcviz.viz.add(m)
    fcviz.update()

    print 'Frame %d: n_markers=%d  spread=%.2f: %s -> %s' % (
        f.frame_id, len(f.visible_markers), spread, ' '.join(['%6.3f' % a for a in head_joint_angles]),
        ' '.join(['%6.3f' % p for p in est_params]))

    upright_frames.append(f)
# stack the per-frame samples into arrays for GP training
gp_training_inputs = np.array(gp_training_inputs)
gp_training_outputs = np.array(gp_training_outputs)
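
Once the per-frame samples are stacked, the GP mentioned in the comment above maps head joint angles to correction parameters. A sketch of that fitting step using scikit-learn; the project's own GP implementation is not shown here, so treat the kernel choice and the new_head_joint_angles input as assumptions:

from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

# fit a GP from head joint angles (inputs) to estimated correction params (outputs);
# GaussianProcessRegressor accepts a 2-D target array, one column per parameter
kernel = 1.0 * RBF(length_scale=1.0) + WhiteKernel(noise_level=1e-4)
gp = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gp.fit(gp_training_inputs, gp_training_outputs)

# predict correction parameters for an unseen head pose
# (new_head_joint_angles is hypothetical, shaped (1, n_joints))
predicted_params = gp.predict(new_head_joint_angles)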