Code Example #1
def show(problem):
    global robot, blocks
    from klampt import visualization
    visualization.clear()
    robot.setConfig(problem.start)
    visualization.add("robot", robot)
    visualization.add("target", [problem.goal[0], 0.0, problem.goal[1]])
    for i in xrange(W):
        for j in xrange(H):
            if problem.env[i, j]:
                visualization.add("block %d,%d" % (i, j), blocks[i, j])
    if problem.solution is not None:
        visualization.animate(
            "robot",
            RobotTrajectory(robot,
                            times=range(len(problem.solution)),
                            milestones=problem.solution))
    visualization.dialog()
Code Example #2
    theta = math.acos(q[3]) * 2.0
    if abs(theta) < 1e-8:
        m = [0, 0, 0]
    else:
        m = vectorops.mul(vectorops.unit(q[0:3]), theta)
    return so3.from_moment(m)


if __name__ == "__main__":
    print "trajectorytest.py: This example demonstrates several types of trajectory"
    if len(sys.argv) <= 1:
        print "USAGE: trajectorytest.py [robot or world file]"
        print "With no arguments, shows some 3D trajectories"

        #add a point to the visualizer and animate it
        point = coordinates.addPoint("point")
        visualization.add("point", point)
        traj = trajectory.Trajectory()
        for i in range(10):
            traj.times.append(i)
            traj.milestones.append([
                random.uniform(-1, 1),
                random.uniform(-1, 1),
                random.uniform(-1, 1)
            ])

        traj2 = trajectory.HermiteTrajectory()
        traj2.makeSpline(traj)
        visualization.animate("point", traj2)

        #add a transform to the visualizer and animate it
        xform = visualization.add("xform", se3.identity())
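The fragment at the top of this excerpt converts a unit quaternion (x,y,z,w) into a Klampt so3 rotation via its moment (axis-angle) form: theta = 2*acos(w), axis = unit((x,y,z)). A quick sanity check of that conversion; the wrapper name quaternion_to_so3 is made up here for illustration:

import math
from klampt import so3, vectorops

def quaternion_to_so3(q):
    #q = (x,y,z,w); body identical to the excerpt above
    theta = math.acos(q[3]) * 2.0
    if abs(theta) < 1e-8:
        m = [0, 0, 0]
    else:
        m = vectorops.mul(vectorops.unit(q[0:3]), theta)
    return so3.from_moment(m)

#90-degree rotation about z: q = (0, 0, sin(pi/4), cos(pi/4))
q = [0.0, 0.0, math.sqrt(0.5), math.sqrt(0.5)]
R = quaternion_to_so3(q)
print so3.apply(R, [1, 0, 0])   #prints approximately [0, 1, 0]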
Code Example #3
File: visual_test.py  Project: duke-iml/ece490-s2016
from klampt import *
from klampt import visualization
from klampt import robotcollide
from klampt import robotcspace
from klampt import cspace
from klampt import resource

world = WorldModel()
world.readFile('../ece490-s2016/apc2015/klampt_models/apc.xml')
#sets the world to the state depicted in the file

obj = ik.objective(world.robot(0).link(22),local=[0,0,0],world=[1,0,1])

visualization.add("world",world)
#visualization.add("ik objective",obj)
q0 = world.robot(0).getConfig()
#copy vector of world robot config
q1 = q0[:]
#q1[16] = 1
#q2 = q1[:]
#q2[16] = 0
#path = [q0,q1,q2]
q1 = resource.get("goal.config",description="Goal config for left arm",doedit=True,default=q1,editor='visual',world=world)


#the subset of robot links to plan for (here, the left arm)
subset = [15,16,17,18,19,20,21,22]
collider = robotcollide.WorldCollider(world)
#probably want to ignore collisions between other arm/links and the world to make things faster...
space = robotcspace.RobotSubsetCSpace(world.robot(0),subset,collider)
planner = cspace.MotionPlan(space, "rrt*")
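The excerpt stops right after constructing the planner. A plausible continuation, sketched under the assumption that endpoints must be given in the 8-dimensional subset space (setEndpoints, planMore, and getPath are the standard klampt cspace.MotionPlan API; extracting the subset coordinates by hand avoids assuming any lift/project helpers on RobotSubsetCSpace):

start = [q0[i] for i in subset]
goal = [q1[i] for i in subset]
planner.setEndpoints(start, goal)
planner.planMore(1000)       #run 1000 iterations of RRT*
path = planner.getPath()     #list of subset-space milestones, or None on failure
if path:
    print "Found a path with", len(path), "milestones"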
Code Example #4
File: planningtest.py  Project: haewon-mit/Klampt
        BB = geom.getBB()
        print BB[0],BB[1]
        BBgeom = GeometricPrimitive()
        BBgeom.setAABB(BB[0],BB[1])
        geom.setGeometricPrimitive(BBgeom)

robot = world.robot(0)
#this line replaces the robot's normal geometry with bounding boxes.
#it makes planning faster but sacrifices accuracy.  Uncomment the line
#visualization.dialog() below to examine whether the simplified robot looks
#ok
if DO_SIMPLIFY:
    simplify(robot)

#add the world elements individually to the visualization
visualization.add("robot",robot)
for i in range(1,world.numRobots()):
    visualization.add("robot"+str(i),world.robot(i))
for i in range(world.numRigidObjects()):
    visualization.add("rigidObject"+str(i),world.rigidObject(i))
for i in range(world.numTerrains()):
    visualization.add("terrain"+str(i),world.terrain(i))
#if you want to just see the robot in a pop up window...
if DO_SIMPLIFY and DEBUG_SIMPLIFY:
    visualization.dialog()

#Automatic construction of space
if not CLOSED_LOOP_TEST:
    if not MANUAL_SPACE_CREATION:
        space = robotplanning.makeSpace(world=world,robot=robot,
                                        edgeCheckResolution=1e-2,
Code Example #5
File: planningtest.py  Project: paperstiger/Klampt
        print BB[0], BB[1]
        BBgeom = GeometricPrimitive()
        BBgeom.setAABB(BB[0], BB[1])
        geom.setGeometricPrimitive(BBgeom)


robot = world.robot(0)
#this line replaces the robot's normal geometry with bounding boxes.
#it makes planning faster but sacrifices accuracy.  Uncomment the line
#visualization.dialog() below to examine whether the simplified robot looks
#ok
if DO_SIMPLIFY:
    simplify(robot)

#add the world elements individually to the visualization
visualization.add("robot", robot)
for i in range(1, world.numRobots()):
    visualization.add("robot" + str(i), world.robot(i))
for i in range(world.numRigidObjects()):
    visualization.add("rigidObject" + str(i), world.rigidObject(i))
for i in range(world.numTerrains()):
    visualization.add("terrain" + str(i), world.terrain(i))
#if you want to just see the robot in a pop up window...
if DO_SIMPLIFY and DEBUG_SIMPLIFY:
    visualization.dialog()

#Automatic construction of space
if not CLOSED_LOOP_TEST:
    if not MANUAL_SPACE_CREATION:
        space = robotplanning.makeSpace(world=world,
                                        robot=robot,
Code Example #6
File: calibrate.py  Project: duke-iml/ece490-s2016
        dq = [random.uniform(-jointEncoderError,jointEncoderError) for i in range(len(q0))]
        dobs = (so3.from_moment([random.uniform(-sensorErrorRads,sensorErrorRads) for i in range(3)]),
                [random.uniform(-sensorErrorMeters,sensorErrorMeters) for i in range(3)])
        calibrationConfigs.append(vectorops.add(q0,dq))
        observations.append(se3.mul(obs0,dobs))
        trueObservations.append(obs0)

    if DO_VISUALIZATION:    
        rgroup = coordinates.addGroup("calibration ground truth")
        rgroup.addFrame("camera link",worldCoordinates=pc.getTransform())
        rgroup.addFrame("marker link",worldCoordinates=pm.getTransform())
        rgroup.addFrame("camera (ground truth)",parent="camera link",relativeCoordinates=Tc0)
        rgroup.addFrame("marker (ground truth)",parent="marker link",relativeCoordinates=Tm0)
        for i,(obs,obs0) in enumerate(zip(observations,trueObservations)):
            rgroup.addFrame("obs"+str(i)+" (ground truth)",parent="camera (ground truth)",relativeCoordinates=obs0)
            rgroup.addFrame("obs"+str(i)+" (from camera)",parent="camera (ground truth)",relativeCoordinates=obs)
        visualization.add("world",world)
        for i,q in enumerate(calibrationConfigs):
            visualization.add("config"+str(i),q)
            app = lc.appearance().clone()
            app.setColor(0.5,0.5,0.5,0.1)
            visualization.setAppearance("config"+str(i),app)
        visualization.add("simulated coordinates",rgroup)
        visualization.dialog()

    res = calibrate_robot_camera(robot,camera_link,
                                 calibrationConfigs,
                                 observations,
                                 [marker_link]*len(calibrationConfigs))
    print 
    print "Per-observation reconstruction error:",res[0]/numObs
    print "Estimated camera transform:",res[1]
Code Example #7
File: calibrate.py  Project: duke-iml/ece490-s2016
def calibrate_xform_camera(camera_link_transforms,
                           marker_link_transforms,
                           marker_observations,
                           marker_ids,
                           observation_relative_errors=None,
                           camera_initial_guess=None,
                           marker_initial_guess=None,
                           regularizationFactor=0,
                           maxIters=100,
                           tolerance=1e-7):
    """Single camera calibration function for a camera and markers on some
    set of rigid bodies.
    
    Given body transforms and a list of estimated calibration marker observations
    in the camera frame, estimates both the camera transform relative to the
    camera link as well as the marker transforms relative to their links.

    M is the set of m markers. By default there is at most one marker per link.
       Markers can either be point markers (e.g., a mocap ball) or transform
       markers (e.g., an AR tag or checkerboard pattern).
    O is the set of n observations, consisting of readings (Tc_i,Tm_i,o_i,l_i)
       where Tc_i is the camera link's transform, Tm_i is the marker link's
       transform, o_i is the reading, which consists of either a point or
       transform estimate in the camera frame, and l_i is the ID of the marker
       (by default, just its link index).

    Output: a tuple (err,Tc,marker_dict) where err is the norm of the
    reconstruction residual, Tc is the estimated camera transform relative to the
    camera's link, and marker_dict is a dict mapping each marker id to its
    estimated position or transform on the marker's link.

    Arguments:
    - camera_link_transforms: a list of the camera link's world transforms
      Tc_1,...,Tc_n at the time of each observation.
    - marker_link_transforms: a list of the marker links' world transforms
      Tm_1,...,Tm_n at the time of each observation.
    - marker_observations: a list of estimated positions or transformations
      of calibration markers o_1,...,o_n, given in the camera's reference
      frame (z forward, x right, y down).

      If o_i is a 3-vector, the marker is considered to be a point marker.
      If a se3 element (R,t) is given, the marker is considered to be
      a transform marker.  You may not mix point and transform observations
      for a single marker ID.
    - marker_ids: a list of marker ID #'s l_1,...,l_n corresponding to
      each observation, e.g., the link index on which each marker lies.
    - observation_relative_errors: if you have an idea of the magnitude of
      each observation error, it can be placed into this list.  Must be
      a list of n floats, 3-lists (point markers), or 6-lists (transform
      markers).  By default, errors will be set proportionally to the
      observed distance between the camera and marker.
    - camera_initial_guess: if not None, an initial guess for the camera transform
    - marker_initial_guess: if not None, a dictionary containing initial guesses
      for the marker transforms
    - regularizationFactor: if nonzero, the optimization penalizes deviation
      of the estimated camera transform and marker transforms from zero
      proportionally to this factor.
    - maxIters: maximum number of iterations for optimization.
    - tolerance: optimization convergence tolerance.  Stops when the change in
      estimates falls below this threshold.
    """
    if len(camera_link_transforms) != len(marker_ids):
        raise ValueError("Must provide the same number of marker IDs as camera transforms")
    if len(marker_link_transforms) != len(marker_ids):
        raise ValueError("Must provide the same number of marker IDs as marker transforms")
    if len(marker_observations) != len(marker_ids):
        raise ValueError("Must provide the same number of marker observations as marker transforms")
    #get all unique marker ids
    marker_id_list = list(set(marker_ids))
    #detect marker types
    marker_types = dict((v,None) for v in marker_id_list)
    for i,(obs,id) in enumerate(zip(marker_observations,marker_ids)):
        if len(obs)==3:
            if marker_types[id] == 't':
                raise ValueError("Provided both point and transform observations for observation #%d, id %s\n"%(i,str(id)))
            marker_types[id] = 'p'
        elif len(obs)==2 and len(obs[0])==9 and len(obs[1])==3:
            if marker_types[id] == 'p':
                raise ValueError("Provided both point and transform observations for observation #%d, id %s\n"%(i,str(id)))
            marker_types[id] = 't'
        else:
            raise ValueError("Invalid observation for observation #%d, id %s\n"%(i,str(id)))

    n = len(marker_observations)
    m = len(marker_id_list)

    #get all the observation weights
    observation_weights = []
    if observation_relative_errors is None:
        #default weights: inversely proportional to the observed distance
        for obs in marker_observations:
            if len(obs) == 3:
                observation_weights.append(1.0/vectorops.norm(obs))
            else:
                observation_weights.append(1.0/vectorops.norm(obs[1]))
    else:
        if len(observation_relative_errors) != n:
            raise ValueError("Invalid length of observation errors")
        for err in observation_relative_errors:
            if hasattr(err,'__iter__'):
                observation_weights.append([1.0/v for v in err])
            else:
                observation_weights.append(1.0/err)

    #initial guesses
    if camera_initial_guess is None:
        camera_initial_guess = se3.identity()
        if any(v == 't' for v in marker_types.itervalues()):
            #estimate camera rotation from point estimates because rotations are more prone to initialization failures
            point_observations = []
            marker_point_rel = []
            for i,obs in enumerate(marker_observations):
                if len(obs)==2:
                    point_observations.append(obs[1])
                else:
                    point_observations.append(obs)
                marker_point_rel.append(se3.mul(se3.inv(camera_link_transforms[i]),marker_link_transforms[i])[1])
            camera_initial_guess = (point_fit_rotation_3d(point_observations,marker_point_rel),[0.0]*3)
            print "Estimated camera rotation from points:",camera_initial_guess
    if marker_initial_guess is None:
        marker_initial_guess = dict((l,(se3.identity() if marker_types[l]=='t' else [0.0]*3)) for l in marker_id_list)
    else:
        marker_initial_guess = marker_initial_guess.copy()
        for l in marker_id_list:
            if l not in marker_initial_guess:
                marker_initial_guess[l] = (se3.identity() if marker_types[l]=='t' else [0.0]*3)
    camera_transform = camera_initial_guess
    marker_transforms = marker_initial_guess.copy()

    if DO_VISUALIZATION:
        rgroup = coordinates.addGroup("calibration")
        rgroup.addFrame("camera link",worldCoordinates=camera_link_transforms[-1])
        rgroup.addFrame("marker link",worldCoordinates=marker_link_transforms[-1])
        rgroup.addFrame("camera estimate",parent="camera link",relativeCoordinates=camera_transform)
        rgroup.addFrame("marker estimate",parent="marker link",relativeCoordinates=marker_transforms.values()[0])
        for i,obs in enumerate(marker_observations):
            rgroup.addFrame("obs"+str(i)+" estimate",parent="camera estimate",relativeCoordinates=obs)
        visualization.add("coordinates",rgroup)
        visualization.dialog()

    point_observations_only = all(marker_types[marker] == 'p' for marker in marker_id_list)
    xform_observations_only = all(marker_types[marker] == 't' for marker in marker_id_list)
    if not point_observations_only and not xform_observations_only:
        raise NotImplementedError("Can't calibrate camera from mixed point/transform markers yet")
    for iters in range(maxIters):
        #attempt to minimize the error on the following over all observations i
        #camera_link_transform(q_i)*camera_transform*observation_i = marker_link_transform(l_i,q_i)*marker_transform(l_i)
        #first, we'll assume the camera transform is fixed and then optimize the marker transforms.
        #then, we'll assume the marker transforms are fixed and then optimize the camera transform.
        #finally we'll check the error to detect convergence
        #1. Estimate marker transforms from current camera transform
        new_marker_transforms = dict((l,(TransformStats(zero=marker_initial_guess[l],prior=regularizationFactor)
                                         if marker_types[l]=='t' else
                                         VectorStats(zero=[0.0]*3,prior=regularizationFactor)))
                                     for l in marker_id_list)
        for i in xrange(n):
            marker = marker_ids[i]
            Tclink = camera_link_transforms[i]
            Tmlink = marker_link_transforms[i]
            obs = marker_observations[i]
            Trel = se3.mul(se3.inv(Tmlink),se3.mul(Tclink,camera_transform))
            if marker_types[marker] == 't':
                estimate = se3.mul(Trel,obs)
            else:
                estimate = se3.apply(Trel,obs)
            new_marker_transforms[marker].add(estimate,observation_weights[i])
        print "ITERATION",iters
        #print "  ESTIMATED MARKER TRANSFORMS:",dict((k,v.average) for (k,v) in new_marker_transforms.iteritems())
        #2. Estimate camera transform from current marker transforms
        new_camera_transform = TransformStats(zero=camera_initial_guess,prior=regularizationFactor)
        if point_observations_only:
            #TODO: weighted point fitting
            relative_points = []
            for i in xrange(n):
                marker = marker_ids[i]
                Tclink = camera_link_transforms[i]
                Tmlink = marker_link_transforms[i]
                obs = marker_observations[i]
                pRel = se3.apply(se3.inv(Tclink),se3.apply(Tmlink,new_marker_transforms[marker].average))
                relative_points.append(pRel)
            new_camera_transform.add(point_fit_xform_3d(marker_observations,relative_points),sum(observation_weights))
        else:
            for i in xrange(n):
                marker = marker_ids[i]
                Tclink = camera_link_transforms[i]
                Tmlink = marker_link_transforms[i]
                obs = marker_observations[i]
                Trel = se3.mul(se3.inv(Tclink),se3.mul(Tmlink,new_marker_transforms[marker].average))
                estimate = se3.mul(Trel,se3.inv(obs))
                new_camera_transform.add(estimate,observation_weights[i])
        #print "  ESTIMATED CAMERA TRANSFORMS:",new_camera_transform.average
        #3. compute difference between last and current estimates
        diff = 0.0
        diff += vectorops.normSquared(se3.error(camera_transform,new_camera_transform.average))
        for marker in marker_id_list:
            if marker_types[marker]=='t':
                diff += vectorops.normSquared(se3.error(marker_transforms[marker],new_marker_transforms[marker].average))
            else:
                diff += vectorops.distanceSquared(marker_transforms[marker],new_marker_transforms[marker].average)
        camera_transform = new_camera_transform.average
        for marker in marker_id_list:
            marker_transforms[marker] = new_marker_transforms[marker].average
        if math.sqrt(diff) < tolerance:
            #converged!
            print "Converged with diff %g on iteration %d"%(math.sqrt(diff),iters)
            break
        print "  ESTIMATE CHANGE:",math.sqrt(diff)
        error = 0.0
        for i in xrange(n):
            marker = marker_ids[i]
            Tclink = camera_link_transforms[i]
            Tmlink = marker_link_transforms[i]
            obs = marker_observations[i]
            Tc = se3.mul(Tclink,camera_transform)
            if marker_types[marker] == 't':
                Tm = se3.mul(Tmlink,marker_transforms[marker])
                error += vectorops.normSquared(se3.error(se3.mul(Tc,obs),Tm))
            else:
                Tm = se3.apply(Tmlink,marker_transforms[marker])
                error += vectorops.distanceSquared(se3.apply(Tc,obs),Tm)
        print "  OBSERVATION ERROR:",math.sqrt(error)
        #raw_input()
        if DO_VISUALIZATION:
            rgroup.setFrameCoordinates("camera estimate",camera_transform)
            rgroup.setFrameCoordinates("marker estimate",marker_transforms.values()[0])
            for i,obs in enumerate(marker_observations):
                rgroup.setFrameCoordinates("obs"+str(i)+" estimate",obs)
            visualization.add("coordinates",rgroup)
            visualization.dialog()
    if math.sqrt(diff) >= tolerance:
        print "Max iters reached"
    error = 0.0
    for i in xrange(n):
        marker = marker_ids[i]
        Tclink = camera_link_transforms[i]
        Tmlink = marker_link_transforms[i]
        obs = marker_observations[i]
        Tc = se3.mul(Tclink,camera_transform)
        if marker_types[marker] == 't':
            Tm = se3.mul(Tmlink,marker_transforms[marker])
            error += vectorops.normSquared(se3.error(se3.mul(Tc,obs),Tm))
        else:
            Tm = se3.apply(Tmlink,marker_transforms[marker])
            error += vectorops.distanceSquared(se3.apply(Tc,obs),Tm)
    return (math.sqrt(error),camera_transform,marker_transforms)
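A minimal usage sketch for calibrate_xform_camera, assuming it runs inside calibrate.py's module (the function relies on module-level helpers such as TransformStats, VectorStats, point_fit_xform_3d, and the DO_VISUALIZATION flag). All transforms and readings below are fabricated for illustration:

#three point-marker readings of a single marker from three camera-link poses
camera_link_transforms = [se3.identity(),
                          (so3.rotation([0, 0, 1], 0.1), [0.05, 0.0, 0.0]),
                          (so3.rotation([0, 1, 0], 0.1), [0.0, 0.05, 0.0])]
marker_link_transforms = [(so3.identity(), [1.0, 0.0, 0.0])] * 3
marker_observations = [[1.0, 0.0, 0.0],      #made-up camera-frame readings
                       [0.94, -0.08, 0.0],
                       [0.99, 0.05, -0.10]]
marker_ids = [0, 0, 0]                       #all readings see marker 0

err, Tc, markers = calibrate_xform_camera(camera_link_transforms,
                                          marker_link_transforms,
                                          marker_observations,
                                          marker_ids,
                                          maxIters=50)
print "Per-observation residual:", err / len(marker_ids)
print "Camera transform on its link:", Tc
print "Marker 0 position on its link:", markers[0]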
Code Example #8
File: trajectorytest.py  Project: arocchi/Klampt
    theta = math.acos(q[3])*2.0
    if abs(theta) < 1e-8:
        m = [0,0,0]
    else:
        m = vectorops.mul(vectorops.unit(q[0:3]),theta)
    return so3.from_moment(m)

if __name__ == "__main__":
    print "trajectorytest.py: This example demonstrates several types of trajectory"
    if len(sys.argv)<=1:
        print "USAGE: trajectorytest.py [robot or world file]"
        print "With no arguments, shows some 3D trajectories"
        
        #add a point to the visualizer and animate it
        point = coordinates.addPoint("point")
        visualization.add("point",point)
        traj = trajectory.Trajectory()
        for i in range(10):
            traj.times.append(i)
            traj.milestones.append([random.uniform(-1,1),random.uniform(-1,1),random.uniform(-1,1)])

        traj2 = trajectory.HermiteTrajectory()
        traj2.makeSpline(traj)
        visualization.animate("point",traj2)

        #add a transform to the visualizer and animate it
        xform = visualization.add("xform",se3.identity())
        traj3 = trajectory.SE3Trajectory()
        for i in range(10):
            traj3.times.append(i)
            rrot = random_rotation()
Code Example #9
    print "visplugin.py: This example demonstrates how to simulate a world and read user input using the klampt.visualization framework"
    if len(sys.argv)<=1:
        print "USAGE: visplugin.py [world_file]"
        exit()

    world = WorldModel()
    for fn in sys.argv[1:]:
        res = world.readFile(fn)
        if not res:
            raise RuntimeError("Unable to load model "+fn)
    world.enableInitCollisions(True)

    print "Press 'q' to exit"
    #the plugin is used to get interactivity with the visualizer... if you
    #don't care about interactivity, you may leave it out.  See vistemplate.py for
    #an example of this
    plugin = MyGLPlugin(world)
    visualization.setPlugin(plugin)
    #add the world to the visualizer
    visualization.add("world",world)
    #run the visualizer in a separate thread
    visualization.show()
    while visualization.shown() and not plugin.quit:
        visualization.lock()
        #TODO: you may modify the world here
        visualization.unlock()
        #changes to the visualization must be done outside the lock
        time.sleep(0.01)
    print "Ending visualization."
    visualization.kill()
Code Example #10
File: resourcetest.py  Project: ol3577600/Klampt
from klampt import resource
from klampt import visualization
from klampt import *

print """resourcetest.py: This program gives an example of how to use the
resource module."""

worldfile = "../../data/athlete_plane.xml"
robotname = 'athlete'

world = WorldModel()
world.readFile(worldfile)

print "Showing robot in modal dialog box"
visualization.add("robot",world.robot(0))
visualization.add("ee",world.robot(0).link(11).getTransform())
visualization.dialog()

import threading
import time

print "Showing threaded visualization"
lock = threading.Lock()
visualization.show(lock)
for i in range(3):
    lock.acquire()
    q = world.robot(0).getConfig()
    q[9] = 3.0
    world.robot(0).setConfig(q)
    lock.release()
    time.sleep(1.0)
Code Example #11
File: resourcetest.py  Project: arocchi/Klampt
from klampt import resource
from klampt import visualization
from klampt import *

print """resourcetest.py: This program gives an example of how to use the
resource module."""

worldfile = "../../data/athlete_plane.xml"
robotname = "athlete"

world = WorldModel()
world.readFile(worldfile)

"""
#tests of visualization module interacting with the resource module
print "Showing robot in modal dialog box"
visualization.add("robot",world.robot(0))
visualization.add("ee",world.robot(0).link(11).getTransform())
visualization.dialog()
import time
"""

"""
#tests of visualization module interacting with the resource module
print "Showing threaded visualization"
visualization.show()
for i in range(3):
    visualization.lock()
    q = world.robot(0).getConfig()
    q[9] = 3.0
    world.robot(0).setConfig(q)
Code Example #12
File: resourcetest.py  Project: paperstiger/Klampt
from klampt import resource
from klampt import visualization
from klampt import *

print """resourcetest.py: This program gives an example of how to use the
resource module."""

worldfile = "../../data/athlete_plane.xml"
robotname = 'athlete'

world = WorldModel()
world.readFile(worldfile)
"""
#tests of visualization module interacting with the resource module
print "Showing robot in modal dialog box"
visualization.add("robot",world.robot(0))
visualization.add("ee",world.robot(0).link(11).getTransform())
visualization.dialog()
import time
"""
"""
#tests of visualization module interacting with the resource module
print "Showing threaded visualization"
visualization.show()
for i in range(3):
    visualization.lock()
    q = world.robot(0).getConfig()
    q[9] = 3.0
    world.robot(0).setConfig(q)
    visualization.unlock()
    time.sleep(1.0)
Code Example #13
File: vistemplate.py  Project: arocchi/Klampt
    print "vistemplate.py: This example demonstrates how to run the visualization framework"
    if len(sys.argv)<=1:
        print "USAGE: vistemplate.py [world_file]"
        exit()

    #creates a world and loads all the items on the command line
    world = WorldModel()
    for fn in sys.argv[1:]:
        res = world.readFile(fn)
        if not res:
            raise RuntimeError("Unable to load model "+fn)

    coordinates.setWorldModel(world)

    #add the world to the visualizer
    visualization.add("world",world)
    #add the coordinate Manager to the visualizer
    visualization.add("coordinates",coordinates.manager())
    #test a point
    pt = [2,5,1]
    visualization.add("some point",pt)
    #test a rigid transform
    visualization.add("some blinking transform",[so3.identity(),[1,3,0.5]])
    #test an IKObjective
    link = world.robot(0).link(world.robot(0).numLinks()-1)
    #point constraint
    #obj = ik.objective(link,local=[[0,0,0]],world=[pt])
    #hinge constraint
    obj = ik.objective(link,local=[[0,0,0],[0,0,0.1]],world=[pt,[pt[0],pt[1],pt[2]+0.1]])
    #transform constraint
    #obj = ik.objective(link,R=link.getTransform()[0],t=pt)
Code Example #14
File: vistemplate.py  Project: paperstiger/Klampt
    print "vistemplate.py: This example demonstrates how to run the visualization framework"
    if len(sys.argv) <= 1:
        print "USAGE: vistemplate.py [world_file]"
        exit()

    #creates a world and loads all the items on the command line
    world = WorldModel()
    for fn in sys.argv[1:]:
        res = world.readFile(fn)
        if not res:
            raise RuntimeError("Unable to load model " + fn)

    coordinates.setWorldModel(world)

    #add the world to the visualizer
    visualization.add("world", world)
    #add the coordinate Manager to the visualizer
    visualization.add("coordinates", coordinates.manager())
    #test a point
    pt = [2, 5, 1]
    visualization.add("some point", pt)
    #test a rigid transform
    visualization.add("some blinking transform", [so3.identity(), [1, 3, 0.5]])
    #test an IKObjective
    link = world.robot(0).link(world.robot(0).numLinks() - 1)
    #point constraint
    #obj = ik.objective(link,local=[[0,0,0]],world=[pt])
    #hinge constraint
    obj = ik.objective(link,
                       local=[[0, 0, 0], [0, 0, 0.1]],
                       world=[pt, [pt[0], pt[1], pt[2] + 0.1]])
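Both vistemplate excerpts cut off after building the objective. A plausible continuation, modeled on the show()/shown() loop of code example #9 (whether visualization.add accepts an IKObjective directly is an assumption; example #3 keeps the equivalent call commented out, and time must have been imported):

    visualization.add("ik objective", obj)   #assumption: IKObjective is addable
    visualization.show()
    while visualization.shown():
        time.sleep(0.1)
    visualization.kill()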
Code Example #15
def main():
    #add the world elements individually to the visualization
    visualization.add("robot", ROBOT)
    print "Added robot to visualization"
    for i in range(1, WORLD.numRobots()):
        visualization.add("robot" + str(i), WORLD.robot(i))
        print "Added robot ", str(i)
    for i in range(WORLD.numRigidObjects()):
        visualization.add("rigidObject" + str(i), WORLD.rigidObject(i))
        print "Added rigidObject ", str(i)
    for i in range(WORLD.numTerrains()):
        visualization.add("terrain" + str(i), WORLD.terrain(i))
        print "Added terrain ", str(i)

    # SETUP CONFIGS
    print "-------------------- Path Configs --------------------"
    print 'Parsing', JSON_PATHNAME, 'path from JSON'
    CONFIGS = parseJson(JSON_PATHNAME)
    print 'CONFIGS:'
    for i in range(len(CONFIGS)):
        print '  ', i, ':', CONFIGS[i]

    # MOTION PLANNER
    planTypeDict = {}
    planTypeDict["sbl"] = {
        'type': "sbl",
        'perturbationRadius': 0.5,
        'bidirectional': 1,
        'shortcut': 1,
        'restart': 1,
        'restartTermCond': "{foundSolution:1,maxIters:1000}"
    }

    print "-------------------- Motion Planning --------------------"
    settings = planTypeDict[PLANNER_TYPE]
    print "Planner type", PLANNER_TYPE
    print "Planner settings", settings

    wholepath = [CONFIGS[0]]
    for i in range(len(CONFIGS) - 1):
        path = myPlanner(CONFIGS[i], CONFIGS[i + 1], settings)
        if path is None or len(path) == 0:
            print 'Failed to find path between configuration %d and %d' % (i,
                                                                           i + 1)
            exit(0)
        wholepath += path[1:]

    # CONTROLLER
    if len(wholepath) > 1:
        print 'Path:', len(wholepath)
        # for q in wholepath:
        #     print '  ', q

    #draw the path as a RobotTrajectory (you could just animate wholepath, but for robots with non-standard joints
    #the results will often look odd).  Animate with 5-second duration
    times = [i * 5.0 / (len(wholepath) - 1) for i in range(len(wholepath))]
    traj = trajectory.RobotTrajectory(ROBOT,
                                      times=times,
                                      milestones=wholepath)
    # print traj.interpolate(wholepath[0], wholepath[1], 10, 1)
    #show the path in the visualizer, repeating for 60 seconds
    visualization.animate("robot", traj)
    visualization.spin(60)

    visualization.kill()

    print("Done.")
Code Example #16
File: ex3.py  Project: paperstiger/Klampt
    world = WorldModel()
    res = world.readFile("ex2_file.xml")
    if not res: raise RuntimeError("Unable to load world file")

    linkindex = 7
    localpos = (0.17,0,0)
    robot = world.robot(0)
    link = robot.link(linkindex)

    coordinates.setWorldModel(world)
    goalpoint = [0,0,0]
    ptlocal = coordinates.addPoint("ik-constraint-local",localpos,robot.getName()+":"+link.getName())
    ptworld = coordinates.addPoint("ik-constraint-world",goalpoint,"world")
    print coordinates.manager().frames.keys()
    
    visualization.add("robot",robot)
    visualization.add("coordinates",coordinates.manager())
    visualization.show()
    iteration = 0
    while visualization.shown():
        visualization.lock()
        #set the desired position goalpoint to move in a circle
        r = 0.4
        t = visualization.animationTime()
        goalpoint[0],goalpoint[1],goalpoint[2] = 0.8,r*math.cos(t),0.7+r*math.sin(t)
        q = solve_ik(link,localpos,goalpoint)
        robot.setConfig(q)
        #this updates the coordinates module
        coordinates.updateFromWorld()
        
        visualization.unlock()
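The excerpt ends mid-loop. Judging from the equivalent loop in code example #9, the body plausibly finishes by sleeping briefly after releasing the lock, and the script kills the visualization once the window closes (the increment of the otherwise-unused iteration counter is a guess):

        time.sleep(0.02)
        iteration += 1   #counter declared above; increment assumed
    visualization.kill()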