Example #1
    def __init__(self, xml: str, stats: Stats, args: MagicDict):
        """
        :param xml: the xml filename for Klampt to read the world settings
        :param stats: the Stats object to keep track of stats
        :param args: an instance of the input arguments
        """
        super().__init__(stats)
        import klampt
        from klampt.plan import robotplanning

        world = klampt.WorldModel()
        if not world.readFile(xml):  # very cluttered world
            raise RuntimeError("Unable to load world file " + xml)
        robot = world.robot(0)

        # this is the CSpace that will be used.
        # Standard collision and joint limit constraints will be checked
        space = robotplanning.makeSpace(world, robot, edgeCheckResolution=0.1)

        self.space = space
        self.robot = robot
        self.world = world

        import copy

        # keep a template configuration with joint values 1-6 zeroed out
        self.template_pos = copy.copy(robot.getConfig())
        self.template_pos[1:7] = [0] * 6
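The CSpace returned by makeSpace can be handed directly to Klampt's sampling-based planners. The following is a minimal sketch, not part of the original snippet, that uses the self.space and self.robot attributes set up above together with the standard MotionPlan interface from klampt.plan.cspace; the plan_to name, goal_config argument, and iteration budget are purely illustrative.

    def plan_to(self, goal_config, iterations=1000):
        from klampt.plan.cspace import MotionPlan

        start_config = self.robot.getConfig()
        # both endpoints must be feasible for the planner to accept them
        if not self.space.feasible(start_config) or not self.space.feasible(goal_config):
            return None
        planner = MotionPlan(self.space, type="sbl")
        planner.setEndpoints(start_config, goal_config)
        planner.planMore(iterations)   # grow the tree for a fixed budget
        return planner.getPath()       # list of configurations, or None if no path was found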
Example #2
def getSim(simfile):
    if simfile in simTable:
        return simTable[simfile][1]
    world = klampt.WorldModel()
    if not world.readFile(simfile):
        raise ValueError("Invalid simulation file " + simfile)
    sim = klampt.Simulator(world)
    simTable[simfile] = (world, sim)
    return sim
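getSim caches a (world, simulator) pair per file, so repeated calls with the same path return the same Simulator instance. A short usage sketch, assuming klampt is imported and simTable is a module-level dict as the function requires; the file name here is purely illustrative:

simTable = {}

sim = getSim("myworld.xml")
assert getSim("myworld.xml") is sim   # cache hit: same Simulator object
for _ in range(100):
    sim.simulate(0.01)    # advance physics by 10 ms
    sim.updateWorld()     # write simulated state back into the WorldModel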
Example #3
    def __init__(self, robot, bulbId):
        if isinstance(robot, str):
            self.world = klampt.WorldModel()
            self.world.loadRobot(robot)
            robot = self.world.robot(0)

        self.robot = robot
        self.bulbId = bulbId
        self.localPos = ShaderMesh(robot.link(bulbId).geometry()).centroid()
        self.frag = '''
Example #4
    def __init__(self, random_plan = False):
        self.world = klampt.WorldModel()
        self.world.readFile(world_config_file)
        self.robot = self.world.robot(0)

        self.obstacles = []
        for i in range(self.world.numTerrains()):
            self.obstacles.append(self.world.terrain(i))

        self.initialize(random_plan)
        while not self.test(self.start_conf) or not self.test(self.goal_conf):
            self.initialize(random_plan)
Example #5
    def __init__(self, worldfn=None):
        GLRealtimeProgram.__init__(self, "Haptic UI Viewer")
        self.statelock = threading.Lock()
        self.state = None
        if worldfn is None:
            self.world = None
            self.robot = None
        else:
            self.world = klampt.WorldModel()
            if not self.world.readFile(worldfn):
                print("HapticUIViewer: Warning, could not load world file", worldfn)
                print("Proceeding without robot feedback")
                self.robot = None
                self.world = None
            else:
                self.robot = self.world.robot(0)
                self.qcmdGetter = TopicListener(system_state_addr, '.robot.command.qcmd')
                self.qdestGetter = TopicListener(system_state_addr, '.controller.traj_q_end')
Example #6
    def print_config(self):
        config = self.robotWidget.get()
        print("Config:",config)
        self.world.robot(0).setConfig(config)
    
    def save_world(self):
        fn = "widgets_test_world.xml"
        print("Saving file to",fn)
        self.world.saveFile(fn)



if __name__ == "__main__":
    print("==============================================================================")
    print("gl_vis_widgets.py: This example demonstrates how to manually add visualization")
    print("widgets to pose a robot using the GLWidgetPlugin interface")
    if len(sys.argv)<=1:
        print()
        print("USAGE: gl_vis_widgets.py [world_file]")
    print("==============================================================================")
    if len(sys.argv)<=1:
        exit()
    world = klampt.WorldModel()
    for fn in sys.argv[1:]:
        res = world.readFile(fn)
        if not res:
            raise RuntimeError("Unable to load model "+fn)
    viewer = MyGLViewer(world)
    vis.run(viewer)
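The MyGLViewer class used in the __main__ block is not shown in this excerpt. Below is a minimal sketch of what it might look like, assuming the print_config and save_world methods above belong to it; GLWidgetPlugin, RobotPoser, addWidget, and keyboardfunc are standard Klampt API, while the 'p' and 's' key bindings are illustrative.

from klampt.vis.glcommon import GLWidgetPlugin
from klampt import RobotPoser

class MyGLViewer(GLWidgetPlugin):
    def __init__(self, world):
        GLWidgetPlugin.__init__(self)
        self.world = world
        # a draggable poser widget for interactively editing the robot's configuration
        self.robotWidget = RobotPoser(world.robot(0))
        self.addWidget(self.robotWidget)

    def keyboardfunc(self, c, x, y):
        if c == 'p':
            self.print_config()
        elif c == 's':
            self.save_world()
        return True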
Example #7
def main():
    parser = argparse.ArgumentParser(description="Get imgs")
    parser.add_argument(
        '-w',
        type=str,
        default="/home/motion/TRINA/Motion/data/TRINA_world_cholera.xml",
        help="world file")
    parser.add_argument('-n', type=str, default="cholera", help="codename")
    parser.add_argument(
        '-c',
        nargs='*',
        default=['zed_slam_left', 'zed_slam_right', 'realsense_slam_l515'],
        help="cameras to use")
    parser.add_argument('--components',
                        nargs='*',
                        default=['left_limb', 'right_limb', 'base'],
                        help="components to use")
    parser.add_argument('-m',
                        type=str,
                        default='Kinematic',
                        help='mode to operate in')
    args = parser.parse_args()
    mc = Motion.MotionClient()
    mc.restartServer(mode=args.m, components=args.components, codename=args.n)
    world = klampt.WorldModel()
    world.loadFile(args.w)
    sensor_module = Camera_Robot(mc, world=world, cameras=args.c)
    time.sleep(1)
    camera_names = ["realsense_slam_l515", "zed_slam_left", "zed_slam_right"]
    topic_names = []
    rgb_suf = "_rgb"
    d_suf = "_d"
    for cn in camera_names:
        topic_names.append(cn + rgb_suf)
        topic_names.append(cn + d_suf)
    rate = rospy.Rate(30)
    pubs = {}
    topic_pref = "images/"
    for name in topic_names:
        full_name = topic_pref + name
        pub = rospy.Publisher(full_name, Image, queue_size=10)
        pubs[full_name] = pub
    #rospy.init_node("sim_pipe", anonymous=True)
    bridge = CvBridge()
    max_d = 6.0
    min_d = 0.0
    while not rospy.is_shutdown():
        img_dict = sensor_module.get_rgbd_images()
        for name in camera_names:
            img = img_dict[name]
            if len(img) > 0:
                color = img[0]
                # Normalize depth to be 0 <= d < 1
                depth = (img[1] - min_d) / (max_d - min_d)
                # Fit in maximum range of 16 (unsigned) bits
                depth *= (2**16 - 1)
                depth = depth.astype("uint16")
                depth = bridge.cv2_to_imgmsg(depth, "16UC1")
                pubs[topic_pref + name + rgb_suf].publish(
                    bridge.cv2_to_imgmsg(color, "8UC3"))
                pubs[topic_pref + name + d_suf].publish(depth)
        rate.sleep()
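On the consuming side, the published 16-bit depth images can be converted back to metric depth by undoing the normalization above. A minimal subscriber sketch, not part of the original example: the node name, callback name, and single topic choice are illustrative, the topic string matches the publisher naming scheme above, and it assumes the same max_d/min_d range.

import rospy
import numpy as np
from cv_bridge import CvBridge
from sensor_msgs.msg import Image

MAX_D, MIN_D = 6.0, 0.0   # must match the publisher's normalization range
bridge = CvBridge()

def on_depth(msg):
    raw = bridge.imgmsg_to_cv2(msg, "16UC1")          # uint16 image as published above
    depth_m = raw.astype(np.float32) / (2**16 - 1)    # undo the 16-bit scaling
    depth_m = depth_m * (MAX_D - MIN_D) + MIN_D       # back to meters
    rospy.loginfo("depth range: %.2f - %.2f m", depth_m.min(), depth_m.max())

if __name__ == "__main__":
    rospy.init_node("depth_listener", anonymous=True)
    rospy.Subscriber("images/realsense_slam_l515_d", Image, on_depth, queue_size=1)
    rospy.spin()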