def main():
    time_step = 0.004

    finger = sim_finger.SimFinger(
        finger_type="trifingerone",
        time_step=time_step,
        enable_visualization=True,
    )

    # Important: The cameras need to be created _after_ the simulation is
    # initialized.
    cameras = camera.TriFingerCameras()

    # Move the fingers to random positions
    while True:
        goal = np.array(
            sample.random_joint_positions(
                number_of_fingers=3,
                lower_bounds=[-1, -1, -2],
                upper_bounds=[1, 1, 2],
            )
        )
        finger_action = finger.Action(position=goal)

        for _ in range(50):
            t = finger.append_desired_action(finger_action)
            finger.get_observation(t)

            images = cameras.get_images()
            # images are RGB --> convert to BGR for OpenCV
            images = [cv2.cvtColor(img, cv2.COLOR_RGB2BGR) for img in images]
            cv2.imshow("camera60", images[0])
            cv2.imshow("camera180", images[1])
            cv2.imshow("camera300", images[2])
            cv2.waitKey(int(time_step * 1000))
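The snippet above omits the imports and entry-point guard it relies on. A minimal sketch of what a runnable file would need, assuming the usual module layout of the trifinger_simulation package (the exact import paths are an assumption, not taken from the snippet):

# Assumed imports for the camera demo above; the module paths follow the
# trifinger_simulation package layout and are an assumption here.
import cv2
import numpy as np

from trifinger_simulation import sim_finger, sample, camera


if __name__ == "__main__":
    main()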
def test_random_point_positions(self):
    # choose bounds such that the ranges of the three joints are
    # non-overlapping
    lower_bounds = [-2, 0, 3]
    upper_bounds = [-1, 2, 5]

    # one finger
    for i in range(100):
        result = sample.random_joint_positions(1, lower_bounds, upper_bounds)
        assert_array_less_equal(lower_bounds, result)
        assert_array_less_equal(result, upper_bounds)

    # three fingers
    for i in range(100):
        result = sample.random_joint_positions(3, lower_bounds, upper_bounds)
        assert_array_less_equal(lower_bounds * 3, result)
        assert_array_less_equal(result, upper_bounds * 3)
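The `assert_array_less_equal` helper used in this test is not a NumPy built-in. A minimal sketch of what such a helper could look like, with name and behaviour inferred only from how the test uses it (element-wise "less than or equal" check), not taken from the library:

import numpy as np


def assert_array_less_equal(x, y):
    # Element-wise check that x <= y.  This is an assumed implementation,
    # inferred from the usage in the test above.
    x = np.asarray(x)
    y = np.asarray(y)
    assert np.all(x <= y), "Not all elements satisfy x <= y:\n{}\n{}".format(x, y)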
def main():
    argparser = argparse.ArgumentParser(description=__doc__)
    argparser.add_argument(
        "--control-mode",
        default="position",
        choices=["position", "torque"],
        help="Specify position or torque as the control mode.",
    )
    argparser.add_argument(
        "--finger-type",
        default="trifingerone",
        choices=finger_types_data.get_valid_finger_types(),
        help="Specify a valid finger type",
    )
    args = argparser.parse_args()

    time_step = 0.004

    finger = sim_finger.SimFinger(
        finger_type=args.finger_type,
        time_step=time_step,
        enable_visualization=True,
    )
    num_fingers = finger.number_of_fingers

    if args.control_mode == "position":
        position_goals = visual_objects.Marker(number_of_goals=num_fingers)

    while True:
        if args.control_mode == "position":
            desired_joint_positions = np.array(
                sample.random_joint_positions(number_of_fingers=num_fingers)
            )
            finger_action = finger.Action(position=desired_joint_positions)
            # visualize the goal position of the finger tip
            position_goals.set_state(
                finger.pinocchio_utils.forward_kinematics(
                    desired_joint_positions
                )
            )

        if args.control_mode == "torque":
            desired_joint_torques = [random.random()] * 3 * num_fingers
            finger_action = finger.Action(torque=desired_joint_torques)

        # pursue this goal for one second
        for _ in range(int(1 / time_step)):
            t = finger.append_desired_action(finger_action)
            finger.get_observation(t)
            time.sleep(time_step)
def test_reproduce_reset_state(self):
    """
    Send a hundred states (positions + velocities) to all the 1-DOF joints
    of the fingers and assert that they exactly reach these states.
    """
    finger = SimFinger(finger_type="fingerone")

    for _ in range(100):
        state_positions = sample.random_joint_positions(
            finger.number_of_fingers
        )
        state_velocities = [pos * 10 for pos in state_positions]

        reset_state = finger.reset_finger_positions_and_velocities(
            state_positions, state_velocities
        )

        reset_positions = reset_state.position
        reset_velocities = reset_state.velocity

        np.testing.assert_array_equal(
            reset_positions, state_positions, verbose=True
        )
        np.testing.assert_array_equal(
            reset_velocities, state_velocities, verbose=True
        )
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--save-poses",
        type=str,
        metavar="FILENAME",
        help="""If set, the demo stops when the history of the object pose
            time series is full and stores the buffered poses to the
            specified file.
        """,
    )
    parser.add_argument(
        "--history-size",
        type=int,
        default=1000,
        help="Size of the object pose time series.  Default: %(default)d",
    )
    parser.add_argument(
        "--non-real-time",
        action="store_true",
        help="""Do not run the simulation in real-time but as fast as
            possible.
        """,
    )
    args = parser.parse_args()

    real_time = not args.non_real_time
    time_step = 0.004

    finger = sim_finger.SimFinger(
        finger_type="trifingerone",
        time_step=time_step,
        enable_visualization=True,
    )

    # Object and Object Tracker Interface
    # ===================================
    #
    # Important: These objects need to be created _after_ the simulation is
    # initialized (i.e. after the SimFinger instance is created).

    # spawn a cube in the arena
    cube = collision_objects.Block()

    # initialize the object tracker interface
    object_tracker_data = object_tracker.Data(
        "object_tracker", True, args.history_size
    )
    object_tracker_backend = object_tracker.SimulationBackend(
        object_tracker_data, cube, real_time
    )
    object_tracker_frontend = object_tracker.Frontend(object_tracker_data)

    # Move the fingers to random positions so that the cube is kicked around
    # (and thus its position changes).
    while True:
        goal = np.array(
            sample.random_joint_positions(
                number_of_fingers=3,
                lower_bounds=[-1, -1, -2],
                upper_bounds=[1, 1, 2],
            )
        )
        finger_action = finger.Action(position=goal)

        for _ in range(50):
            t = finger.append_desired_action(finger_action)
            finger.get_observation(t)
            if real_time:
                time.sleep(time_step)

        # get the latest pose estimate of the cube and print it
        t_obj = object_tracker_frontend.get_current_timeindex()
        cube_pose = object_tracker_frontend.get_pose(t_obj)
        print("Cube Pose: %s" % cube_pose)

        # if --save-poses is set, stop when the buffer is full and write it
        # to a file
        if args.save_poses and t_obj > args.history_size:
            # the backend needs to be stopped before saving the buffered poses
            object_tracker_backend.stop()
            object_tracker_backend.store_buffered_data(args.save_poses)
            break
    args = parser.parse_args()

    time_step = args.time_step

    finger = sim_finger.SimFinger(
        time_step=time_step,
        enable_visualization=True,
        finger_type="fingerone",
    )

    # set the finger to a reasonable start position
    finger.reset_finger_positions_and_velocities([0, -0.7, -1.5])

    errors = []
    for _ in range(100):
        target_position = np.array(
            sample.random_joint_positions(number_of_fingers=1)
        )
        action = finger.Action(position=target_position)

        positions = []
        for _ in range(500):
            t = finger.append_desired_action(action)
            observation = finger.get_observation(t)
            positions.append(observation.position)

        observation = finger.get_observation(t)
        error = np.abs(observation.position - target_position)
        errors.append(error)
        print("Position error: {}".format(error))

    if args.plot:
    time_step = args.time_step

    finger = sim_finger.SimFinger(
        time_step=time_step,
        enable_visualization=True,
        finger_type="fingerpro",
        # spawn robot higher up to avoid collisions with the table
        robot_position_offset=(0, 0, 0.5),
    )

    errors = []
    for _ in range(100):
        target_position = np.array(
            sample.random_joint_positions(
                number_of_fingers=1,
                lower_bounds=[-0.33, 0.0, -2.7],
                upper_bounds=[1.0, 1.57, 0.0],
            )
        )
        action = finger.Action(position=target_position)

        positions = []
        steps = 1000
        for _ in range(steps):
            t = finger.append_desired_action(action)
            observation = finger.get_observation(t)
            positions.append(observation.position)

        observation = finger.get_observation(t)
        error = np.abs(observation.position - target_position)
        errors.append(error)
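The fragment above only collects the per-joint position errors. A short sketch of how they might be summarised afterwards; the aggregation and the 0.1 rad threshold are assumptions for illustration, not part of the original script:

    # Summarise the collected per-joint position errors (assumed follow-up;
    # the 0.1 rad tolerance is an arbitrary illustrative value).
    errors = np.vstack(errors)
    print("Mean position error per joint: {}".format(errors.mean(axis=0)))
    print("Max position error per joint:  {}".format(errors.max(axis=0)))
    assert (errors.mean(axis=0) < 0.1).all(), "Mean position error too large"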
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--enable-cameras",
        "-c",
        action="store_true",
        help="Enable camera observations.",
    )
    parser.add_argument(
        "--iterations",
        type=int,
        default=100,
        help="Number of motions that are performed.",
    )
    parser.add_argument(
        "--save-action-log",
        type=str,
        metavar="FILENAME",
        help="If set, save the action log to the specified file.",
    )
    args = parser.parse_args()

    platform = trifinger_platform.TriFingerPlatform(
        visualization=True, enable_cameras=args.enable_cameras
    )

    # Move the fingers to random positions so that the cube is kicked around
    # (and thus its position changes).
    for _ in range(args.iterations):
        goal = np.array(
            sample.random_joint_positions(
                number_of_fingers=3,
                lower_bounds=[-1, -1, -2],
                upper_bounds=[1, 1, 2],
            )
        )
        finger_action = platform.Action(position=goal)

        # apply action for a few steps, so the fingers can move to the target
        # position and stay there for a while
        for _ in range(250):
            t = platform.append_desired_action(finger_action)
            time.sleep(platform.get_time_step())

        # show the latest observations
        robot_observation = platform.get_robot_observation(t)
        print("Finger0 Position: %s" % robot_observation.position[:3])

        camera_observation = platform.get_camera_observation(t)
        print("Cube Position: %s" % camera_observation.object_pose.position)

        if platform.enable_cameras:
            for i, name in enumerate(("camera60", "camera180", "camera300")):
                # simulation provides images in RGB but OpenCV expects BGR
                img = cv2.cvtColor(
                    camera_observation.cameras[i].image, cv2.COLOR_RGB2BGR
                )
                cv2.imshow(name, img)
            cv2.waitKey(1)

        print()

    if args.save_action_log:
        platform.store_action_log(args.save_action_log)
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--robot",
        "-r",
        choices=["trifingerone", "trifingerpro"],
        default="trifingerone",
        help="Which robot to use.  Default: %(default)s",
    )
    parser.add_argument(
        "--config-dir",
        "-c",
        type=pathlib.Path,
        help="""Path to the directory containing camera calibration files.
            This is optional; if not specified, some default values are used.
        """,
    )
    parser.add_argument(
        "--param-file-format",
        type=str,
        default="camera{id}.yml",
        help="""Format of the camera parameter files.  '{id}' is replaced
            with the camera id.  Default: %(default)s
        """,
    )
    args = parser.parse_args()

    time_step = 0.004

    finger = sim_finger.SimFinger(
        finger_type=args.robot,
        time_step=time_step,
        enable_visualization=True,
    )

    # Important: The cameras need to be created _after_ the simulation is
    # initialized.
    if args.config_dir:
        cameras = camera.create_trifinger_camera_array_from_config(
            args.config_dir, calib_filename_pattern=args.param_file_format
        )
    else:
        cameras = camera.TriFingerCameras()

    # Move the fingers to random positions
    while True:
        goal = np.array(
            sample.random_joint_positions(
                number_of_fingers=3,
                lower_bounds=[-1, -1, -2],
                upper_bounds=[1, 1, 2],
            )
        )
        finger_action = finger.Action(position=goal)

        for _ in range(50):
            t = finger.append_desired_action(finger_action)
            finger.get_observation(t)

            images = cameras.get_images()
            # images are RGB --> convert to BGR for OpenCV
            images = [cv2.cvtColor(img, cv2.COLOR_RGB2BGR) for img in images]
            cv2.imshow("camera60", images[0])
            cv2.imshow("camera180", images[1])
            cv2.imshow("camera300", images[2])
            key = cv2.waitKey(int(time_step * 1000))
            if key == ord("q"):
                return