Example No. 1
0
def main():
    """Command-line entry point: export a robot design as a Wavefront mesh.

    Loads the grammar, applies the given rule sequence to build a robot,
    then writes the robot geometry to ``--output_file`` (.obj) plus a
    matching material file (.mtl) next to it.
    """
    parser = argparse.ArgumentParser(
        description="Export a robot design as a mesh.")
    parser.add_argument("grammar_file", type=str, help="Grammar file (.dot)")
    parser.add_argument("rule_sequence", nargs="+", help="Rule sequence")
    parser.add_argument("--output_file",
                        type=str,
                        required=True,
                        help="Output file")
    args = parser.parse_args()

    graphs = rd.load_graphs(args.grammar_file)
    rules = [rd.create_rule_from_graph(g) for g in graphs]
    # Rule indices may carry trailing commas when pasted from a list.
    rule_sequence = [int(s.strip(",")) for s in args.rule_sequence]
    graph = make_graph(rules, rule_sequence)
    robot = build_normalized_robot(graph)

    # Simulation is only used to get link/joint transforms
    sim = rd.BulletSimulation()
    sim.add_robot(robot, [0.0, 0.0, 0.0], rd.Quaterniond(1.0, 0.0, 0.0, 0.0))

    obj_file_name = args.output_file
    mtl_file_name = os.path.splitext(args.output_file)[0] + '.mtl'
    with open(obj_file_name, 'w') as obj_file, \
         open(mtl_file_name, 'w') as mtl_file:
        dumper = ObjDumper(obj_file, mtl_file)
        # Reference the .mtl by basename only: the two files are written
        # side by side, and a path (possibly absolute) in the mtllib
        # directive would break if they are moved together. This matches
        # the viewer's terrain/robot export behavior.
        obj_file.write("mtllib {}\n".format(
            os.path.split(mtl_file_name)[-1]))
        dump_sim(sim, dumper)
        dumper.finish()
Example No. 2
0
def get_robot_image(robot, task):
  """Render one still image of `robot` in `task`'s terrain.

  If `robot` is None, only the terrain is rendered from a fixed default
  viewpoint. Returns the rendered image as an array (see
  GLFWViewer.render_array).
  """
  sim = rd.BulletSimulation(task.time_step)
  task.add_terrain(sim)
  viewer = rd.GLFWViewer()

  if robot is None:
    # No robot: fall back to a fixed camera looking at the terrain.
    viewer.camera_params.position = [1.0, 0.0, 0.0]
    viewer.camera_params.yaw = -np.pi / 3
    viewer.camera_params.pitch = -np.pi / 6
    viewer.camera_params.distance = 5.0
  else:
    init_pos, _ = presimulate(robot)
    # Rotate 180 degrees around the y axis so the base points to the right.
    sim.add_robot(robot, init_pos, rd.Quaterniond(0.0, 0.0, 1.0, 0.0))
    idx = sim.find_robot_index(robot)

    # Query the base transform and world-space AABB to frame the camera.
    base_tf = np.zeros((4, 4), order='f')
    lower, upper = np.zeros(3), np.zeros(3)
    sim.get_link_transform(idx, 0, base_tf)
    sim.get_robot_world_aabb(idx, lower, upper)
    viewer.camera_params.position = base_tf[:3, 3]
    viewer.camera_params.yaw = np.pi / 3
    viewer.camera_params.pitch = -np.pi / 6
    # Back the camera off proportionally to the robot's overall size.
    viewer.camera_params.distance = 2.0 * np.linalg.norm(upper - lower)

  viewer.update(task.time_step)

  return viewer.render_array(sim)
Example No. 3
0
def presimulate(robot):
  """Find an initial position that will place the robot on the ground behind
  the x=0 plane, and check if the robot collides in its initial
  configuration.

  Returns a `(position, has_collision)` pair.
  """
  sim = rd.BulletSimulation()
  # Same 180-degree y-axis rotation used by the viewers: base faces right.
  sim.add_robot(robot, np.zeros(3), rd.Quaterniond(0.0, 0.0, 1.0, 0.0))
  sim.step()
  idx = sim.find_robot_index(robot)
  lo, hi = np.zeros(3), np.zeros(3)
  sim.get_robot_world_aabb(idx, lo, hi)
  # Shift so the AABB's max-x face sits at x=0 and its min-y face touches
  # the ground plane.
  init_pos = [-hi[0], -lo[1], 0.0]
  return init_pos, sim.robot_has_collision(idx)
Example No. 4
0
def save_robot_video_frames(robot, task, opt_seed, thread_count, save_image_dir,
                            frame_interval):
  """Simulate `robot` performing `task` and save every `frame_interval`-th
  simulation step as a numbered PNG in `save_image_dir`.

  Args:
    robot: Robot design to simulate (project type).
    task: Task providing `time_step`, `interval`, and `add_terrain()`.
    opt_seed: Seed forwarded to `simulate` for trajectory optimization.
    thread_count: Worker-thread count forwarded to `simulate`.
    save_image_dir: Directory to write `frame_NNNN.png` files into
      (assumed to already exist -- TODO confirm, no makedirs here).
    frame_interval: Save one image every this many simulation steps.
  """
  # Optimize/replay first to obtain the joint input sequence to play back.
  input_sequence, _ = simulate(robot, task, opt_seed, thread_count)

  sim = rd.BulletSimulation(task.time_step)
  task.add_terrain(sim)
  viewer = rd.GLFWViewer()
  robot_init_pos, _ = presimulate(robot)
  # Rotate 180 degrees around the y axis, so the base points to the right
  sim.add_robot(robot, robot_init_pos, rd.Quaterniond(0.0, 0.0, 1.0, 0.0))
  robot_idx = sim.find_robot_index(robot)

  # Get robot position and bounds
  base_tf = np.zeros((4, 4), order='f')  # Fortran order, presumably required by the binding -- verify
  lower = np.zeros(3)
  upper = np.zeros(3)
  sim.get_link_transform(robot_idx, 0, base_tf)
  sim.get_robot_world_aabb(robot_idx, lower, upper)
  viewer.camera_params.position = base_tf[:3,3]
  viewer.camera_params.yaw = np.pi / 12
  viewer.camera_params.pitch = -np.pi / 12
  # Alternative camera angles kept from development:
  #viewer.camera_params.yaw = np.pi / 3
  #viewer.camera_params.pitch = -np.pi / 6
  # Camera distance scales with the robot's overall size.
  viewer.camera_params.distance = 2.0 * np.linalg.norm(upper - lower)

  frame_index = 0
  # Each column of input_sequence is held for task.interval simulation steps.
  for j in range(input_sequence.shape[1]):
    for k in range(task.interval):
      sim.set_joint_target_positions(robot_idx,
                                     input_sequence[:,j].reshape(-1, 1))
      sim.step()
      sim.get_link_transform(robot_idx, 0, base_tf)
      # Update camera position to track the robot smoothly
      # (exponential approach toward the base at rate 4.0 per second).
      camera_pos = viewer.camera_params.position.copy()
      camera_pos += 4.0 * task.time_step * (base_tf[:3,3] - camera_pos)
      viewer.camera_params.position = camera_pos
      viewer.update(task.time_step)
      if frame_index % frame_interval == 0:
        viewer.render(sim)
        # get_image() rows appear bottom-up; flip vertically for PIL.
        im_data = viewer.get_image()[::-1,:,:]
        im = Image.fromarray(im_data)
        im.save(os.path.join(save_image_dir,
                             f"frame_{frame_index // frame_interval:04}.png"))
      frame_index += 1
Example No. 5
0
def main():
    """Command-line entry point for the robot design viewer.

    Builds a robot from a grammar file and rule sequence, optionally
    optimizes a trajectory for the given task, shows it interactively, and
    can additionally export the input sequence (.csv), per-step meshes
    (.obj), or a video (.mp4).
    """
    parser = argparse.ArgumentParser(description="Robot design viewer.")
    parser.add_argument("task", type=str, help="Task (Python class name)")
    parser.add_argument("grammar_file", type=str, help="Grammar file (.dot)")
    parser.add_argument("rule_sequence",
                        nargs="+",
                        help="Rule sequence to apply")
    parser.add_argument("-o",
                        "--optim",
                        default=False,
                        action="store_true",
                        help="Optimize a trajectory")
    parser.add_argument("-s",
                        "--opt_seed",
                        type=int,
                        default=None,
                        help="Trajectory optimization seed")
    parser.add_argument("-e",
                        "--episodes",
                        type=int,
                        default=1,
                        help="Number of optimization episodes")
    parser.add_argument("-j",
                        "--jobs",
                        type=int,
                        required=True,
                        help="Number of jobs/threads")
    parser.add_argument("--input_sequence_file",
                        type=str,
                        help="File to save input sequence to (.csv)")
    parser.add_argument("--save_obj_dir",
                        type=str,
                        help="Directory to save .obj files to")
    parser.add_argument("--save_video_file",
                        type=str,
                        help="File to save video to (.mp4)")
    parser.add_argument("-l",
                        "--episode_len",
                        type=int,
                        default=128,
                        help="Length of episode")
    args = parser.parse_args()

    # Task is looked up by class name in the project's `tasks` module.
    task_class = getattr(tasks, args.task)
    task = task_class(episode_len=args.episode_len)
    graphs = rd.load_graphs(args.grammar_file)
    rules = [rd.create_rule_from_graph(g) for g in graphs]

    # Rule indices may carry trailing commas when pasted from a list.
    rule_sequence = [int(s.strip(",")) for s in args.rule_sequence]
    if args.opt_seed is not None:
        opt_seed = args.opt_seed
    else:
        # No seed given: draw one and print it so the run is reproducible.
        opt_seed = random.getrandbits(32)
        print("Using optimization seed:", opt_seed)

    graph = make_graph(rules, rule_sequence)
    robot = build_normalized_robot(graph)
    finalize_robot(robot)
    if args.optim:
        input_sequence, result = simulate(robot, task, opt_seed, args.jobs,
                                          args.episodes)
        print("Result:", result)
    else:
        # Without optimization there is no input sequence to replay/export.
        input_sequence = None

    if args.input_sequence_file and input_sequence is not None:
        import csv
        # One CSV row per time step (input_sequence columns are steps).
        with open(args.input_sequence_file, 'w', newline='') as input_seq_file:
            writer = csv.writer(input_seq_file)
            for col in input_sequence.T:
                writer.writerow(col)
        print("Saved input sequence to file:", args.input_sequence_file)

    robot_init_pos, has_self_collision = presimulate(robot)

    if has_self_collision:
        print("Warning: robot self-collides in initial configuration")

    main_sim = rd.BulletSimulation(task.time_step)
    task.add_terrain(main_sim)
    # Rotate 180 degrees around the y axis, so the base points to the right
    main_sim.add_robot(robot, robot_init_pos,
                       rd.Quaterniond(0.0, 0.0, 1.0, 0.0))
    robot_idx = main_sim.find_robot_index(robot)

    # Interactive viewing; returns the user's final camera parameters and
    # the step indices the user marked for recording (may be empty).
    camera_params, record_step_indices = view_trajectory(
        main_sim, robot_idx, input_sequence, task)

    if args.save_obj_dir and input_sequence is not None:
        import export_mesh

        if record_step_indices:
            print("Saving .obj files for {} steps".format(
                len(record_step_indices)))

        os.makedirs(args.save_obj_dir, exist_ok=True)

        # Save the props/terrain once
        obj_file_name = os.path.join(args.save_obj_dir, 'terrain.obj')
        mtl_file_name = os.path.join(args.save_obj_dir, 'terrain.mtl')
        with open(obj_file_name, 'w') as obj_file, \
             open(mtl_file_name, 'w') as mtl_file:
            dumper = export_mesh.ObjDumper(obj_file, mtl_file)
            # mtllib references the basename: .obj and .mtl sit side by side.
            obj_file.write("mtllib {}\n".format(
                os.path.split(mtl_file_name)[-1]))
            for prop_idx in range(main_sim.get_prop_count()):
                export_mesh.dump_prop(prop_idx, main_sim, dumper)
            dumper.finish()

        # Save the robot once per step
        def save_obj_callback(step_idx):
            # Export only user-recorded steps, or every 128th step when the
            # user recorded none.
            if record_step_indices:
                if step_idx not in record_step_indices:
                    return
            else:
                if step_idx % 128 != 0:
                    return

            obj_file_name = os.path.join(args.save_obj_dir,
                                         'robot_{:04}.obj'.format(step_idx))
            # Use one .mtl file for all steps
            mtl_file_name = os.path.join(args.save_obj_dir, 'robot.mtl')
            with open(obj_file_name, 'w') as obj_file, \
                 open(mtl_file_name, 'w') as mtl_file:
                dumper = export_mesh.ObjDumper(obj_file, mtl_file)
                obj_file.write("mtllib {}\n".format(
                    os.path.split(mtl_file_name)[-1]))
                export_mesh.dump_robot(robot_idx, main_sim, dumper)
                dumper.finish()

        run_trajectory(main_sim, robot_idx, input_sequence, task,
                       save_obj_callback)

    if args.save_video_file and input_sequence is not None:
        import cv2

        if record_step_indices:
            print("Saving video for {} steps".format(len(record_step_indices)))

        viewer = rd.GLFWViewer()

        # Copy camera parameters from the interactive viewer
        viewer.camera_params = camera_params

        tracker = CameraTracker(viewer, main_sim, robot_idx)

        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        writer = cv2.VideoWriter(args.save_video_file, fourcc, 60.0,
                                 viewer.get_framebuffer_size())
        writer.set(cv2.VIDEOWRITER_PROP_QUALITY, 100)

        def write_frame_callback(step_idx):
            tracker.update(task.time_step)

            # 240 steps/second / 4 = 60 fps
            if step_idx % 4 == 0:
                # Flip vertically, convert RGBA to BGR
                frame = viewer.render_array(main_sim)[::-1, :, 2::-1]
                writer.write(frame)

        run_trajectory(main_sim, robot_idx, input_sequence, task,
                       write_frame_callback)

        writer.release()
Example No. 6
0
 def make_sim_fn():
   """Build and return a fresh simulation with terrain and the robot added.

   NOTE(review): closure fragment -- `task`, `robot`, and `robot_init_pos`
   come from an enclosing scope not visible here; confirm against the full
   file.
   """
   sim = rd.BulletSimulation(task.time_step)
   task.add_terrain(sim)
   # Rotate 180 degrees around the y axis, so the base points to the right
   sim.add_robot(robot, robot_init_pos, rd.Quaterniond(0.0, 0.0, 1.0, 0.0))
   return sim