Example 1
def main(time_step=0.01):
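    """Run the PR2 belief-space planning demo: repeatedly plan commands for the
    current belief state and execute them in the simulated "real world" until
    the goal is reached (empty plan) or planning fails (no plan found).
    (Descriptive docstring added; behavior inferred from the loop below.)"""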
    parser = create_parser()
    parser.add_argument('-teleport',
                        action='store_true',
                        help='Teleports between configurations')
    parser.add_argument('-viewer',
                        action='store_true',
                        help='enable the viewer while planning')
    # TODO: argument for selecting prior
    args = parser.parse_args()
    print('Arguments:', args)

    # TODO: nonuniform distribution to bias towards other actions
    # TODO: closed world and open world
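    # Connect to the PyBullet client that serves as the "real world", add the data
    # path, and construct the task plus its initial belief state
    # (localized and p_other appear to control the initial pose prior).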
    real_world = connect(use_gui=not args.viewer)
    add_data_path()
    task, state = get_problem1(localized='rooms',
                               p_other=0.25)  # surfaces | rooms
    for body in task.get_bodies():
        add_body_name(body)

    robot = task.robot
    #dump_body(robot)
    assert USE_DRAKE_PR2 == is_drake_pr2(robot)
    attach_viewcone(robot)  # Doesn't work for the normal pr2?
    draw_base_limits(get_base_limits(robot), color=(0, 1, 0))
    #wait_for_user()
    # TODO: partially observable values
    # TODO: base movements preventing pick without look

    # TODO: do everything in local coordinate frame
    # TODO: automatically determine an action/command cannot be applied
    # TODO: convert numpy arrays into what they are close to
    # TODO: compute whether a plan will still achieve a goal and do that
    # TODO: update the initial state directly and then just redraw it to ensure uniqueness
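    # Observe-plan-execute loop: plan commands for the current belief state in a
    # separate planning client, then apply them to the real world.
    # None means planning failed; an empty command list means the goal is already satisfied.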
    step = 0
    while True:
        step += 1
        print('\n' + 50 * '-')
        print(step, state)
        wait_for_user()
        #print({b: p.value for b, p in state.poses.items()})
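        # ClientSaver saves the active PyBullet client and restores it on exit,
        # so any client that plan_commands creates does not disturb the real-world connection.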
        with ClientSaver():
            commands = plan_commands(state, args)
        print()
        if commands is None:
            print('Failure!')
            break
        if not commands:
            print('Success!')
            break
        apply_commands(state, commands, time_step=time_step)

    print(state)
    wait_for_user()
    disconnect()
Example 2
def main(time_step=0.01):
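    """Run the PR2 belief-space planning demo with a fixed configuration
    (GUI enabled, p_other=0.5) instead of command-line arguments; plan and
    execute commands until success or failure.
    (Descriptive docstring added; behavior inferred from the loop below.)"""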
    # TODO: closed world and open world
    real_world = connect(use_gui=True)
    add_data_path()
    task, state = get_problem1(localized='rooms',
                               p_other=0.5)  # surfaces | rooms
    for body in task.get_bodies():
        add_body_name(body)

    robot = task.robot
    #dump_body(robot)
    assert USE_DRAKE_PR2 == is_drake_pr2(robot)
    attach_viewcone(robot)  # Doesn't work for the normal pr2?
    draw_base_limits(get_base_limits(robot), color=(0, 1, 0))
    #wait_for_interrupt()
    # TODO: partially observable values
    # TODO: base movements preventing pick without look

    # TODO: do everything in local coordinate frame
    # TODO: automatically determine an action/command cannot be applied
    # TODO: convert numpy arrays into what they are close to
    # TODO: compute whether a plan will still achieve a goal and do that
    # TODO: update the initial state directly and then just redraw it to ensure uniqueness
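    # Observe-plan-execute loop: None from plan_commands means planning failed;
    # an empty command list means the goal is already satisfied.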
    step = 0
    while True:
        step += 1
        print('\n' + 50 * '-')
        print(step, state)
        #print({b: p.value for b, p in state.poses.items()})
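        # Plan in a scratch client; ClientSaver restores the real-world client afterwards.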
        with ClientSaver():
            commands = plan_commands(state)
        print()
        if commands is None:
            print('Failure!')
            break
        if not commands:
            print('Success!')
            break
        apply_commands(state, commands, time_step=time_step)

    print(state)
    wait_for_interrupt()
    disconnect()