Example #1
# Assumes module-level `env` (gym's Pendulum-v0), a `Planner` class, and a
# ground-truth dynamics function `perfect_model`.
import math
from time import sleep

def plan():
    P = Planner(perfect_model)

    observation = env.reset()
    env.render()

    # define start and goal states
    start_state = [math.atan2(observation[1], observation[0]), observation[2]]
    goal_state = [0.0, 0.0]

    # plan a path using A*

    path = P.find_path(start_state=start_state, goal_state=goal_state)

    # skip the start node's action; execute the rest of the plan in order
    action_seq = path[1:, 2]
    for ind, act in enumerate(action_seq):
        ob, _, _, _ = env.step([act])
        state = [math.atan2(ob[1], ob[0]), ob[2]]
        print('Expected node ', path[ind + 1, :2])
        print('Visited node ', P.state_to_node(state))
        env.render()
        sleep(0.3)
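Both examples recover the planner state from the raw observation in the same way: gym's Pendulum-v0 observes `[cos(theta), sin(theta), theta_dot]`, and `math.atan2(sin, cos)` recovers `theta`. A self-contained sketch of that conversion (the helper name `obs_to_state` is ours, not from the original code):

import math

def obs_to_state(observation):
    """Convert a Pendulum-v0 observation [cos(theta), sin(theta), theta_dot]
    into the planner's [theta, theta_dot] state, with theta in (-pi, pi]."""
    theta = math.atan2(observation[1], observation[0])
    return [theta, observation[2]]

# The upright, motionless pendulum maps to the goal state used above:
print(obs_to_state([1.0, 0.0, 0.0]))  # -> [0.0, 0.0]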
Example #2
# Like Example #1 (same imports and module-level `env`/`Planner`), but plans
# with a learned dynamics model `M` and replans in a loop.
def plan():
    P = Planner(M.predict_using_model)

    observation = env.reset()
    env.render()

    # define start and goal states
    start_state = [math.atan2(observation[1], observation[0]), observation[2]]
    goal_state = [0.0, 0.0]

    # plan a path using A*

    path = P.find_path(start_state=start_state, goal_state=goal_state)

    # execute the plan minus its last 10 actions, then replan from wherever we end up
    action_seq = path[:, 2]
    action_seq = action_seq[:-10]
    for act in action_seq:
        ob, _, _, _ = env.step([act])
        state = [math.atan2(ob[1], ob[0]), ob[2]]
        env.render()
        sleep(0.2)

    # Loop to replan from current state
    while True:
        P = Planner(M.predict_using_model)
        start_state = state
        path = P.find_path(start_state=start_state, goal_state=goal_state)
        # again execute a truncated plan before the next replanning pass
        action_seq = path[:, 2]
        action_seq = action_seq[:-10]
        for act in action_seq:
            ob, _, _, _ = env.step([act])
            state = [math.atan2(ob[1], ob[0]), ob[2]]
            env.render()
            sleep(0.2)
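Neither example shows the `Planner` itself. For orientation, here is a minimal sketch of what the interface used above could look like: the constructor takes a one-step dynamics model (either `perfect_model` or `M.predict_using_model`), `find_path` runs A* over a discretized `(theta, theta_dot)` grid and returns rows of `[theta, theta_dot, action]`, and `state_to_node` maps a continuous state to its grid cell. The grid size, torque set, step cost, and heuristic here are all assumptions, and Example #3's variant, which also returns a step count, is omitted:

import heapq
import math

import numpy as np

class Planner:
    """Illustrative sketch only; the real Planner is not shown in these examples."""

    def __init__(self, model, scale_factor=1.0):
        # model(state, action) -> next [theta, theta_dot]
        self.model = model
        self.n_theta, self.n_vel, self.max_vel = 51, 51, 8.0
        # one plausible use of scale_factor: shrink the discrete torque set
        self.actions = [scale_factor * u for u in (-2.0, -1.0, 0.0, 1.0, 2.0)]

    def state_to_node(self, state):
        """Map a continuous [theta, theta_dot] state to an integer grid cell."""
        theta = (state[0] + math.pi) % (2 * math.pi)        # wrap to [0, 2*pi)
        i = int(theta / (2 * math.pi) * self.n_theta) % self.n_theta
        v = min(max(state[1], -self.max_vel), self.max_vel)
        j = int((v + self.max_vel) / (2 * self.max_vel) * (self.n_vel - 1))
        return (i, j)

    def _h(self, state, goal_state):
        # simple heuristic: weighted distance in (theta, theta_dot)
        return abs(state[0] - goal_state[0]) + 0.1 * abs(state[1] - goal_state[1])

    def find_path(self, start_state, goal_state):
        """A* over grid cells. Returns rows of [theta, theta_dot, action] from
        start to goal (row 0 is the start node, with a dummy 0.0 action), or
        None if the search space is exhausted."""
        start = self.state_to_node(start_state)
        goal = self.state_to_node(goal_state)
        g = {start: 0.0}                                # best cost so far per node
        back = {start: (None, 0.0, list(start_state))}  # node -> (parent, action, state)
        frontier = [(self._h(start_state, goal_state), start, list(start_state))]
        while frontier:
            _, node, state = heapq.heappop(frontier)
            if node == goal:
                rows = []                               # reconstruct via back-pointers
                while node is not None:
                    parent, act, s = back[node]
                    rows.append([s[0], s[1], act])
                    node = parent
                return np.array(rows[::-1])
            for act in self.actions:
                nxt = list(self.model(state, act))
                n2 = self.state_to_node(nxt)
                cost = g[node] + 1.0                    # unit cost per step
                if cost < g.get(n2, float("inf")):
                    g[n2] = cost
                    back[n2] = (node, act, nxt)
                    heapq.heappush(frontier, (cost + self._h(nxt, goal_state), n2, nxt))
        return None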
Example #3
# Like Example #1, but sweeps the planner's action-space scale factor and
# plots plan length against it; assumes matplotlib is imported as `plt`.
def plan():

    observation = env.reset()
    env.render()

    # define start and goal states
    start_state = [math.atan2(observation[1], observation[0]), observation[2]]
    goal_state = [0.0, 0.0]

    scale_factor = 1
    steps = []
    scale_axis = []

    # plan a path using A*, shrinking the action-space scale each iteration
    for i in range(10):
        P = Planner(perfect_model, scale_factor)

        path, n_steps = P.find_path(start_state=start_state,
                                    goal_state=goal_state)

        if path is None:
            print("Not able to compute path")
            n_steps = 0
            action_seq = []
        else:
            # skip the start node's action, as in Example #1
            action_seq = path[1:, 2]

        for act in action_seq:
            ob, _, _, _ = env.step([act])
            state = [math.atan2(ob[1], ob[0]), ob[2]]
            env.render()

        scale_axis.append(scale_factor)
        steps.append(n_steps)
        print(i, scale_factor, n_steps)
        scale_factor = scale_factor * 0.95

    plt.plot(scale_axis, steps)
    plt.xlabel("Action-space scale factor")
    plt.ylabel("Number of steps")
    plt.title("Action-space discretization vs. number of steps")
    plt.show()
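How `Planner` uses `scale_factor` is not shown. One plausible reading, purely an assumption here, is that it scales the discrete torque set the search branches over, so each sweep iteration plans with 5% weaker actions; the plot then relates that shrinking scale to plan length:

import numpy as np

# Hypothetical illustration: Pendulum-v0 torques lie in [-2, 2], and the
# planner could discretize scale_factor * that range.
base_actions = np.linspace(-2.0, 2.0, 5)
scale_factor = 1.0
for i in range(10):
    actions = scale_factor * base_actions
    print(i, np.round(actions, 3))
    scale_factor *= 0.95        # same geometric shrink as in Example #3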