Example #1
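# Dataset constructor: turns saved RRT* plans into (start, goal) -> next-waypoint
# training pairs, pairing each waypoint with a random subset of later waypoints.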
    def __init__(self):
        folders = os.listdir("../data_rrtstar")
        start = []
        goal = []
        self.next = []
        sample_ratio = 0.1
        for f in folders:
            subfolder = os.path.join("../data_rrtstar", f)
            if not os.path.isdir(subfolder):
                continue

            plans = os.listdir(subfolder)
            for p in plans:
                plan = np.loadtxt(os.path.join(subfolder, p)).T
                size = plan.shape[0]
                for i in range(size - 1):
                    # Draw a random subset of waypoints after i to act as goals,
                    # always including the immediate successor and the final state.
                    samples = np.random.choice(size - i - 1,
                                               int(sample_ratio * (size - i)),
                                               replace=False) + (i + 1)
                    if (i + 1) not in samples:
                        samples = np.concatenate([[i + 1], samples])
                    if (size - 1) not in samples:
                        samples = np.concatenate([samples, [size - 1]])
                    # One training pair per sampled goal: the current waypoint,
                    # the sampled goal, and the true next waypoint as the label.
                    start.append(plan[i][None].repeat(samples.shape[0],
                                                      axis=0))
                    goal.append(plan[samples])
                    self.next.append(plan[i + 1][None].repeat(samples.shape[0],
                                                              axis=0))
                # start.append(plan[:-1])
                # goal.append(plan[-1:].repeat(size-1, axis = 0))
                # self.next.append(plan[1:])

        # Keep only (x, y); measurements are taken at the unnormalized positions.
        start = np.concatenate(start, axis=0)[:, :2]
        planning_env = CarEnvironment("../map/map.png")

        self.observe = planning_env.get_measurement(start.T) / 4.

        # Normalize positions by the map dimensions (1788 x 1240).
        start = start / np.array([1788, 1240])
        goal = np.concatenate(goal, axis=0)[:, :2] / np.array([1788, 1240])
        self.start_goal = np.concatenate([start, goal], axis=1)
        self.next = np.concatenate(self.next, axis=0)[:, :2] / np.array(
            [1788, 1240])

        # Label: scaled offset from the current state to the next waypoint.
        self.next = (self.next - start) * 20
Example #2
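# Generate RRT training data: sample valid start/goal pairs, plan with the
# nonholonomic RRT planner, and save the actions, rollouts, and measurements.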
def main():
    planning_env = CarEnvironment(args.map)

    it = 0
    max_it = 5
    if not os.path.exists("../data_rrt/"):
        os.mkdir("../data_rrt/")

    while True:
        print("working on %d " % it)
        while True:
            start = planning_env.sample()
            if planning_env.state_validity_checker(start):
                break
        print(start)

        while True:
            goal = planning_env.sample()
            if planning_env.state_validity_checker(
                    goal) and planning_env.compute_distance(start, goal) > 200:
                break
        print(goal)
        planner = RRTPlannerNonholonomic(planning_env, bias=args.bias)
        plan, actions, rollouts = planner.Plan(start, goal)
        measurement = planning_env.get_measurement(rollouts)

        # planning_env.init_visualizer()
        # planning_env.visualize_traj(rollouts, measurement)

        # Planning failed; retry with a fresh start/goal pair.
        if actions is None:
            continue

        np.savetxt("../data_rrt/action_%d.txt" % it, actions)
        np.savetxt("../data_rrt/rollout_%d.txt" % it, rollouts)
        np.savetxt("../data_rrt/measure_%d.txt" % it, measurement)

        it += 1
        if it == max_it:
            break
Example #3
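# CLI setup: parse start/goal states and planner hyperparameters, then run the
# nonholonomic RRT* planner.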
    parser.add_argument('-s', '--start', nargs='+', type=float, required=True)
    parser.add_argument('-g', '--goal', nargs='+', type=float, required=True)
    parser.add_argument('-eps',
                        '--epsilon',
                        type=float,
                        default=1.0,
                        help='Epsilon for A*')
    parser.add_argument('-eta',
                        '--eta',
                        type=float,
                        default=1.0,
                        help='Eta for RRT/RRT*')
    parser.add_argument('-b',
                        '--bias',
                        type=float,
                        default=0.2,
                        help='Goal bias for RRT/RRT*')

    args = parser.parse_args()

    # First setup the environment and the robot.
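    # States are (x, y, theta) column vectors.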
    dim = 3
    args.start = np.array(args.start).reshape(dim, 1)
    args.goal = np.array(args.goal).reshape(dim, 1)
    planning_env = CarEnvironment(args.map, args.start, args.goal)

    # Next setup the planner
    planner = RRTSTARPlannerNonholonomic(planning_env, bias=args.bias)

    main(planning_env, planner, args.start, args.goal, args.planner)
Example #4
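# Run a trained DPF (differentiable particle filter) in the car environment,
# starting from particles sampled uniformly over free space.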
import sys
sys.path.append('../Env')
import numpy as np
from DPF import DPF
from CarEnvironment import CarEnvironment
import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

if __name__ == "__main__":

    dpf = DPF()
    dpf.load("../weights/DPFfocal.pt")

    planning_env = CarEnvironment("../map/map.yaml")
    planning_env.init_visualizer()
    dpf.env = planning_env

    # Rejection-sample 200 collision-free particles.
    particles = []
    while True:
        p = planning_env.sample()
        if planning_env.state_validity_checker(p):
            particles.append(p)
            if len(particles) >= 200:
                break

    # Normalize (x, y) by the map size; leave the heading unscaled.
    particles = np.array(particles)
    particles = particles.squeeze()[None, ...] / np.array([1788, 1240, 1])

    planning_env.reset()
    state = planning_env.state
Example #5
def main():
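    # Generate RRT* training plans: sample valid start/goal pairs, plan,
    # visualize, and save each plan to ../data_rrtstar/.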
    planning_env = CarEnvironment(args.map)

    it = 0
    max_it = 10

    if not os.path.exists("../data_rrtstar/"):
        os.mkdir("../data_rrtstar/")

    while True:
        print("working on %d " % it)
        while True:
            start = planning_env.sample()
            if planning_env.state_validity_checker(start):
                break

        #start = np.array([900 + 10*np.random.random() - 5, 1000 + 10*np.random.random(), np.random.random() * np.pi * 2])[None].T
        while True:
            goal = planning_env.sample()
            # Unlike the RRT data generator above, the start-goal distance is
            # computed but not used to filter goals here.
            dist = planning_env.compute_distance(start, goal)
            if planning_env.state_validity_checker(goal):
                break

        planner = RRTSTARPlannerNonholonomic(planning_env, bias=args.bias)
        plan, fail = planner.Plan(start, goal)

        if fail:
            continue

        planning_env.init_visualizer()
        tree = planner.tree
        planning_env.visualize_plan(plan, tree, None)

        np.savetxt("../data_rrtstar/plan_%d.txt" % it, plan)

        it += 1
        if it == max_it:
            break
Example #6
    #     tree = planner.tree
    # else:
    #     visited = planner.visited
    #planning_env.visualize_plan(plan, tree, visited)
    #plt.show()
    return cost, plan_time, goal_bool


# Run the (stochastic) planner ten times on a fixed start/goal pair to collect
# timing and cost statistics.
if __name__ == "__main__":
    times = []
    costs = []
    for i in range(10):

        start = np.array([40, 100, 4.71]).reshape((3, 1))
        goal = np.array([350, 150, 1.57]).reshape((3, 1))
        planning_env = CarEnvironment('car_map.txt', start, goal)
        # Next setup the planner
        bias = 0.05
        eta = 0.5
        plan = 'nonholrrt'
        if plan == 'rrt':
            planner = RRTPlanner(planning_env, bias, eta)
        elif plan == 'rrtstar':
            planner = RRTStarPlanner(planning_env, bias, eta)
        elif plan == 'nonholrrt':
            planner = RRTPlannerNonholonomic(planning_env, bias)
        else:
            print('Unknown planner option')
            exit(1)  # unknown planner is an error, so exit nonzero

        cost, plan_time, goal_bool = main(planning_env, planner, start, goal,
                                          plan)
        times.append(plan_time)
        costs.append(cost)
Example #7
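# Hyperparameters and entry point for training or evaluating a DDPG agent in
# the car environment.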
                    type=float)  # target smoothing coefficient
parser.add_argument('--alpha', default=0.2, type=float)

parser.add_argument('--capacity', default=100000,
                    type=int)  # replay buffer size
parser.add_argument('--hidden_dim', default=64, type=int)

parser.add_argument('--max_episode', default=10000, type=int)  # num of games
parser.add_argument('--last_episode', default=0, type=int)
parser.add_argument('--max_length_trajectory', default=50, type=int)
parser.add_argument('--print_log', default=100, type=int)
parser.add_argument('--exploration_noise', default=0.1, type=float)
parser.add_argument('--policy_delay', default=2, type=int)

parser.add_argument('--update_iteration', default=10, type=int)
parser.add_argument('--batch_size', default=128, type=int)  # mini batch size

# experiment related
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--exp_name', default='experiment')
args = parser.parse_args()

planning_env = CarEnvironment("../map/map.yaml")
agent = DDPG(args, planning_env)

if args.mode == "train":
    agent.train()
else:
    agent.load()
    agent.env.init_visualizer()
    agent.evaluate(10, True)
Example #8
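# Plan with a trained MPNet between a randomly sampled valid start/goal pair.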
import sys
sys.path.append('../Env')
import numpy as np
from MPNet import MPNet
from CarEnvironment import CarEnvironment

if __name__ == "__main__":
    mpnet = MPNet()
    mpnet.load()

    planning_env = CarEnvironment("../map/map.png")
    planning_env.init_visualizer()

    size = np.array([1788, 1240])

    while True:
        start = planning_env.sample()
        print(start)
        if planning_env.state_validity_checker(start):
            break

    while True:
        goal = planning_env.sample()
        print(goal)
        dist = planning_env.compute_distance(start, goal)
        # Keep the goal valid and at a moderate distance from the start.
        if planning_env.state_validity_checker(goal) and 200 < dist < 2000:
            break

    # start = np.array([[900, 1000, 0]]).T
    # goal = np.array([[1300, 600, 0]]).T
Example #9
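# Quick visual check of a saved RRT* plan.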
from CarEnvironment import CarEnvironment
import numpy as np

planning_env = CarEnvironment("../map/map.png")
#plan = np.loadtxt("data_rrtstar/plan_0.txt")
plan = np.loadtxt("../data_rrtstar/data_rrtstar1/plan_0.txt")
print(plan.shape)
planning_env.init_visualizer()
planning_env.visualize_plan(plan, None, None)
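# Drop into an interactive shell to inspect the plan and environment.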
import IPython
IPython.embed()