Example #1
0
    def __init__(self):
        """Build a supervised (start, goal) -> next-step dataset from the
        RRT* plans stored under ../data_rrtstar.

        For every plan file, each state i is paired with a random subset of
        later states used as goals; the regression label is the plan's next
        state (i+1). Positions are normalized by the map extent (1788 x 1240)
        and the target displacement is scaled by 20.
        """
        folders = os.listdir("../data_rrtstar")
        start = []
        goal = []
        self.next = []
        # Fraction of the remaining plan states sampled as goals per step.
        sample_ratio = 0.1
        for f in folders:
            subfolder = os.path.join("../data_rrtstar", f)
            if not os.path.isdir(subfolder):
                continue

            plans = os.listdir(subfolder)
            for p in plans:
                # Plans are stored column-major; transpose to
                # (num_steps, state_dim).
                plan = np.loadtxt(os.path.join(subfolder, p)).T
                size = plan.shape[0]
                for i in range(size - 1):
                    # Random future indices in (i, size): draw from
                    # [0, size-i-1) without replacement, then shift by i+1.
                    samples = np.random.choice(size - i - 1,
                                               int(sample_ratio * (size - i)),
                                               replace=False) + (i + 1)
                    # Always include the immediate successor ...
                    if not (i + 1) in samples:
                        samples = np.concatenate([[i + 1], samples])
                    # ... and the final state of the plan.
                    if not (size - 1) in samples:
                        samples = np.concatenate([samples, [size - 1]])
                    # Repeat the current state once per sampled goal so the
                    # three arrays stay row-aligned.
                    start.append(plan[i][None].repeat(samples.shape[0],
                                                      axis=0))
                    goal.append(plan[samples])
                    self.next.append(plan[i + 1][None].repeat(samples.shape[0],
                                                              axis=0))
                # start.append(plan[:-1])
                # goal.append(plan[-1:].repeat(size-1, axis = 0))
                # self.next.append(plan[1:])

        # Keep only the (x, y) columns; drop heading/other state dims.
        start = np.concatenate(start, axis=0)[:, :2]
        planning_env = CarEnvironment("../map/map.png")

        # Sensor measurement at each start state, scaled by 1/4
        # (presumably the sensor's max reading -- TODO confirm against
        # get_measurement).
        self.observe = planning_env.get_measurement(start.T) / 4.
        start = start / np.array([1788, 1240])  # normalize by map extent
        goal = np.concatenate(goal, axis=0)[:, :2] / np.array([1788, 1240])
        self.start_goal = np.concatenate([start, goal], axis=1)
        self.next = np.concatenate(self.next, axis=0)[:, :2] / np.array(
            [1788, 1240])

        # Target: normalized displacement from start to next state, x20.
        self.next = (self.next - start[:, :2]) * 20
Example #2
0
def main():
    """Generate max_it RRT plans between random valid start/goal pairs and
    save each plan's actions, rollouts, and measurements to ../data_rrt/.

    Relies on module-level ``args`` (map path, planner bias) and the
    project's CarEnvironment / RRTPlannerNonholonomic classes.
    """
    planning_env = CarEnvironment(args.map)

    it = 0
    max_it = 5
    # makedirs(exist_ok=True) avoids the check-then-create race of the
    # previous os.path.exists / os.mkdir pair.
    os.makedirs("../data_rrt/", exist_ok=True)

    while True:
        print("working on %d " % it)

        # Rejection-sample a collision-free start state.
        while True:
            start = planning_env.sample()
            if planning_env.state_validity_checker(start):
                break
        print(start)

        # Rejection-sample a valid goal sufficiently far from the start.
        while True:
            goal = planning_env.sample()
            if planning_env.state_validity_checker(
                    goal) and planning_env.compute_distance(start, goal) > 200:
                break
        print(goal)

        planner = RRTPlannerNonholonomic(planning_env, bias=args.bias)
        plan, actions, rollouts = planner.Plan(start, goal)

        # Retry immediately on planning failure. Checking before
        # get_measurement avoids computing a measurement that would be
        # discarded (the original computed it first).
        if actions is None:
            continue

        measurement = planning_env.get_measurement(rollouts)

        # planning_env.init_visualizer()
        # planning_env.visualize_traj(rollouts, measurement)

        np.savetxt("../data_rrt/action_%d.txt" % it, actions)
        np.savetxt("../data_rrt/rollout_%d.txt" % it, rollouts)
        np.savetxt("../data_rrt/measure_%d.txt" % it, measurement)

        it += 1
        if it == max_it:
            break
Example #3
0
	# Rejection-sample 200 collision-free particle states from the map.
	particles = []
	while True:
		p = planning_env.sample()
		if planning_env.state_validity_checker(p):
			particles.append(p)
			if len(particles) >= 200:
				break

	particles = np.array(particles)
	# Add a batch axis and normalize (x, y) by the map extent (1788 x 1240);
	# the third (angle) component is divided by 1, i.e. left unscaled.
	particles = particles.squeeze()[None, ...] / np.array([1788, 1240, 1])
	
	planning_env.reset()
	state = planning_env.state

	# particles = np.array(particles).squeeze()
	# Encode the current observation (scaled by 1/4 -- presumably the
	# sensor max; verify) and let the DPF propose 100 particles.
	obs = planning_env.get_measurement(state)
	encoding = dpf.encoder(torch.FloatTensor(obs/4.0).to(device))
	particles_pro = dpf.propose_particles(encoding, 100).detach()


	#particles = state.repeat(16, axis = 1).T
	dpf.initial_particles(particles_pro)
	action = planning_env.sample_action()
	# Roll out a fixed action for 50 steps: update the filter each step,
	# print the error between the true state and the probability-weighted
	# particle mean, and render the (denormalized) particle cloud.
	for i in range(50):
		state, obs = planning_env.step_action(action)
		next_, prob = dpf.update(action, obs)
		pred = (next_ * prob[..., None]).sum(axis = 1)
		print(np.linalg.norm(state.T - pred.numpy()))
		planning_env.render(particles = dpf.particles.cpu().numpy()*np.array([1788, 1240, 1.0]), dt = 0.5)

Example #4
0
        # Accept the goal only if it is collision-free and at a moderate
        # distance from the start (200 < dist < 2000).
        if planning_env.state_validity_checker(
                goal) and dist > 200 and dist < 2000:
            break

    # start = np.array([[900, 1000, 0]]).T
    # goal = np.array([[1300, 600, 0]]).T

    # States are stored column-major; take the (x, y) rows, transpose, and
    # normalize by the map size (`size` is defined above this chunk).
    start = start[:2, :].T / size

    print(start)
    goal_ = goal[:2, :].T / size
    plan = []
    # The plan stores unnormalized (x, y) plus a zero angle column.
    plan.append(np.concatenate([start * size, [[0]]], axis=1))
    delta = np.zeros(2)
    # Iteratively query the network for a step toward the goal, for at
    # most 100 steps.
    for i in range(100):
        # Observation at the current (unnormalized) position, scaled by
        # 1/4 -- presumably the sensor max; verify against the dataset.
        obs = planning_env.get_measurement((start * size).T) / 4.0
        start_goal = np.concatenate([start, goal_], axis=1)
        delta = mpnet.predict(start_goal, obs)

        # Undo the x20 training scale on the predicted displacement and
        # steer toward it in map coordinates.
        start = planning_env.steerTo(start * size, delta / 20. * size)

        #start = start + next_ / 20.
        #print(next_)
        plan.append(np.concatenate([start, [[0]]], axis=1))

        if planning_env.goal_criterion(start.T, goal, 30, no_angle=True):
            print("goal reach!")
            break

        # Re-normalize for the next network query.
        start = start / size