def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--player", help="player 1-6", type=int, default=1)
    args = parser.parse_args()

    config = Config(CONFIG_FILE, args.player)

    comms_config = {
        'rx_ip': config.client_ip,
        'rx_port': config.client_port,
        'tx_ip': config.sim_ip,
        'tx_port': config.sim_port
    }
    print("Rx at {}:{}".format(comms_config["rx_ip"], comms_config["rx_port"]))
    print("Tx to {}:{}".format(comms_config["tx_ip"], comms_config["tx_port"]))

    commands_mutex = Lock()

    # Launch comms in background thread
    comms = CommsThread(comms_config, False, commands_mutex)
    comms.daemon = True
    comms.start()

    # Launch perception, motion planning, and controls in main thread
    sweep_builder = SweepBuilder()
    perception = Perception(config)
    planning = Planning(config)
    controls = Controls(config)
    visualize = Visualize(config)

    try:
        while True:
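            # Combine the vehicle states buffered by the comms thread into a
            # single snapshot for this iteration (None when nothing is ready yet)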
            vehicle_state = sweep_builder.run(comms.get_vehicle_states())
            if vehicle_state is not None:
                t1 = time.time()

                world_state = perception.run(vehicle_state)
                plan = planning.run(world_state)
                vehicle_commands = controls.run(plan)

                t2 = time.time()
                freq = 1.0 / max(t2 - t1, 1e-6)  # avoid division by zero on very fast loops
                print(f"Running at {freq:.1f} Hz")

                vehicle_commands['draw'] = visualize.run(world_state, plan)
                with commands_mutex:
                    # hold the lock to prevent the Comms thread from
                    # sending the commands dict while we're modifying it
                    comms.vehicle_commands.update(vehicle_commands)
    except KeyboardInterrupt:
        pass
Example #2
    def setUp(self):
        self.config = Mock()
        self.config.alliance = "Blue"
        self.config.blue_goal_region = Polygon(
            make_square_vertices(side_length=0.5, center=(-2.5, -3.5)))
        self.config.blue_chute_pos = np.array([10, 10])
        self.config.occupancy_grid_num_cols = 6
        self.config.occupancy_grid_num_rows = 6
        self.config.occupancy_grid_cell_resolution = 1
        self.config.occupancy_grid_origin = (0, 0)
        self.config.occupancy_grid_dilation_kernel_size = 3
        self.config.ball_probability_decay_factor = 0
        self.config.ball_probability_growth_factor = 1
        self.config.ball_probability_threshold = 1
        self.config.obstacle_probability_decay_factor = 0
        self.config.obstacle_probability_growth_factor = 1
        self.config.obstacle_probability_threshold = 1
        self.config.lidar_deadzone_radius = 0.85

        self.planning = Planning(self.config)
Example #3
class TestRun(unittest.TestCase):
    def setUp(self):
        self.config = Mock()
        self.config.field_columns = []
        self.config.field_trenches = []
        self.config.alliance = "Blue"
        self.config.blue_goal_region = Polygon(
            make_square_vertices(side_length=0.5, center=(-2.5, -3.5)))
        self.config.blue_chute_pos = np.array([10, 10])
        self.config.occupancy_grid_num_cols = 6
        self.config.occupancy_grid_num_rows = 6
        self.config.occupancy_grid_cell_resolution = 1
        self.config.occupancy_grid_origin = (0, 0)
        self.config.occupancy_grid_dilation_kernel_size = 3
        self.config.ball_probability_decay_factor = 0
        self.config.ball_probability_growth_factor = 1
        self.config.ball_probability_threshold = 1
        self.config.obstacle_probability_decay_factor = 0
        self.config.obstacle_probability_growth_factor = 1
        self.config.obstacle_probability_threshold = 1

        self.planning = Planning(self.config)

    def test_run(self):
        world_state = {
            'pose': ((-2.5, -2.5), 0),
            'balls': [(2.5, 2.5)],
            'obstacles': [],
            'numIngestedBalls': 0,
        }

        self.planning.run(world_state)

        expected = 2
        actual = len(world_state['trajectory'])

        self.assertEqual(expected, actual)
Example #4
import rospy
import argparse
import matplotlib.pyplot as plt
import matplotlib.animation as animation

from modules.planning.proto import planning_pb2
from subplot_st_main import StMainSubplot
from subplot_path import PathSubplot
from subplot_sl_main import SlMainSubplot
from subplot_st_speed import StSpeedSubplot
from subplot_speed import SpeedSubplot
from localization import Localization
from planning import Planning

planning = Planning()
localization = Localization()


def update(frame_number):
    #st_main_subplot.show(planning)
    #st_speed_subplot.show(planning)
    map_path_subplot.show(planning, localization)
    dp_st_main_subplot.show(planning)
    qp_st_main_subplot.show(planning)
    speed_subplot.show(planning)
    sl_main_subplot.show(planning)
    st_speed_subplot.show(planning)


def planning_callback(planning_pb):
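    # Placeholder callback body (a sketch): hand the incoming message to the
    # shared Planning helper so update() can re-draw with fresh data. The
    # method name below is an assumption, not taken from this snippet.
    planning.update_planning_pb(planning_pb)


# Sketch of the wiring such a plotting script typically needs. The subplot
# objects referenced in update() (map_path_subplot, dp_st_main_subplot, ...)
# would be constructed from the figure first; the ROS topic name and protobuf
# message type below are assumptions.
if __name__ == '__main__':
    rospy.init_node('plot_planning', anonymous=True)
    rospy.Subscriber('/apollo/planning', planning_pb2.ADCTrajectory,
                     planning_callback)
    fig = plt.figure()
    ani = animation.FuncAnimation(fig, update, interval=100)
    plt.show()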
Example #5
def main(desired_iterations, save_path):
    # Define a log file to use with tensorboard
    # Not that we currently make use of tensorboard at all
    LOG_DIR = tempfile.mkdtemp()
    print "Tensorboard Log: " + LOG_DIR + '\n'

    # The directory to save the animations to
    SAVE_DIR = save_path

    # Define the simulation
    sim = Planning(get_noodle_environment())

    # Tensorflow!
    tf.reset_default_graph()
    session = tf.InteractiveSession()
    journalist = tf.train.SummaryWriter(LOG_DIR)
    brain = MLP([
        sim.observation_size,
    ], [200, 200, sim.num_actions], [tf.tanh, tf.tanh, tf.identity])
    optimizer = tf.train.RMSPropOptimizer(learning_rate=0.001, decay=0.9)

    # DiscreteDeepQ object
    current_controller = DiscreteDeepQ(sim.observation_size,
                                       sim.num_actions,
                                       brain,
                                       optimizer,
                                       session,
                                       random_action_probability=0.2,
                                       discount_rate=0.9,
                                       exploration_period=1000,
                                       max_experience=10000,
                                       store_every_nth=1,
                                       train_every_nth=1,
                                       summary_writer=journalist)

    # Initialize the session
    session.run(tf.initialize_all_variables())
    session.run(current_controller.target_network_update)
    # journalist.add_graph(session.graph)

    # Run the simulation and let the robot learn
    num_simulations = 0

    iterations_needed = []
    total_rewards = []

    try:
        for game_idx in range(desired_iterations + 1):
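            # Decay the exploration (random action) probability every 200 games:
            # subtract 0.1 while it is above 0.1, then 0.01 once it falls into
            # (0.01, 0.1].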
            current_random_prob = current_controller.random_action_probability
            update_random_prob = game_idx != 0 and game_idx % 200 == 0
            if update_random_prob and 0.01 < current_random_prob <= 0.1:
                current_controller.random_action_probability = current_random_prob - 0.01
            elif update_random_prob and 0.1 < current_random_prob:
                current_controller.random_action_probability = current_random_prob - 0.1
            game = Planning(get_noodle_environment())
            game_iterations = 0

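            # Standard DQN interaction loop: pick an action, collect the reward,
            # store the transition, and take one training step per simulation step.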
            observation = game.observe()
            while not game.is_over():
                action = current_controller.action(observation)
                reward = game.collect_reward(action)
                new_observation = game.observe()
                current_controller.store(observation, action, reward,
                                         new_observation)
                current_controller.training_step()
                observation = new_observation
                game_iterations += 1
            total_rewards.append(sum(game.collected_rewards))
            iterations_needed.append(game_iterations)
            rewards = []
            if game_idx % 50 == 0:
                print "\rGame %d:\nIterations before end: %d." % (
                    game_idx, game_iterations)
                if game.collected_rewards[-1] == 10:
                    print "Hit target!"
                print "Total Rewards: %s\n" % (sum(game.collected_rewards))
                if SAVE_DIR is not None:
                    game.save_path(SAVE_DIR, game_idx)

    except KeyboardInterrupt:
        print "Interrupted"

    # Plot the iterations and reward
    plt.figure(figsize=(12, 8))
    plt.plot(total_rewards, label='Reward')
    # plt.plot(iterations_needed, label='Iterations')
    plt.legend()
    plt.show()
Example #6
def main():
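    # Read the input/output directories from argv, falling back to the defaults
    # when the arguments are absent (mit is presumably more_itertools).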
    input_dir = mit.nth(sys.argv, 1, 'test_data')
    output_dir = mit.nth(sys.argv, 2, 'out/test_data')
    planning = Planning(input_dir, output_dir)
    planning.import_example_data()
    planning.setup_solver()
    planning.print_solver()
    planning.solve()
    planning.output_result()
    planning.export_results()
Example #7
    def __init__(self):
        self.mesh = None
        self.rob_parser = Rapid()
        self.planning = Planning()
Example #8
class RobPath():
    def __init__(self):
        self.mesh = None
        self.rob_parser = Rapid()
        self.planning = Planning()

    def load_mesh(self, filename):
        self.mesh = Mesh(filename)

    def translate_mesh(self, position):
        self.mesh.translate(position)

    def resize_mesh(self, size):
        self.mesh.scale(size / self.mesh.size)

    def set_track(self, height, width, overlap):
        self.track_height = height
        self.track_width = width
        self.track_overlap = overlap
        self.track_distance = (1 - overlap) * width

    def set_process(self, speed, power, focus):
        self.rob_parser.track_speed = speed
        self.rob_parser.power = power
        self.track_focus = focus

    def set_powder(self, carrier_gas, stirrer, turntable):
        self.rob_parser.carrier_gas = carrier_gas
        self.rob_parser.stirrer = stirrer
        self.rob_parser.turntable = turntable

    def init_process(self):
        self.k = 0
        self.path = []
        self.slices = []
        self.pair = False
        self.levels = self.mesh.get_zlevels(self.track_height)
        return self.levels

    def update_process(self, filled=True, contour=False):
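        # Slice the mesh at the current z-level and append a filled (raster)
        # path and/or a contour path; the raster direction alternates between
        # filled layers via self.pair.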
        tool_path = []
        slice = self.mesh.get_slice(self.levels[self.k])
        if slice is not None:
            self.slices.append(slice)
            if filled:
                tool_path = self.planning.get_path_from_slices(
                    [slice],
                    self.track_distance,
                    self.pair,
                    focus=self.track_focus)
                self.pair = not self.pair
                self.path.extend(tool_path)
            if contour:
                tool_path = self.planning.get_path_from_slices(
                    [slice], focus=self.track_focus)
                self.path.extend(tool_path)
        self.k += 1
        print('k, levels:', self.k, len(self.levels))
        return tool_path

    def save_rapid(self, filename='etna.mod', directory='ETNA'):
        routine = self.rob_parser.path2rapid(self.path)
        self.rob_parser.save_file(filename, routine)
        self.rob_parser.upload_file(filename, directory)
        print(routine)
Example #9
    action_costs = (10.0, 12.0, 12.0)
    path_followed = []  # nodes left to reach; each node is removed once it has been reached
    distance_tolerance = 0.2  # the next node counts as reached when we are closer to it than this distance

    start_time = time.time()

    try:
        while not goal_reached(robot_handle, goal, localized):

            if not localized and pf.localized:
                #we enter here only when we pass from non-localized to localized
                localized = True  #set the flag

                #and create the path
                planning = Planning(m, action_costs)
                path = planning.a_star((pf.centroid[0], pf.centroid[1]), goal)
                smoothed_path = planning.smooth_path(path,
                                                     data_weight=0.3,
                                                     smooth_weight=0.1)
                path_followed = smoothed_path.copy()
                planning.show(smoothed_path, block=False)
                print("localized at ")
                print(path_followed[0])
                #path_followed.pop(0)                     #delete the start node, as it has been already reached

            if not localized:
                #if we are not localized yet, we explore the map and try to get our particle filter converged
                v, w = navigation.explore(z_us, z_v, z_w)
                robot.move(v, w)
                z_us, z_v, z_w = robot.sense()
Example #10
class TestMotionPlanning(unittest.TestCase):
    def setUp(self):
        self.config = Mock()
        self.config.field_columns = []
        self.config.field_trenches = []
        self.config.occupancy_grid_num_cols = 6
        self.config.occupancy_grid_num_rows = 6
        self.config.occupancy_grid_cell_resolution = 1
        self.config.occupancy_grid_origin = (0, 0)
        self.config.occupancy_grid_dilation_kernel_size = 3
        self.config.ball_probability_decay_factor = 0
        self.config.ball_probability_growth_factor = 1
        self.config.ball_probability_threshold = 1
        self.config.obstacle_probability_decay_factor = 0
        self.config.obstacle_probability_growth_factor = 1
        self.config.obstacle_probability_threshold = 1
        self.config.alliance = "Blue"
        self.config.blue_goal_region = Polygon(
            make_square_vertices(side_length=0.5, center=(-2.5, -2.5)))
        self.config.blue_chute_pos = np.array([10, 10])

        self.pose = ((-2.5, -2.5), 0)
        self.goal = (2.5, 2.5)

        self.planning = Planning(self.config)

    def test_motion_planning_with_empty_world(self):
        world_state = {
            'obstacles': [],
            'pose': self.pose,
            'goal': self.goal,
            'flail': False,
        }

        self.planning.motion_planning(world_state)

        expected_trajectory_length = 2
        actual_trajectory_length = len(world_state['trajectory'])
        expected_occupancy_grid = np.zeros(shape=(6, 6))
        actual_occupancy_grid = world_state['grid'].occupancy

        self.assertEqual(expected_trajectory_length, actual_trajectory_length)
        np.testing.assert_array_equal(expected_occupancy_grid,
                                      actual_occupancy_grid)

    def test_motion_planning_avoids_obstacles(self):
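        # Mark a 4x4 block of grid cells as occupied so the planner has to
        # route around it, lengthening the trajectory from 2 to 4 waypoints.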
        self.planning.obstacle_grid.occupancy[1:5, 1:5] = 1

        world_state = {
            'obstacles': [],
            'pose': self.pose,
            'goal': self.goal,
            'flail': False,
        }

        self.planning.motion_planning(world_state)

        expected_occupancy_grid = np.zeros(shape=(6, 6), dtype=np.uint8)
        expected_occupancy_grid[1:5, 1:5] = np.ones(shape=(4, 4))
        actual_occupancy_grid = world_state['grid'].occupancy
        expected_trajectory_length = 4

        actual_trajectory_length = len(world_state['trajectory'])
        np.testing.assert_array_equal(expected_occupancy_grid,
                                      actual_occupancy_grid)
        self.assertEqual(expected_trajectory_length, actual_trajectory_length)

    def test_motion_planning_returns_none_when_no_feasible_trajectory(self):
        world_state = {
            'obstacles': [((1, 1), (2, 2))],
            'pose': self.pose,
            'goal': self.goal,
            'flail': False,
        }

        goal_cell = self.planning.obstacle_grid.get_cell(self.goal)
        self.planning.obstacle_grid.occupancy[goal_cell.indices] = 1
        self.planning.motion_planning(world_state)

        self.assertIsNone(world_state['trajectory'])

    def test_motion_planning_returns_trivial_plan_when_goal_reached(self):
        world_state = {
            'obstacles': [],
            'pose': self.pose,
            'goal': self.pose[0],
            'flail': False,
        }

        self.planning.motion_planning(world_state)

        expected = 2
        actual = len(world_state['trajectory'])
        self.assertEqual(expected, actual)

    def test_flail_returns_no_trajectory(self):
        world_state = {
            'obstacles': [],
            'pose': self.pose,
            'goal': self.pose[0],
            'flail': True,
        }

        self.planning.motion_planning(world_state)

        self.assertIsNone(world_state['trajectory'])
Example #11
class TestBehaviorPlanning(unittest.TestCase):
    def setUp(self):
        self.config = Mock()
        self.config.alliance = "Blue"
        self.config.blue_goal_region = Polygon(
            make_square_vertices(side_length=0.5, center=(-2.5, -3.5)))
        self.config.blue_chute_pos = np.array([10, 10])
        self.config.occupancy_grid_num_cols = 6
        self.config.occupancy_grid_num_rows = 6
        self.config.occupancy_grid_cell_resolution = 1
        self.config.occupancy_grid_origin = (0, 0)
        self.config.occupancy_grid_dilation_kernel_size = 3
        self.config.ball_probability_decay_factor = 0
        self.config.ball_probability_growth_factor = 1
        self.config.ball_probability_threshold = 1
        self.config.obstacle_probability_decay_factor = 0
        self.config.obstacle_probability_growth_factor = 1
        self.config.obstacle_probability_threshold = 1
        self.config.lidar_deadzone_radius = 0.85

        self.planning = Planning(self.config)

    def test_robot_runs_intake_and_goes_to_nearest_ball_while_far_from_goal_and_has_fewer_than_five_balls(
            self):
        world_state = {
            'pose': ((0, 0), 0),
            'numIngestedBalls': 0,
            'balls': [(2.5, 2.5)],
            'obstacles': []
        }

        self.planning.behavior_planning(world_state)

        expected = {'goal': (2.5, 2.5), 'direction': 1, 'tube_mode': 'INTAKE'}
        actual = {
            k: world_state[k]
            for k in ('goal', 'direction', 'tube_mode')
        }
        self.assertEqual(expected, actual)

    def test_robot_drives_to_goal_backwards_when_it_has_five_balls_and_is_far_from_goal(
            self):
        world_state = {
            'pose': ((0, 0), 0),
            'numIngestedBalls': 5,
            'balls': [(2.5, 2.5)],
            'obstacles': []
        }

        self.planning.behavior_planning(world_state)

        expected = {
            'goal': self.planning.scoring_zone,
            'direction': -1,
            'tube_mode': 'INTAKE',
            'flail': False,
        }
        actual = {
            k: world_state[k]
            for k in ('goal', 'direction', 'tube_mode', 'flail')
        }
        self.assertEqual(expected, actual)

    def test_robot_scores_when_it_has_five_balls_and_is_at_goal(self):
        world_state = {
            'pose': ((-2.5, -2.5), 0),
            'numIngestedBalls': 5,
            'balls': [(2.5, 2.5)],
            'obstacles': []
        }

        self.planning.behavior_planning(world_state)

        expected = {
            'goal': self.planning.scoring_zone,
            'direction': 0,
            'tube_mode': 'OUTTAKE',
            'flail': False,
        }
        actual = {
            k: world_state[k]
            for k in ('goal', 'direction', 'tube_mode', 'flail')
        }
        self.assertEqual(expected, actual)

    def test_robot_stays_at_goal_while_scoring(self):
        world_state = {
            'pose': ((-2.5, -2.5), 0),
            'numIngestedBalls': 4,
            'balls': [(2.5, 2.5)],
            'obstacles': []
        }

        self.planning.behavior_planning(world_state)

        expected = {
            'goal': self.planning.scoring_zone,
            'direction': 0,
            'tube_mode': 'OUTTAKE',
            'flail': False,
        }
        actual = {
            k: world_state[k]
            for k in ('goal', 'direction', 'tube_mode', 'flail')
        }
        self.assertEqual(expected, actual)

    def test_robot_flails_when_inside_obstacle(self):
        world_state = {
            'pose': ((0, 0), 0),
            'numIngestedBalls': 0,
            'balls': [],
            'obstacles': []
        }

        self.planning.obstacle_grid.occupancy[:, :] = 1
        self.planning.behavior_planning(world_state)

        self.assertTrue(world_state['flail'])
Example #12
class TestWorldStatePreprocessor(unittest.TestCase):
    def setUp(self):
        config = Mock()
        config.field_columns = []
        config.field_trenches = []
        config.alliance = "Blue"
        config.blue_goal_region = Polygon(
            make_square_vertices(side_length=0.5, center=(-2.5, -3.5)))
        config.blue_chute_pos = np.array([10, 10])
        config.occupancy_grid_num_cols = 6
        config.occupancy_grid_num_rows = 6
        config.occupancy_grid_cell_resolution = 1
        config.occupancy_grid_origin = (0, 0)
        config.occupancy_grid_dilation_kernel_size = 3
        config.ball_probability_decay_factor = 1
        config.ball_probability_growth_factor = 1
        config.ball_probability_threshold = 1
        config.obstacle_probability_decay_factor = 1
        config.obstacle_probability_growth_factor = 1
        config.obstacle_probability_threshold = 1
        config.lidar_deadzone_radius = 1.0

        self.planning = Planning(config)

    def test_robot_remembers_balls_within_lidar_deadzone(self):
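        # A ball first seen at (0.5, 0.5) lies inside the 1.0 m lidar deadzone
        # around the robot at the origin, so it should persist even when a
        # later scan no longer reports it.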
        world_state = {
            'pose': ((0, 0), 0),
            'balls': [(0.5, 0.5)],
            'obstacles': []
        }

        self.planning.preprocess_world_state(world_state)

        world_state = {
            'pose': ((0, 0), 0),
            'numIngestedBalls': 0,
            'balls': [],
            'obstacles': [],
        }

        self.planning.preprocess_world_state(world_state)

        expected = {
            'pose': ((0, 0), 0),
            'numIngestedBalls': 0,
            'balls': [(0.5, 0.5)],
            'obstacles': [],
        }
        actual = world_state
        self.assertEqual(expected, actual)

    def test_robot_forgets_balls_outside_lidar_deadzone(self):
        world_state = {
            'pose': ((0, 0), 0),
            'balls': [(2.5, 2.5)],
            'obstacles': []
        }

        self.planning.preprocess_world_state(world_state)

        world_state = {
            'pose': ((0, 0), 0),
            'numIngestedBalls': 0,
            'balls': [],
            'obstacles': [],
        }

        self.planning.preprocess_world_state(world_state)

        expected = {
            'pose': ((0, 0), 0),
            'numIngestedBalls': 0,
            'balls': [],
            'obstacles': [],
        }
        actual = world_state
        self.assertEqual(expected, actual)

    def test_robot_forgets_obstacles_that_appear_sporadically(self):
        world_state = {
            'pose': ((0, 0), 0),
            'balls': [],
            'obstacles': [((-1, -1), (1, 1))]
        }

        self.planning.preprocess_world_state(world_state)

        world_state = {
            'pose': ((0, 0), 0),
            'numIngestedBalls': 0,
            'balls': [],
            'obstacles': [],
        }

        self.planning.preprocess_world_state(world_state)

        expected = {
            'pose': ((0, 0), 0),
            'numIngestedBalls': 0,
            'balls': [],
            'obstacles': [],
        }
        actual = world_state
        self.assertEqual(expected, actual)
Example #13
FCInterface.start()
FCInterfaceThread.start()

SE = StateEstimator(sb)
SEThread = threading.Thread(
    target=SE.loop)  # could probably move thread into the StateEstimator class
SE.start()
SEThread.start()

Map = Mapping(sb)
MapThread = threading.Thread(
    target=Map.loop)  # could probably move thread into the Mapping class
Map.start()
MapThread.start()

Planner = Planning(sb)
PlannerThread = threading.Thread(
    target=Planner.loop)  # could probably move thread into the Planning class
Planner.start()
PlannerThread.start()

start_time = timelib.time()
time = start_time
while time < start_time + 10:
    print(f'time = {time}')
    print('')
    time = timelib.time()  # refresh the timestamp so the 10-second window can actually elapse

    # create a vpython box object above
    # publish vehicle states in lilsim.py
    # add a subscriber to get them here
    # update box states here with box.rotate() or something