Example #1
  def init(cls) -> None:
    PowerMgmt.register_management_change_callback(cls._set_power_save_timeouts)
    cls._set_power_save_timeouts(PowerMgmt.get_plan()) # set the defaults
    Planner.plan(cls._check_time_to_power_save, True)

    Logging.add_logger(BleLogger())
    cls._start_ble() # allocate big blocks at the beginning; this should prevent memory fragmentation
Example #2
    def __init__(self, model, **kwargs):
        """Inits the Fitted Q-Iteration planner with discount factor, instantiated model learner, and additional parameters.

        Args:
            model: The model learner object
            gamma=1.0: The discount factor for the domain
            **kwargs: Additional parameters for use in the class.
        """
        Planner.__init__(self, model, **kwargs)
        self.fa_name = self.params.setdefault('basis', 'trivial')
        self.params.setdefault('iterations', 200)
        self.params.setdefault('support_size', 200)
        self.basis = None

        # Set up regressor
        learn_name = self.params.setdefault('regressor', 'linreg')
        if learn_name == 'linreg':
            self.learner = linear_model.LinearRegression()
        elif learn_name == 'ridge':
            self.learner = linear_model.Ridge(
                alpha=self.params.setdefault('l2', 0.5))
        elif learn_name == 'tree':
            self.learner = tree.DecisionTreeRegressor()
        elif learn_name == 'svm':
            self.learner = SVR()
        else:
            self.learner = None
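
A hypothetical instantiation of the planner above, sketching how the keyword arguments land in self.params (the class name FittedQIteration is an assumption, not from the source):

    # Hypothetical usage sketch; FittedQIteration and model are assumed names.
    planner = FittedQIteration(model,
                               basis='fourier',    # stored as self.fa_name
                               regressor='ridge',  # selects linear_model.Ridge
                               l2=1.0,             # ridge alpha (default 0.5)
                               iterations=300,     # default 200
                               support_size=200)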
Example #3
def plan():
    P = Planner(perfect_model)

    observation = env.reset()
    env.render()

    # define start and goal states
    start_state = [math.atan2(observation[1], observation[0]), observation[2]]
    goal_state = [0.0, 0.0]

    # plan a path using A*

    path = P.find_path(start_state=start_state, goal_state=goal_state)

    # action_seq = path[:, 2]
    # action_seq = action_seq[:-10]
    action_seq = path[1:, 2]
    for ind, act in enumerate(action_seq):
        ob, _, _, _ = env.step([act])
        state = [math.atan2(ob[1], ob[0]), ob[2]]
        print('Expected node ', path[ind + 1, :2])
        print('Visited node ', P.state_to_node(state))
        # print('\n')
        env.render()
        sleep(0.3)
Example #4
    def __init__(self, connection):
        super().__init__(connection)

        self.WAYPOINT_HIT_RADIUS_DURING = 5.0
        self.WAYPOINT_HIT_RADIUS_LANDING = 2.0

        self.waypoint_hit_radius = self.WAYPOINT_HIT_RADIUS_DURING

        self.in_mission = True
        self.takeoff_altitude = 3.0

        # create state diagram and set the initial state
        self.flight_state, self.state_diagram = self.create_state_diagram()

        self.plot = Plot()
        self.plan_status = PlanResult.NOT_PLANNED
        self.planner = Planner()

        self.current_waypoint = (0, 0, 0)  # current waypoint to be followed
        self.current_wp_index = 0  # index into the waypoint np.array
        self.all_waypoints = []  # all waypoints computed by the planner

        self.min_altitude = 5
        self.max_altitude = 50

        # 2D path returned by the planner. It carries the cost of reaching the
        # goal from each state and is used for 3D planning.
        self.path2d_with_cost = []

        self.landing_waypoint_index = 0  # index in the waypoints where landing starts. This is used for decreasing the WAYPOINT_HIT_RADIUS

        super().register_callback(MsgID.GLOBAL_POSITION,
                                  self.record_initial_gps)
Example #5
    def __init__(self):
        self.state = State.Init
        self.config = {
            'hover_height': 1.0,

            # States for which movement.fix_hover() will NOT be called
            # (to make sure the drone is at `hover_height`)
            'exclude_from_fix_hover': [
                State.Init,
                State.Takeoff,
                State.Land,
                State.Done,
            ],

            # Radius in meters around a blacklisted goal that the robot will ignore
            'blacklisted_goal_radius': 2.0,
        }

        self.store = DataStore()
        self.movement = MovementHandler(core=self, datastore=self.store)
        self.actions = Actions(core=self,
                               datastore=self.store,
                               movement_handler=self.movement)
        self.planner = Planner(core=self,
                               datastore=self.store,
                               movement_handler=self.movement)

        # Aux files
        self.temp_data = {}
        self.last_goal = None
Example #6
    def __init__(self, participant, n=20):
        f_temp = FingerController()
        f_temp.setup_mpcc(False)

        self.fingers = [FingerController() for i in range(n)]
        for f in self.fingers:
            f.setup_mpcc(False)

        self.planner = Planner()
        self.participant = participant
        filen = "fittslaw_simple_p{}.csv".format(participant)
        my_data = np.genfromtxt(filen, delimiter=',')
        self.my_data = my_data[2:]

        self.pixels_per_meter = (900 / 100) * 1e3

        self.pbounds = {
            'average_activation': (1e-1, 1e0),
            'max_motor_units': (1e2, 1e3),
            'w_contour': (1e3, 1e4),
            'w_input_t': (1e-3, 1e-1),
            'w_input_xy': (1e-3, 1e-1),
            'w_input_z': (1e-3, 1e-1),
            'w_lag': (1e3, 1e4),
            'w_vel': (1e1, 1e3),
        }

        # self.bounds_transformer = SequentialDomainReductionTransformer()
        self.optimizer = BayesianOptimization(
            f=self.cost,
            pbounds=self.pbounds,
            random_state=1,
            verbose=2,
            # bounds_transformer=self.bounds_transformer
        )
Example #7
def plan():
    map_ = load_map(MAP_PATH)
    coordinates = load_coordinates(COORDINATE_PATH)

    image = captureImage()
    if image is None:
        return jsonify({"error": "image is None"})
    is_empty_map = getEmptySpaces(image, coordinates)

    print(is_empty_map)
    for tile in map_:
        if not is_empty_map.get((tile.y, tile.x), True):
            tile.is_temporarily_blocked = True

    for tile in map_:
        print("~~~ R%dC%d  %s" % (tile.y, tile.x, tile.is_temporarily_blocked))

    robot, cars = parse_args(request.args)

    p = Planner()
    plan = p.plan(tiles=map_, cars=cars, robot=robot)
    return jsonify({
        "plan": plan,
        "is_empty": {str(k): bool(v)
                     for k, v in is_empty_map.items()}
    })
Example #8
  def _check_time_to_power_save(cls, wake_up):
    #micropython.mem_info(False)
    go_to_power_save = False
    try: # so that _check_time_to_power_save is not interrupted by a BLE issue
      if wake_up:
        PowerMgmt.block_power_save()
        if not cls._time_to_power_save: #can be reset from constructor
          cls._time_to_power_save = cls._running_time_up
        cls._start_ble()
        #cls._advertise() #just for sure it is turned on

        print("BLE power-save blocked")
      else:
        if cls._time_to_power_save: #if time was not reset externally (e.g. ble connection)
          cls._time_to_power_save -= 1
          if cls._time_to_power_save == 0: # the countdown has just run out
            go_to_power_save = True
            # BLE is disabled automatically when power save is activated and
            # enabled again when the program resumes, but it is not reliable
            # (advertisement is not started) - let's do it manually
            #cls.disconnect()
            cls._stop_ble()
            PowerMgmt.unblock_power_save()
            print("BLE power-save allowed")

    except Exception as error:
      print("BLE error")
      sys.print_exception(error)
      #micropython.mem_info(0)

    delay = cls._time_down if go_to_power_save else 1
    Planner.postpone(delay, cls._check_time_to_power_save, go_to_power_save)
Example #9
 def change_period(self, new_period):
   self._renew_period = new_period
   if self._renew_handle:
     Planner.kill_task(self._renew_handle)
     self._renew_handle = None
   if self._renew_period > 0:
     self._renew_handle = Planner.repeat(self._renew_period, self._update_value)
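
A usage sketch for change_period above (the instance name is assumed, not from the source): a non-positive period only cancels the repeating task, while a positive one re-registers it.

    # Hypothetical usage; `sensor` is an assumed instance of the class above.
    sensor.change_period(5)  # re-register _update_value every 5 time units
    sensor.change_period(0)  # kill the scheduled task and stop updating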
Example #10
def plan_view():
    map_ = load_map(MAP_PATH)
    coordinates = load_coordinates(COORDINATE_PATH)

    image = captureImage()
    if image is None:
        return jsonify({
            "error": "image is None"
        })
    is_empty_map = getEmptySpaces(image, coordinates, THRESHOLD)

    for tile in map_:
        if not is_empty_map.get((tile.row, tile.column), True):
            tile.is_temporarily_blocked = True

    robot, cars = parse_args(request.args)

    p = Planner()
    plan = p.plan(
        tiles=map_,
        cars=cars,
        robot=robot
    )
    return jsonify({
        "plan": plan,
        "is_empty": {f"R{k[0]}C{k[1]}": bool(v) for k, v in is_empty_map.items()}
    })
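
For illustration, a response from the view above would be shaped like this (values invented; the structure of each plan step depends on Planner.plan):

    {
        "plan": ["..."],
        "is_empty": {"R0C0": true, "R0C1": false}
    }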
Example #11
    def __init__(self, model, **kwargs):
        """Inits the Fitted Q-Iteration planner with discount factor, instantiated model learner, and additional parameters.

        Args:
            model: The model learner object
            gamma=1.0: The discount factor for the domain
            **kwargs: Additional parameters for use in the class.
        """
        Planner.__init__(self, model, **kwargs)
        self.fa_name = self.params.setdefault("basis", "trivial")
        self.params.setdefault("iterations", 200)
        self.params.setdefault("support_size", 200)
        self.basis = None

        # Set up regressor
        learn_name = self.params.setdefault("regressor", "linreg")
        if learn_name == "linreg":
            self.learner = linear_model.LinearRegression()
        elif learn_name == "ridge":
            self.learner = linear_model.Ridge(alpha=self.params.setdefault("l2", 0.5))
        elif learn_name == "tree":
            self.learner = tree.DecisionTreeRegressor()
        elif learn_name == "svm":
            self.learner = SVR()
        else:
            self.learner = None
Example #12
 def test_forking_can_return_fragment(self):
     self.planner = Planner(
         [self.u, self.z, self.w, self.v, self.x, self.y, self.a, self.b])
     data = self.planner.get_route_data()
     print(data['fragment'])
     self.assertTrue("zw" in data['fragment'] and "wv" in data['fragment']
                     and "ux" in data['fragment'] and
                     ("vy" in data['fragment'] or "va" in data['fragment']
                      or "vb" in data['fragment']))
Example #13
 def remove_trigger(self, handle):
   for listener in self._listeners:
     if listener[0] == handle:
       self._listeners.remove(listener)
       if not self._listeners:
         Planner.kill_task(self._renew_handle)
         self._renew_handle = None
       return True
   return False
Example #14
 def test_solve_dinner(self):
     planner = Planner()
     self.assertEqual(
         planner.solve('dinner/dinner.pddl', 'dinner/pb1.pddl'), [
             Action('cook', [], [['clean']], [], [['dinner']], []),
             Action('wrap', [], [['quiet']], [], [['present']], []),
             Action('carry', [], [['garbage']], [], [],
                    [['garbage'], ['clean']])
         ])
Example #15
    def command_request(self, data):
        if data:
            command = data[0]
            if command == _cmd_version:
                return self._b_true

            elif command == _cmd_stop_program:
                print("cmd_stop_program")
                if self.file_exists(self.events_file_name):
                    print("events_file will be renamed")
                    self.rename_file(self.events_file_name,
                                     "." + self.events_file_name)
                    print("events_file renamed")
                print("reboot planned")
                Planner.postpone(0.1, self._reboot)
                return self._b_true

            elif command == _cmd_start_program:
                return self._b_true if self._import_events() else self._b_false

            elif command == _cmd_get_next_file_info:
                if not self.dir_content:
                    self.dir_content = os.listdir("/")
                if self.dir_pos >= len(self.dir_content):
                    return self._b_false
                name = self.dir_content[self.dir_pos]
                self.dir_pos += 1
                return self._get_file_checksum(name) + name.encode("utf-8")

            elif command == _cmd_remove_file:
                filename = data[1:]
                self.remove_file(filename)
                return self._b_true

            elif command == _cmd_handle_file:
                self.handeled_file_path = data[1:]
                self.new_file = True
                return self._b_true

            elif command == _cmd_get_file_checksum:
                return self._get_file_checksum(self.handeled_file_path)

            elif command == _cmd_append:
                data = data[1:]
                mode = "wb" if self.new_file else "ab"
                self.new_file = False
                with open(self.handeled_file_path, mode) as file:
                    file.write(data)
                return self._b_true

            else:
                return None
Example #16
 def __init__(self, participant):
     self.finger = FingerController()
     self.finger.setup_mpcc(False)
     self.planner = Planner()
     self.participant = participant
     filen = "fittslaw_simple_p{}.csv".format(participant)
     my_data = np.genfromtxt(filen, delimiter=',')
     self.my_data = my_data[2:]
     self.pixels_per_meter = (900 / 100) * 1e3
     self.saved_data = []
Example #17
 def __init__(self):
     self.planner = Planner()  # creates a planner in the menu
     self.choices = {
         "1": self.show_tasks,
         "2": self.search_tasks,
         "3": self.add_task,
         "4": self.modify_task,
         "5": self.complete_task,
         "6": self.quit,
     }
Example #18
def main():
    p = Planner()

    plan = p.plan(
        None,
        cars=[Car(0, 3, Car.CarStatus.AWAITING_PARKING)],
        robot=Robot(2, 0)
    )
    print("PLAN")
    for step in plan:
        print(f"\t{step}")
Example #19
    def run(self):
        self.proxy.start()

        command = ""

        if self.mode == "physical":
            self.setup_output()
            self.planner.gnuplot_file = self.planner_paths_file

            print("Type \"begin\" to start run...")

            while command != "quit":
                if self.planner.finished:
                    break

                # This method only works on POSIX.
                stdin, o, e = select.select([sys.stdin], [], [], 1)

                if stdin:
                    command = sys.stdin.readline().strip()

                    if command == "begin":
                        if not self.planner.finished:
                            self.planner.start()
                    elif command == "quit":
                        self.planner.finished = True

            self.write_results()
        elif self.mode == "simulated":
            # Ignore specified start for now and just plan from every cell
            # that is not occupied excluding the goal.
            for x in range(self.map.cells_square):
                for y in range(self.map.cells_square):
                    if self.map.grid[x][y].state == 1 and (
                            x != self.map.goal_x or y != self.map.goal_y):
                        self.setup_output()

                        # Attempt to plan a path from this free cell.
                        self.robot.trail = []
                        self.robot.change_odometry(x * self.cell_size,
                                                   y * self.cell_size, 1.57)
                        self.algorithm.__init__(self.map)
                        self.planner = Planner(self.map, self.algorithm,
                                               self.proxy, self.output_file,
                                               self.planner_paths_file)
                        self.planner.start()

                        # Busy-wait until the planner reports it has finished.
                        while not self.planner.finished:
                            continue

                        self.write_results()

        self.lock.acquire()
Example #20
def main():
    employees = get_employees()
    settings = get_settings()
    preferences = get_preferences()
    date_from = get_date_from()
    date_to = get_date_to()

    planner = Planner(employees, settings)
    plan = planner.plan_month(date_from, date_to, preferences)

    printer = ConsolePrinter()
    printer.print_plan(plan)
Example #21
    def __init__(self, maze_dim, params=None):
        '''
        Use the initialization function to set up attributes that your robot
        will use to learn and navigate the maze. Some initial attributes are
        provided based on common information, including the size of the maze
        the robot is placed in.
        '''
        if params is None:  # avoid the shared mutable default argument
            params = []

        self.planner = Planner(maze_dim, params)
        self.maze_dim = maze_dim

        self.race = False #False: trial 1,  True: trial 2
        self.t = 0 #timestep
Example #22
    def __init__(self):
        # Init pygame
        pygame.init()

        # Create pygame window
        pygame.display.set_caption("Tello video stream")
        self.screen = pygame.display.set_mode([960, 360])

        # create object detector
        coco_frozen_graph_path = './ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph.pb'
        coco_labels_path = './ssd_mobilenet_v2_coco_2018_03_29/mscoco_label_map.pbtxt'
        self.coco_obj_detector = Object_Detector(coco_frozen_graph_path,
                                                 coco_labels_path,
                                                 debug_mode=True)

        # create face detector
        face_frozen_graph_path = './ssd_face_detection/frozen_inference_graph_face.pb'
        face_labels_path = './ssd_face_detection/face_label_map.pbtxt'
        self.face_obj_detector = Object_Detector(face_frozen_graph_path,
                                                 face_labels_path,
                                                 debug_mode=True)
        #create tracker object
        self.tracker = Object_Tracker(class_id=1)
        # create depth detector
        self.depth_detector = Depth_Detector()
        self.object_depth_detector = Object_Depth_Detector()
        #create planner
        self.planner = Planner()

        self.face_bounding_box = [
            0.3, 0.4, 0.5, 0.6, 0.1, 0.2
        ]  #[min_y, min_x, max_y, max_x, min_size, max_size]
        self.person_bounding_box = [
            0.45, 0.45, 0.55, 0.55, 0.3, 0.5
        ]  #[min_y, min_x, max_y, max_x, min_size, max_size]

        # Init Tello object that interacts with the Tello drone
        self.tello = Tello()

        # Drone velocities between -100~100
        self.for_back_velocity = 0
        self.left_right_velocity = 0
        self.up_down_velocity = 0
        self.yaw_velocity = 0
        self.speed = 10
        self.follow_human = False

        self.send_rc_control = False

        # create update timer
        pygame.time.set_timer(USEREVENT + 1, 50)
Example #23
class Robot(object):
    def __init__(self, maze_dim, params=None):
        '''
        Use the initialization function to set up attributes that your robot
        will use to learn and navigate the maze. Some initial attributes are
        provided based on common information, including the size of the maze
        the robot is placed in.
        '''
        if params is None:  # avoid the shared mutable default argument
            params = []

        self.planner = Planner(maze_dim, params)
        self.maze_dim = maze_dim

        self.race = False #False: trial 1,  True: trial 2
        self.t = 0 #timestep

    def next_move(self, sensors):
        '''
        Use this function to determine the next move the robot should make,
        based on the input from the sensors after its previous move. Sensor
        inputs are a list of three distances from the robot's left, front, and
        right-facing sensors, in that order.

        Outputs should be a tuple of two values. The first value indicates
        robot rotation (if any), as a number: 0 for no rotation, +90 for a
        90-degree rotation clockwise, and -90 for a 90-degree rotation
        counterclockwise. Other values will result in no rotation. The second
        value indicates robot movement, and the robot will attempt to move the
        number of indicated squares: a positive number indicates forwards
        movement, while a negative number indicates backwards movement. The
        robot may move a maximum of three units per turn. Any excess movement
        is ignored.

        If the robot wants to end a run (e.g. during the first training run in
        the maze) then returning the tuple ('Reset', 'Reset') will indicate to
        the tester to end the run and return the robot to the start.
        '''
        if self.race:
            rotation, movement = self.planner.race(self.t)
        else:
            rotation, movement = self.planner.get_next_move(self.t, sensors)
        self.t += 1

        if self.planner.found_goal and self.planner.found_shortest_path:
            self.race = True
            self.t = 0
            self.planner.visited_goal = False
            self.planner.found_goal = False

        return rotation, movement
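
A minimal driver loop for the interface documented in next_move above (the harness names are assumptions, not from the source):

    # Hypothetical harness; read_sensors is an assumed helper.
    robot = Robot(maze_dim=12)
    for _ in range(1000):  # cap the number of moves
        rotation, movement = robot.next_move(read_sensors(robot))
        if (rotation, movement) == ('Reset', 'Reset'):
            break  # training run over; the tester returns the robot to start
        # rotation is 0, +90 or -90 degrees; movement is at most 3 squares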
Example #24
 def __init__(self, model, logger, planning_time):
     self.model = model
     self.logger = logger
     self.planner = Planner(planning_time)
     if isnan(planning_time):
         planning_time = 0
     self.planning_time = planning_time
Example #25
def run_trial(planning_horizon):

    blocks_world_builder = BlocksWorldBuilder(blocks_world_size)
    ctrl = SimpleMonteCarloControl()
    planner = Planner(planning_horizon)
    mc = MonteCarlo(blocks_world_builder,
                    planner,
                    control=ctrl,
                    max_episode_length=blocks_world_size * 2,
                    planning_factor=0,
                    plan_on_empty_policy=True,
                    exploring_starts=True,
                    exploring_factor=0)

    mc.learn_policy(number_episodes=number_of_episodes,
                    show_progress_bar=True,
                    evaluate_return_ratio=False)

    data = pd.DataFrame({
        'episode': range(len(mc.returns)),
        #'return_ratio': mc.return_ratios,
        'observed_returns': mc.returns,
        #'optimal_returns': mc.optimal_returns
    })

    return data
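
Since run_trial returns a per-episode DataFrame, learning curves can be plotted directly (a sketch; only the column names come from the snippet above):

    # Hypothetical usage of run_trial above; a matplotlib backend is assumed.
    df = run_trial(planning_horizon=5)
    df.plot(x='episode', y='observed_returns')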
Example #26
 def __init__(self):
     self.config = PepperConfiguration(_ROBOT_NAME)
      if not self.config.isAvailable():
         Logger.err("ControlFlow", "checkAvailability", "name: " + self.config.Name + ", ip: " + self.config.Ip + " not reachable!")
         sys.exit(1) #Abort since robot is not available...
     self.robot = Robot(self.config)
     Speech(self.robot) #Initialize Speech (static class, no reference needed)
     DoorChecker(self.robot) #Initialize DoorChecker (static class, no reference needed)
     TabletHandler(self.robot) #Initialize TabletHandler (static class, no reference needed)
     Filetransfer(self.config) #Initialize Filetransfer (static class, no reference needed)
     #self.tracer = Tracer(self.robot) # enable to gather sensor data
     self.sensorhandler = SensorHandler(self.robot)
     self.planner = Planner()
     self.movement = Movement(self.robot)
     self.poscalib = PositionCalibrator(self.robot)
     self.currentpos = _START_COORDINATE
Example #27
def plan():

    observation = env.reset()
    env.render()

    # define start and goal states
    start_state = [math.atan2(observation[1], observation[0]), observation[2]]
    goal_state = [0.0, 0.0]

    scale_factor = 1
    steps = []
    scale_axis = []

    # plan a path using A*
    for i in range(10):
        P = Planner(perfect_model, scale_factor)

        path, n_steps = P.find_path(start_state=start_state,
                                    goal_state=goal_state)

        if path is None:
            print("Not able to compute path")
            n_steps = 0
        else:
            # action_seq = path[:, 2]
            # action_seq = action_seq[:-10]
            action_seq = path[1:, 2]
            for ind, act in enumerate(action_seq):
                ob, _, _, _ = env.step([act])
                state = [math.atan2(ob[1], ob[0]), ob[2]]
                # print('Expected node ', path[ind+1, :2])
                # print('Visited node ', P.state_to_node(state))
                # print('\n')
                env.render()

        scale_axis.append(scale_factor)
        steps.append(n_steps)
        print(i, scale_factor, n_steps)
        scale_factor = scale_factor * 0.95

    plt.plot(scale_axis, steps)
    plt.xlabel("% discrete action-space")
    plt.ylabel("No. steps")
    plt.title("Discretization of action space vs no. of steps")
    plt.show()
Example #28
def login():
	if request.method == 'POST':
		username = request.form['username']
		password = request.form['password']
		p = Planner('208')
		plans = p.build_plans(username, password)

		complete = []
		for plan in plans:
			plan_names = []
			for code in plan:
				plan_names.append(p.course_info['classes'][code]['name'])
			complete.append(plan_names)
		
		return render_template('index.html', plans=complete)

	return render_template('login.html')
Example #29
 def init_planner(self, init_plan_scheme=None):
     # Initialize the planner with the optimizer.
     r = self.traj.reward(self.reward.reward_th, fw=tt)
     self.optimizer = Maximizer(r, self.traj.u_th)
     self.planner = Planner(self.traj,
                            self.optimizer,
                            self.bounds,
                            name=self.name)
Example #30
def one_plan():
    model = new_model()
    plan, lam_x, lam_g = Planner.create_plan(model)
    plan, lam_x, lam_g = Planner.create_belief_plan(
        model, warm_start=True,
        x0=plan, lam_x0=lam_x, lam_g0=lam_g
    )
    x_all = plan.prefix['X']
    u_all = plan.prefix['U']
    eb_all = Simulator.simulate_eb_trajectory(model, u_all)

    # Plot
    fig, ax = plt.subplots()
    fig.tight_layout()
    handles = Plotter.plot_plan(ax, eb_all)
    ax.legend(handles=handles, loc='upper left')
    ax.set_aspect('equal')
    plt.show()
Example #31
    def __init__(self, domain, problem, logfile='/dev/null'):
        # Parser
        self.parser = PDDL_Parser()
        self.parser.parse_domain(domain)
        self.parser.parse_problem(problem)
        # Parsed data
        self.state = self.parser.state
        self.goal_pos = self.parser.positive_goals
        self.goal_not = self.parser.negative_goals

        self.history = []
        self.movelog = []

        self.logfile = logfile

        self.planner = Planner()
        # Do nothing
        if self.planner.applicable(self.state, self.goal_pos, self.goal_not):
            print('Puzzle is already solved! Double-check your problem file!')
Example #32
	def get_best_plan_with_keys_recursive(self,graph, keys):

		if keys == 0:
			planner = Planner()
			planner.graph = graph
			path = planner.solve(self.actual_position, [self.target])
			return path, self.keys_available - keys

		best_plan = None
		best_plan_dist = float('inf')
		best_keys_num = float('inf')
		removed_walls = []

		for node, walls in self.walls.items():
			aux = None
			for i,w in enumerate(walls):

				if walls[Orientation.up] == 1 and i == Orientation.up:
					aux = (node[0]+1,node[1], Orientation.up)
				elif walls[Orientation.left] == 1 and i == Orientation.left:
					aux = (node[0],node[1]-1, Orientation.left)
				elif walls[Orientation.down] == 1 and i == Orientation.down:
					aux = (node[0]-1,node[1], Orientation.down)
				elif walls[Orientation.right] == 1 and i == Orientation.right:
					aux = (node[0],node[1]+1, Orientation.right)

				if aux in graph.nodes:
					graph.add_edge( (node[0],node[1],i),aux)

					path, keys_used = self.get_best_plan_with_keys_recursive(graph,keys - 1)

					graph.disconect( (node[0],node[1],i),aux)

					if len(path) < best_plan_dist:
						best_plan_dist = len(path)
						best_keys_num = keys_used
						best_plan = path
					elif len(path) == best_plan_dist and keys_used < best_keys_num:
						best_plan_dist = len(path)
						best_keys_num = keys_used
						best_plan = path

		return best_plan, best_keys_num
Example #33
	def plan_action(self):
		planner = Planner()

		if self.current_state == RobotState.searching:
			planner.graph = self.graph
			path = planner.solve(self.actual_position, [self.target])

		elif self.current_state == RobotState.returning:
			planner.graph = self.original_graph
			path,keys_used = self.best_plan(self.original_graph, self.keys_available)

		self.current_plan = path

		if len(path) <= 1:
			return Action.nothing
		else:
			next_state = path[1]
			turn = next_state[2] - self.actual_position[2]

			if( not (next_state in self.graph.edges[self.actual_position]) and
					( abs(next_state[0] - self.actual_position[0]) +
						abs(next_state[1] - self.actual_position[1]) == 1 ) and turn == 0):

				if not self.waiting_for_door:
					print('Waiting For Door!')
					self.waiting_for_door = True
					return Action.open_door
				else:
					self.keys_available -= 1
					print('USING KEY! left:', self.keys_available)
					self.waiting_for_door = False

			if turn == 0:
				return Action.move
			elif turn == -1:
				return Action.turn_right
			elif turn == 1:
				return Action.turn_left
			elif turn == 3:
				return Action.turn_right
			elif turn == -3:
				return Action.turn_left
Example #34
	def plan_action(self):
		planner = Planner()
		planner.graph = self.graph
		path = planner.solve(self.location, self.goal)
		self.current_plan = path

		if len(path) <= 1:
			return False
		else:
			next_state = path[1]
			turn = next_state[2] - self.location[2]
			if turn == 0:
				return Action.move
			elif turn == -1:
				return Action.turn_right
			elif turn == 1:
				return Action.turn_left
			elif turn == 3:
				return Action.turn_right
			elif turn == -3:
				return Action.turn_left
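
The turn == 3 and turn == -3 branches above compensate for heading wraparound (headings are stored modulo 4). As a sketch, not the source's method, the same mapping can be collapsed into one normalization:

    # Fold any heading difference into {-1, 0, 1, 2}: +3 becomes -1, -3 becomes +1.
    turn = (next_state[2] - self.location[2] + 1) % 4 - 1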
Example #35
    def __init__(self, window, env, schedule, max_step):

        self.env = env
        self.schedule = schedule
        self.max_step = max_step
        self.steps = -1
        self.window = window
        self.traj = dict()
        self.agents_num = env.agents_num

        self.planner = Planner(env, schedule)

        self.poses = self.env.agents_pose
        self.goals = self.env.agents_goal
Example #36
    def run(self):
        self.proxy.start()

        command = ""

        if self.mode == "physical":
            self.setup_output()
            self.planner.gnuplot_file = self.planner_paths_file

            print("Type \"begin\" to start run...")

            while command != "quit":
                if self.planner.finished:
                    break

                # This method only works on POSIX.
                stdin, o, e = select.select([sys.stdin], [], [], 1)

                if stdin:
                    command = sys.stdin.readline().strip()

                    if command == "begin":
                        if not self.planner.finished:
                            self.planner.start()
                    elif command == "quit":
                        self.planner.finished = True

            self.write_results()
        elif self.mode == "simulated":
            # Ignore specified start for now and just plan from every cell
            # that is not occupied excluding the goal.
            for x in range(self.map.cells_square):
                for y in range(self.map.cells_square):
                    if self.map.grid[x][y].state == 1 and (x != self.map.goal_x or y != self.map.goal_y):
                        self.setup_output()

                        # Attempt to plan a path from this free cell.
                        self.robot.trail = []
                        self.robot.change_odometry(x * self.cell_size, y * self.cell_size, 1.57)
                        self.algorithm.__init__(self.map)
                        self.planner = Planner(self.map, self.algorithm, self.proxy, self.output_file,
                                               self.planner_paths_file)
                        self.planner.start()

                        # Busy-wait until the planner reports it has finished.
                        while not self.planner.finished:
                            continue

                        self.write_results()

        self.lock.acquire()
Example #37
def config_params_get_planner(params, sess=None):
    env_name = params['env_name']
    if 'make_env' not in params:

        def make_env():
            env = gym.make(env_name)
            return env

        params['make_env'] = make_env

    env = cached_make_env(params['make_env'])
    env.reset()
    if 'dims' not in params:
        obs, _, _, info = env.step(env.action_space.sample())
        dims = {
            'o': obs['observation'].shape[0],
            'u': env.action_space.shape[0],
            'g': obs['desired_goal'].shape[0],
        }
        for key, value in info.items():
            value = np.array(value)
            if value.ndim == 0:
                value = value.reshape(1)
            dims['info_{}'.format(key)] = value.shape[0]
        params['dims'] = dims
    else:
        dims = params['dims']
    planner_params = dict()
    planner_params.update(DEFAULT_PLANNER_PARAMS)

    for attr in ['hid_size', 'pln_batch_size', 'seq_len']:
        if attr in params:
            planner_params[attr] = params[attr]
    # write seq_len back into params for the rollout worker
    params['seq_len'] = planner_params['seq_len']

    logger.save_params(params=planner_params, filename='planner_params.json')
    sample_func = make_sample_plans()
    planner_params['batch_size'] = planner_params['pln_batch_size']
    del planner_params['pln_batch_size']
    planner_params.update({
        'inp_dim': dims['g'],
        'out_dim': dims['g'],
        'sample_func': sample_func
    })

    planner = Planner(**planner_params, use_mpi=True)

    return planner
Example #38
 def set_task(self,
              global_goal,
              raw=Plan.GRID,
              local=Plan.SIMPLE,
              drone_alt=5,
              safe_dist=3,
              prune_raw=True,
              prune_local=True,
              visualize=False,
              verbose=False,
              **kwargs):
     self.global_goal = global_goal
     self.planner = Planner("colliders.csv",
                            raw=raw,
                            local=local,
                            drone_alt=drone_alt,
                            safe_dist=safe_dist,
                            prune_raw=prune_raw,
                            prune_local=prune_local,
                            visualize=visualize,
                            verbose=verbose,
                            **kwargs)
     self.drone_alt = drone_alt
     self.safe_dist = safe_dist
Example #39
    def runPlannerOnce(self):
        """
        This method runs the planner once with the currently
        active PlaceNodes (active facts and goals).
        """

        # write active nodes to output folder
        active_facts_pdkb = output_path + "active_facts.pdkb"
        active_goals_pdkb = output_path + "active_goals.pdkb"

        # todo: add filtering
        factsPDDL = ""
        goalsPDDL = ""
        for i in self.memory.retreiveActiveItems():
            node = i.getObject()
            factsPDDL += node.dumpFactsToPDDL()
            goalsPDDL += node.dumpGoalsToPDDL()

        # write the facts PDKB file
        with open(active_facts_pdkb, 'w') as active_facts_file:
            for l in factsPDDL.split("\n"):
                active_facts_file.write(l + "\n")

        # write the goals PDKB file
        with open(active_goals_pdkb, 'w') as active_goals_file:
            for l in goalsPDDL.split("\n"):
                active_goals_file.write(l + "\n")

        # insert the PDKB files into the PDDL templates
        # todo: set the domain id as an option
        domain_template = current_domain_path + "domain-template.pddl"
        problem_template = current_domain_path + "problem-template.pddl"

        # run the planner
        p = Planner.run(current_domain_name, domain_template, problem_template,
                        [active_facts_pdkb],
                        [active_goals_pdkb])

        return p
Example #40
		if not robot.apply_action(action, observation):
			action = action + 1
			robot.apply_action(action, observation)
		observation = do_observation_front(robot)
		loc.add_observation(observation, action=action)
		robot.play_sound(2)
		print('ACTION:', action)
		print('PARTIAL LOCATIONS:', loc.locations)

	print('Final Location:', loc.locations)
	robot.wait(5)
	robot.play_sound(6)

	start = loc.locations.pop()

	# obtain the optimal path
	planifier = Planner()
	path, walls = planifier.do_planning(filename, start)

	last_state = path[0]
	for i in range(1, len(path)):
		print('\n Last state:', last_state)
		print(' Current state:', path[i])
		action = get_action(path[i], last_state)
		robot.apply_action(action, observation)
		observation = do_observation_front(robot)
		last_state = path[i]
		robot.play_sound(5)

	robot.play_sound(6)
Example #41
def solve(goals, world, holding, objects):
    
    planner = Planner(State(copy.deepcopy(world), holding, None, 0, None), goals)
    return planner.aSearch()
Example #42
    def mpc(cls, model, model_p):
        # cls: simulate first n_delay time-steps with zero controls
        u_all = model.u.repeated(ca.DMatrix.zeros(model.nu, model.n_delay))
        x_all = cls.simulate_trajectory(model, u_all)
        z_all = cls.simulate_observed_trajectory(model, x_all)
        b_all = cls.filter_observed_trajectory(model_p, z_all, u_all)

        # Store simulation results
        X_all = x_all.cast()
        Z_all = z_all.cast()
        U_all = u_all.cast()
        B_all = b_all.cast()

        # Advance time
        model.set_initial_state(x_all[-1], b_all[-1, 'm'], b_all[-1, 'S'])

        # Iterate until the ball hits the ground
        EB_all = []
        k = 0  # pointer to current catcher observation (= now - n_delay)
        while model.n != 0:
            # Reaction delay compensation
            eb_all_head = cls.simulate_eb_trajectory(
                model_p,
                model_p.u.repeated(U_all[:, k:k+model_p.n_delay])
            )
            model_p.set_initial_state(
                eb_all_head[-1, 'm'],
                eb_all_head[-1, 'm'],
                eb_all_head[-1, 'L'] + eb_all_head[-1, 'S']
            )
            if model_p.n == 0:
                break

            # Planner: plan for model_p.n time steps
            plan, lam_x, lam_g = Planner.create_plan(model_p)
            # plan, lam_x, lam_g = Planner.create_plan(
            #   model_p, warm_start=True,
            #   x0=plan, lam_x0=lam_x, lam_g0=lam_g
            # )
            belief_plan, _, _ = Planner.create_belief_plan(
                model_p, warm_start=True,
                x0=plan, lam_x0=lam_x, lam_g0=lam_g
            )
            u_all = model_p.u.repeated(ca.horzcat(belief_plan['U']))
            # u_all = model_p.u.repeated(ca.horzcat(plan['U']))

            # cls: simulate ebelief trajectory for plotting
            eb_all_tail = cls.simulate_eb_trajectory(model_p, u_all)

            # cls: execute the first action
            x_all = cls.simulate_trajectory(model, [u_all[0]])
            z_all = cls.simulate_observed_trajectory(model, x_all)
            b_all = cls.filter_observed_trajectory(
                model_p, z_all, [u_all[0]]
            )

            # Save simulation results
            X_all.appendColumns(x_all.cast()[:, 1:])
            Z_all.appendColumns(z_all.cast()[:, 1:])
            U_all.appendColumns(u_all.cast()[:, 0])
            B_all.appendColumns(b_all.cast()[:, 1:])
            EB_all.append([eb_all_head, eb_all_tail])

            # Advance time
            model.set_initial_state(x_all[-1], b_all[-1, 'm'], b_all[-1, 'S'])
            model_p.set_initial_state(
                model_p.b(B_all[:, k+1])['m'],
                model_p.b(B_all[:, k+1])['m'],
                model_p.b(B_all[:, k+1])['S']
            )
            k += 1
        return X_all, U_all, Z_all, B_all, EB_all
Example #43
        patch = mpatches.FancyArrow(0, 0.5 * height, width, 0,
                                    length_includes_head=True,
                                    head_width=0.75 * height, color='r')
        # patch = mpatches.Rectangle([x0, y0], width, height, facecolor='red',
        #                            edgecolor='black', hatch='xx', lw=3,
        #                            transform=handlebox.get_transform())
        handlebox.add_artist(patch)
        return patch



# ============================================================================
#                             Plan trajectory
# ============================================================================
# Find optimal controls
plan, lam_x, lam_g = Planner.create_plan(model)
# plan, lam_x, lam_g = Planner.create_plan(model, warm_start=True,
#                                          x0=plan, lam_x0=lam_x, lam_g0=lam_g)
x_all = plan.prefix['X']
u_all = plan.prefix['U']

# Simulate ebelief trajectory
eb_all = Simulator.simulate_eb_trajectory(model, u_all)

# Plot 2D
fig, ax = plt.subplots()
handles = Plotter.plot_plan(ax, eb_all)
ax.legend(handles=handles, loc='upper left')
ax.set_aspect('equal')
fig.tight_layout()
Example #44
class Tester(threading.Thread):
    def __init__(self, map_file):
        self.proxy = None
        self.robot = None

        self.map_file = map_file

        # Will be assigned in open_map().
        self.cell_size = None
        self.grid_size = None
        self.map = None

        self.algorithm = None
        self.planner = None

        self.mode = None

        self.output_file = None
        self.occupancy_file = None
        self.robot_path_file = None  # Stores actual path the robot took.
        self.planner_paths_file = None  # Stores all of the paths generated by the planner.

        self.output_path = None

        self.lock = None

        threading.Thread.__init__(self)

    def open_map(self):
        infile = Path(self.map_file).open()

        # Do not bother with any validation for now.
        self.grid_size = float(infile.readline())
        self.cell_size = float(infile.readline())

        # Addition of the boundary.
        #self.grid_size += self.cell_size
        self.grid_size = int(self.grid_size / self.cell_size)

        self.robot.cell_size = self.cell_size
        self.map = Map(self.robot, self.grid_size * self.cell_size, self.cell_size)

        y = self.grid_size - 1

        while y >= 0:
            line = infile.readline()

            for x in range(self.grid_size):
                if line[x] == "#":
                    self.map.grid[x][y].state = 2
                elif line[x] == "R":
                    self.robot.change_odometry(round(x * self.cell_size, 2), round(y * self.cell_size, 2), 1.57)

                    #print("Waiting for odometry change...")

                    # Wait for the odometry change to take effect.
                    #while self.robot.x != round(x * self.cell_size, 2) and self.robot.y != round(y * self.cell_size, 2):
                    #    continue

                    #print("Odometry change successful!")

                elif line[x] == "G":
                    self.map.goal_x = x
                    self.map.goal_y = y
                elif line[x] == " ":
                    self.map.grid[x][y].state = 1

            y -= 1

        infile.close()

    def setup_output(self):
        # Establish the path to the "output" directory.
        p = Path(str(Path(self.map_file).parents[0]) + "/output")

        # Check to see if the "output" directory actually exists.
        if not p.exists():
            # It does not exist so create it.
            p.mkdir()

        directory_path = str(Path(self.map_file).parents[0]) + "/output/" + str(Path(self.map_file).name) + \
            str(datetime.datetime.utcnow())

        # Create the directory for this run.
        p = Path(directory_path)

        if not p.exists():
            # It does not exist so create it.
            p.mkdir()

        self.output_path = directory_path

        # Create the general output file by tagging the ".output" extension to the existing file and a time stamp.
        file_string = directory_path + "/debug_info.output"
        p = Path(file_string)

        if p.exists():
            p.replace(file_string)
        else:
            p.touch()

        self.output_file = p.open(mode='w')

        # Do the same with the planner paths output.
        file_string = directory_path + "/paths.gnuplot"
        p = Path(file_string)

        if p.exists():
            p.replace(file_string)
        else:
            p.touch()

        self.planner_paths_file = p.open(mode='w+')

        # Do the same with the robot path output.
        file_string = directory_path + "/robot_path.gnuplot"
        p = Path(file_string)

        if p.exists():
            p.replace(file_string)
        else:
            p.touch()

        self.robot_path_file = p.open(mode='w+')

        # Do the same with the occupancy grid output.
        file_string = directory_path + "/occupancy.gnuplot"
        p = Path(file_string)

        if p.exists():
            p.replace(file_string)
        else:
            p.touch()

        self.occupancy_file = p.open(mode='w+')

    def handle_event(self, event):
        if isinstance(event, OdometryReport):
            did_change = self.robot.update_odometry(event)
        elif isinstance(event, StateEvent):
            self.robot.state = event.state

    def run(self):
        self.proxy.start()

        command = ""

        if self.mode == "physical":
            self.setup_output()
            self.planner.gnuplot_file = self.planner_paths_file

            print("Type \"begin\" to start run...")

            while command != "quit":
                if self.planner.finished:
                    break

                # This method only works on POSIX.
                stdin, o, e = select.select([sys.stdin], [], [], 1)

                if stdin:
                    command = sys.stdin.readline().strip()

                    if command == "begin":
                        if not self.planner.finished:
                            self.planner.start()
                    elif command == "quit":
                        self.planner.finished = True

            self.write_results()
        elif self.mode == "simulated":
            # Ignore specified start for now and just plan from every cell
            # that is not occupied excluding the goal.
            for x in range(self.map.cells_square):
                for y in range(self.map.cells_square):
                    if self.map.grid[x][y].state == 1 and (x != self.map.goal_x or y != self.map.goal_y):
                        self.setup_output()

                        # Attempt to plan a path from this free cell.
                        self.robot.trail = []
                        self.robot.change_odometry(x * self.cell_size, y * self.cell_size, 1.57)
                        self.algorithm.__init__(self.map)
                        self.planner = Planner(self.map, self.algorithm, self.proxy, self.output_file,
                                               self.planner_paths_file)
                        self.planner.start()

                        # Busy-wait until the planner reports it has finished.
                        while not self.planner.finished:
                            continue

                        self.write_results()

        self.lock.acquire()

    def write_results(self):
        # Write out the actual path the robot took.
        result_generator.write_paths(self.robot_path_file, [self.robot.trail])

        # Populate the occupancy file.
        for x in range(self.map.cells_square):
            for y in range(self.map.cells_square):
                if self.map.grid[x][y].state == 2:
                    # It's occupied; write this point out.
                    self.occupancy_file.write(str(x) + "\t" + str(y) + "\n")

        # Close files.
        self.output_file.close()
        self.occupancy_file.close()
        self.robot_path_file.close()
        self.planner_paths_file.close()

        result_generator.generate_gnuplot(self.output_path, self.algorithm.total_plan_steps, self.grid_size,
                                          GNU_PLOT_OUTPUT)
Example #45
    requests.post("http://7abacf62.ngrok.io", data=data)
    return ""


models_path = "static/models"
@app.route("/models/list")
def listModels(arg=None):
    models = []
    for entry in os.listdir(models_path):
        if entry != ".DS_Store":
            models.append(entry)
    return json.dumps(models)

@app.route("/plan/get")
def get_plan():
    robot, human = planner.get_plans()
    return json.dumps({"robot": robot, "human": human})

def execute_plan(plan):
    for item in plan:
        print(item)
        therblig = item["action"]
        print(therblig)
        time.sleep(float(item["duration"]))

if __name__ == '__main__':
    planner = Planner(True)
    robot, human = planner.get_plans()
    execute_plan(robot)
#    app.run(debug=True)
Example #46
                   'WED2': {'v': 2, 'u': 2},
                   'PrFm': {'v': 2, 'u': 2}
                   }

    source = AdUnisHSR()
    username, password = utils.parse_user_credentials('auth.cfg')
    response = source.signin(username, password)

    filters = [
                restrictions.InTimeRange(time(7, 0), time(18, 00)),
                restrictions.MinChance(30),
                restrictions.FreeTime(3, range(5), time(12), time(23)),
                # restrictions.FreeTime(1, range(5), time(6), time(23))
                ]

    planner = Planner(module_spec.keys(), source)
    solutions = planner.solve(filters)

    for solution in solutions:
        planner.verify(module_spec, solution)

    # Print max. 10 timetables
    nr_timetables_to_print = min(len(solutions), 10)
    for i in range(0, nr_timetables_to_print):
        lectures = []
        for val in solutions[i].values():
            lectures += val
        # printer.verify(lectures)
        printTimeTable(lectures)
        print('\n')
Example #47
class Simulator:

    def __init__(self, model, logger, planning_time):
        self.model = model
        self.logger = logger
        self.planner = Planner(planning_time)
        if isnan(planning_time):
            planning_time = 0
        self.planning_time = planning_time

    def run(self):
        # variables to run simulation
        simulation_time = 0
        new_knowledge = True
        planning_start = 0
        observation_whilst_planning = False
        predicted_model = None

        # variables used to record simulation
        planner_called = 0
        time_planning = 0
        time_waiting_for_actions_to_finish = 0
        time_waiting_for_planner_to_finish = 0
        executed = []

        # initial observations
        self.observe_environment(self.model)

        while new_knowledge and not self.is_goal_in_model():

            log.info("planning")
            if not observation_whilst_planning:
                plan, time_taken = self.planner.get_plan_and_time_taken(self.model)
            else:
                log.info("observation whilst planning, using predicted model")
                plan, time_taken = self.planner.get_plan_and_time_taken(predicted_model)
            self.logger.log_plan(plan)
            time_planning += time_taken
            planner_called += 1
            executed.append(Plan(planning_start, time_taken))
            if observation_whilst_planning:
                executed[-1].partial = True

            planning_finished = time_taken + planning_start
            if simulation_time > planning_finished:
                time_waiting_for_actions_to_finish += simulation_time - planning_finished
            else:
                time_waiting_for_planner_to_finish += planning_finished - simulation_time
            log.info("simulation_time {}", simulation_time)
            log.info("planning_finished {}", planning_finished)

            simulation_time = max(simulation_time, planning_finished)

            observers = self.observe_environment(self.model)
            if observers:
                raise ExecutionError("model in inconsistent state -- should be no possible observations")

            log.info("executing new plan")
            plan = self.adjust_plan(plan, simulation_time)
            original_model = deepcopy(self.model)
            pre_run_plan_simulation_time = simulation_time
            result = self.run_plan(self.model, plan, simulation_time, self.planning_time,
                            flawed_plan=observation_whilst_planning)

            simulation_time = result.simulation_time
            if observation_whilst_planning:
                planning_start = pre_run_plan_simulation_time
            elif result.observations:
                planning_start = min(result.observations)
            else:
                log.warning("no observations in run and no observations whilst planning -- goal should be achieved")
                planning_start = False
                new_knowledge = False
            observation_whilst_planning = any(obs > planning_start for obs in result.observations)
            if observation_whilst_planning:
                predicted_model = self.predict_model(plan, original_model, simulation_time-self.planning_time,
                    simulation_time)
            executed.extend(result.executed)

        log.info("simulation finished")

        goal_achieved = self.is_goal_in_model()
        log.info("Goal achieved: {}", goal_achieved)
        log.info("Planner called: {}", planner_called)
        log.info("Total time taken: {}", simulation_time)
        log.info("Time spent planning: {}", time_planning)
        log.info("time_waiting_for_actions_to_finish {}", time_waiting_for_actions_to_finish)
        log.info("time_waiting_for_planner_to_finish {}", time_waiting_for_planner_to_finish)

        self.logger.log_property("goal_achieved", goal_achieved)
        self.logger.log_property("planner_called", planner_called)
        self.logger.log_property("end_simulation_time", simulation_time)
        self.logger.log_property("total_time_planning", time_planning)
        self.logger.log_property("time_waiting_for_actions_to_finish", time_waiting_for_actions_to_finish)
        self.logger.log_property("time_waiting_for_planner_to_finish", time_waiting_for_planner_to_finish)
        executed_str = "[{}]".format(", ".join(str(action) for action in executed if type(action) is not Observe))
        self.logger.log_property("execution", executed_str, stringify=repr)

        log.info("remaining temp nodes: {}",
            [(name, node) for name, node in self.model["nodes"].items() if name.startswith("temp")])

        return goal_achieved

    @staticmethod
    def agents(action) -> set:
        if type(action) is ExtraClean:
            return {action.agent0, action.agent1}
        return {action.agent}

    @staticmethod
    def observe_environment(model):
        # create Observe action for each agent in model
        actions = [Observe(None, agent_name, agent["at"][1])
                for agent_name, agent in model["agents"].items()]
        # apply observation and collect those agents that observed something new
        return dict(
            (action.agent, action.node) for action in actions if action.apply(model)
        )

    def adjust_plan(self, plan, start_time):
        return tuple(self._adjust_plan_helper(plan, start_time))

    @staticmethod
    def _adjust_plan_helper(plan, start_time):
        for action in plan:
            # adjust for OPTIC starting at t = 0
            action.start_time = action.start_time + start_time
            yield action
            if type(action) is Move:
                yield Observe(action.end_time, action.agent, action.end_node)

    def predict_model(self, plan, model, first_observation, planning_finished):
        log.debug("predict_model() first_observation={}, planning_finished={}", first_observation, planning_finished)
        execution_queue = PriorityQueue()
        for action in plan:
            if type(action) is not Observe or action.end_time <= first_observation:
                execution_queue.put(ActionState(action))

        _result, stalled = self.execute_action_queue(model, execution_queue,
                break_on_new_knowledge=True, deadline=first_observation)

        model_hypothesis = self.convert_to_hypothesis_model(model)

        result, stalled = self.execute_action_queue(model_hypothesis, execution_queue,
                break_on_new_knowledge=False, deadline=planning_finished, stalled=stalled)

        assert result.observations == set()

        self.execute_partial_actions(self.get_executing_actions(execution_queue.queue, planning_finished),
                model_hypothesis, planning_finished)

        return model_hypothesis

    @staticmethod
    def get_executing_actions(action_states, deadline):
        return list(action_state.action for action_state in action_states
                if action_state.state == ExecutionState.executing
                    and action_state.action.start_time < deadline)

    def run_plan(self, model, plan, simulation_time, execution_extension, *, flawed_plan=False):
        log.debug("run_plan() flawed_plan={}", flawed_plan)
        execution_queue = PriorityQueue()
        for action in plan:
            execution_queue.put(ActionState(action))

        if not flawed_plan:
            # execute main plan
            result1, stalled = self.execute_action_queue(model, execution_queue,
                                    break_on_new_knowledge=True, deadline=Decimal("infinity"))
            simulation_time = result1.simulation_time
        else:
            result1 = ExecutionResult(executed=[], observations=set(), simulation_time=simulation_time)
            stalled = {}

        if execution_queue.empty():
            return result1

        self.post_observation_strategy()

        observation_time = simulation_time

        # execute actions during re-plan phase
        deadline = observation_time + execution_extension
        result2, stalled = self.execute_action_queue(model, execution_queue, break_on_new_knowledge=False,
                deadline=deadline, stalled=stalled)

        # add stalled actions
        stalled_actions = list(Stalled(stalled_time, deadline-stalled_time, agent_name)
                for agent_name, stalled_time in stalled.items())

        # attempt to partially execute actions in mid-execution
        mid_executing_actions = self.get_executing_actions(execution_queue.queue, deadline)

        # mid execution action with zero duration is a bug
        assert not any(action for _t, state, action in execution_queue.queue
                if state == ExecutionState.executing and action.duration == 0)

        half_executed_actions = self.execute_partial_actions(mid_executing_actions, model, deadline)

        executed = result1.executed + result2.executed + half_executed_actions + stalled_actions
        observations = result1.observations | result2.observations
        simulation_time = max(a.end_time for a in executed)

        return ExecutionResult(executed, observations, simulation_time)

    def execute_action_queue(self, model, execution_queue, *, break_on_new_knowledge=True, deadline, stalled=None):
        """Executes queued actions in time order until the queue drains or the deadline passes.

        Returns an ExecutionResult plus a dict of agents that stalled because
        their action was no longer applicable when it was due to start.
        """
        log.debug("execute_action_queue() break={}, deadline={}", break_on_new_knowledge, deadline)
        simulation_time = None
        executed = []
        observations = set()
        if stalled is None:
            stalled = {}

        while not execution_queue.empty():
            action_state = execution_queue.get()
            if action_state.time > deadline:
                execution_queue.put(action_state)
                break
            if self.agents(action_state.action).intersection(stalled):
                continue

            time, state, action = action_state
            simulation_time = time
            if state == ExecutionState.executing:
                action_state.finish()
                new_knowledge = action.apply(model)
                executed.append(action)
                if new_knowledge:
                    observations.add(action.end_time)
                if break_on_new_knowledge and new_knowledge:
                    # allow other concurrently finishing actions to finish rather than immediate break
                    deadline = time
            elif not action.is_applicable(model):
                # order is such that action gets rejected before it starts, but is not rechecked after it has been
                # started
                if break_on_new_knowledge and time < deadline:
                    # an inapplicable action before the deadline means the model
                    # diverged from what the plan expected, which is an error;
                    # actions starting exactly at the deadline may legitimately
                    # stall instead, so those are not reported
                    raise ExecutionError("action expects model to be in different state -- {}".format(action))
                log.debug("{} has stalled", self.agents(action))
                stalled.update((agent, action.start_time) for agent in self.agents(action))
            elif state == ExecutionState.pre_start:
                action_state.start()
                execution_queue.put(action_state)
            else:
                raise ExecutionError("action in unknown state")

        return ExecutionResult(executed, observations, simulation_time), stalled

    @staticmethod
    def execute_partial_actions(mid_execution_actions, model, deadline):
        log.debug("execute_partial_actions() deadline={}", deadline)
        generator = (action.partially_apply(model, deadline)
            for action in mid_execution_actions)

        result = [a for a in generator if a]
        for a in result:
            log.info("partial: {}", a)
        return result

    def is_goal_in_model(self):
        goal = [tuple(g) for g in self.model["goal"]["hard-goals"]]
        it = ((obj_name, value.get("known", value))
              for obj_name, value in chain(self.model["agents"].items(), self.model["nodes"].items()))
        for obj_name, values in it:
            for pred_name, args in values.items():
                g = self.cons_goal(pred_name, obj_name, args)
                if g in goal:
                    goal.remove(g)

        return not goal

    def cons_goal(self, pred_name, obj_name, args):
        return (pred_name,) + self.substitute_obj_name(obj_name, args)

    @staticmethod
    def substitute_obj_name(obj_name, args):
        if isinstance(args, Iterable):
            return tuple(obj_name if a is True else a for a in args)
        else:
            # single-element tuple; parentheses make the trailing comma explicit
            return (obj_name if args is True else args,)

    @staticmethod
    def convert_to_hypothesis_model(model):
        assumed_values = model["assumed-values"]
        for node in model["nodes"].values():
            if "known" in node:
                node["known"].update({
                    key: unknown_value_getter(value, key, assumed_values)
                    for key, value in node["unknown"].items()
                })
                node["unknown"].clear()
        return model
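
A minimal, self-contained sketch of the goal-matching idea behind is_goal_in_model and substitute_obj_name above. The toy model layout is an assumption for illustration, not the simulator's exact schema:

# Hedged sketch: a goal predicate stores True where the owning object's own
# name belongs, so ("cleaned", "n1") is satisfied once node "n1" carries
# {"cleaned": True} in the model.
def substitute_obj_name(obj_name, args):
    # replace the literal True placeholder with the owning object's name
    if isinstance(args, (list, tuple)):
        return tuple(obj_name if a is True else a for a in args)
    return (obj_name if args is True else args,)

def goal_satisfied(model, hard_goals):
    remaining = [tuple(g) for g in hard_goals]
    for obj_name, values in model.items():
        for pred_name, args in values.items():
            fact = (pred_name,) + substitute_obj_name(obj_name, args)
            if fact in remaining:
                remaining.remove(fact)
    return not remaining  # every hard goal was matched by some fact

toy_model = {"n1": {"cleaned": True}}
print(goal_satisfied(toy_model, [("cleaned", "n1")]))  # True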
Ejemplo n.º 48
0
class MyBot:
    def __init__(self, gamestate):
        # instance attributes set here persist between turns
        self.gamestate = gamestate
        self.planner_time = gamestate.turntime / 2
    
    # do_setup is run once at the start of the game
    # after the bot has received the game settings
    def do_setup(self):
        # initialize data structures after learning the game settings
        self.strat_influence = Influence(self.gamestate, STRAT_DECAY)
        self.planner = Planner(self.gamestate, self.strat_influence)
    
    def log_turn(self, turn_no):
        if DETAIL_LOG and os.path.isdir('pickle'):
            # dump gamestate
            pickle_file = open('pickle/turn_' + str(self.gamestate.current_turn) + '.gamestate', 'wb')
            pickle.dump(self.gamestate, pickle_file)
            pickle_file.close()
            
            # dump influence map value
            pickle_file = open('pickle/turn_' + str(self.gamestate.current_turn) + '.influence', 'wb')
            pickle.dump(self.strat_influence, pickle_file)
            pickle_file.close()
    
    # do_turn is run once per turn
    def do_turn(self):
        logging.debug('turn ' + str(self.gamestate.current_turn))
        
        # detailed logging
        self.log_turn(self.gamestate.current_turn)
        
        # handle combat
        self.issue_combat_task()
        
        plan_start = self.gamestate.time_remaining()
        # decay strategy influence
        logging.debug('strat_influence.decay().start = %s' % str(self.gamestate.time_remaining())) 
        self.strat_influence.decay()
        logging.debug('strat_influence.decay().finish = %s' % str(self.gamestate.time_remaining())) 
        # use planner to set new influence
        self.planner.do_plan()
        plan_duration = plan_start - self.gamestate.time_remaining()
        self.planner_time = max([plan_duration, self.planner_time])
        
        # diffuse strategy influence
        logging.debug('strat_influence.diffuse().start = %s' % str(self.gamestate.time_remaining())) 
        for i in xrange(3):
            self.strat_influence.diffuse()
            if self.gamestate.time_remaining() < 50:
                logging.debug('stopped diffuse after %d times' % i)
                break
        logging.debug('strat_influence.diffuse().finish = %s' % str(self.gamestate.time_remaining())) 

        # handle explorer
        self.issue_explore_task()
        logging.debug('endturn: ant_count = %d, time_elapsed = %s' % (len(self.gamestate.ant_list), self.gamestate.time_elapsed()))

    def issue_combat_task(self):
        'combat logic'
        logging.debug('issue_combat_task.start = %s' % str(self.gamestate.time_remaining())) 
        zones = battle.get_combat_zones(self.gamestate)
        
        logging.debug('zones = %s' % str(zones))
        for zone in zones:
            logging.debug('group combat loop for = %s' % str(zone))
            if len(zone[0]) > 0:
                battle.do_zone_combat(self.gamestate, zone)
            
            # check if we still have time left to calculate more orders
            if self.gamestate.time_remaining() < self.planner_time + 50:
                break
                
        logging.debug('issue_combat_task.finish = ' + str(self.gamestate.time_remaining())) 
        
    def issue_explore_task(self):
        'explore map'
        logging.debug('issue_explore_task.start = %s' % str(self.gamestate.time_remaining())) 
        # loop through all my un-moved ants and set them to explore
        # cur_loc is an ant location tuple in (row, col) form
        for cur_loc in self.gamestate.my_unmoved_ants():
            all_locs = [cur_loc] + [self.gamestate.destination(cur_loc, d) 
                                    for d in self.gamestate.passable_directions(cur_loc)]
            loc_influences = [self.strat_influence.map[loc] for loc in all_locs]
            best_directions = self.gamestate.direction(cur_loc, all_locs[loc_influences.index(min(loc_influences))])
            if len(best_directions) > 0:
                self.gamestate.issue_order((cur_loc, choice(best_directions)))
            
            # check if we still have time left to calculate more orders
            if self.gamestate.time_remaining() < 10:
                break
        logging.debug('issue_explore_task.finish = ' + str(self.gamestate.time_remaining())) 


    # static methods receive no self argument;
    # @staticmethod is the decorator that marks them as such
    @staticmethod
    def run():
        "parse input, update the game state and call the bot class's do_turn method"
        gamestate = GameState()
        bot = MyBot(gamestate)
        map_data = ''
        while True:
            try:
                current_line = sys.stdin.readline().rstrip('\r\n') # strip newline chars
                if current_line.lower() == 'ready':
                    gamestate.setup(map_data)
                    bot.do_setup()
                    gamestate.finish_turn()
                    map_data = ''
                elif current_line.lower() == 'go':
                    gamestate.update(map_data)
                    # call the do_turn method of the class passed in
                    bot.do_turn()
                    gamestate.finish_turn()
                    map_data = ''
                else:
                    map_data += current_line + '\n'
            except EOFError:
                break
            except KeyboardInterrupt:
                raise
            except:
                # don't raise error or return so that bot attempts to stay alive
                traceback.print_exc(file=sys.stderr)
                sys.stderr.flush()
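
Ejemplo 48's turn loop keeps a running worst case for the planner's cost and skips optional work when the remaining budget would not cover it plus a safety margin. A stripped-down sketch of that pattern, with Clock as a hypothetical stand-in for gamestate.time_remaining():

import time

class Clock:
    # counts down a per-turn budget in milliseconds
    def __init__(self, turntime_ms):
        self.deadline = time.time() + turntime_ms / 1000.0
    def time_remaining(self):
        return max(0.0, (self.deadline - time.time()) * 1000.0)

def do_turn(clock, tasks, planner_time_ms, margin_ms=50):
    for task in tasks:
        # bail out while there is still enough budget left for the planner
        if clock.time_remaining() < planner_time_ms + margin_ms:
            break
        task()

clock = Clock(turntime_ms=500)
do_turn(clock, [lambda: None] * 100, planner_time_ms=200)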
Ejemplo n.º 49
0
    def do_setup(self):
        # initialize data structures after learning the game settings
        self.strat_influence = Influence(self.gamestate, STRAT_DECAY)
        self.planner = Planner(self.gamestate, self.strat_influence)
Ejemplo n.º 50
0
if __name__ == '__main__':
    ############### ROS setup #######################
    node_name = 'mico_planner'
    group_name = 'arm'
    planner_name = 'RRTConnectkConfigDefault'
    ee_link_name = 'mico_link_endeffector'
    roscpp_initialize(sys.argv)
    rospy.init_node(node_name, anonymous=True)

    acHan = ActionHandler(group_name, planner_name, ee_link_name)

    rospy.sleep(1)
    acHan.setWorkspace(-10, -10, 0, 10, 10, 10)
    #################################################
    planner = Planner()
    timing = Timing()
    planner.run()
    # Database setup
    storage = FileStorage.FileStorage("pGraph.fs")
    db = DB(storage)
    conn = db.open()
    pGraphDB = conn.root()

    if "graph" not in pGraphDB:
        pGraphDB["graph"] = PGraph.PGraph()

    # Variables
    pGraph = pGraphDB["graph"]

    # clear persistent program memory
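
The snippet above ends before the database is used; a sketch of the usual ZODB open/commit/close cycle it implies (the "graph" key matches the snippet, everything else is illustrative):

import transaction
from ZODB import DB, FileStorage

storage = FileStorage.FileStorage("pGraph.fs")
db = DB(storage)
conn = db.open()
root = conn.root()

if "graph" not in root:
    root["graph"] = {}  # first run: seed the root with an empty graph

transaction.commit()    # changes persist only once committed
conn.close()
db.close()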
Ejemplo n.º 51
0
class MyBot:
    def __init__(self, gamestate):
        # instance attributes set here persist between turns
        self.gamestate = gamestate
        self.diffuse_time = 0
        self.combat_time_history = deque([0, 0, 0, 0, 0])
        self.combat_time = 0
    
    # do_setup is run once at the start of the game
    # after the bot has received the game settings
    def do_setup(self):
        # initialize data structures after learning the game settings
        self.strat_influence = Influence(self.gamestate)
        self.planner = Planner(self.gamestate)
    
    def log_turn(self, turn_no):
        logging.debug('turn ' + str(self.gamestate.current_turn))
        logging.debug('self.diffuse_time = %d' % self.diffuse_time)
        logging.debug('self.combat_time_history = %s' % str(self.combat_time_history))
        logging.debug('self.combat_time = %s' % self.combat_time)
        logging.debug('self.strat_influence.map over 0.01 count: %d' % 
            len([key for key in self.strat_influence.map if math.fabs(self.strat_influence.map[key]) > 0.01]))
            
    def log_detail(self):
        if DETAIL_LOG and os.path.isdir('pickle'):
            
            # dump influence map value
            pickle_file = open('pickle/turn_' + str(self.gamestate.current_turn) + '.influence', 'wb')
            pickle.dump(self.strat_influence, pickle_file)
            pickle_file.close()
    
    # do_turn is run once per turn
    def do_turn(self):        
        # detailed logging
        self.log_turn(self.gamestate.current_turn)
        
        # decay strategy influence
        #logging.debug('strat_influence.decay().start = %s' % str(self.gamestate.time_remaining())) 
        self.strat_influence.decay(STRAT_DECAY)
        #logging.debug('strat_influence.decay().finish = %s' % str(self.gamestate.time_remaining())) 
        # use planner to set new influence
        logging.debug('self.planner.do_strategy_plan.start = %s' % str(self.gamestate.time_remaining()))
        self.planner.do_strategy_plan(self.strat_influence)        
        
        # diffuse strategy influence
        logging.debug('strat_influence.diffuse().start = %s' % str(self.gamestate.time_remaining()))
        for i in xrange(3):
            if self.gamestate.time_remaining() < self.combat_time + 100:
                logging.debug('bailing diffuse after %d times' % (i))
                break
            diffuse_start = self.gamestate.time_remaining()
            self.strat_influence.diffuse(cutoff=0.01)
            diffuse_duration = diffuse_start - self.gamestate.time_remaining()
            self.diffuse_time = max([diffuse_duration, self.diffuse_time])
        logging.debug('strat_influence.diffuse().finish = %s' % str(self.gamestate.time_remaining())) 
                
        # handle combat
        combat_start = self.gamestate.time_remaining()
        self.issue_combat_task()
        self.combat_time_history.append(combat_start - self.gamestate.time_remaining())
        self.combat_time_history.popleft()
        self.combat_time = max(self.combat_time_history)
        
        self.log_detail()
        # do special tasks
        self.planner.do_task(self.strat_influence)
        # handle explorer
        self.issue_explore_task()
        logging.debug('endturn: ant_count = %d, time_elapsed = %s' % (len(self.gamestate.ant_list), self.gamestate.time_elapsed()))

    def issue_combat_task(self):
        'combat logic'
        logging.debug('issue_combat_task.start = %s' % str(self.gamestate.time_remaining())) 
        zones = battle.get_combat_zones(self.gamestate)
        logging.debug('get_combat_zones.finish = %s' % str(self.gamestate.time_remaining())) 
        
        if zones is not None:
            logging.debug('zones.count = %d' % len(zones))
            for zone in zones:
                if len(zone[0]) > 0:
                    #logging.debug('group combat loop for = %s' % str(zone))
                    #logging.debug('do_zone_combat.start = %s' % str(self.gamestate.time_remaining())) 
                    battle.do_zone_combat(self.gamestate, zone)
                    #logging.debug('do_zone_combat.start = %s' % str(self.gamestate.time_remaining())) 
                
                # check if we still have time left to calculate more orders
                if self.gamestate.time_remaining() < 50:
                    break
                
        logging.debug('issue_combat_task.finish = ' + str(self.gamestate.time_remaining())) 

    def issue_explore_task(self):
        'explore map'
        logging.debug('issue_explore_task.start = %s' % str(self.gamestate.time_remaining())) 
        # loop through all my un-moved ants and set them to explore
        # cur_loc is an ant location tuple in (row, col) form
        for cur_loc in self.gamestate.my_unmoved_ants():
            loc_influences = {}
            for d in self.gamestate.passable_directions(cur_loc):
                # include cur_loc in the row to slightly penalise directions with water,
                # since cur_loc itself should carry fairly high influence
                direction_row = [cur_loc] + [loc for loc in self.gamestate.direction_row(cur_loc, d, 3) 
                                            if loc not in self.gamestate.water_list]
                direction_inf = sum([self.strat_influence.map[loc] for loc in direction_row])
                # normalize direction influence
                loc_influences[d] = direction_inf / len(direction_row)
                
            if len(loc_influences) > 0:
                best_direction = min(loc_influences, key=loc_influences.get)
                self.gamestate.issue_order((cur_loc, best_direction))
            
            # check if we still have time left to calculate more orders
            if self.gamestate.time_remaining() < 10:
                break
        logging.debug('issue_explore_task.finish = ' + str(self.gamestate.time_remaining()))

    # static methods receive no self argument;
    # @staticmethod is the decorator that marks them as such
    @staticmethod
    def run():
        "parse input, update the game state and call the bot class's do_turn method"
        gamestate = GameState()
        bot = MyBot(gamestate)
        map_data = ''
        while True:
            try:
                current_line = sys.stdin.readline().rstrip('\r\n') # strip newline chars
                if current_line.lower() == 'ready':
                    gamestate.setup(map_data)
                    bot.do_setup()
                    gamestate.finish_turn()
                    map_data = ''
                elif current_line.lower() == 'go':
                    gamestate.update(map_data)
                    # call the do_turn method of the class passed in
                    bot.do_turn()
                    gamestate.finish_turn()
                    map_data = ''
                else:
                    map_data += current_line + '\n'
            except EOFError:
                break
            except KeyboardInterrupt:
                raise
            except:
                # don't raise error or return so that bot attempts to stay alive
                traceback.print_exc(file=sys.stderr)
                sys.stderr.flush()
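
The direction scoring in issue_explore_task above averages the influence along a short row of cells in each direction and moves toward the lowest score. A toy, self-contained version of that idea (grid layout and values are made up):

# influence values for a tiny map fragment; missing keys play the role of
# water / unseen cells and are skipped when averaging
influence = {
    (0, 0): 0.9, (0, 1): 0.2, (0, 2): 0.1,
    (1, 0): 0.8, (-1, 0): 0.7,
}
offsets = {'n': (-1, 0), 's': (1, 0), 'e': (0, 1), 'w': (0, -1)}

def score_direction(loc, d, depth=3):
    dr, dc = offsets[d]
    row = [loc] + [(loc[0] + dr * i, loc[1] + dc * i) for i in range(1, depth + 1)]
    cells = [c for c in row if c in influence]
    return sum(influence[c] for c in cells) / len(cells)  # normalised score

best = min(offsets, key=lambda d: score_direction((0, 0), d))
print(best)  # 'e': lowest average influence, i.e. least explored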