def main():
    
    mdp = make_grid_world_from_file(args.map)
    planner = Planner(mdp, sample_rate=5, tau=args.tau)
    
    planner.run_vi() 

    mdp.visualize_initial_map()
    action_seq, state_seq, reward_seq = planner.plan(planner.mdp.get_init_state())
    for state, action in zip(state_seq, action_seq):
        print(state, action)
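
planner.plan() returns parallel action, state, and reward sequences for a rollout from the initial state. As a small illustrative addition (not part of the original snippet; the discount factor below is an assumed placeholder, not the MDP's own gamma), the returned trajectory could be scored at the end of main() like this:

    # Hypothetical follow-up: score the planned trajectory.
    gamma = 0.95  # assumed discount factor; the MDP's actual gamma is not shown here
    discounted_return = sum((gamma ** t) * r for t, r in enumerate(reward_seq))
    print("Discounted return of planned trajectory:", discounted_return)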
def generate_reward_prior(map_list=[
    "top_right.mp", "top_left.mp", "bottom_right.mp", "bottom_left.mp",
    "center.mp"
],
                          save_prior=False):

    expert_demonstrations = []
    for map_name in map_list:
        mdp = make_grid_world_from_file(map_name)
        mdp.visualize_initial_map()
        planner = Planner(mdp, sample_rate=5)

        expert_demonstrations.extend(
            generate_demonstrations(planner, args.num_demonstrations))

    random.shuffle(expert_demonstrations)

    num_states = mdp.height * mdp.width
    feature_map = np.eye(num_states)
    reward_array = deep_maxent_irl(feature_map, planner, expert_demonstrations,
                                   args.learning_rate, args.num_iterations)

    reward_matrix = np.reshape(reward_array, (mdp.height, mdp.width))

    if save_prior:
        np.save("reward_prior", reward_matrix)

    heatmap_2d(reward_matrix, "Recovered Reward Values")
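
Note that the feature map above is the identity matrix, i.e. one indicator feature per grid cell, so deep_maxent_irl can assign an independent reward to every state, and the flat reward vector reshapes directly into the grid layout. A tiny self-contained illustration of those two steps, using a made-up 2x3 grid:

import numpy as np

height, width = 2, 3
num_states = height * width
feature_map = np.eye(num_states)    # row s is the one-hot feature vector of state s

reward_array = np.arange(num_states, dtype=float)    # stand-in for the IRL output
reward_matrix = np.reshape(reward_array, (height, width))
# reward_matrix is now laid out like the grid:
# [[0. 1. 2.]
#  [3. 4. 5.]]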
Example #3
 def __init__(self, name, password, world_map: Map = None):
     super().__init__(name, password)
     self.agent_name = name
     self.world_map = world_map
     self.planer = Planner(world_map)
     self.localization = {
         "x": random.randrange(100),
         "y": random.randrange(100)
     }  # localization {"x": .., "y": ..}
     self.current_path = None  # path [[x, y, time], ... ]
     self.schedule = {}  # schedule {node_id: [start_time, end_time]}
     self.neighbors = {
     }  # neighbors {"name": {"path": .., "time": .., "localization": ..}, ...}
     self.goals = {}  # goals {node_id: needed_time}
     self.clock = 0  # time of day
     print(f"[{self.agent_name}] Agent created (init)")
Example #4
 def decide(self):
     actions = self.env.getAllActions()
     u_act = Planner.plan(self.theory, self.state, self.goal, actions)
     #print(u_act)
     if u_act is None:
         u_act = random.choice(actions)
     return u_act
Example #5
    def threshold_greedy(self,
                         map_file="L_wall.mp",
                         prior_file="reward_prior.npy"):
        reward_prior = np.load(prior_file)
        reward_location_list = get_potential_rewards(reward_prior)
        print(reward_location_list)

        mdp = make_grid_world_from_file(map_file)
        mdp.visualize_initial_map()
        planner = Planner(mdp, tau=0.005, sample_rate=5)

        reward_matrix_list = [
            create_reward_matrix_from_location(location, planner)
            for location in reward_location_list
        ]

        num_demonstrations = 1
        expert_demonstration = generate_demonstrations(planner,
                                                       num_demonstrations)[0]
        partial_demonstration = expert_demonstration[:len(expert_demonstration) // 2]

        for (reward_matrix, reward_location) in zip(reward_matrix_list,
                                                    reward_location_list):
            likelihood = get_partial_trajectory_log_likelihood(
                planner, reward_matrix, partial_demonstration)
            print(reward_location, likelihood)
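
The loop above only prints the partial-trajectory log-likelihood of each candidate reward location. As a hypothetical continuation (an assumption about what threshold_greedy does next, not code from the source), the candidates that clear a likelihood threshold could be kept, or simply the best one:

        # Hypothetical continuation, not in the original snippet.
        likelihoods = [
            get_partial_trajectory_log_likelihood(planner, R, partial_demonstration)
            for R in reward_matrix_list
        ]
        threshold = max(likelihoods) - 1.0    # assumed tolerance on the log scale
        candidates = [loc for loc, ll in zip(reward_location_list, likelihoods)
                      if ll >= threshold]
        print("Candidate goal locations:", candidates)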
Example #6
 def __init__(self, maze_dim, dump_mazes=False):
     self.location = Position(0, 0)
     self.heading = Direction(0)
     self.maze_map = MazeMap(maze_dim)
     self.timestep = 0
     self.run_number = 0
     self.found_goal = False
     self.dump_mazes = dump_mazes
     self.planner = Planner(self.maze_map)  # planner needs to be set by sub-class
Example #7
    def __init__(self, length, fig, ax):
        self.engine = Actuator()
        self.steering = Actuator()

        self.speedometer = Sensor()
        self.wheelAngle = Sensor()
        self.gps = Sensor()
        self.compass = Sensor()
        self.laneTracker = Sensor()
        self.lidar = Sensor()
        self.lidarArray = []

        self.pilot = Pilot()
        self.planner = Planner()

        self.vizualizer = Vizualizer(length, fig, ax)
        self.length = length

        self.maxLonAcc = 10
        self.particleFilter = None
Example #8
def main():

    demonstration_list = []
    agents = read_annotations("annotations.txt")
    grid = create_video_grid(VIDEO_WIDTH, VIDEO_HEIGHT, BLOCK_SIZE)
    
    for agent in agents:
        positions = get_agent_positions_in_grid(agent, grid)
        step_list = get_steps_from_position_list(positions)
        demonstration_list.append(step_list)

    grid_width = len(grid[0])
    grid_height = len(grid)
    num_states = grid_width * grid_height
    placeholder_mdp = GridWorldMDP(width=grid_width, height=grid_height, init_loc=(0, 0),
                                   goal_locs=[], walls=[], name="placeholder")
    planner = Planner(placeholder_mdp, sample_rate=5)
    planner._compute_matrix_from_trans_func()

    feature_map = np.eye(num_states)
    reward_array = deep_maxent_irl(
        feature_map, planner, demonstration_list, args.learning_rate, args.num_iterations)  
    reward_matrix = np.reshape(reward_array, (grid_height, grid_width))
    np.save("reward_prior", reward_matrix) 
Example #9
def slow_sampler_example(save_prior=False):

    mdp = make_grid_world_from_file(args.map)
    mdp.visualize_initial_map()
    planner = Planner(mdp, tau=args.tau, sample_rate=5)

    demonstrations = generate_demonstrations(planner, args.num_demonstrations)

    sampler = SlowRewardSampler(planner)
    reward_matrix = sampler.sample_reward_functions(demonstrations)
    if save_prior:
        np.save("slow_reward_prior", reward_matrix)

    plt.plot(sampler.likelihood_vals)
    plt.ylabel("Log-likelihood")
    plt.xlabel("Iteration")
    plt.show()

    heatmap_2d(reward_matrix)
Example #10
def inverse_model_example():
    # Setup MDP, Agents.

    mdp = make_grid_world_from_file(args.map)
    mdp.visualize_initial_map()
    planner = Planner(mdp, sample_rate=5)

    expert_demonstrations = generate_demonstrations(planner,
                                                    args.num_demonstrations)

    num_states = mdp.height * mdp.width
    feature_map = np.eye(num_states)
    reward_array = deep_maxent_irl(feature_map, planner, expert_demonstrations,
                                   args.learning_rate, args.num_iterations)

    reward_matrix = np.reshape(reward_array, (mdp.height, mdp.width))
    reward_likelihood = get_reward_function_log_likelihood(
        planner, reward_matrix, expert_demonstrations)

    heatmap_2d(reward_matrix, "Recovered Reward Values")
Example #11
def main():
    print("Sampling reward functions using slow sampler...")
    mdp = make_grid_world_from_file(args.map)
    mdp.visualize_initial_map()
    planner = Planner(mdp, tau=args.tau, sample_rate=5)

    #demonstrations = generate_demonstrations(planner, args.num_demonstrations)
    num_demonstrations = 1
    expert_demonstration = generate_demonstrations(planner, num_demonstrations)[0]
    partial_demonstration = [expert_demonstration[:len(expert_demonstration) // 2]]
    slow_sampler = SlowRewardSampler(planner)
    slow_reward_matrix = slow_sampler.sample_reward_functions(partial_demonstration)

    prior_sampler = PriorRewardSampler(planner)
    prior_sampler_reward_matrix = prior_sampler.sample_reward_functions_softmax(partial_demonstration)

    plt.plot(slow_sampler.likelihood_vals, label="Naive Sampler")
    plt.plot(prior_sampler.likelihood_vals, label="Prior Sampler")
    plt.legend(loc="best")
    print(slow_sampler.likelihood_vals[-1])
    print(prior_sampler.likelihood_vals[-1])
    plt.ylabel("Log-likelihood")
    plt.xlabel("Iteration")
    plt.show()

    #print("Sampling reward function using deep network...")

    #num_states = mdp.height * mdp.width
    #feature_map = np.eye(num_states)
    #reward_array = deep_maxent_irl(
    #        feature_map, planner, demonstrations, args.learning_rate, args.num_iterations)

    #reward_matrix = np.reshape(reward_array, (mdp.height, mdp.width))
    #reward_likelihood = get_reward_function_log_likelihood(planner,
    #        reward_matrix, demonstrations)
    #plt.plot([0], [reward_likelihood], marker='o')
    #print(reward_likelihood)
    heatmap_2d(slow_reward_matrix)
Example #12
from Planner import Planner


if __name__ == "__main__":
    print 'Enter EID:'
    eid = raw_input()
    print 'Enter Password:'
    # ****** (remainder of the snippet elided in the source)
    #         p.print_schedule()
Example #13
            heuristicName = sys.argv[4].strip()
            print 
        else:
            raise Exception('Invalid Input! Usage: >> python main.py <domainfile> <problemfile> -h <heuristic_name>')
    except:
        heuristicName = 'equality'
        print bcolors.OKGREEN + "--> Default heuristic 'equality'" + bcolors.ENDC

    # parse SAS/PDDL data #
    listOfPredicates, initialState, goalState, listOfActions, compliantConditions, goalCompliantConditions = grounded_data.returnParsedData()

    # generate transformation #
    Mrref, M = compute_transform(listOfPredicates, listOfActions, goalCompliantConditions, debug_flag)

    # evaluate #
    evaluation_object = Evaluator(listOfPredicates, listOfActions, initialState, goalState, compliantConditions, goalCompliantConditions, Mrref, M, cost_flag)
    print bcolors.HEADER + "\n>> Initial state evaluation = " + bcolors.OKBLUE + str(float(evaluation_object.evaluate(initialState, heuristicName))) + bcolors.ENDC    
    sys.exit(0)
    
    # solve #
    plan_object = Planner(listOfPredicates, listOfActions, initialState, goalState, compliantConditions, goalCompliantConditions, Mrref, M, cost_flag)
    plan, cost  = plan_object.aStarSearch(heuristicName)

    if plan:    
        print bcolors.HEADER + "\n>> FINAL PLAN\n--> " + bcolors.OKBLUE + '\n--> '.join(plan) + "\n" + bcolors.OKGREEN + "\nCost of Plan: " + str(cost) + '\n' + bcolors.ENDC
    else:
        if cost == 0.0:
            print bcolors.HEADER + "*** NO PLAN REQUIRED ***" + bcolors.ENDC
        else:
            print bcolors.HEADER + "*** NO PLAN FOUND ***" + bcolors.ENDC    
Example #14
class Vehicle(Body):
    '''
    The overall Vehicle class pulls together all other components
    '''
    def __init__(self, length, fig, ax):
        self.engine = Actuator()
        self.steering = Actuator()

        self.speedometer = Sensor()
        self.wheelAngle = Sensor()
        self.gps = Sensor()
        self.compass = Sensor()
        self.laneTracker = Sensor()
        self.lidar = Sensor()
        self.lidarArray = []

        self.pilot = Pilot()
        self.planner = Planner()

        self.vizualizer = Vizualizer(length, fig, ax)
        self.length = length

        self.maxLonAcc = 10
        self.particleFilter = None


    def getPos(self):
#        return self.particleFilter.getPosition()
        x, y = self.gps.read()
        return x[0], y[0]


    def getOrientation(self):
        return self.compass.read()

    def getVelocity(self):
        return self.speedometer.read()

    def getAcc(self):
        return self.engine.getValue()

    def getOmega(self):
        return self.steering.getValue()

    def headingTracker(self, dt):
        '''
        Wrapper for the heading controller
        '''
        x, y        = self.gps.read()
#        x, y        = self.particleFilter.getPosition()
        orientation = self.compass.read()
        phi         = self.wheelAngle.read()
        v           = self.speedometer.read()
#        x_road, y_road, angle_road, v_d = self.planner.getGoal(x, y, v, dt)
#        x_road, y_road, angle_road, v_d = self.planner.getPath(x, y)
        x_road, y_road, angle_road, v_d = self.planner.getPath(x[0], y[0])

        x_road += cos(angle_road)*v_d*dt*10
        y_road += sin(angle_road)*v_d*dt*10
        return self.pilot.headingTracker(x, y, orientation,
                                           phi, v, self.length,
                                           x_road, y_road, angle_road, v_d,
                                           dt)

    def lqrTracker(self, dt):
        '''
        Wrapper for the heading controller
        '''
        x, y        = self.gps.read()
#
#        x, y        = self.particleFilter.getPosition()
        orientation = self.compass.read()
        phi         = self.wheelAngle.read()
        v           = self.speedometer.read()
        x_road, y_road, angle_road, v_d = self.planner.getPath(x[0], y[0])
#        x_road, y_road, angle_road, v_d = self.planner.getGoal(x, y, v, dt)

        return self.pilot.lqrTracker(x[0], y[0], orientation,
                                           phi, v, self.length, self.planner,
                                           x_road, y_road, angle_road, v_d,
                                           dt)

    def phiController(self, dt):
        '''
        Wrapper for the phi controller
        '''
        x, y        = self.gps.read()
#        x, y        = self.particleFilter.getPosition()
        orientation = self.compass.read()
        phi         = self.wheelAngle.read()
        v           = self.speedometer.read()
        x_road, y_road, angle_road, v_d = self.planner.getGoal(x, y, v, dt)

        return self.pilot.phiController(x, y, orientation, phi,
                                        x_road, y_road, angle_road,
                                       dt)

    def cteController(self, dt):
        '''
        Wrapper for the Cross Track Error controller
        '''
        x, y        = self.gps.read()
#        x, y        = self.particleFilter.getPosition()
        return self.pilot.crossTrackErrorController(x, y, self.planner, dt)

    def velocityController(self, dt):
        '''
        Wrapper for the velocity controller
        '''
        x, y        = self.gps.read()
#        x, y        = self.particleFilter.getPosition()
        v               = self.speedometer.read()
#        _, _, _, v_ref  = self.planner.getGoal(x, y, v, dt)
        x_road, y_road, angle_road, v_d = self.planner.getPath(x[0], y[0])
        return self.pilot.velocityController(v, v_d, self.maxLonAcc, histeresis = 0.1)

#    def addLIDAR(self, lidar):
#        self.lidarArray.append(lidar)

    def connectToEngine(self, function):
        '''
        Adds a connection to the engine
        '''
        self.engine.connect(function)

    def connectToSteering(self, function):
        '''
        Adds a connection to the steering wheel
        '''
        self.steering.connect(function)

    def connectToSpeedometer(self, function):
        '''
        Adds a connection to the velocimeter
        '''
        self.speedometer.connect(function)

    def connectToWheelAngle(self, function):
        '''
        Adds a connection to system to measure the steering wheel angle
        '''
        self.wheelAngle.connect(function)

    def connectToGPS(self, function):
        '''
        Adds a connection to system that captures the vehicle position
        '''
        self.gps.connect(function)

    def connectToCompass(self, function):
        '''
        Adds a connection to system that captures the vehicle orientation
        '''
        self.compass.connect(function)

    def connectToLidar(self, function):
        '''
        Adds a connection to system that captures the environment
        '''
        self.lidar.connect(function)

    def connectToLaneTracker(self, function):
        '''
        Adds a connection to system that tracks the lane position
        '''
        self.laneTracker.connect(function)

    def connectToSimulation(self, simulationVehicle, laneTracker, simulationEnviroment = None):
        '''
        Wrapper to connect all systems to the simulation model
        '''
        self.connectToEngine(simulationVehicle.setAcceleration)
        self.connectToSteering(simulationVehicle.setOmega)

        self.connectToCompass(simulationVehicle.getOrientation)
        self.connectToSpeedometer(simulationVehicle.getVelocity)
        self.connectToGPS(simulationVehicle.getPosition)
        self.connectToWheelAngle(simulationVehicle.getSteering)
        if(simulationEnviroment is not None):
            self.connectToLidar(simulationEnviroment.getPointMap)
        else:
            self.connectToLidar(lambda : 0)
        self.connectToLaneTracker(laneTracker)

    def scan(self):
        '''
        Do a scan round through all of the sensors
        '''
        self.speedometer.scan()
        self.compass.scan()
        self.gps.scan()
        self.wheelAngle.scan()
        self.laneTracker.scan()
        self.lidar.scan()

    def createFilter(self):
        xHat, yHat      = self.gps.read()
        orientationHat  = self.compass.read()
        phiHat          = self.wheelAngle.read()
        vHat            = self.speedometer.read()
        self.particleFilter = ParticleFilter(Bike, xHat, yHat, orientationHat, phiHat, vHat, self.length,  500)

    def updateFilter(self, dt):
        xHat, yHat      = self.gps.read()
        xLane, yLane    = self.laneTracker.read()

        orientationHat  = self.compass.read()
        phiHat          = self.wheelAngle.read()
        vHat            = self.speedometer.read()
        acc             = self.engine.getValue()
        omega           = self.steering.getValue()

        measurements = [ xHat, yHat, orientationHat, phiHat, vHat]
        V = [self.gps.getUncertanty(),
             self.gps.getUncertanty(),
             self.compass.getUncertanty(),
             self.wheelAngle.getUncertanty(),
             self.speedometer.getUncertanty()]
#        measurements = [ xHat, yHat]
#        V = [self.gps.getUncertanty(), self.gps.getUncertanty()]

        self.particleFilter.updateParticles(acc, omega, dt)
        self.particleFilter.weightParticles(measurements, V)
        self.particleFilter.resampleParticles()

#        measurements = [xLane, yLane]
#        V = [self.laneTracker.getUncertanty(), self.laneTracker.getUncertanty()]
#        self.particleFilter.weightParticles(measurements, V)
#        self.particleFilter.resampleParticles()

    def plot(self, draw = True):
        '''
        Wrapper for the vizualizer to update the plot
        '''
        x, y        = self.gps.read()
        orientation = self.compass.read()
#        x_goal, y_goal = self.planner.nextX, self.planner.nextY
        self.vizualizer.plot(x, y, orientation, self.length, draw)


    def plot3d(self):
        '''
        Wrapper for the 3d plotting of the vizualizer. Not yet implemented.
        '''
        x, y        = self.gps.read()
        orientation = self.compass.read()
        self.vizualizer.plot3d(x, y, orientation)
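
Most of the Vehicle class is wiring: every Sensor and Actuator is connected to a callable, normally supplied by a simulation back-end through connectToSimulation(). The sketch below is only a hypothetical wiring example: StubSimulation and the lambda lane tracker are invented here, it assumes Sensor.read() returns whatever the connected callable produced on the last scan (implied but not shown in the snippet), and it assumes the project's Sensor, Actuator, Pilot, Planner and Vizualizer classes are importable alongside Vehicle.

import matplotlib.pyplot as plt

class StubSimulation(object):
    """Invented stand-in exposing the callbacks connectToSimulation() expects."""
    def setAcceleration(self, a): self.acc = a
    def setOmega(self, w): self.omega = w
    def getOrientation(self): return 0.0
    def getVelocity(self): return 1.0
    def getPosition(self): return ([0.0], [0.0])   # (x, y) as one-element arrays
    def getSteering(self): return 0.0

fig, ax = plt.subplots()
car = Vehicle(length=2.5, fig=fig, ax=ax)
car.connectToSimulation(StubSimulation(), laneTracker=lambda: ([0.0], [0.0]))

car.scan()                 # pull fresh readings from every connected sensor
print(car.getPos())        # -> (0.0, 0.0) with the stub above
print(car.getVelocity())   # assumed to return the stub's 1.0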
Example #15
class RobotController(object):
    """
    This is the base class for all robot controllers. You must override next_move and initialize self.planner in
    a sub-class.
    """
    def __init__(self, maze_dim, dump_mazes=False):
        self.location = Position(0, 0)
        self.heading = Direction(0)
        self.maze_map = MazeMap(maze_dim)
        self.timestep = 0
        self.run_number = 0
        self.found_goal = False
        self.dump_mazes = dump_mazes
        self.planner = Planner(self.maze_map)  # planner needs to be set by sub-class

    def next_move(self, sensors):
        """
        This is the main method to implement in a controller. Make sure that the move the controller returns is valid
        (it doesn't try to go through walls); if it is not, the location and heading class members will become incorrect.
        :param sensors: The distance in squares to the next wall for the left, middle and right sensors.
        :return: The sub-class should return the next move as a (int, int). The first is the rotation
        (could be -90, 0 or 90), the second is the number of squares to move (from -3 to 3).
        """
        raise NotImplemented("Implement by overriding in sub-class.")

    def update(self, sensors):
        """
        Convenience function for updating the maze, re-planning (if the maze was updated), and dumping the maze for
        debugging.
        :param sensors:
        :return: True if maze was updated
        """
        self.timestep += 1
        maze_updated = self.maze_map.update(self.location, self.heading, sensors)
        if maze_updated:
            self.planner.replan(self.location, self.heading)
        if self.dump_mazes:
            self.maze_map.dump_to_file(os.path.join(os.curdir, "known_maze.txt"))
        return maze_updated

    def move(self, rotation, movement):
        """
        Update the current location and heading for the class. This should be called in the sub-class after choosing
        a move.
        :type rotation: int
        :type movement: int
        :param rotation: The rotation to perform
        :param movement: The number of squares to move
        :return: None
        """
        self.heading = self.heading.rotate(rotation)
        direction_of_movement = self.heading if movement > 0 else self.heading.flip()
        for i in range(abs(movement)):
            self.location = self.location.add(direction_of_movement)
        if self.maze_map.at_goal(self.location):
            self.found_goal = True

    def reset(self):
        """
        Move the robot back to 0,0 and facing north. Also changes the run_number to 1. Call this when you are ready to
        start the second run.
        :return: Returns the reset string for convenience.
        """
        self.location = Position(0, 0)
        self.heading = Direction(0)
        self.run_number = 1
        print "Number of moves in first run: {}".format(self.timestep)
        print "Exploration efficiency: {}".format(self.get_exploration_efficiency())
        self.planner.replan(self.location, self.heading)
        return "Reset", "Reset"

    def show_current_policy_and_map(self):
        """
        For visualization of the maze and policy when using the A* planner.
        :return: None
        """
        vis = Visualizer(self.maze_map.dim)
        vis.draw_maze(Maze('known_maze.txt'))
        vis.draw_policy(reversed(self.planner.policy), self.location, self.heading)
        vis.show_window()

    def get_exploration_efficiency(self):
        return self.maze_map.get_num_known_walls() / float(self.timestep)
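
RobotController documents the next_move contract (sensors give the wall distances to the left, front and right; the move is a rotation in {-90, 0, 90} plus a movement of -3 to 3 squares) but leaves the implementation to sub-classes. Below is a deliberately simple, hypothetical sub-class written for illustration only; it ignores self.planner and picks a random unblocked single step while keeping the maze map and pose consistent:

import random

class RandomWalkController(RobotController):
    """Toy controller invented for this note; not part of the original project."""

    def next_move(self, sensors):
        self.update(sensors)           # update the maze map (and replan if it changed)
        left, front, right = sensors
        options = []
        if front > 0:
            options.append((0, 1))     # no wall ahead: step forward
        if left > 0:
            options.append((-90, 1))   # turn left and step
        if right > 0:
            options.append((90, 1))    # turn right and step
        if not options:
            options.append((90, 0))    # boxed in: rotate in place
        rotation, movement = random.choice(options)
        self.move(rotation, movement)  # keep location and heading consistent
        return rotation, movement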
Example #16
class MobileAgent(agent.Agent):
    agent_name: str

    # Position information sent to the EnvironmentManager
    class ActualizationInfoInEnvironment(PeriodicBehaviour):
        async def run(self):
            msg = Message(to="environmentmanager@localhost",
                          metadata=dict(performative="environment_actualize"),
                          body=dumps({
                              "source_name": self.agent.agent_name,
                              "localization": self.agent.localization
                          }))
            # print(f"[{self.agent.agent_name}] Send informations actualization to Environment manager")
            await self.send(msg)

    # The broadcast message is sent to the EnvironmentManager, which forwards it to all nearby devices
    class BroadcastSend(PeriodicBehaviour):
        async def run(self):
            msg = Message(to="environmentmanager@localhost",
                          metadata=dict(performative="broadcast"),
                          body=dumps({
                              "source_name": self.agent.agent_name,
                              "localization": self.agent.localization,
                              "schedule": self.agent.schedule
                          }))
            # print(f"[{self.agent.agent_name}] Send Broadcast")
            await self.send(msg)

    # Receiving a broadcast message from a nearby device, relayed through the EnvironmentManager
    class BroadcastReceive(CyclicBehaviour):
        async def run(self):
            msg = await self.receive(timeout=5)
            if msg:
                info = loads(msg.body)
                # update the time since the last message from this neighbor and refresh the information about its path
                other_name = info["source_name"]
                other_localization = info["localization"]
                other_schedule = info["schedule"]

                print(
                    f"[{self.agent.agent_name}] Broadcast receive from: {other_name}"
                )

                self.agent.neighbors[other_name] = {}
                self.agent.neighbors[other_name]["time"] = 10
                self.agent.neighbors[other_name][
                    "localization"] = other_localization
                self.agent.neighbors[other_name]["schedule"] = other_schedule

    class ExecuteSchedule(PeriodicBehaviour):
        async def run(self):
            if self.agent.current_path is not None:
                path = self.agent.current_path
                clock = self.agent.clock
                index = np.argmax(path[:, -1] > clock) - 1
                point = path[index:(index + 2), :]
                if point.shape[0] == 0:
                    return
                stamp = point[:, -1]
                point = point[:, :-1]
                time_dist = (stamp[-1] - stamp[0])
                if time_dist <= 0.0:
                    return
                alpha = (clock - stamp[0]) / time_dist
                location = point[0] * (1 - alpha) + point[-1] * alpha
                await self.agent.move_to(location[0], location[1])

    # remove this once a global clock is in place
    class ClockUpdate(PeriodicBehaviour):
        async def run(self):
            self.agent.clock += (self.period.microseconds * 1e-6)

    class PlanPathBehaviour(OneShotBehaviour):
        permutation = []

        async def run(self):
            self.permutation, distance = self.agent.planer.optimal_permutation(
            )
            print(f'Planning started... {self.permutation}')

            schedule = self.agent.planer.schedule(self.permutation,
                                                  AGENT_SPEED)
            self.agent.schedule = {
                str(appointment[0]):
                (float(appointment[1]), float(appointment[2]))
                for appointment in schedule
            }
            path = self.agent.planer.generate_full_path(
                self.agent.localization, schedule, AGENT_SPEED)
            # path = self.agent.planer.get_path(self.agent.localization, self.permutation[0], AGENT_SPEED)
            print('Path created!')
            await self.agent.set_path(path)

    async def move_to(self, x, y):
        self.localization['x'] = x
        self.localization['y'] = y

    async def move(self, x, y):
        self.localization['x'] += x
        self.localization['y'] += y

    async def set_path(self, path):
        self.current_path = path

    async def numpy_localization(self):
        return np.array((self.localization['x'], self.localization['y']))

    async def setup(self):
        print(f"[{self.agent_name}] Agent created (setup)")
        start_at: datetime = datetime.now()

        self.add_behaviour(self.ActualizationInfoInEnvironment(period=1))
        self.add_behaviour(
            self.BroadcastSend(period=10,
                               start_at=start_at + timedelta(seconds=5)))
        self.add_behaviour(self.BroadcastReceive(), template=BROADCAST)
        self.add_behaviour(self.ExecuteSchedule(period=0.25))
        self.add_behaviour(self.ClockUpdate(period=0.05))

        self.add_behaviour(self.PlanPathBehaviour())

    def __init__(self, name, password, world_map: Map = None):
        super().__init__(name, password)
        self.agent_name = name
        self.world_map = world_map
        self.planer = Planner(world_map)
        self.localization = {
            "x": random.randrange(100),
            "y": random.randrange(100)
        }  # localization {"x": .., "y": ..}
        self.current_path = None  # path [[x, y, time], ... ]
        self.schedule = {}  # schedule {node_id: [start_time, end_time]}
        self.neighbors = {
        }  # neighbors {"name": {"path": .., "time": .., "localization": ..}, ...}
        self.goals = {}  # goals {node_id: needed_time}
        self.clock = 0  # time of day
        print(f"[{self.agent_name}] Agent created (init)")

    def set_starting_localization(self, localization):
        self.localization = localization

    def set_goals(self, goals):
        current_node = self.world_map.locate_nearset_node(self.localization)
        new_goals = {current_node: 0, **goals}
        self.goals = new_goals
        self.planer.set_goals(self.goals)
Example #17
from Story import Story
from Planner import Planner
import subprocess
import Printer as styler

'''Main function for the application'''
if __name__ == "__main__":
	storyTitle = "saintDevilGame"	#inits story with saint devil game
	p = Planner(storyTitle)
	storyText = "\t"
	inp = -1
	while(inp != 0):
		# clear screen on each iteration of story.. for readability
		# UNIX based systems
		try:
			subprocess.call(['clear'])
		except OSError:
			pass
		# Windows systems
		try:
			subprocess.call(['cls'])
		except OSError:
			pass

		# print the story so far
		styler.printHeader("Story so far")
		print storyText

		# print current set of valid conditions in the world
		p.printWorldState()
Example #18
 def do(self, state, action):
     return Planner.predict(self.model, state, action)