def test_trivial():
    # Plan from a start just below the backboard to a goal lying on it.
    backboard = Wall((0.05, 0.12), (0.3, 0.12))
    goal = np.array((0.1, 0.12))
    start = np.array((0.1, 0.1))
    b0 = Belief(mu=start, cov=0.001, walls=[backboard])
    bg = Belief(mu=goal, cov=0.001, walls=[backboard])
    bg.connected = True  # the goal belief is connected by definition
    policy = concerrt(b0, bg, gamma=0.02, p_bg=0.98)
    agent = Agent()
    ne = NavEnv(start=start, goal=goal)
    agent.follow_policy(policy, ne, bg)
def reaches_goal(start, goal, ours=False):
    obstacles = line_world_obstacles(goal)
    obst = obstacles[0]
    # Use only the longest edge of the obstacle as the backboard; the
    # obstacle in the world is best kept very thin.
    backboard = Wall(obst.origin, obst.origin + np.array(obst.x))
    ne = NavEnv(start=start, goal=goal, obstacles=obstacles)
    b0 = Belief(mu=start, cov=0.0001, walls=[backboard])
    bg = Belief(mu=goal, cov=0.01, walls=[backboard])
    bg.connected = True  # the goal belief is connected by definition
    policy = concerrt(b0, bg, gamma=0.5, p_bg=0.98, delta=0.1)
    agent = Agent(ours=ours)
    agent.follow_policy(policy, ne, bg)
    return ne.goal_distance()
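A minimal usage sketch of reaches_goal; the start and goal values below are illustrative, not taken from the original tests. It runs the same trial with the baseline agent and with ours, then compares the final distances to the goal.
start = np.array((0.05, 0.05))
goal = np.array((0.1, 0.12))
baseline_dist = reaches_goal(start, goal, ours=False)
ours_dist = reaches_goal(start, goal, ours=True)
print(baseline_dist, ours_dist)  # smaller means closer to the goal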
def test_execute_guarded():
    backboard = Wall((0.1, 0.12), (0.3, 0.12))
    goal = np.array((0.1, 0.12))
    start = np.array((0.05, 0.05))
    b0 = Belief(mu=start, cov=0.001, walls=[backboard])
    bg = Belief(mu=goal, cov=0.001, walls=[backboard])
    bg.connected = True  # the goal belief is connected by definition
    # A single guarded move toward the goal should succeed on its own.
    action = Guarded(q_rand=goal, b_near=b0, b_old=b0, delta=0.02)
    agent = Agent()
    ne = NavEnv(start=start, goal=goal)
    res = agent.execute_action(ne, action)
    assert res
    assert ne.goal_condition_met()
    print("Test passed")
Example #4
    def __init__(self, agentId):
        self.connectedAgents = []
        self.beliefs = []
        self.agentId = agentId
        self.score = 0

        # Start with one empty Belief per task (GYOMU).
        for _ in range(Setting.GYOMU_NUM):
            self.beliefs.append(Belief())
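A small usage sketch, assuming the enclosing class is named Agent (the class statement is not shown above): each agent starts unconnected, with a zero score and one empty Belief per task.
agent = Agent(agentId=0)
assert len(agent.beliefs) == Setting.GYOMU_NUM
assert agent.score == 0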
Example #5
    def __init__(self, show_training=False, ours=False):
        self.history = History()
        self.belief = Belief(mu=[0, 0], cov=1)  # we know nothing
        self.autoencoder = None
        self.show_training = show_training
        self.goal_threshold = 0.02
        self.max_xdot = 2
        self.vae_fn = "models/vae.h5y"
        self.guapo_eps = 0.9
        self.rmp = None
        self.ours = ours
        self.cluster_planning_history = self.dmp_cluster_planning_history
        self.pd_errors = []
Example #6
    def __init__(self, params):
        self.params = params
        # Initial belief and base states built from the configured initial conditions.
        self.belief = Belief(self.params.p0, self.params.vr0, self.params.psi0,
                             self.params.vb0, self.params.P0)
        self.baseStates = BaseStates(self.params.p0, self.params.euler0,
                                     self.params.vb0)
        self.wLpf = np.zeros((3, 1))
        self.refLlaSet = False
        self.latRef = 0.0
        self.lonRef = 0.0
        self.altRef = 0.0
        self.refEcef = np.zeros((3, 1))
        self.imuPrevTime = 0.0
        self.firstImu = True
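A sketch of the parameter bundle this constructor expects; the field names come from the code above, while the values, array shapes, and the Estimator class name are hypothetical.
from types import SimpleNamespace
import numpy as np

params = SimpleNamespace(
    p0=np.zeros((3, 1)),      # initial position
    vr0=np.zeros((3, 1)),     # shape assumed
    psi0=0.0,                 # initial heading
    vb0=np.zeros((3, 1)),     # initial body-frame velocity
    euler0=np.zeros((3, 1)),  # initial attitude
    P0=np.eye(9),             # initial covariance (dimension assumed)
)
estimator = Estimator(params)  # hypothetical class name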
Example #7
    def __init__(self, action_provider, init_pose, init_obs, obj_fun_type, exploration_param, behavior_alg,
                 behavior_args):
        """
        Initialize the agent.
        :param action_provider: ActionProvider initialized with the right type of action.
        :param init_pose: Initial pose.
        :param init_obs: Initial observation.
        :param obj_fun_type: Objective function type. Can be 'static' or 'dynamic'.
        :param exploration_param: UCB exploration parameter balancing exploration and exploitation.
        :param behavior_alg: Name of the behavior algorithm to use. Can be 'MCTS_cont', 'MCTS_disc', 'FTS',
                             'MKCF_FTS_cont', 'MKCF_FTS_cont*', or 'random'.
        :param behavior_args: Arguments for the behavior algorithm.
        """
        self.action_provider = action_provider

        # Specify a reward function when simulating actions
        simulator_reward_fun = putils.UCB(exploration_param)
        print(self.action_provider.nparams)
        # Create the agent's belief
        if obj_fun_type == 'static':
            def restrictions(m):
                m['rbf.variance'].constrain_bounded(0.01, 10.0, warning=False)
                m['rbf.lengthscale'].constrain_bounded(0.1, 10.0, warning=False)

            self.belief = Belief(None, GPy.kern.RBF(self.action_provider.nparams), restrict_hyper_parameters=restrictions)
        elif obj_fun_type == 'dynamic':
            # Space-Time kernel
            ker_space = GPy.kern.RBF(2, lengthscale=0.920497128746, variance=0.00133408521113, active_dims=[0, 1])
            ker_time = GPy.kern.PeriodicExponential(lengthscale=25,
                                                    active_dims=[2]) + GPy.kern.Matern52(1, active_dims=[2])

            # Restrictions on hyperparameters when running optimize
            def restrictions(m):
                m['.*periodic_exponential.variance'].constrain_bounded(0.1, 10.0, warning=False)
                m['.*periodic_exponential.period'].constrain_fixed(world.dynamic_function_period, warning=False)
                m['.*periodic_exponential.lengthscale'].constrain_bounded(0.0, 2.0, warning=False)
                m['.*rbf.variance'].constrain_bounded(0.1, 10.0, warning=False)
                m['.*rbf.lengthscale'].constrain_bounded(0.5, 1.0, warning=False)
                m['.*Mat52.variance'].constrain_bounded(0.1, 10.0, warning=False)
                m['.*Gaussian_noise.variance'].constrain_bounded(0.0, 0.2, warning=False)  # .0004

            self.belief = Belief(None, ker_space * ker_time, restrict_hyper_parameters=restrictions)
        else:
            raise Exception("Objective function type '{}' is not valid.".format(obj_fun_type))

        # Initialize the belief
        self.belief.update(init_pose, init_obs)

        # Create the agent's behavior
        if behavior_alg == 'MCTS_cont':
            self.behavior = cont_MCTS.ContMCTS(self.action_provider, simulator_reward_fun, behavior_args[0],
                                               behavior_args[1], behavior_args[2], act_sel_k, epsilon_act_diff)
        elif behavior_alg == 'MCTS_disc':
            self.behavior = disc_MCTS.DiscMCTS(self.action_provider, simulator_reward_fun, behavior_args[0],
                                               behavior_args[1],
                                               behavior_args[2])
        elif behavior_alg == 'FTS':
            self.behavior = FTS(self.action_provider, simulator_reward_fun, behavior_args[0])
        elif behavior_alg == 'MKCF_FTS_cont' or behavior_alg == 'MKCF_FTS_cont*':
            self.behavior = ContKmcfFts(self.action_provider, simulator_reward_fun, behavior_args[0], act_sel_k,
                                        behavior_args[1])
        elif behavior_alg == 'random':
            self.behavior = RandomBehavior()
        else:
            raise Exception("Behavior algorithm '{}' is not valid.".format(behavior_alg))
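An illustrative construction, assuming the enclosing class is named Agent; every value below is hypothetical except the option strings documented in the docstring.
agent = Agent(
    action_provider=my_action_provider,  # hypothetical ActionProvider instance
    init_pose=init_pose,
    init_obs=init_obs,
    obj_fun_type='static',               # or 'dynamic'
    exploration_param=1.0,
    behavior_alg='MCTS_cont',
    behavior_args=(horizon, n_rollouts, budget),  # meaning depends on behavior_alg
)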
Example #8
    def get_curr_belief(self, ne):
        # Re-center the belief on the environment's current position.
        return Belief(mu=ne.get_pos(), cov=0.001)
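A minimal call-site sketch, assuming ne is a NavEnv as in the tests above:
self.belief = self.get_curr_belief(ne)  # hypothetical call site inside the agent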