Example #1
import numpy as np


def distance_from_goal(node):
    return 0  # placeholder: no goal-distance heuristic is used


def goal_test(node):
    goal_region_radius = .01
    n = 4
    # L1 distance on the first n state coordinates; time is disregarded.
    return np.sum(np.abs(node['state'][0:n] - goal[0:n])) < goal_region_radius
    # Alternative test, currently disabled:
    # return distance(node, goal) < goal_region_radius  # need to think more carefully about this one


start = np.array([0, 0, 0, 0, 0])
lqr_rrt = LQR_RRT(A, B, Q, R, max_time_horizon)
rrt = RRT(state_ndim=5, control_ndim=2)

lqr_rrt.action_state_valid = action_state_valid

lqr_rrt.max_nodes_per_extension = 5

rrt.sample_goal = lambda: goal

# Wire the LQR-based primitives (cached where available) into the RRT.
rrt.set_distance(lqr_rrt.distance_cache)
rrt.set_same_state(lqr_rrt.same_state)
rrt.set_cost(lqr_rrt.cost)
rrt.set_steer(lqr_rrt.steer_cache)

rrt.set_goal_test(goal_test)
rrt.set_sample(sample)
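
The snippet above only configures the planner; it does not run it. A minimal
driver sketch follows, assuming the RRT class exposes a start-setter and a
search loop (set_start and search are assumed method names, not shown in this
example):

# Hedged sketch: drive the configured planner.
# set_start and search are hypothetical method names.
rrt.set_start(start)    # register the initial state
rrt.search(iters=500)   # grow the tree for a fixed iteration budget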
Example #2
import numpy as np


def goal_test(node):
    goal_region_radius = .01
    n = 2
    # L1 distance on the first n state coordinates; time is disregarded.
    return np.sum(np.abs(node['state'][0:n] - goal[0:n])) < goal_region_radius
    # Alternative test, currently disabled:
    # return distance(node, goal) < goal_region_radius  # need to think more carefully about this one


def distance_from_goal(node):
    return 0  # placeholder: no goal-distance heuristic is used


lqr_rrt = LQR_RRT(A, B, Q, R, max_time_horizon)
lqr_rrt.action_state_valid = action_state_valid


def sample():
    if np.random.rand() < .9:
        # Uniform sample over the state-time box.
        statespace = np.random.rand(2) * np.array([.4, 2]) - np.array([.2, 1])
        time = np.random.randint(0, max_time_horizon, size=1) + 1
        #time = np.array(min(np.random.geometric(.06,size=1),max_time_horizon))
        time = np.reshape(time, newshape=(1, ))
        return np.concatenate((statespace, time))
    else:  # goal bias: reuse the goal's position with a random time
        statespace = goal[0:2]
        time = np.random.randint(0, max_time_horizon, size=1) + 1
        #time = np.array(min(np.random.geometric(.06,size=1),max_time_horizon))
        time = np.reshape(time, newshape=(1, ))
        return np.concatenate((statespace, time))
Example #3
        #time = np.array(min(np.random.geometric(.06,size=1),max_time_horizon))
        time = np.reshape(time, newshape=(1, ))
        return np.concatenate((statespace, time))
        

def distance_from_goal(node):
    return 0  # placeholder: no goal-distance heuristic is used


def goal_test(node):
    goal_region_radius = .01
    n = 4
    # L1 distance on the first n state coordinates; time is disregarded.
    return np.sum(np.abs(node['state'][0:n] - goal[0:n])) < goal_region_radius
    # Alternative test, currently disabled:
    # return distance(node, goal) < goal_region_radius  # need to think more carefully about this one

start = np.array([0, 0, 0, 0, 0])
lqr_rrt = LQR_RRT(A, B, Q, R, max_time_horizon)
rrt = RRT(state_ndim=5, control_ndim=2)

lqr_rrt.action_state_valid = action_state_valid

lqr_rrt.max_nodes_per_extension = 5

rrt.sample_goal = lambda: goal

rrt.set_distance(lqr_rrt.distance_cache)
rrt.set_same_state(lqr_rrt.same_state)
rrt.set_cost(lqr_rrt.cost)
rrt.set_steer(lqr_rrt.steer_cache)

rrt.set_goal_test(goal_test)
rrt.set_sample(sample)
Example #4
    return r

def action_state_valid(x, u):
    return isStateValid(x) and isActionValid(u)


def goal_test(node):
    goal_region_radius = .01
    n = 2
    # L1 distance on the first n state coordinates; time is disregarded.
    return np.sum(np.abs(node['state'][0:n] - goal[0:n])) < goal_region_radius
    # Alternative test, currently disabled:
    # return distance(node, goal) < goal_region_radius  # need to think more carefully about this one


def distance_from_goal(node):
    return 0  # placeholder: no goal-distance heuristic is used


lqr_rrt = LQR_RRT(A, B, Q, R, max_time_horizon)
lqr_rrt.action_state_valid = action_state_valid


def sample():
    if np.random.rand() < .9:
        # Uniform sample over the state-time box.
        statespace = np.random.rand(2) * np.array([.4, 2]) - np.array([.2, 1])
        time = np.random.randint(0, max_time_horizon, size=1) + 1
        #time = np.array(min(np.random.geometric(.06,size=1),max_time_horizon))
        time = np.reshape(time, newshape=(1, ))
        return np.concatenate((statespace, time))
    else:  # goal bias: reuse the goal's position with a random time
        statespace = goal[0:2]
        time = np.random.randint(0, max_time_horizon, size=1) + 1
        #time = np.array(min(np.random.geometric(.06,size=1),max_time_horizon))
        time = np.reshape(time, newshape=(1, ))
        return np.concatenate((statespace, time))
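
Both branches of sample() return a 3-vector: two state coordinates followed by
one time index. A quick sanity check of that layout (assuming goal and
max_time_horizon are defined as in the surrounding script):

s = sample()
assert s.shape == (3,)                 # [x, v, time]
assert 1 <= s[2] <= max_time_horizon   # randint(0, h) + 1 lies in [1, h]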