Example #1
def world1(flag=False):
    dyn = dynamics.CarDynamics(0.1)
    world = World()
    clane = lane.StraightLane([0., -1.], [0., 1.], 0.13)
    world.lanes += [clane, clane.shifted(1), clane.shifted(-1)]
    world.roads += [clane]
    world.fences += [clane.shifted(2), clane.shifted(-2)]
    world.cars.append(
        car.UserControlledCar(dyn, [-0.13, 0., math.pi / 2., 0.3],
                              color='red'))
    world.cars.append(
        car.NestedOptimizerCar(dyn, [0.0, 0.5, math.pi / 2., 0.3],
                               color='yellow'))
    world.cars[1].human = world.cars[0]
    if flag:
        world.cars[0].follow = world.cars[1].traj_h
    r_h = world.simple_reward(
        [world.cars[1].traj],
        speed_import=.2 if flag else 1.,
        speed=0.8 if flag else 1.
    ) + 100. * feature.bounded_control(world.cars[0].bounds)

    @feature.feature
    def human_speed(t, x, u):
        return -world.cars[1].traj_h.x[t][3]**2

    r_r = 300. * human_speed + world.simple_reward(world.cars[1], speed=0.5)
    world.cars[1].rewards = (r_h, r_r)
    #world.objects.append(Object('cone', [0., 1.8]))
    return world
Example #2
def world6(know_model=True):
    dyn = dynamics.CarDynamics(0.1)
    world = World()
    clane = lane.StraightLane([0., -1.], [0., 1.], 0.13)
    world.lanes += [clane, clane.shifted(1), clane.shifted(-1)]
    world.roads += [clane]
    world.fences += [clane.shifted(2), clane.shifted(-2), clane.shifted(2.5), clane.shifted(-2.5)]
    world.cars.append(car.SimpleOptimizerCar(dyn, [-0.13, 0., math.pi/2., 0.5], color='red'))
    if know_model:
        world.cars.append(car.NestedOptimizerCar(dyn, [0., 0.05, math.pi/2., 0.5], color='yellow'))
    else:
        world.cars.append(car.SimpleOptimizerCar(dyn, [0., 0.05, math.pi/2., 0.5], color='yellow'))
    world.cars[0].reward = world.simple_reward(world.cars[0], speed=0.6)
    world.cars[0].default_u = np.asarray([0., 1.])
    @feature.feature
    def goal(t, x, u):
        # quadratic attraction toward the point (x=-0.13, y=2.): stay in the
        # left lane while making forward progress
        return -(10.*(x[0]+0.13)**2+0.5*(x[1]-2.)**2)
    if know_model:
        world.cars[1].human = world.cars[0]
        r_h = world.simple_reward([world.cars[1].traj], speed=0.6)+100.*feature.bounded_control(world.cars[0].bounds)
        r_r = 10*goal+world.simple_reward([world.cars[1].traj_h], speed=0.5)
        world.cars[1].rewards = (r_h, r_r)
    else:
        r = 10*goal+world.simple_reward([world.cars[0].linear], speed=0.5)
        world.cars[1].reward = r
    return world
Example #3
def world8(flag=False):
    dyn = dynamics.CarDynamics(0.1)
    world = World()
    vlane = lane.StraightLane([0., -1.], [0., 1.], 0.15)
    hlane = lane.StraightLane([-1., 0.], [1., 0.], 0.15)

    world.lanes += [vlane.shifted(0.5), vlane.shifted(-0.5), hlane.shifted(0.5), hlane.shifted(-0.5)]

    world.fences += [hlane.shifted(-0.5), hlane.shifted(0.5)]


    world.cars.append(car.UserControlledCar(dyn, [0., -.3, math.pi/2., 0.0], color='red'))
    world.cars.append(car.NestedOptimizerCar(dyn, [-0.3, 0., 0., 0.], color='blue'))
    world.cars[1].human = world.cars[0]
    world.cars[0].bounds = [(-3., 3.), (-2., 2.)]
    if flag:
        world.cars[0].follow = world.cars[1].traj_h
    world.cars[1].bounds = [(-3., 3.), (-2., 2.)]
    @feature.feature
    def horizontal(t, x, u):
        return -x[2]**2
    # Listing each fence twice doubles its penalty weight in the summed reward.
    r_h = world.simple_reward(
        [world.cars[1].traj],
        lanes=[vlane],
        fences=[vlane.shifted(-1), vlane.shifted(1)] * 2
    ) + 100. * feature.bounded_control(world.cars[0].bounds)
    @feature.feature
    def human(t, x, u):
        return -tt.exp(-10*(world.cars[1].traj_h.x[t][1]-0.13)/0.1)
    r_r = human * 10. + horizontal * 30. + world.simple_reward(
        world.cars[1],
        lanes=[hlane] * 3,
        fences=[hlane.shifted(-1), hlane.shifted(1)] * 3 +
               [hlane.shifted(-1.5), hlane.shifted(1.5)] * 2,
        speed=0.9
    )
    world.cars[1].rewards = (r_h, r_r)
    return world
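
The `human` feature above acts as an exponential barrier: it is hugely negative while the human car's y-position is below roughly 0.13 (still inside the intersection) and decays toward zero once it is past. A standalone numeric sketch of its shape, with numpy standing in for the Theano tensor ops (illustrative only, not part of the world definition):

import numpy as np

def human_term(y):
    # mirrors -tt.exp(-10*(y - 0.13)/0.1) from the feature above
    return -np.exp(-10. * (y - 0.13) / 0.1)

for y in [0.0, 0.13, 0.26]:
    print(y, human_term(y))  # roughly -4.4e5, -1.0, -2.3e-6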
Example #4
def world3(flag=False):
    dyn = dynamics.CarDynamics(0.1)
    world = World()
    clane = lane.StraightLane([0., -1.], [0., 1.], 0.13)
    world.lanes += [clane, clane.shifted(1), clane.shifted(-1)]
    world.roads += [clane]
    world.fences += [
        clane.shifted(2),
        clane.shifted(-2),
        clane.shifted(2.5),
        clane.shifted(-2.5)
    ]
    world.cars.append(
        car.UserControlledCar(dyn, [0., 0., math.pi / 2., 0.3], color='red'))
    world.cars.append(
        car.NestedOptimizerCar(dyn, [0., 0.3, math.pi / 2., 0.3],
                               color='yellow'))
    world.cars[1].human = world.cars[0]
    world.cars[0].bounds = [(-3., 3.), (-1., 1.)]
    if flag:
        world.cars[0].follow = world.cars[1].traj_h
    r_h = world.simple_reward([
        world.cars[1].traj
    ]) + 100. * feature.bounded_control(world.cars[0].bounds)

    @feature.feature
    def human(t, x, u):
        return (world.cars[1].traj_h.x[t][0]) * 10

    r_r = 300. * human + world.simple_reward(world.cars[1], speed=0.5)
    world.cars[1].rewards = (r_h, r_r)
    #world.objects.append(Object('firetruck', [0., 0.7]))
    return world
Example #5
def world0():
    dyn = dynamics.CarDynamics(0.1)
    world = World()
    clane = lane.StraightLane([0., -1.], [0., 1.], 0.13)
    world.lanes += [clane, clane.shifted(1), clane.shifted(-1)]
    world.roads += [clane]
    world.fences += [clane.shifted(2), clane.shifted(-2)]
    world.cars.append(
        car.UserControlledCar(dyn, [-0.13, 0., math.pi / 2., 0.3],
                              color='red'))
    world.cars.append(
        car.NestedOptimizerCar(dyn, [0.0, 0.5, math.pi / 2., 0.3],
                               color='yellow'))
    world.cars[1].human = world.cars[0]
    r_h = world.simple_reward([
        world.cars[1].traj
    ]) + 100. * feature.bounded_control(world.cars[0].bounds)

    @feature.feature
    def human_speed(t, x, u):
        return -world.cars[1].traj_h.x[t][3]**2

    r_r = world.simple_reward(world.cars[1], speed=0.5)
    world.cars[1].rewards = (r_h, r_r)
    return world
Example #6
def lane_cut():
    dyn = dynamics.CarDynamics(0.1)
    world = highway()
    world.cars.append(
        car.UserControlledCar(dyn, [-.13, 0.2, math.pi / 2., 0.],
                              color='white'))
    return world
Example #7
def world9():

    dyn = dynamics.CarDynamics(0.1)
    world = World()
    clane = lane.StraightLane([0., -1.], [0., 1.], 0.13)
    world.lanes += [clane, clane.shifted(1), clane.shifted(-1)]
    world.roads += [clane]
    world.fences += [
        clane.shifted(2),
        clane.shifted(-2),
        clane.shifted(2.5),
        clane.shifted(-2.5)
    ]

    world.cars.append(
        car.SimpleOptimizerCar(dyn, [-0.13, 0., math.pi / 2, 0.5],
                               color='red'))
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [-0.13, -1.3, math.pi / 2, 0.5],
                               color='yellow'))
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [-0.13, -0.5, math.pi / 2, 0.5],
                               color='orange'))

    # veh_follow is not defined in this snippet; a following-distance feature
    # in the style of world10 (below) is assumed here so the reward is valid.
    @feature.feature
    def veh_follow(t, x, u):
        return -(
            (world.cars[0].traj.x[t][0] - world.cars[1].traj.x[t][0])**4 +
            (world.cars[0].traj.x[t][1] - 0.3 - world.cars[1].traj.x[t][1])**2)

    world.cars[0].reward = world.simple_reward(world.cars[0], speed=0.5)
    world.cars[1].reward = world.simple_reward(world.cars[1],
                                               speed=0.5) + 100 * veh_follow
    world.cars[2].reward = world.simple_reward(world.cars[2], speed=0.5)

    return world
Example #8
def world8():
    dyn = dynamics.CarDynamics(0.1)
    world = World(midlevel_exists=True)
    clane = lane.StraightLane([0., -1.], [0., 1.], lane.DEFAULT_WIDTH)
    world.lanes += [clane, clane.shifted(1)]
    world.roads += [clane]
    world.fences += [
        clane.shifted(2),
        clane.shifted(-1),
        clane.shifted(2.5),
        clane.shifted(-1.5)
    ]
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [-0.13, 0., math.pi / 2., 0.5],
                               color='yellow'))
    world.cars.append(
        car.SwitchOptimizerCar(dyn, [-0.13, 0.3, math.pi / 2., 0.5],
                               color='red',
                               iamrobot=True))
    world.cars.append(
        car.SwitchOptimizerCar(dyn, [-0.13, -0.3, math.pi / 2., 0.5],
                               color='red',
                               iamrobot=True))

    world.cars[0].reward = world.simple_reward(world.cars[0], speed=0.6)
    world.cars[0].default_u = np.asarray([0., 1.])
    world.cars[1].baseline_reward = world.simple_reward(world.cars[1],
                                                        speed=0.6)
    world.cars[1].default_u = np.asarray([0., 1.])
    world.cars[2].baseline_reward = world.simple_reward(world.cars[2],
                                                        speed=0.6)
    world.cars[2].default_u = np.asarray([0., 1.])

    return world
Example #9
def playground():
    dyn = dynamics.CarDynamics(0.1)
    world = World()
    clane = lane.StraightLane([0., -1.], [0., 1.], 0.17)
    world.lanes += [clane, clane.shifted(1), clane.shifted(-1)]
    world.roads += [clane]
    world.fences += [clane.shifted(2), clane.shifted(-2)]
    #world.cars.append(car.UserControlledCar(dyn, [0., 0., math.pi/2., 0.], color='orange'))
    world.cars.append(car.UserControlledCar(dyn, [-0.17, -0.17, math.pi/2., 0.], color='white'))
    return world
Example #10
def world_test():
    dyn = dynamics.CarDynamics(0.1)
    world = World()
    clane = lane.StraightLane([0., -1.], [0., 1.], 0.13)
    world.lanes += [clane, clane.shifted(1), clane.shifted(-1)]
    world.roads += [clane]
    world.fences += [clane.shifted(2), clane.shifted(-2)]
    world.cars.append(car.UserControlledCar(dyn, [-0.13, 0., math.pi/2., 0.3], color='red'))
    world.cars.append(car.SimpleOptimizerCar(dyn, [0.0, 0.5, math.pi/2., 0.3], color='yellow'))
    world.cars[1].reward = world.simple_reward(world.cars[1], speed=0.5)
    return world
Example #11
def world_features(num=0):
    dyn = dynamics.CarDynamics(0.1)
    world = World()
    clane = lane.StraightLane([0., -1.], [0., 1.], 0.13)
    world.lanes += [clane, clane.shifted(1), clane.shifted(-1)]
    world.roads += [clane]
    world.fences += [clane.shifted(2), clane.shifted(-2)]
    world.cars.append(car.UserControlledCar(dyn, [-0.13, 0., math.pi/2., 0.3], color='red'))
    world.cars.append(car.Car(dyn, [0., 0.1, math.pi/2.+math.pi/5, 0.], color='yellow'))
    world.cars.append(car.Car(dyn, [-0.13, 0.2, math.pi/2.-math.pi/5, 0.], color='yellow'))
    world.cars.append(car.Car(dyn, [0.13, -0.2, math.pi/2., 0.], color='yellow'))
    #world.cars.append(car.NestedOptimizerCar(dyn, [0.0, 0.5, math.pi/2., 0.3], color='yellow'))
    return world
Example #12
def irl_ground():
    dyn = dynamics.CarDynamics(0.1)
    world = World()
    clane = lane.StraightLane([0., -1.], [0., 1.], 0.13)
    world.lanes += [clane, clane.shifted(1), clane.shifted(-1)]
    world.roads += [clane]
    world.fences += [clane.shifted(2), clane.shifted(-2)]
    d = shelve.open('cache', writeback=True)
    cars = [
        (-0.13, .1, .9, -0.13),
        (0, .4, .8, 0.0),
        (.13, 0, .6, .13),
        (0, .8, .5, 0.),
        (0., 1., 0.5, 0.),
        (-.13, -0.5, 0.9, -0.13),
        (.13, -.8, 1., 0.13),
        (-.13, 1.0, 0.6, -0.13),
        (.13, 1.9, .5, 0.13),
        (0, 1.5, 0.5, 0),
    ]

    def goal(g):
        @feature.feature
        def r(t, x, u):
            return -(x[0] - g)**2

        return r

    for i, (x, y, s, gx) in enumerate(cars):
        if str(i) not in d:
            d[str(i)] = []
        world.cars.append(
            car.SimpleOptimizerCar(dyn, [x, y, math.pi / 2., s],
                                   color='yellow'))
        world.cars[-1].cache = d[str(i)]

        def f(j):
            def sync(cache):
                d[str(j)] = cache
                d.sync()

            return sync

        world.cars[-1].sync = f(i)
    for c, (x, y, s, gx) in zip(world.cars, cars):
        c.reward = world.simple_reward(c, speed=s) + 10. * goal(gx)
    world.cars.append(
        car.UserControlledCar(dyn, [0., -0.5, math.pi / 2., 0.7], color='red'))
    world.cars = world.cars[-1:] + world.cars[:-1]
    return world
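
Note the `f(j)` factory above: routing the loop index through a factory gives each car's `sync` callback its own copy of the index, whereas a closure defined directly in the loop would late-bind `i` and every callback would write to the last cache key. A minimal standalone illustration of the difference:

# Late binding: every callback sees the final value of i.
bad = [lambda: i for i in range(3)]
print([cb() for cb in bad])    # [2, 2, 2]

# Factory, as in irl_ground's f(j): each callback captures its own j.
def make(j):
    return lambda: j

good = [make(i) for i in range(3)]
print([cb() for cb in good])   # [0, 1, 2]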
Example #13
def lane_cut_run():
    dyn = dynamics.CarDynamics(0.1)
    world = highway()
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [-.13, 0.2, math.pi / 2., 0.],
                               color='white'))
    world.cars[0].reward = world.simple_reward(
        world.cars[0],
        speed=1,
        theta=[
            -1264.1224982, -1472.43401049, -205.60746282, -885.73160945,
            -1394.14040446, 181.50131822, -1356.62990406
        ])
    return world
Example #14
def fast_merge_right_demo():
    dyn = dynamics.CarDynamics(0.1)
    world = World()
    clane = lane.StraightLane([0., -1.], [0., 1.], 0.13)
    world.lanes = [clane, clane.shifted(1), clane.shifted(-1)]
    world.roads = [clane]
    world.fences = [clane.shifted(2), clane.shifted(-2)]
    d = shelve.open('cache', writeback=True)
    world.cars.append(
        car.UserControlledCar(dyn, [0.0, -0.2, math.pi / 2., 0.],
                              color='white'))
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [0, 0, math.pi / 2., .0], color='red'))
    world.cars[1].reward = world.simple_reward(world.cars[1], speed=0.5)
    return world
Example #15
def dynamics_from(definition):
    if "kind" not in definition:
        raise Exception(
            "dynamics_from: dynamics definition must include 'kind'")

    if definition["kind"] == DYNAMICS_NORMAL:
        dt = 0.1

        if "params" in definition and "dt" in definition["params"]:
            dt = definition["params"]["dt"]

        # Use the resolved dt so the default applies when "params" is absent.
        return dynamics.CarDynamics(dt=dt)

    raise Exception("dynamics_from: unknown dynamics kind: " +
                    definition["kind"])
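
A minimal usage sketch, assuming `DYNAMICS_NORMAL` is the module-level constant the function checks against (its concrete value is not shown in this snippet):

# Hypothetical definition dicts; "kind" is required, "params"/"dt" optional.
dyn = dynamics_from({"kind": DYNAMICS_NORMAL, "params": {"dt": 0.05}})
# -> CarDynamics with dt=0.05
dyn = dynamics_from({"kind": DYNAMICS_NORMAL})
# -> CarDynamics with the default dt=0.1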
Example #16
def world9():
    dyn = dynamics.CarDynamics(0.1)
    world = World(midlevel_exists=True)
    clane = lane.StraightLane([0., -1.], [0., 1.], lane.DEFAULT_WIDTH)
    world.lanes += [clane, clane.shifted(1)]
    world.roads += [clane]
    world.fences += [
        clane.shifted(2),
        clane.shifted(-1),
        clane.shifted(2.5),
        clane.shifted(-1.5)
    ]
    car_set = [
        {'lane': -0.14382104671193202, 'pos': -5.4614089828930892, 'isRobot': True},
        {'lane': -0.076485355678699476, 'pos': -5.287241227862939, 'isRobot': True},
        {'lane': 0.041717183967274571, 'pos': -5.1860610599902248, 'isRobot': True},
        {'lane': -0.13005408356976389, 'pos': -5.6439957056000463, 'isRobot': True},
        {'lane': -0.0017838032137736108, 'pos': 1.900418486064837, 'isRobot': False},
        {'lane': -0.1255197374127382, 'pos': 2.9384294124012125, 'isRobot': False},
        {'lane': -0.0017836856576096698, 'pos': 2.2542116035764277, 'isRobot': False},
        {'lane': -0.0017545585415078939, 'pos': 3.5441324751687593, 'isRobot': False},
        {'lane': -0.15426277999902296, 'pos': -5.2395293216772982, 'isRobot': True},
        {'lane': -0.0016715524262676368, 'pos': 3.1864204004814645, 'isRobot': False},
        {'lane': -0.078191570164785756, 'pos': -5.4227005075263781, 'isRobot': True},
        {'lane': -0.17759460092156229, 'pos': -5.0843811548205187, 'isRobot': True},
        {'lane': -0.0017836890619227796, 'pos': 4.1049603048534156, 'isRobot': False},
        {'lane': -0.0017781598554608973, 'pos': 4.5832853862108536, 'isRobot': False},
        {'lane': -0.12587224415597753, 'pos': 3.3265390539200665, 'isRobot': False},
        {'lane': -0.043776679064340018, 'pos': -5.1047807513629015, 'isRobot': True},
        {'lane': -0.12555872982148231, 'pos': 4.7986304907386588, 'isRobot': False},
        {'lane': -0.1256044714880388, 'pos': 5.2102349853652896, 'isRobot': False},
        {'lane': -0.0017403147363880666, 'pos': 5.0194332605150267, 'isRobot': False},
        {'lane': -0.0017840323712066659, 'pos': 5.5120519126069265, 'isRobot': False},
    ]

    for car_dict in car_set:
        if car_dict['isRobot']:
            world.cars.append(
                car.SwitchOptimizerCar(
                    dyn,
                    [car_dict['lane'], car_dict['pos'], math.pi / 2., 0.5],
                    color='red',
                    iamrobot=True))
        else:
            world.cars.append(
                car.SimpleOptimizerCar(
                    dyn,
                    [car_dict['lane'], car_dict['pos'], math.pi / 2., 0.5],
                    color='yellow'))

    return world
Example #17
def playground():
    dyn = dynamics.CarDynamics(0.1)
    world = World()
    clane = lane.StraightLane([0., -100.], [0., 100.], 0.17)
    world.lanes += [clane, clane.shifted(1), clane.shifted(-1)]
    #world.roads += [clane]
    #world.fences += [clane.shifted(2), clane.shifted(-2)]
    #world.cars.append(car.UserControlledCar(dyn, [0., 0., math.pi/2., 0.], color='orange'))
    world.auto_cars.append(car.Car([-0.17, -0.10, -math.pi/2., 0.], color='white', trained=0, car_no=0))
    world.auto_cars.append(car.Car([-0.17, 0.1, -math.pi/2., 0.], color='white', trained=0, car_no=1))
    world.auto_cars.append(car.Car([-0.17, -0.3, -math.pi/2., 255.], color='white', trained=0, car_no=2))
    world.human_cars.append(car.Car([0, 0.30, -math.pi / 2., 0], color='blue', car_no=10))

    for i in range(13):
        world.objects.append(car.Obj([0.35, 0.9-i*0.15]))
        world.objects.append(car.Obj([-0.35, 0.9 - i * 0.15]))
    return world
Example #18
def fast_merge_right_run():
    theta = [0.12118441, -50.98300634, 23.10744109, 3.11221324, -57.56491208]
    dyn = dynamics.CarDynamics(0.1)
    world = World()
    clane = lane.StraightLane([0., -1.], [0., 1.], 0.13)
    world.lanes = [clane, clane.shifted(1), clane.shifted(-1)]
    world.roads = [clane]
    world.fences = [clane.shifted(2), clane.shifted(-2)]
    d = shelve.open('cache', writeback=True)
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [0.0, -0.2, math.pi / 2., 0.],
                               color='white'))
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [0, 0, math.pi / 2., .0], color='red'))
    world.cars[0].reward = world.simple_reward(world.cars[0],
                                               speed=1,
                                               theta=theta)
    world.cars[1].reward = world.simple_reward(world.cars[1], speed=0.5)
    return world
Example #19
def two_merge_demo():
    dyn = dynamics.CarDynamics(0.1)
    world = highway()
    world.cars.append(
        car.UserControlledCar(dyn, [0, 0.1, math.pi / 2., .0], color='red'))
    world.cars.append(
        car.UserControlledCar(dyn, [0.13, 0.2, math.pi / 2., 0.],
                              color='white'))
    world.cars.append(
        car.UserControlledCar(dyn, [-.13, 0.2, math.pi / 2., 0.],
                              color='white'))
    with open('data/two_merge_traj/two_merge_traj-1490744314.pickle') as f:
        feed_u, feed_x = pickle.load(f)
        world.cars[1].fix_control(feed_u[0])
    with open('data/two_merge_traj/two_merge_traj-1490744613.pickle') as f:
        feed_u, feed_x = pickle.load(f)
        world.cars[2].fix_control(feed_u[1])
    #world.cars[0].reward = world.simple_reward(world.cars[0], speed=.7)
    return world
Example #20
    def __init__(self, name, total_time=50, recording_time=[0, 50]):
        super(DrivingSimulation, self).__init__(name,
                                                total_time=total_time,
                                                recording_time=recording_time)
        self.world = World()
        clane = lane.StraightLane([0., -1.], [0., 1.], 0.17)
        self.world.lanes += [clane, clane.shifted(1), clane.shifted(-1)]
        self.world.roads += [clane]
        self.world.fences += [clane.shifted(2), clane.shifted(-2)]
        self.dyn = dynamics.CarDynamics(0.1)
        self.robot = car.Car(self.dyn, [0., -0.3, np.pi / 2., 0.4],
                             color='orange')
        self.human = car.Car(self.dyn, [0.17, 0., np.pi / 2., 0.41],
                             color='white')
        self.world.cars.append(self.robot)
        self.world.cars.append(self.human)
        self.initial_state = [self.robot.x, self.human.x]
        self.input_size = 2
        self.reset()
        self.viewer = None
Example #21
def world_test_human():
    dyn = dynamics.CarDynamics(0.1)
    world = highway()
    world.cars.append(
        car.UserControlledCar(dyn, [-0.13, 0., math.pi / 2., 0.3],
                              color='red'))
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [0.0, 0.3, math.pi / 2., 0.8],
                               color='yellow'))
    world.cars[1].reward = world.simple_reward(world.cars[1], speed=0.5)

    with open('data/run1/world_test_human-1486145445.pickle') as f:
        feed_u, feed_x = pickle.load(f)
        #traj_h = pickle.load(f)
        print(feed_u.__class__.__name__)
        print(feed_u[0].__class__.__name__)
        #world.cars[0].follow= traj_h
        world.cars[0].fix_control(feed_u[0])

    return world
Example #22
def world10():

    dyn = dynamics.CarDynamics(0.1)
    world = World()
    clane = lane.StraightLane([0., -1.], [0., 1.], 0.13)
    world.lanes += [clane, clane.shifted(1), clane.shifted(-1)]
    world.roads += [clane]
    world.fences += [
        clane.shifted(2),
        clane.shifted(-2),
        clane.shifted(2.5),
        clane.shifted(-2.5)
    ]

    world.cars.append(
        car.SimpleOptimizerCar(dyn, [-0.13, 0., math.pi / 2, 0.5],
                               color='red'))
    world.cars.append(
        car.NestedOptimizerCar(dyn, [-0.13, -1.3, math.pi / 2, 0.5],
                               color='yellow'))
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [-0.13, -0.3, math.pi / 2, 0.5],
                               color='orange'))

    world.cars[1].human = world.cars[2]
    #r_h = world.simple_reward([world.cars[1].traj])+100.*feature.bounded_control(world.cars[0].bounds)

    @feature.feature
    def veh_follow(t, x, u):
        return -(
            (world.cars[0].traj.x[t][0] - world.cars[1].traj.x[t][0])**4 +
            (world.cars[0].traj.x[t][1] - 0.3 - world.cars[1].traj.x[t][1])**2)

    r_r = world.simple_reward(world.cars[1], speed=0.5) + 100 * veh_follow
    r_h = world.simple_reward(world.cars[2], speed=0.5)

    world.cars[0].reward = world.simple_reward(world.cars[0], speed=0.5)
    world.cars[1].rewards = (r_h, r_r)
    world.cars[2].reward = r_h

    return world
Example #23
def irl_ground_redo():
    start_thetas = [
        [-49.41780328, 7.56889171, -29.88617277, -43.16951913, 2.66180459],
        [31.76720093, -41.00733137, -11.04480014, 3.10109911, -17.74631041],
        [-35.28080999, 7.16573391, 39.76757247, -29.46527668, -46.8011004],
        [-9.15809793, 22.46761354, 16.02720722, -16.42384674, 13.66530697],
        [22.74288425, -30.45558916, 21.02526639, 40.52208768, -22.98279955],
        [-11.82435415, -45.55631605, -48.66029143, 4.7003656, 34.83941114]
    ]
    learned_thetas = [
        [0.63050464, -0.25920318, 6.62276118, 1.36460598, -27.45587143],
        [0.29136727, -45.62213084, 19.73830621, 2.16081166, -40.14207103],
        [8.67710219e-01, -1.04749109e-02, 3.39600499e+01, 1.47286358e+00, -4.74656408e+01],
        [0.41207963, -0.15903006, 19.09811664, 1.42279691, -23.45312591],
        [0.87659958, -5.29046734, 34.99991666, 1.62502487, -36.36590119],
        [0.45852171, -45.9455371, -4.8938928, 1.38068336, -28.57903624]
    ]

    #off second run theta,
    #[array([-21.64797415, -41.91799587,  17.57049892,   2.9955055 ,   0.06245884]), array([-48.09720879,  30.55382214,  12.59564428, -14.51636909, -33.88826816])]
    #[array([-17.59146916, -41.91799587,  17.5817344 ,   2.74549597,  -8.84827121]), array([-27.52673679,  30.55382214,  12.65934233,   0.87643322, -55.85940466])]

    #off fifth run theta,
    #[-30.61573957 -33.34307184 -36.49309123 -20.69403454 -57.9208694 ]

    #T=1 cuts
    #[   0.38375847  -19.84508972    5.3975739   -46.28047522 -113.86290679]

    old_theta = [3., -50., 10., 20., -60.]
    dyn = dynamics.CarDynamics(0.1)
    world = World()
    clane = lane.StraightLane([0., -1.], [0., 1.], 0.13)
    world.lanes += [clane, clane.shifted(1), clane.shifted(-1)]
    world.roads += [clane]
    world.fences += [clane.shifted(2), clane.shifted(-2)]
    d = shelve.open('cache', writeback=True)
    cars = [
        (-0.13, .1, .9, -0.13),
        (0, .4, .8, 0.0),
        (.13, 0, .6, .13),
        (0, .8, .5, 0.),
        (0., 1., 0.5, 0.),
        (-.13, -0.5, 0.9, -0.13),
        (.13, -.8, 1., 0.13),
        (-.13, 1.0, 0.6, -0.13),
        (.13, 1.9, .5, 0.13),
        (0, 1.5, 0.5, 0),
    ]

    def goal(g):
        @feature.feature
        def r(t, x, u):
            return -(x[0] - g)**2

        return r

    for i, (x, y, s, gx) in enumerate(cars):
        if str(i) not in d:
            d[str(i)] = []
        world.cars.append(
            car.SimpleOptimizerCar(dyn, [x, y, math.pi / 2., s],
                                   color='yellow'))
        world.cars[-1].cache = d[str(i)]

        def f(j):
            def sync(cache):
                d[str(j)] = cache
                d.sync()

            return sync

        world.cars[-1].sync = f(i)
    for c, (x, y, s, gx) in zip(world.cars, cars):
        c.reward = world.simple_reward(c, speed=s) + 10. * goal(gx)
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [0, -0.5, math.pi / 2., .7], color='red'))
    world.cars[-1].reward = world.simple_reward(
        world.cars[-1], speed=1, theta=learned_thetas[5])  #+300*goal(.13)
    world.cars = world.cars[-1:] + world.cars[:-1]
    return world
Example #24
def world_kex1(know_model=True):
    start_human = -0.13
    start_robot = -0.00
    dyn = dynamics.CarDynamics(0.1)
    world = World()
    clane = lane.StraightLane([0., -1.], [0., 1.], 0.13)
    world.lanes += [clane, clane.shifted(1), clane.shifted(-1)]
    world.roads += [clane]
    world.fences += [
        clane.shifted(2),
        clane.shifted(-2),
        clane.shifted(2.5),
        clane.shifted(-2.5)
    ]
    #world.cars.append(car.SimpleOptimizerCar(dyn, [start_human, 0., math.pi/2., 0.5], color='red')) # red car is human
    world.cars.append(
        car.NestedOptimizerCar(dyn, [start_human, 0., math.pi / 2., 0.5],
                               color='red'))  # red car is human
    if know_model:  # yellow car is the robot, which plans with the nested optimizer
        world.cars.append(
            car.NestedOptimizerCar(dyn, [start_robot, 0.0, math.pi / 2., 0.5],
                                   color='yellow'))
    else:
        world.cars.append(
            car.SimpleOptimizerCar(dyn, [start_robot, 0.0, math.pi / 2., 0.5],
                                   color='yellow'))
    world.cars[0].reward = world.simple_reward(world.cars[0], speed=0.6)
    world.cars[0].default_u = np.asarray([0., 1.])

    @feature.feature
    def goal(t, x, u):  # doesn't need this
        k = -(10. * (x[0] + 0.13)**2 + 0.5 * (x[1] - 2.)**2)  #ASK Elis
        #print("--------", x[0].auto_name)
        #print("--------", x[1].auto_name)
        #exit()
        return k

    # object--------------
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [-0.13, 0.5, math.pi / 2., 0.0],
                               color='blue'))  # blue car is obstacle
    #world.cars.append(car.NestedOptimizerCar(dyn, [-0.13, 0.5, math.pi/2., 0.0], color='blue')) # blue car is obstacle
    #print(world.cars)
    #exit()
    world.cars[2].reward = world.simple_reward(world.cars[2], speed=0.0)
    #world.cars[2].reward = 1
    world.cars[2].default_u = np.asarray([0., 0.])
    world.cars[2].movable = False

    #------------------

    if know_model:
        world.cars[1].human = world.cars[0]  # [1] is the robot; this tells it which car to model as the human
        world.cars[1].obstacle = world.cars[2]
        world.cars[0].obstacle = world.cars[2]
        world.cars[0].human = world.cars[1]

        # reward with respect to the robot trajectory: world.cars[1].traj
        r_h = world.simple_reward(
            [world.cars[1].traj], speed=0.5) + 100. * feature.bounded_control(
                world.cars[0].bounds) + 100. * feature.bounded_control(
                    world.cars[2].bounds)

        #r_r = 10*goal+world.simple_reward([world.cars[1].traj_h], speed=0.5
        r_r = world.simple_reward(
            [world.cars[1].traj_h],
            speed=0.5) + 100. * feature.bounded_control(world.cars[2].bounds)

        # The trailing bounded_control term was a dangling no-op statement in
        # the original; it is folded into the r_h2 expression here.
        r_h2 = world.simple_reward(
            [world.cars[1].traj_h],
            speed=0.5) + 100. * feature.bounded_control(
                world.cars[0].bounds) + 100. * feature.bounded_control(
                    world.cars[2].bounds)
        #r_r = 10*goal+world.simple_reward([world.cars[1].traj_h], speed=0.5
        r_r2 = world.simple_reward(
            [world.cars[1].traj],
            speed=0.5) + 100. * feature.bounded_control(world.cars[2].bounds)

        #r_obj = world.simple_reward([world.cars[1].traj_h], speed=0.0)
        world.cars[1].rewards = (r_h, r_r)  #ADD: r_object
        world.cars[0].rewards = (r_h2, r_r2)  #(optimize on, the car)
        #print(r_h)
        #print(r_r)
        #print(world.cars[1].rewards)
        #exit()
    else:
        r = 10 * goal + world.simple_reward([world.cars[0].linear], speed=0.5)
        world.cars[1].reward = r

    #world.cars.append(static_obj.SimpleOptimizerCar(dyn, [-0.13, 0.5, math.pi/2., 0.0], color='blue')) # blue car is obstacle)

    return world
Example #25
def world11():

    dyn = dynamics.CarDynamics(0.1)
    world = World()
    clane = lane.StraightLane([0., -1.], [0., 1.], 0.13)
    world.lanes += [clane, clane.shifted(1), clane.shifted(-1)]
    world.roads += [clane]
    world.fences += [
        clane.shifted(2),
        clane.shifted(-2),
        clane.shifted(2.5),
        clane.shifted(-2.5)
    ]

    world.cars.append(
        car.SimpleOptimizerCar(dyn, [-0.13, 0., math.pi / 2, 0.5],
                               color='red'))
    world.cars.append(
        car.NestedOptimizerCar(dyn, [-0.13, -1.3, math.pi / 2, 0.5],
                               color='yellow'))
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [-0.13, -0.5, math.pi / 2, 0.5],
                               color='orange'))

    world.cars[1].human = world.cars[2]
    #r_h = world.simple_reward([world.cars[1].traj])+100.*feature.bounded_control(world.cars[0].bounds)
    """
    This goal just tries to get the cars close to each other
    """
    """
    @feature.feature
    def veh_follow(t, x, u):
        return -((world.cars[0].traj.x[t][0]-world.cars[1].traj.x[t][0])**2 + (world.cars[0].traj.x[t][1]-0.3-world.cars[1].traj.x[t][1])**2)
    """
    """
    For the first few time steps, try to get the vehicles close together in the y-dimension.
    After a number of steps, get them together in the x-dimension. This allows for strategic lane switching.
    """
    """
    @feature.feature
    def veh_follow(t, x, u):
        if (t>3):
            s = -(5*(world.cars[0].traj.x[t][0]-world.cars[1].traj.x[t][0])**2 + (world.cars[0].traj.x[t][1]-0.3-world.cars[1].traj.x[t][1])**2)
        else :
            s = -((world.cars[0].traj.x[t][1]-0.3-world.cars[1].traj.x[t][1])**2);
        return s
    """
    """
    This goal ramps up how much the x-dimension matters as the vehicle gets closer to platooning position. Also, the y-position goal saturates.
    """
    @feature.feature
    def veh_follow(t, x, u):

        follow_loc = 0.3

        distance_sq = (world.cars[0].traj.x[t][0] - world.cars[1].traj.x[t][0]
                       )**2 + (world.cars[0].traj.x[t][1] -
                               world.cars[1].traj.x[t][1])**2

        distance = tt.sqrt(distance_sq)  # Theano sqrt; distance_sq is symbolic

        # If we are not close to the goal car, it doesn't matter what lane
        # we're in: the exp term makes the importance of being in the correct
        # lane decay exponentially with distance from the follow point.
        x_penalty = -10 * tt.exp(-10.0 * (distance - follow_loc)) * (
            world.cars[0].traj.x[t][0] - world.cars[1].traj.x[t][0])**2

        # The y penalty should saturate at a certain distance because a very distant car shouldn't engage in very risky maneuvers.
        # Because of this we have the exponential saturation term.
        #y_penalty = -(world.cars[0].traj.x[t][1]-follow_loc-world.cars[1].traj.x[t][1])**2
        y_penalty = -(
            -1.0 / 2.0 + 100.0 /
            (1.0 + tt.exp(-1.0 / 10.0 *
                          (world.cars[0].traj.x[t][1] -
                           world.cars[1].traj.x[t][1] - follow_loc)**2)))

        s = x_penalty + y_penalty

        return s

    r_r = world.simple_reward(world.cars[1], speed=0.5) + 100 * veh_follow
    r_h = world.simple_reward(world.cars[2], speed=0.5)

    world.cars[0].reward = world.simple_reward(world.cars[0], speed=0.5)
    world.cars[1].rewards = (r_h, r_r)
    world.cars[2].reward = r_h

    return world
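
A quick numeric check of the saturating y-penalty described in the comments above, with numpy standing in for the Theano tensors (standalone sketch, not part of the world definition):

import numpy as np

def y_penalty(dy, follow_loc=0.3):
    # mirrors -(-1/2 + 100/(1 + exp(-(dy - follow_loc)**2 / 10)))
    return -(-0.5 + 100.0 / (1.0 + np.exp(-0.1 * (dy - follow_loc)**2)))

for dy in [0.3, 1.0, 3.0, 10.0]:
    print(dy, y_penalty(dy))  # -49.5, -50.7, -67.0, then flattens near -99.5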
Example #26
def world7():
    dyn = dynamics.CarDynamics(0.1)
    world = World()
    clane = lane.StraightLane([0., -1.], [0., 1.], 0.13)
    world.lanes += [clane, clane.shifted(1), clane.shifted(-1)]
    world.roads += [clane]
    world.fences += [
        clane.shifted(2),
        clane.shifted(-2),
        clane.shifted(2.5),
        clane.shifted(-2.5)
    ]

    world.cars.append(
        car.SimpleOptimizerCar(dyn, [-0.13, 0.6, math.pi / 2, 0.5],
                               color='red'))
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [-0.13, 0.3, math.pi / 2, 0.5],
                               color='red'))
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [-0.13, 0., math.pi / 2., 0.5],
                               color='red'))
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [-0.13, -0.3, math.pi / 2, 0.5],
                               color='red'))
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [-0.13, -0.6, math.pi / 2, 0.5],
                               color='red'))
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [0., 0.3, math.pi / 2, 0.5],
                               color='orange'))
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [0., 0.05, math.pi / 2., 0.5],
                               color='orange'))
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [0., -0.3, math.pi / 2, 0.5],
                               color='orange'))
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [0., -0.6, math.pi / 2, 0.5],
                               color='orange'))
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [0.13, 0.3, math.pi / 2, 0.5],
                               color='yellow'))
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [0.13, 0., math.pi / 2, 0.5],
                               color='yellow'))
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [0.13, -0.3, math.pi / 2, 0.5],
                               color='yellow'))
    world.cars.append(
        car.SimpleOptimizerCar(dyn, [0.13, -0.6, math.pi / 2, 0.5],
                               color='yellow'))
    # Per-car rewards; target speeds vary per car (car 3 is aggressive at 1.5).
    speeds = [0.5, 0.55, 0.5, 1.5, 0.45, 0.5, 0.5,
              0.5, 0.55, 0.5, 0.5, 0.45, 0.5]
    for c, s in zip(world.cars, speeds):
        c.reward = world.simple_reward(c, speed=s)
        c.default_u = np.asarray([0., 1.])
    return world
Example #27
            pyglet.clock.schedule(self.output_loop)
        self.event_loop.run()

    def run_modified(self, history_x, history_u):
        self.pause_every = None
        self.reset()
        self.feed_x = history_x
        self.feed_u = history_u
        pyglet.clock.schedule_interval(self.animation_loop, 0.02)
        pyglet.clock.schedule_interval(self.control_loop, self.dt)
        self.event_loop.run()


if __name__ == '__main__' and False:
    import lane
    dyn = dynamics.CarDynamics(0.1)
    vis = Visualizer(dyn.dt)
    vis.lanes.append(lane.StraightLane([0., -1.], [0., 1.], 0.13))
    vis.lanes.append(vis.lanes[0].shifted(1))
    vis.lanes.append(vis.lanes[0].shifted(-1))
    vis.cars.append(car.UserControlledCar(dyn, [0., 0., math.pi / 2., .1]))
    vis.cars.append(
        car.SimpleOptimizerCar(dyn, [0., 0.5, math.pi / 2., 0.], color='red'))
    r = -60. * vis.cars[0].linear.gaussian()
    r = r + vis.lanes[0].gaussian()
    r = r + vis.lanes[1].gaussian()
    r = r + vis.lanes[2].gaussian()
    r = r - 30. * vis.lanes[1].shifted(1).gaussian()
    r = r - 30. * vis.lanes[2].shifted(-1).gaussian()
    r = r + 30. * feature.speed(0.5)
    r = r + 10. * vis.lanes[0].gaussian(10.)
Example #28
def world7(prob_aut=0.5):
    num_cars = 20
    aut_list = np.zeros(num_cars)
    dyn = dynamics.CarDynamics(0.1)
    world = World(midlevel_exists=True)
    world.aut_level = prob_aut
    clane = lane.StraightLane([0., -1.], [0., 1.], lane.DEFAULT_WIDTH)
    world.lanes += [clane, clane.shifted(1)]
    world.roads += [clane]
    world.fences += [
        clane.shifted(2),
        clane.shifted(-1),
        clane.shifted(2.5),
        clane.shifted(-1.5)
    ]
    # world.cars.append(car.SimpleOptimizerCar(dyn, [-0.13, 0., math.pi/2., 0.5], color='yellow'))
    # world.cars.append(car.SwitchOptimizerCar(dyn, [0, 0.15, math.pi/2., 0.5], color='red', iamrobot=True))
    # world.cars.append(car.SwitchOptimizerCar(dyn, [0, -0.3, math.pi/2., 0.5], color='red', iamrobot = True))
    lane_1_cur_pos = -10
    lane_2_cur_pos = -10.5
    for i in xrange(num_cars):
        temp_pos = None
        temp_lane = np.random.binomial(1, 0.5) * (-0.13)
        if temp_lane < 0:
            temp_pos = lane_1_cur_pos
            lane_1_cur_pos += np.random.uniform(0.25, 0.5)
        else:
            temp_pos = lane_2_cur_pos
            lane_2_cur_pos += np.random.uniform(0.25, 0.5)
        if np.random.random() <= prob_aut:
            world.cars.append(
                car.SwitchOptimizerCar(
                    dyn, [temp_lane, temp_pos, math.pi / 2., 0.5],
                    color='red',
                    iamrobot=True))
            aut_list[i] = 1
        else:
            world.cars.append(
                car.SimpleOptimizerCar(
                    dyn, [temp_lane, temp_pos, math.pi / 2., 0.5],
                    color='yellow'))

    print("\n\n\n\n\n\n\n\n\n\n\n\nAUT LIST IS: {} \n\n\n\n".format(aut_list))

    # world.cars[0].reward = world.simple_reward(world.cars[0], speed=0.6)
    # world.cars[0].default_u = np.asarray([0., 1.])
    # world.cars[1].baseline_reward = world.simple_reward(world.cars[1], speed=0.6)
    # world.cars[1].default_u = np.asarray([0., 1.])
    # world.cars[2].baseline_reward = world.simple_reward(world.cars[2], speed=0.6)
    # world.cars[2].default_u = np.asarray([0., 1.])
    # world.cars[3].reward = world.simple_reward(world.cars[3], speed=0.6)
    # world.cars[3].default_u = np.asarray([0., 1.])
    for i in xrange(num_cars):
        if aut_list[i] == 1:
            world.cars[i].baseline_reward = world.simple_reward(world.cars[i],
                                                                speed=0.6)
            world.cars[i].default_u = np.asarray([0., 1.])
        else:
            world.cars[i].reward = world.simple_reward(world.cars[i],
                                                       speed=0.6)
            world.cars[i].default_u = np.asarray([0., 1.])

    return world