Example 1
    def test_two_tracks(self):
        """A CarRacing env built with num_tracks=2 exposes exactly two tracks after reset."""
        racing_env = CarRacing(num_tracks=2)
        racing_env.reset()

        # Tracks are only generated by reset(); count them afterwards.
        assert len(racing_env.tracks) == 2
        racing_env.close()
Example 2
    def test_two_lanes_with_no_lane_changes(self):
        """With two lanes and no lane changes, lane extremes are symmetric constants."""
        racing_env = CarRacing(num_lanes=2, num_lanes_changes=0)
        racing_env.reset()

        # Expected (left, right) extremes; expressions kept exactly as the
        # implementation computes them so float comparison stays exact.
        inner_extremes = (-3 - 1 / 3, 3 + 1 / 3)
        outer_extremes = (-6 - 2 / 3, +6 + 2 / 3)

        assert racing_env._get_extremes_of_position(0, border=1) == inner_extremes
        assert racing_env._get_extremes_of_position(1, border=0) == outer_extremes

        racing_env.close()
        del racing_env
Example 3
    def test_one_track(self):
        """A default CarRacing env has no tracks until reset, then exactly one."""
        racing_env = CarRacing()

        # Before the first reset() the attribute must not exist at all.
        with pytest.raises(AttributeError):
            racing_env.tracks

        racing_env.reset()
        assert len(racing_env.tracks) == 1
        racing_env.close()
Example 4
# Main driving loop: repeatedly step the environment with the current action
# `a` (set elsewhere, presumably by a keyboard handler or agent — confirm),
# run lane detection and waypoint/speed prediction on the observed frame,
# and accumulate reward until the episode ends, a restart is requested, or
# 600 steps have elapsed.
while True:
    # perform step
    # NOTE(review): this env's step() returns a 5-tuple
    # (state, reward, done, speed, info) instead of gym's standard 4-tuple —
    # verify against the CarRacing implementation in use.
    s, r, done, speed, info = env.step(a)
    
    # lane detection on the raw observation frame `s`
    lane1, lane2 = LD_module.lane_detection(s)

    # waypoint and target_speed prediction
    waypoints = waypoint_prediction(lane1, lane2)
    target_speed = target_speed_prediction(waypoints)

    # reward
    total_reward += r

    # outputs during training: log every other step, and always on episode end
    if steps % 2 == 0 or done:
        print("\naction " + str(["{:+0.2f}".format(x) for x in a]))
        print("step {} total_reward {:+0.2f}".format(steps, total_reward))

        # visualize current state with the predicted waypoints overlaid
        LD_module.plot_state_lane(s, steps, fig, waypoints=waypoints)
        
    steps += 1
    env.render()

    # check if stop: episode done, user-requested restart (`restart` is set
    # outside this loop — TODO confirm by whom), or hard 600-step cap
    if done or restart or steps>=600: 
        print("step {} total_reward {:+0.2f}".format(steps, total_reward))
        break

env.close()