def main():
    """Benchmark the simulation: run ``env.step`` in a tight loop and print
    the instantaneous and average ticks-per-second (tps) until Ctrl-C.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("map_file")

    args = parser.parse_args()

    # Map is a pickled project-specific structure; only load trusted files.
    with open(args.map_file, "rb") as fp:
        M = pickle.load(fp)

    car = Car()
    sensor = Sensor(200, total=16)

    env = Environment(car, sensor, M)

    last_frame = time.perf_counter()

    total_reads = 0
    avg_tps = 0.0

    # Constant action for the benchmark — presumably full throttle,
    # no steering (verify against the Environment action layout).
    action = [1, 0, 0]

    try:
        while True:
            env.step(action, 1)

            curr_frame = time.perf_counter()
            dt = curr_frame - last_frame
            tps = 1 / dt
            print(f"tps: {int(tps):5d}\r", end='')
            last_frame = curr_frame

            # Incremental running mean; the formula already covers the
            # first sample (total_reads == 0 gives avg_tps = tps).
            avg_tps = (avg_tps * total_reads + tps) / (total_reads + 1)
            total_reads += 1
    except KeyboardInterrupt:
        pass
    finally:
        print(f"\navg tps: {int(avg_tps)}")
        # Cleanup belongs in finally so it runs even if an unexpected
        # exception (not just KeyboardInterrupt) escapes the loop.
        env.on_exit()
Example #2
0
def main():
    """Interactive play loop: load a pickled map, then render the
    environment while the player drives the car, until the window closes.

    Keys: R = reset, S = toggle sensor overlay, D = toggle drift physics.
    """
    parser = ArgumentParser()
    parser.add_argument("map_file")
    args = parser.parse_args()

    with open(args.map_file, "rb") as fp:
        M = pickle.load(fp)

    pygame.init()
    screen = pygame.display.set_mode([1500, 900])

    car = Car()
    player_controls = PlayerControls()
    sensor = NonLinearSensor(300, total=16)

    env = Environment(car, sensor, M)
    env_render = EnvironmentRenderer()

    running = True
    while running:
        for ev in pygame.event.get():
            if ev.type == pygame.QUIT:
                running = False
                continue
            # Player controls get first pick of any non-quit event.
            if player_controls.on_pygame_event(ev):
                continue
            if ev.type == pygame.KEYDOWN:
                if ev.key == pygame.K_r:
                    env.reset()
                elif ev.key == pygame.K_s:
                    env_render.show_sensor = not env_render.show_sensor
                elif ev.key == pygame.K_d:
                    car.drift = not car.drift
                    print(f"drift={car.drift}")

        screen.fill((255, 255, 255))

        action = player_controls.get_action()
        observation, reward, done, info = env.step(action,
                                                   reset_finished=False)
        env_render.render(screen, env)

        if reward != 0:
            print(f"reward: {reward}")

        pygame.display.flip()

    pygame.quit()

    env.on_exit()
Example #3
0
        env.render(mode="human")
        bet_size = _get_action_from_user_input(env)
        return bet_size


def _get_action_from_user_input(env: Environment) -> int:
    """Prompt on stdin until the user enters a legal integer bet size.

    Uses an explicit retry loop instead of recursion — the original
    recursive re-prompt could eventually hit Python's recursion limit
    on repeated invalid input.

    :param env: environment providing ``get_legal_actions()``.
    :return: a bet size contained in ``env.get_legal_actions()``.
    """
    while True:
        try:
            input_integer = int(input(">> Enter bet size: $"))
        except ValueError:
            print("Value must be an integer", file=sys.stderr)
            continue
        if input_integer in env.get_legal_actions():
            return input_integer
        print("Cannot bet that much", file=sys.stderr)


if __name__ == "__main__":
    # Interactive coin-flip betting session: keep asking the user agent
    # for a bet until the environment reports the episode is finished.
    env = Environment()
    user_agent = UserAgent()
    finished = False
    while not finished:
        action = user_agent.act(env)
        print("\n\nFlipping coin...")
        state, reward, finished, debug_info = env.step(action)
        outcome = "WON :)\n" if reward > 0 else "LOST :(\n"
        print(outcome)
Example #4
0
def main():
    """Replay a trained DQN driver: load a pickled map and saved network
    weights, then render the agent controlling the car until the window
    is closed.

    Keys: R = reset (and restart the tick counter), S = toggle sensor
    overlay, D = toggle drift physics.
    """
    parser = ArgumentParser()
    parser.add_argument("map_file")
    parser.add_argument("ai_file")
    parser.add_argument("--drift", action="store_true")

    args = parser.parse_args()

    # Map is a pickled project-specific structure; only load trusted files.
    with open(args.map_file, "rb") as fp:
        M = pickle.load(fp)

    car = Car()
    car.drift = args.drift
    # Drift-physics coefficients tuned for the replayed agent.
    car.C_drift_control = 0.3
    car.C_drift_traction = 0.4
    car.C_drift_sideslip = 0.3
    car.F_engine_max = 10

    sensor = NonLinearSensor(300, total=16)

    env = Environment(car, sensor, M)
    env = EnvironmentWrapper(dqn_controls, env)

    model = create_dqn_model(env)
    print(model.summary())

    model.load_weights(args.ai_file)

    pygame.init()
    screen = pygame.display.set_mode([1500, 900])

    env_render = EnvironmentRenderer()

    running = True

    curr_tick = 0
    action = None

    while running:
        for ev in pygame.event.get():
            if ev.type == pygame.QUIT:
                running = False
            elif ev.type == pygame.KEYDOWN:
                if ev.key == pygame.K_r:
                    curr_tick = 0
                    env.reset()
                elif ev.key == pygame.K_s:
                    env_render.show_sensor = not env_render.show_sensor
                elif ev.key == pygame.K_d:
                    car.drift = not car.drift
                    print(f"drift={car.drift}")

        screen.fill((255, 255, 255))

        # NOTE(review): `% 1` is always 0, so the network predicts every
        # tick; the modulus looks like a leftover knob for predicting only
        # every N ticks — confirm before changing.
        if curr_tick % 1 == 0 or action is None:
            observation = env.get_observation()
            # Reshape to (1, 1, n) — presumably (batch, timesteps,
            # features) as expected by the DQN input layer; verify
            # against create_dqn_model.
            observation = np.array(observation).reshape(
                (1, 1, env.nb_observations))

            action = model.predict(observation)
            action = np.argmax(action[0])

        _, reward, _, _ = env.step(action, reset_finished=False, dt=1)
        curr_tick += 1
        env_render.render(screen, env)

        if reward != 0:
            print(f"reward: {reward}")

        pygame.display.flip()

    pygame.quit()

    env.on_exit()