Example 1
        # redraw the track and obstacles, then the agents on top of them
        plot_map(self.map, self.obs, display)
        for images in self._agent_images:
            for surf, rectangle in images:
                display.blit(surf, rectangle)
        # subtractive blend: bright text on the info surface darkens the
        # pixels underneath, so the notifications stay legible over the map
        display.blit(self._info_surface, (0, 0), None, pygame.BLEND_RGB_SUB)
        self._info_surface.fill(black)  # clear notifications from previous round
        pygame.display.update()


if __name__ == "__main__":
    import random

    import numpy as np

    from cars.physics import SimplePhysics
    from cars.track import generate_map

    np.random.seed(3)
    random.seed(3)
    m = generate_map(8, 5, 3, 3)
    SimpleCarWorld(1, m, SimplePhysics, SimpleCarAgent, timedelta=0.2).run()

    # If you want to continue training an existing model instead of
    # creating a new world with new agents, use the code below:
    # # load the agent from a file
    # agent = SimpleCarAgent.from_file('filename.txt')
    # # create the world
    # w = SimpleCarWorld(1, m, SimplePhysics, SimpleCarAgent, timedelta=0.2)
    # # attach the agent to it
    # w.set_agents([agent])
    # # and run
    # w.run()
    # # or evaluate the agent in this world
    # print(w.evaluate_agent(agent, 500))
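
As an aside on the rendering code above: pygame's BLEND_RGB_SUB flag subtracts the source surface's RGB values from the destination pixels. A tiny self-contained sketch of the effect, independent of the project code:

import pygame

pygame.init()
screen = pygame.display.set_mode((200, 100))
screen.fill((200, 200, 200))  # light grey background

overlay = pygame.Surface((200, 100))
overlay.fill((150, 150, 150))

# Subtractive blit: the overlay's RGB is subtracted from the screen,
# darkening those pixels; this is the same trick Example 1 uses to draw
# the info text over the map.
screen.blit(overlay, (0, 0), None, pygame.BLEND_RGB_SUB)
pygame.display.update()
pygame.quit()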
Example 2
import itertools
import random

import numpy as np

# SimpleCarWorld, SimpleCarAgent, SimplePhysics, generate_map,
# run_agent_for_worlds, save_to_file and latest_error_file_suffix are
# assumed to be provided by the surrounding project modules.


def run_and_save_best(visual, steps, _map=None, file=None):
    """
    Trains multiple networks with different hyperparameters, chooses the
    network with the best result and saves it to a file.

    :param visual: passed through to SimpleCarWorld to enable visualisation
    :param steps: number of simulation steps to run each agent for
    :param _map: optional pre-generated map; if None, three random worlds are created
    :param file: optional path to a saved agent whose weights seed the training
    """
    # create worlds to run the agents on
    if _map is None:
        worlds_number = 3
        np.random.seed(None)
        random.seed(None)
        _map = generate_map()
        worlds = [SimpleCarWorld(1, _map, SimplePhysics, SimpleCarAgent, visual, timedelta=0.2)
                  for _ in range(worlds_number)]
    else:
        worlds = [SimpleCarWorld(1, _map, SimplePhysics, SimpleCarAgent, visual, timedelta=0.2)]

    # create agents with all possible hyperparameters
    agents = []
    for eta, reg_coef, epochs, reward_depth, train_every in itertools.product(
            [1e-01],  # etas
            [32],     # reg_coefs
            [60],     # epochs
            [15],     # reward_depth
            [50],     # train_every / batch size
    ):
        if file is None:
            print("Creating a new agent")
            agent = SimpleCarAgent()
        else:
            print(f"Using an agent with weights from {file}")
            agent = SimpleCarAgent.from_file(file)
        if eta is not None:
            agent.eta = eta
        if reg_coef is not None:
            agent.reg_coef = reg_coef
        if epochs is not None:
            agent.epochs = epochs
        if reward_depth is not None:
            agent.reward_depth = reward_depth
        if train_every is not None:
            agent.train_every = train_every
        agents.append(agent)

    errors = []

    for world in worlds:
        errors.append(run_agent_for_worlds(agents, world, steps))

    means = np.nanmean(errors, 0)[0]
    results = dict(zip(agents, means))

    best_agent = max(results, key=results.get)
    best_reward = results[best_agent]
    if not isinstance(best_reward, np.float64):
        best_reward = None

    for agent, result in results.items():
        print(f"Agent with hyperparams: \n{agent.hyperparams_to_string()} \nError: {result}\n")

    print(f"🏆 This agent performed best across all worlds, with error {best_reward}:\n{best_agent.hyperparams_to_string()}")

    # write results to files
    file_path = str(file)
    dot_index = file_path.rfind(".")  # rfind so only the extension dot splits the name
    reward_file = file_path[:dot_index] + latest_error_file_suffix + file_path[dot_index:]
    with open(reward_file, 'a+') as f:
        f.seek(0)
        lines = f.readlines()
        last_reward = lines[-1] if len(lines) > 0 else None
        if best_reward is not None and (file is None or last_reward is None or float(last_reward) < float(best_reward)):
            save_to_file(best_agent)
            if last_reward is not None:
                f.write('\n')
            f.write(str(best_reward))
        else:
            save_to_file(agent=best_agent, prefix="temp_")
            print(f"Reward ({best_reward}) was invalid or worse than {last_reward}; the agent was saved to a temporary file")
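
For reference, a minimal usage sketch of run_and_save_best (the agent file name below is the one used by the later examples; whether it exists on disk depends on your setup):

# Train fresh agents on random worlds and keep the best network:
run_and_save_best(visual=False, steps=1000)

# Or continue from a previously saved network:
run_and_save_best(visual=False, steps=1000,
                  file="network_config_agent_0_layers_11_6_1.txt")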
Example 3
import argparse
import random

import numpy as np

# SimpleCarWorld, SimpleCarAgent, SimplePhysics and generate_map are assumed
# to be imported from the surrounding project modules.

parser = argparse.ArgumentParser()
parser.add_argument("-s", "--steps", type=int)
parser.add_argument("-f", "--filename", type=str)
# a flag rather than type=bool: bool() treats any non-empty string,
# including "False", as True
parser.add_argument("-e", "--evaluate", action="store_true")
parser.add_argument("--seed", type=int)
args = parser.parse_args()
# hard-coded overrides for local debugging; remove them to honor the CLI flags
args.steps = 1000
args.seed = 100
args.filename = 'network_config_agent_0_layers_11_6_1.txt'

print(args.steps, args.seed, args.filename, args.evaluate)

steps = args.steps
seed = args.seed if args.seed is not None else 23
np.random.seed(seed)
random.seed(seed)
m = generate_map(8, 5, 3, 3)

if args.filename:
    agent = SimpleCarAgent.from_file(args.filename)
    w = SimpleCarWorld(5, m, SimplePhysics, SimpleCarAgent, timedelta=0.2)
    if args.evaluate:
        print(w.evaluate_agent(agent, steps))
    else:
        w.set_agents([agent])
        w.run(steps)
else:
    SimpleCarWorld(5, m, SimplePhysics, SimpleCarAgent,
                   timedelta=0.2).run(steps)
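
Incidentally, the hard-coded overrides above can be avoided entirely: argparse also accepts an explicit argument list, which is handy for quick experiments from a REPL or a test. A minimal sketch, reusing the parser defined above:

# Parse an explicit argument list instead of sys.argv:
args = parser.parse_args(["-s", "500", "--seed", "42",
                          "-f", "network_config_agent_0_layers_11_6_1.txt", "-e"])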
Example 4
import argparse
import random

import numpy as np

# SimpleCarWorld, SimpleCarAgent, SimplePhysics, generate_map,
# generate_obstacles and get_partition are assumed to come from the
# surrounding project modules.

parser = argparse.ArgumentParser()
parser.add_argument("-s", "--steps", type=int)
parser.add_argument("-f", "--filename", type=str)
parser.add_argument("-e", "--evaluate", action="store_true")  # a flag, since type=bool mis-parses "False"
parser.add_argument("--seed", type=int)
args = parser.parse_args()

print(args.steps, args.seed, args.filename, args.evaluate)

steps = args.steps
seed = args.seed if args.seed is not None else 23
np.random.seed(seed)
random.seed(seed)
m = generate_map(20, 2, 1, 1)
radii = np.random.normal(loc=2.5, scale=0.2, size=8)
angles = get_partition(8, -np.pi, np.pi)
o = generate_obstacles(8, radii, angles, 0)

if args.filename:
    agent = SimpleCarAgent.from_file(args.filename)
    w = SimpleCarWorld(1, m, o, radii, angles, SimplePhysics, SimpleCarAgent, timedelta=0.2)
    if args.evaluate:
        with open('results.txt', 'a') as out:
            circles, collisions = w.evaluate_agent(agent, SimplePhysics, steps, timedelta=0.2)
            out.write(f'seed: {seed} circles: {circles} collisions: {collisions}\n')
    else:
        w.set_agents([agent])
        w.run(SimplePhysics, steps, timedelta=0.2)
else:
    w = SimpleCarWorld(1, m, o, radii, angles, SimplePhysics, SimpleCarAgent, timedelta=0.2)
    w.run(SimplePhysics, steps, timedelta=0.2)
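
get_partition is not defined in any of these examples. Judging from the call get_partition(8, -np.pi, np.pi) and the matching radii array of size 8, it presumably splits the angle interval into evenly spaced values; a hypothetical stand-in (an assumption, not the project's actual code) could look like this:

import numpy as np

def get_partition(n, start, stop):
    # Hypothetical stand-in: n evenly spaced angles over [start, stop),
    # matching the call get_partition(8, -np.pi, np.pi) above.
    return np.linspace(start, stop, n, endpoint=False)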