def init_cfg_and_env():
    # parse command-line arguments and build an ExperimentCfg object to hold them
    prs = get_arguments()
    cfg = ExperimentCfg()

    cfg.make_grad_cam_config(prs)

    environment = common.makeCustomizedGridEnv(cfg)
    obs_shape = environment.observation_space.shape
    act_n = environment.action_space.n
    cfg.OBS_SHAPE = obs_shape
    cfg.ACT_N = act_n

    return prs, cfg, environment
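
# Usage sketch (not part of the original snippet): how init_cfg_and_env might be
# called from a __main__ block. It assumes get_arguments, ExperimentCfg and
# common.makeCustomizedGridEnv are importable from this project, as used above.
if __name__ == "__main__":
    parser, cfg, env = init_cfg_and_env()
    print("observation shape:", cfg.OBS_SHAPE)
    print("number of actions:", cfg.ACT_N)
    env.close()
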
Example #2
    parser.add_argument("-plr",
                        required=True,
                        type=float,
                        help="distilled policy learning rate")

    config = experiment_config.ExperimentCfg()
    config.make_i2a_config(parser)

    device = torch.device(config.DEVICE)

    writer = SummaryWriter(comment="_i2a_fc_" +
                           config.build_name_for_i2a_writer())
    saves_path = writer.logdir

    envs = [
        common.makeCustomizedGridEnv(config) for _ in range(config.NUM_ENVS)
    ]
    test_env = common.makeCustomizedGridEnv(config)

    # set the seed for torch operations and for all environments (training and test)
    common.set_seed(config.SEED, envs=envs)
    common.set_seed(config.SEED, envs=[test_env])

    obs_shape = envs[0].observation_space.shape
    act_n = envs[0].action_space.n

    #    net_policy = common.AtariA2C(obs_shape, act_n).to(device)
    net_policy = common.getNet(config)
    config.A2CNET = str(net_policy)

    net_em = models.environment_model.EnvironmentModel(obs_shape, act_n,
Example #3
                        "--PLOT",
                        default=False,
                        required=False,
                        help=" set to True to show plots during tests")
    parser.add_argument("-lr",
                        required=False,
                        type=float,
                        help="learning rate")

    fig, _ = plt.subplots()

    config = ExperimentCfg()
    config.make_test_env_config(parser)
    device = torch.device(config.DEVICE)

    env = common.makeCustomizedGridEnv(config)
    device = torch.device("cuda")  # override the configured device and force CUDA
    config.DEVICE = 'cuda'

    obs_shape = env.observation_space.shape
    act_n = env.action_space.n

    net = common.getNet(config)
    # load the trained A2C weights onto CPU storage, regardless of the device
    # they were saved from
    net.load_state_dict(
        torch.load(config.A2C_FN, map_location=lambda storage, loc: storage))

    agent = ptan.agent.PolicyAgent(
        lambda x: net(x)[0],
        action_selector=ptan.actions.ProbabilityActionSelector(),
        apply_softmax=True,
        device=device)
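
    # Evaluation sketch (not part of the original snippet): a minimal test rollout
    # with the agent built above. It assumes env follows the classic Gym API
    # (reset() -> obs, step() -> (obs, reward, done, info)); ptan's PolicyAgent
    # takes a batch of observations and returns a batch of sampled actions.
    obs = env.reset()
    total_reward, done = 0.0, False
    while not done:
        actions, _ = agent([obs])
        obs, reward, done, _ = env.step(actions[0])
        total_reward += reward
    print("test episode reward: %.2f" % total_reward)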