Example #1
    parser.add_argument("--cfg_path", type=str, default='config.json.dist')
    parser.add_argument("--restrict", type=bool, default=False)
    parser.add_argument("--imitation", type=bool, default=False)
    parser.add_argument("--test", type=bool, nargs='?', const=True, default=False)
    parser.add_argument("--restore", type=bool, nargs='?', const=True, default=False)
    parser.add_argument('--save_replay', type=bool, nargs='?', const=True, default=False)
    args = parser.parse_args()

    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)  # os.environ maps environment-variable names to values; select the GPU to use
    tf.reset_default_graph()
    sess = tf.Session()

    # config = Config(args.sz, args.map, lambda _: 1)
    config = Config(args.sz, args.map, args.run_id, restrict=args.restrict, imitation=args.imitation)  # set up the run configuration
    os.makedirs('weights/' + config.full_id(), exist_ok=True)
    cfg_path = 'weights/%s/config.json' % config.full_id()  # path where the config is saved
    config.build(cfg_path if args.restore else args.cfg_path)  # build and load the config
    if not args.restore and not args.test:
        config.save(cfg_path)  # persist the config

    envs = EnvWrapper(make_envs(args), config)  # create the environments and wrap them
    agent = A2CAgent(sess, fully_conv, config, args.restore, args.discount, args.lr, args.vf_coef, args.ent_coef, args.clip_grads)  # create the agent

    runner = Runner(envs, agent, args.steps)  # create the runner
    runner.run(args.updates, not args.test)  # start running

    if args.save_replay:  # optionally save the game replay
        envs.save_replay()

    envs.close()  # close the environments
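
Note on the flags above: argparse's type=bool is a common pitfall, because argparse passes the raw command-line string to bool(), and any non-empty string (including "False") is truthy. The snippet was therefore adjusted to action='store_true', the idiomatic way to declare an on/off flag. A minimal, self-contained sketch of the pattern (the flag names are illustrative):

    import argparse

    parser = argparse.ArgumentParser()
    # absent -> False, present -> True; the flag takes no value
    parser.add_argument("--test", action='store_true')
    parser.add_argument("--restore", action='store_true')

    args = parser.parse_args(['--test'])
    assert args.test is True and args.restore is False
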
Example #2
    parser.add_argument("--test",
                        type=bool,
                        nargs='?',
                        const=True,
                        default=False)
    parser.add_argument("--restore",
                        type=bool,
                        nargs='?',
                        const=True,
                        default=False)
    args = parser.parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    tf.reset_default_graph()
    sess = tf.Session()

    envs = EnvWrapper(make_envs(args))
    # which features to use is not specified at initialization
    if args.game == 'CartPole-v0':
        num_action = 2
        input_size = None
        agentmodel_local = carpole_net_local
        agentmodel_target = carpole_net_target
    elif args.game == 'Breakout-v0':
        num_action = 4
        input_size = 80 * 80
        agentmodel_local = Breakout_local
        agentmodel_target = Breakout_target
    elif args.game == 'MsPacman-v0':
        num_action = 9
        input_size = [88, 80]
        agentmodel_local = MsPacman_local
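
The per-game if/elif chain above can also be written as a lookup table, which fails loudly on an unknown game id instead of silently leaving the variables unbound. A hedged sketch: GAME_SETTINGS is a hypothetical helper, and the strings stand in for the carpole_net_*/Breakout_*/MsPacman_* model objects from the snippet:

    from argparse import Namespace

    # (num_action, input_size, local model, target model) per Gym id;
    # the strings are placeholders for the real network objects
    GAME_SETTINGS = {
        'CartPole-v0': (2, None, 'carpole_net_local', 'carpole_net_target'),
        'Breakout-v0': (4, 80 * 80, 'Breakout_local', 'Breakout_target'),
        'MsPacman-v0': (9, [88, 80], 'MsPacman_local', 'MsPacman_target'),
    }

    args = Namespace(game='Breakout-v0')
    try:
        num_action, input_size, agentmodel_local, agentmodel_target = GAME_SETTINGS[args.game]
    except KeyError:
        raise ValueError('unsupported game: %s' % args.game)
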
Example #3
                runner = Runner(None, agent, args, args.steps)
                NextStateValue = runner.forwardState()
                np.save(args.nextStatePath+'nextState_'+str(oneCkpt.split('-')[1]) + '.npy', NextStateValue)
            except Exception:
                # skip checkpoints whose forward pass or save fails; a bare
                # except would also swallow KeyboardInterrupt/SystemExit
                pass

        time.sleep(10)
        os._exit(0)
    else:
        args.ckptfile = None

    # begin to train the agent
    if not args.distill:
        envs = None  # defined before the try so the error path below can test it
        try:
            sess = tf.Session(config=tf_config)
            envs = EnvWrapper(make_envs(args), config)
            agent = A2CAgent(sess, fully_conv, config, args.discount, args.lr, args.vf_coef, args.ent_coef, args.clip_grads,
                             weight_dir, log_dir, args)
            runner = Runner(envs, agent, args, args.steps)

        except Exception as e:
            print('--exit--')
            print(e)
            if envs is not None:
                envs.close()
            import sys
            sys.exit(-1)

        runner.run(args.updates, not args.test)

        if args.save_replay:
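
Because EnvWrapper launches external SC2 processes, the except block above only protects construction; if runner.run itself raises, the environments leak. A minimal sketch of a try/finally shape that guarantees cleanup on both paths, with make_some_envs and run_training as hypothetical stand-ins for the EnvWrapper and Runner calls:

    class _Envs:
        # stand-in for EnvWrapper; only close() matters for this sketch
        def close(self):
            print('envs closed')

    def make_some_envs():
        return _Envs()

    def run_training(envs):
        pass  # stands in for runner.run(args.updates, not args.test)

    envs = None
    try:
        envs = make_some_envs()
        run_training(envs)
    finally:
        # runs whether training finished, raised, or was interrupted
        if envs is not None:
            envs.close()
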
Example #4
        from agents.exploration import Explore
        agent = Explore(args, device)
        args.infer = False
        args.train = False
    elif args.meta == 'hrl':
        from agents.hrl import HRL
        from agents.model import FullyConv
        actor_critic = FullyConv(config, device, args.envs)
        agent = HRL(actor_critic, config, args)
        args.infer = False
        args.train = True
    else:
        raise ValueError('unknown meta agent: %s' % args.meta)

    # initialize training setups
    envs = EnvWrapper(make_envs(args), config, args, device)
    sc2_filewriter = SC2FileWriter(args, dirname)
    runner = Runner(envs, agent, ilp, sc2_filewriter, config, args, device, dirname)
    time.sleep(5)  # wait for SC2 to boot up

    # run eval or train
    if args.eval and not args.load_ilp:  # hrl, random
        runner.eval(num_iter=args.num_iter)
    else:
        if args.meta in ('MSGI', 'hrl'):  # MSGI, HRL
            if args.load_ilp:
                runner.meta_eval_load(dirname, init_ep=args.init_ep)
            else:
                runner.meta_eval_save(num_iter=args.num_iter, tr_epi=args.tr_epi)

    # save game replay
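
The snippet is truncated after this comment. Judging from the end of Example #1, the step it introduces presumably guards envs.save_replay() behind a flag and then closes the environments; the following is a self-contained sketch with stand-in objects, not the project's verbatim code:

    from argparse import Namespace

    class _Envs:
        # stand-in for the EnvWrapper used above
        def save_replay(self):
            print('replay saved')
        def close(self):
            print('envs closed')

    args = Namespace(save_replay=True)
    envs = _Envs()
    if args.save_replay:
        envs.save_replay()
    envs.close()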