Example #1
def main():
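    # Load the saved config/args via setup_utils, then optionally log this run to Weights & Biases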
    setup_utils.setup_and_load()
    wandb_log = True
    if wandb_log:
        wandb.init(project="coinrun",
                   name=Config.RESTORE_ID + 'test',
                   config=Config.get_args_dict())
    with tf.Session() as sess:
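        # Run enjoy_env_sess for indices 0, 8, ..., 248 inside a single TF session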
        for i in range(0, 256, 8):
            enjoy_env_sess(sess, i, wandb_log)
Example #2
def save_params_in_scopes(sess, scopes, filename, base_dict=None):
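    # Gather the current values of all trainable variables in the given scopes,
    # bundle them with the config args, and save everything with joblib.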
    data_dict = {}

    if base_dict is not None:
        data_dict.update(base_dict)

    save_path = file_to_path(filename)

    data_dict['args'] = Config.get_args_dict()
    param_dict = {}

    for scope in scopes:
        params = tf.trainable_variables(scope)

        if len(params) > 0:
            print('saving scope', scope, filename)
            ps = sess.run(params)

            param_dict[scope] = ps

    data_dict['params'] = param_dict
    joblib.dump(data_dict, save_path)
Example #3
def main():
    args = setup_and_load()

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

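    # Derive a time-based seed and offset it by the MPI rank so each worker gets a distinct global seed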
    seed = int(time.time()) % 10000
    utils.mpi_print(seed * 100 + rank)
    set_global_seeds(seed * 100 + rank)

    # Initialize wandb so the result curves can be visualized
    config = Config.get_args_dict()
    config['global_seed'] = seed
    wandb.init(name=config["run_id"],
               project="coinrun",
               notes=" GARL generate seed",
               tags=["try"],
               config=config)

    utils.setup_mpi_gpus()
    utils.mpi_print('Set up gpu', args)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # pylint: disable=E1101

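    # Split the total step budget (given in millions of steps) into per-iteration training and evaluation limits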
    eval_limit = Config.EVAL_STEP * 10**6
    phase_eval_limit = int(eval_limit // Config.TRAIN_ITER)
    total_timesteps = int(Config.TOTAL_STEP * 10**6)
    phase_timesteps = int((total_timesteps - eval_limit) // Config.TRAIN_ITER)

    with tf.Session(config=config):
        sess = tf.get_default_session()

        # init env
        nenv = Config.NUM_ENVS
        env = make_general_env(nenv, rand_seed=seed)
        utils.mpi_print('Set up env')

        policy = policies_back.get_policy()
        utils.mpi_print('Set up policy')

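        # SeedOptimizer (GARL): maintains and updates the pool of training level seeds between training phases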
        optimizer = SeedOptimizer(env=env,
                                  logdir=Config.LOGDIR,
                                  spare_size=Config.SPA_LEVELS,
                                  ini_size=Config.INI_LEVELS,
                                  eval_limit=phase_eval_limit,
                                  train_set_limit=Config.NUM_LEVELS,
                                  load_seed=Config.LOAD_SEED,
                                  rand_seed=seed,
                                  rep=1,
                                  log=True)

        step_elapsed = 0
        t = 0

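        # Resume a previous run: restore elapsed steps, the last seed set, and the original run ids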
        if args.restore_id is not None:
            datapoints = Config.get_load_data('default')['datapoints']
            step_elapsed = datapoints[-1][0]
            optimizer.load()
            seed = optimizer.hist[-1]
            env.set_seed(seed)
            t = 16
            print('loadrestore')
            Config.RESTORE_ID = Config.get_load_data(
                'default')['args']['run_id']
            Config.RUN_ID = Config.get_load_data(
                'default')['args']['run_id'].replace('-', '_')

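        # Main loop: alternate PPO policy updates (learn_func) with environment seed-set
        # optimization until the total step budget is reached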
        while (step_elapsed < (Config.TOTAL_STEP - 1) * 10**6):
            # ============ GARL =================
            # optimize policy
            mean_rewards, datapoints = learn_func(
                sess=sess,
                policy=policy,
                env=env,
                log_interval=args.log_interval,
                save_interval=args.save_interval,
                nsteps=Config.NUM_STEPS,
                nminibatches=Config.NUM_MINIBATCHES,
                lam=Config.GAE_LAMBDA,
                gamma=Config.GAMMA,
                noptepochs=Config.PPO_EPOCHS,
                ent_coef=Config.ENTROPY_COEFF,
                vf_coef=Config.VF_COEFF,
                max_grad_norm=Config.MAX_GRAD_NORM,
                lr=lambda f: f * Config.LEARNING_RATE,
                cliprange=lambda f: f * Config.CLIP_RANGE,
                start_timesteps=step_elapsed,
                total_timesteps=phase_timesteps,
                index=t)

            # test catastrophic forgetting
            if 'Forget' in Config.RUN_ID:
                last_set = list(env.get_seed_set())
                if t > 0:
                    curr_set = list(env.get_seed_set())
                    last_scores, _ = eval_test(sess,
                                               nenv,
                                               last_set,
                                               train=True,
                                               idx=None,
                                               rep_count=len(last_set))
                    curr_scores, _ = eval_test(sess,
                                               nenv,
                                               curr_set,
                                               train=True,
                                               idx=None,
                                               rep_count=len(curr_set))
                    tmp = set(curr_set).difference(set(last_set))
                    mpi_print("Forgetting Exp")
                    mpi_print("Last setsize", len(last_set))
                    mpi_print("Last scores", np.mean(last_scores),
                              "Curr scores", np.mean(curr_scores))
                    mpi_print("Replace count", len(tmp))

            # optimize env
            step_elapsed = datapoints[-1][0]
            if t < Config.TRAIN_ITER:
                best_rew_mean = max(mean_rewards)
                env, step_elapsed = optimizer.run(sess, env, step_elapsed,
                                                  best_rew_mean)
            t += 1

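        # Final evaluation: score the agent on its training seed set and on unseen levels,
        # then dump the results with joblib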
        save_final_test = True
        if save_final_test:
            final_test = {}
            final_test['step_elapsed'] = step_elapsed
            train_set = env.get_seed()
            final_test['train_set_size'] = len(train_set)
            eval_log = eval_test(sess,
                                 nenv,
                                 train_set,
                                 train=True,
                                 is_high=False,
                                 rep_count=1000,
                                 log=True)
            final_test['Train_set'] = eval_log

            eval_log = eval_test(sess,
                                 nenv,
                                 None,
                                 train=False,
                                 is_high=True,
                                 rep_count=1000,
                                 log=True)
            final_test['Test_set'] = eval_log
            joblib.dump(final_test, setup_utils.file_to_path('final_test'))

    env.close()