Example #1
def load_args(load_key='default'):
    """get train args of retore id"""
    load_data = Config.get_load_data(load_key)
    if load_data is None:
        return False

    args_dict = load_data['args']
    #Config.parse_args_dict(args_dict)

    return args_dict
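
A minimal usage sketch, assuming a Config store that already holds a run saved under the 'default' key; the False return must be checked before unpacking (the 'run_id' key follows the usage in Example #4):

args_dict = load_args('default')
if args_dict is False:
    print('No saved args found for this restore id')
else:
    print('Restored run id:', args_dict.get('run_id'))
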
Example #2
def load_datapoints(load_path=None, load_key=None):
    # Initialize so a missing file falls through to the None check below
    # instead of raising UnboundLocalError.
    load_data = None
    if load_path is None:
        load_data = Config.get_load_data(load_key)
    else:
        load_path = file_to_path(load_path)
        if os.path.exists(load_path):
            load_data = joblib.load(load_path)
            print('Load file', load_path)
    if load_data is None:
        return False

    return load_data['datapoints']
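
A short sketch of calling the loader through the Config store; the 'default' key and the per-datapoint layout (step count in the first field) follow the usage in Example #4, and the function returns False when nothing has been saved:

datapoints = load_datapoints(load_key='default')
if datapoints is False:
    print('No saved datapoints available')
else:
    print('Last recorded step:', datapoints[-1][0])
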
Example #3
def load_params_for_scope(sess, scope, load_key='default', load_path=None):
    if load_path is None:
        load_data = Config.get_load_data(load_key)
    else:
        load_path = file_to_path(load_path)
        if os.path.exists(load_path):
            load_data = joblib.load(load_path)
            print('Load file', load_path)
        else:
            raise ValueError('Load file not found: %s' % load_path)
    if load_data is None:
        return False

    params_dict = load_data['params']
    if scope in params_dict:
        print('Loading saved file for scope', scope)
        loaded_params = params_dict[scope]
        loaded_params, params = get_savable_params(loaded_params,
                                                   scope,
                                                   keep_heads=True)
        restore_params(sess, loaded_params, params)

    return True
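
A hedged sketch of restoring a single variable scope inside an active TF1 session; the scope name 'model' and the initializer call are assumptions for illustration, not part of the example above:

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Restore only the parameters that were saved under the 'model' scope.
    restored = load_params_for_scope(sess, 'model', load_key='default')
    if not restored:
        print('No saved parameters found')
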
Example #4
def main():
    args = setup_and_load()

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    seed = int(time.time()) % 10000
    utils.mpi_print(seed * 100 + rank)
    set_global_seeds(seed * 100 + rank)

    # Use the wandb package to visualize result curves
    config = Config.get_args_dict()
    config['global_seed'] = seed
    wandb.init(name=config["run_id"],
               project="coinrun",
               notes=" GARL generate seed",
               tags=["try"],
               config=config)

    utils.setup_mpi_gpus()
    utils.mpi_print('Set up gpu', args)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # pylint: disable=E1101

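    # Step budgets below are in millions of environment steps (hence 10**6);
    # the evaluation and training budgets are split evenly across the
    # Config.TRAIN_ITER optimization phases.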
    eval_limit = Config.EVAL_STEP * 10**6
    phase_eval_limit = int(eval_limit // Config.TRAIN_ITER)
    total_timesteps = int(Config.TOTAL_STEP * 10**6)
    phase_timesteps = int((total_timesteps - eval_limit) // Config.TRAIN_ITER)

    with tf.Session(config=config):
        sess = tf.get_default_session()

        # init env
        nenv = Config.NUM_ENVS
        env = make_general_env(nenv, rand_seed=seed)
        utils.mpi_print('Set up env')

        policy = policies_back.get_policy()
        utils.mpi_print('Set up policy')

        optimizer = SeedOptimizer(env=env,
                                  logdir=Config.LOGDIR,
                                  spare_size=Config.SPA_LEVELS,
                                  ini_size=Config.INI_LEVELS,
                                  eval_limit=phase_eval_limit,
                                  train_set_limit=Config.NUM_LEVELS,
                                  load_seed=Config.LOAD_SEED,
                                  rand_seed=seed,
                                  rep=1,
                                  log=True)

        step_elapsed = 0
        t = 0

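        # Resume from a previous run: recover the elapsed step count, the
        # seed history and the original run ids.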
        if args.restore_id is not None:
            datapoints = Config.get_load_data('default')['datapoints']
            step_elapsed = datapoints[-1][0]
            optimizer.load()
            seed = optimizer.hist[-1]
            env.set_seed(seed)
            t = 16
            print('loadrestore')
            Config.RESTORE_ID = Config.get_load_data(
                'default')['args']['run_id']
            Config.RUN_ID = Config.get_load_data(
                'default')['args']['run_id'].replace('-', '_')

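        # GARL outer loop: alternate PPO policy updates (learn_func) with
        # SeedOptimizer updates of the training seed set until the total
        # step budget is spent.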
        while (step_elapsed < (Config.TOTAL_STEP - 1) * 10**6):
            # ============ GARL =================
            # optimize policy
            mean_rewards, datapoints = learn_func(
                sess=sess,
                policy=policy,
                env=env,
                log_interval=args.log_interval,
                save_interval=args.save_interval,
                nsteps=Config.NUM_STEPS,
                nminibatches=Config.NUM_MINIBATCHES,
                lam=Config.GAE_LAMBDA,
                gamma=Config.GAMMA,
                noptepochs=Config.PPO_EPOCHS,
                ent_coef=Config.ENTROPY_COEFF,
                vf_coef=Config.VF_COEFF,
                max_grad_norm=Config.MAX_GRAD_NORM,
                lr=lambda f: f * Config.LEARNING_RATE,
                cliprange=lambda f: f * Config.CLIP_RANGE,
                start_timesteps=step_elapsed,
                total_timesteps=phase_timesteps,
                index=t)

            # test catastrophic forgetting
            if 'Forget' in Config.RUN_ID:
                last_set = list(env.get_seed_set())
                if t > 0:
                    curr_set = list(env.get_seed_set())
                    last_scores, _ = eval_test(sess,
                                               nenv,
                                               last_set,
                                               train=True,
                                               idx=None,
                                               rep_count=len(last_set))
                    curr_scores, _ = eval_test(sess,
                                               nenv,
                                               curr_set,
                                               train=True,
                                               idx=None,
                                               rep_count=len(curr_set))
                    tmp = set(curr_set).difference(set(last_set))
                    mpi_print("Forgetting Exp")
                    mpi_print("Last setsize", len(last_set))
                    mpi_print("Last scores", np.mean(last_scores),
                              "Curr scores", np.mean(curr_scores))
                    mpi_print("Replace count", len(tmp))

            # optimize env
            step_elapsed = datapoints[-1][0]
            if t < Config.TRAIN_ITER:
                best_rew_mean = max(mean_rewards)
                env, step_elapsed = optimizer.run(sess, env, step_elapsed,
                                                  best_rew_mean)
            t += 1

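        # Final evaluation: score the trained policy on the training seed set
        # and on unseen levels, then dump the results with joblib.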
        save_final_test = True
        if save_final_test:
            final_test = {}
            final_test['step_elapsed'] = step_elapsed
            train_set = env.get_seed()
            final_test['train_set_size'] = len(train_set)
            eval_log = eval_test(sess,
                                 nenv,
                                 train_set,
                                 train=True,
                                 is_high=False,
                                 rep_count=1000,
                                 log=True)
            final_test['Train_set'] = eval_log

            eval_log = eval_test(sess,
                                 nenv,
                                 None,
                                 train=False,
                                 is_high=True,
                                 rep_count=1000,
                                 log=True)
            final_test['Test_set'] = eval_log
            joblib.dump(final_test, setup_utils.file_to_path('final_test'))

    env.close()