Example #1
import os

import numpy as np
from stable_baselines import PPO2
from stable_baselines.common.vec_env import SubprocVecEnv, VecNormalize

# get_exp_dir and create_env_fn are project-specific helpers assumed to be defined elsewhere.
def main(log_dir, easy, n_steps=450):
    exp_dir, seed_offset = get_exp_dir(
        os.path.join(log_dir, "reacher-obstacle-default" + ("-easy" if easy else "") + "-ppo"))
    print("Seed offset: " + str(seed_offset))

    log_path = os.path.join(exp_dir, "ppo-reach-avoid.log")
    avg_log_path = exp_dir
    if not os.path.exists(log_path):
        n_envs = 8
        env = VecNormalize(
            SubprocVecEnv([create_env_fn(seed_offset * n_envs + i, easy=easy) for i in range(0, n_envs)]),
            gamma=0.999)
        model = PPO2(policy='MlpPolicy', env=env, n_steps=n_steps, nminibatches=5, verbose=1, gamma=0.999,
                     noptepochs=15, ent_coef=1e-3, lam=1, policy_kwargs=dict(layers=[164, 164]))

        average_rewards = []

        def log_callback(local_vars, global_vars):
            # ep_info_buf can be empty during the first few updates; skip to avoid a NaN mean
            ep_info_buf = local_vars["ep_info_buf"]
            if len(ep_info_buf) > 0:
                average_rewards.append(np.mean([ep_info['r'] for ep_info in ep_info_buf]))
            return True

        # 3067500 = 409 iterations (400 + 9 for buffer initialization) * 50 trajectories * 150 timesteps
        model.learn(3067500, seed=seed_offset, callback=log_callback)
        model.save(log_path)
        env.save_running_average(avg_log_path)
        np.save(os.path.join(exp_dir, "rewards.npy"), np.array(average_rewards))
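The function above relies on two project-specific helpers (get_exp_dir, create_env_fn), so it will not run on its own. Below is a minimal, self-contained sketch of the same pattern on a stock Gym task: seeded SubprocVecEnv workers wrapped in VecNormalize, a callback that records the rolling episode reward, and saving the model together with the normalization statistics. It assumes stable-baselines 2.x and gym are installed; the environment id, hyperparameters and paths are illustrative, not taken from the example.

import os

import gym
import numpy as np
from stable_baselines import PPO2
from stable_baselines.common import set_global_seeds
from stable_baselines.common.vec_env import SubprocVecEnv, VecNormalize


def make_env(rank, seed=0):
    """Return a thunk that creates and seeds one worker environment."""
    def _init():
        env = gym.make("CartPole-v1")
        env.seed(seed + rank)
        return env
    set_global_seeds(seed)
    return _init


if __name__ == "__main__":
    n_envs = 4
    env = VecNormalize(SubprocVecEnv([make_env(i) for i in range(n_envs)]), gamma=0.999)

    average_rewards = []

    def log_callback(local_vars, global_vars):
        # ep_info_buf holds the episode-info dicts of recently finished episodes.
        buf = local_vars["ep_info_buf"]
        if len(buf) > 0:
            average_rewards.append(np.mean([info["r"] for info in buf]))
        return True

    model = PPO2("MlpPolicy", env, n_steps=128, gamma=0.999, verbose=1)
    model.learn(total_timesteps=50000, callback=log_callback)

    os.makedirs("logs", exist_ok=True)
    model.save(os.path.join("logs", "ppo2_cartpole"))
    # save_running_average stores the VecNormalize statistics next to the model
    # (older stable-baselines API, matching the example above).
    env.save_running_average("logs")
    np.save(os.path.join("logs", "rewards.npy"), np.array(average_rewards))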
Example #2
# Imports needed by this example; Template_Gym, CustomPolicy and CustomPolicy_4
# are project-specific modules assumed to be importable from this repository.
import numpy as np
from stable_baselines import PPO2
from stable_baselines.common import set_global_seeds
from stable_baselines.common.vec_env import SubprocVecEnv, VecNormalize
from stable_baselines.gail import ExpertDataset, generate_expert_traj


class PPO2_SB():
    def __init__(self):
        self.love = 'Ramona'
        self.env_fns = [] 
        self.env_names = []
    
    def make_env(self, env_id, rank, seed=0):
        """
        Utility function for multiprocessed env.
    
        :param env_id: (str) the environment ID
        :param num_env: (int) the number of environment you wish to have in subprocesses
        :param seed: (int) the inital seed for RNG
        :param rank: (int) index of the subprocess
        """
        def _init():
            env = Template_Gym()
            env.seed(seed + rank)
            return env
        set_global_seeds(seed)
        return _init
    

    def train(self, num_e=1, n_timesteps=1000000, save_fraction=0.1, save='saves/aud5'):
        env_id = "default"
        # num_e sets the number of worker processes to use
        # Create the vectorized environment
        #env = DummyVecEnv([lambda: env])
        #Ramona
        self.env = SubprocVecEnv([self.make_env(env_id, i) for i in range(num_e)])
        #env = Template_Gym()
        #self.env = DummyVecEnv([lambda: env])
        self.env = VecNormalize(self.env, norm_obs=True, norm_reward=True)
        self.model = PPO2(CustomPolicy_4, self.env, verbose=0, learning_rate=1e-4, nminibatches=1, tensorboard_log="./day1" )
        
        
        #self.model = PPO2.load("default9", self.env, policy=CustomPolicy, tensorboard_log="./test/" )
        # Train in chunks of save_fraction * n_timesteps and checkpoint after each chunk.
        n_timesteps = int(n_timesteps * save_fraction)
        training_loop = int(1 / save_fraction)
        log_dir = "saves"
        for i in range(training_loop):
            self.model.learn(n_timesteps)
            self.model.save(save+str(i))
            self.env.save_running_average(log_dir)
        self.env.save_running_average(log_dir)
    
    
    def evaluate(self, num_env=1, num_steps=21900, load="saves/aud5", runs=10):
        """
        Evaluate a RL agent
        :param model: (BaseRLModel object) the RL Agent
        :param num_steps: (int) number of timesteps to evaluate it
        :return: (float) Mean reward
        """
        env_id = 'default'
        num_e = 1
        log_dir = "saves"
        self.env = SubprocVecEnv([self.make_env(env_id, i) for i in range(num_env)])
        #self.model = PPO2(CustomPolicy, self.env, verbose=1, learning_rate=1e-5, tensorboard_log="./default" )
        self.env = VecNormalize(self.env, norm_obs=True, norm_reward=True)
        self.env.load_running_average(log_dir)
        for i in range(runs):
            self.model = PPO2.load(load+str(i), self.env, policy=CustomPolicy_4,  tensorboard_log="./default/" )
            self.env.load_running_average(log_dir)
            episode_rewards = [[0.0] for _ in range(self.env.num_envs)]
            #self.total_pips = []
            obs = self.env.reset()
            state = None
            # When using VecEnv, done is a vector
            done = [False for _ in range(self.env.num_envs)]
            for step in range(num_steps):
                # state is only used by recurrent (LSTM) policies
                action, state = self.model.predict(obs, state=state, mask=done, deterministic=True)
                # action, rewards and dones are arrays because we are using a vectorized env
                obs, rewards, dones, _ = self.env.step(action)
                #self.total_pips.append(self.env.player.placement)

                # Stats: accumulate reward per env and start a new entry when an episode ends
                for env_idx in range(self.env.num_envs):
                    episode_rewards[env_idx][-1] += rewards[env_idx]
                    if dones[env_idx]:
                        episode_rewards[env_idx].append(0.0)

            mean_rewards =  [0.0 for _ in range(self.env.num_envs)]
            n_episodes = 0
            for i in range(self.env.num_envs):
                mean_rewards[i] = np.mean(episode_rewards[i])     
                n_episodes += len(episode_rewards[i])   

        # Compute mean reward
            mean_reward = np.mean(mean_rewards)
            print("Mean reward:", mean_reward, "Num episodes:", n_episodes)

        return mean_reward

    def pre_train(self, num_e=1, load="saves/m19"):
        env_id = 'default'
        num_e = 1
        log_dir = "saves"
        # Using only one expert trajectory here;
        # you can specify `traj_limitation=-1` to use the whole dataset
        dataset = ExpertDataset(expert_path='default2.npz',traj_limitation=1, batch_size=128)
        self.env = SubprocVecEnv([self.make_env(env_id, i) for i in range(num_e)])
        #env = Template_Gym()
        #self.env = DummyVecEnv([lambda: env])
        self.env = VecNormalize(self.env, norm_obs=True, norm_reward=True)
        #env = make_env()
        #model = GAIL("MlpPolicy", env=env, expert_dataset=dataset, verbose=1)
        self.env.load_running_average("saves")
        self.model = PPO2(CustomPolicy, self.env, verbose=1, nminibatches=1,  learning_rate=1e-5, tensorboard_log="./m1ln4" )
        #self.model = PPO2.load("saves/m19", self.env, policy=CustomPolicy, tensorboard_log="./default/" )
        self.env.load_running_average("saves")
        # Pretrain the PPO2 model
        self.model.pretrain(dataset, n_epochs=10000)

        # As an option, you can train the RL agent
        #self.model.learn(int(100000000))

        # Test the pre-trained model
        self.env = self.model.get_env()
        self.env.load_running_average("saves")
        obs = self.env.reset()

        reward_sum = 0.0
        for _ in range(1000000):
            action, _ = self.model.predict(obs)
            obs, reward, done, _ = self.env.step(action)
            reward_sum += reward
            #self.env.render()
            if done:
                print(reward_sum)
                reward_sum = 0.0
                obs = self.env.reset()

        self.env.close()

    def gen_pre_train(self, num_e=1, save='default2', episodes=1000):
        #self.create_envs(game_name=game, state_name=state, num_env=num_e)
        #self.env=SubprocVecEnv(self.env_fns)
        env_id = 'default'
        num_e = 1
        self.env = SubprocVecEnv([self.make_env(env_id, i) for i in range(num_e)])
        #env = Template_Gym()
        #self.env = DummyVecEnv([lambda: env])
        self.env = VecNormalize(self.env, norm_obs=True, norm_reward=True)
        #env = make_env()
        #model = GAIL("MlpPolicy", env=env, expert_dataset=dataset, verbose=1)
        self.env.load_running_average("saves")
        self.model = PPO2.load("saves/m19", self.env, policy=CustomPolicy, tensorboard_log="./default/" )
        self.env.load_running_average("saves")
        #env = make_env()
        #self.expert_agent = 
        generate_expert_traj(self.model, save, self.env, n_episodes=episodes)
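pre_train and gen_pre_train above implement the stable-baselines behavioural-cloning pipeline: record expert trajectories with generate_expert_traj, wrap them in an ExpertDataset, and call model.pretrain before optionally continuing with RL. A minimal self-contained sketch of that pipeline on a toy Gym task, assuming stable-baselines 2.x; the file names, timestep counts and epoch counts are placeholders.

import gym

from stable_baselines import PPO2
from stable_baselines.gail import ExpertDataset, generate_expert_traj

# 1) Train (or load) an "expert" and record its trajectories to expert_cartpole.npz.
expert = PPO2("MlpPolicy", "CartPole-v1", verbose=0)
expert.learn(10000)
generate_expert_traj(expert, "expert_cartpole", n_episodes=10)

# 2) Behavioural cloning: pretrain a fresh model on the recorded trajectories.
dataset = ExpertDataset(expert_path="expert_cartpole.npz", traj_limitation=-1, batch_size=64)
model = PPO2("MlpPolicy", "CartPole-v1", verbose=0)
model.pretrain(dataset, n_epochs=100)

# 3) Optionally continue with regular RL after pretraining.
model.learn(10000)
model.save("ppo2_cartpole_pretrained")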
Example #3
# Imports needed by this example; Template_Gym, CustomPolicy_4/CustomPolicy_5 and
# the `pc` config module are project-specific and assumed to be importable.
import numpy as np
import optuna
from stable_baselines import PPO2
from stable_baselines.common import set_global_seeds
from stable_baselines.common.vec_env import SubprocVecEnv, VecNormalize


class Optimization():
    def __init__(self, config):

        self.reward_strategy = 'sortino2'
        #self.input_data_file = 'data/coinbase_hourly.csv'
        self.params_db_file = 'sqlite:///params.db'

        # number of parallel jobs
        self.n_jobs = 1
        # maximum number of trials for finding the best hyperparams
        self.n_trials = 1000
        #number of test episodes per trial
        self.n_test_episodes = 10
        # number of evaluations for pruning per trial
        self.n_evaluations = 10
        self.config = config

        #self.df = pd.read_csv(input_data_file)
        #self.df = df.drop(['Symbol'], axis=1)
        #self.df = df.sort_values(['Date'])
        #self.df = add_indicators(df.reset_index())

        #self.train_len = int(len(df) * 0.8)

        #self.df = df[:train_len]

        #self.validation_len = int(train_len * 0.8)
        #self.train_df = df[:validation_len]
        #self.test_df = df[validation_len:]

    def make_env(self, env_id, rank, seed=0, eval=False,config=pc.configeurcad4h):
        """
        Utility function for multiprocessed env.
    
        :param env_id: (str) the environment ID
        :param num_env: (int) the number of environment you wish to have in subprocesses
        :param seed: (int) the inital seed for RNG
        :param rank: (int) index of the subprocess
        """
        def _init():
            self.config = config
            self.eval= eval
            env = Template_Gym(config=self.config, eval=self.eval)
            env.seed(seed + rank)
            return env
        set_global_seeds(seed)
        return _init
    
    # Categorical parameter
    #optimizer = trial.suggest_categorical('optimizer', ['MomentumSGD', 'Adam'])

    # Int parameter
    #num_layers = trial.suggest_int('num_layers', 1, 3)

    # Uniform parameter
    #dropout_rate = trial.suggest_uniform('dropout_rate', 0.0, 1.0)

    # Loguniform parameter
    #learning_rate = trial.suggest_loguniform('learning_rate', 1e-5, 1e-2)

    # Discrete-uniform parameter
    #drop_path_rate = trial.suggest_discrete_uniform('drop_path_rate', 0.0, 1.0, 0.1)
    def optimize_envs(self, trial):
        return {
            'reward_func': self.reward_strategy,
            'forecast_len': int(trial.suggest_loguniform('forecast_len', 1, 200)),
            'confidence_interval': trial.suggest_uniform('confidence_interval', 0.7, 0.99),
        }

    def optimize_config(self, trial):
        return {
            'sl': trial.suggest_loguniform('sl', 1.0, 10.0),
            'tp': trial.suggest_loguniform('tp', 1.0 ,10.0)
            
        }

    def optimize_ppo2(self,trial):
        return {
            'n_steps': int(trial.suggest_int('n_steps', 16, 2048)),
            'gamma': trial.suggest_loguniform('gamma', 0.9, 0.9999),
            'learning_rate': trial.suggest_loguniform('learning_rate', 1e-5, 1.),
            'ent_coef': trial.suggest_loguniform('ent_coef', 1e-8, 1e-1),
            'cliprange': trial.suggest_uniform('cliprange', 0.1, 0.4),
            'noptepochs': int(trial.suggest_int('noptepochs', 1, 48)),
            'lam': trial.suggest_uniform('lam', 0.8, 1.)
        }

    def optimize_lstm(self, trial):
        return {
            'lstm': trial.suggest_categorical('lstm', ['lstm', 'mlp'])
        }

    def ob_types(self, trial):
        return {
            'lstm': trial.suggest_categorical('lstm', ['lstm', 'mlp'])
        }


    def optimize_agent(self,trial):
        #self.env_params = self.optimize_envs(trial)
        env_id = "default"+str()
        num_e = 1  # Number of processes to use
        #self.config_param = self.optimize_config(trial)
        #self.config.sl = self.config_param['sl']
        #self.config.sl = self.config_param['tp']
        #self.model_type = self.optimize_lstm(trial)
        #self.model_type = self.model_type['lstm']
        self.model_type = "mlp"
        if self.model_type == 'mlp':
            self.policy = CustomPolicy_5
        else:
            self.policy = CustomPolicy_4
        self.train_env = SubprocVecEnv([self.make_env(env_id+str('train'), i, eval=False, config=self.config) for i in range(num_e)])
        #self.train_env = SubprocVecEnv([self.make_env(env_id, i, eval=False) for i in range(num_e)])
        self.train_env = VecNormalize(self.train_env, norm_obs=True, norm_reward=True)
        self.test_env =SubprocVecEnv([self.make_env(env_id+str("test"), i, eval=True, config=self.config) for i in range(num_e)])
        #self.test_env = SubprocVecEnv([self.make_env(env_id, i, eval=True) for i in range(num_e)])
        self.test_env = VecNormalize(self.test_env, norm_obs=True, norm_reward=True)
        try:
            self.test_env.load_running_average("saves")
            self.train_env.load_running_average("saves")
        except Exception:
            print('could not load running averages; starting from fresh statistics')
        self.model_params = self.optimize_ppo2(trial)
        self.model = PPO2(self.policy, self.train_env, verbose=0, nminibatches=1, tensorboard_log="./gbp_chf_single", **self.model_params )
        #self.model = PPO2(CustomPolicy_2, self.env, verbose=0, learning_rate=1e-4, nminibatches=1, tensorboard_log="./min1" )

        last_reward = -np.finfo(np.float16).max
        #evaluation_interval = int(len(train_df) / self.n_evaluations)
        evaluation_interval = 36525

        for eval_idx in range(self.n_evaluations):
            try:
                self.model.learn(evaluation_interval)
                self.test_env.save_running_average("saves")
                self.train_env.save_running_average("saves")
            except Exception:
                print('learning or checkpointing failed for this evaluation step')

            rewards = []
            n_episodes, reward_sum = 0, 0.0
            print('Eval')
            obs = self.test_env.reset()
            #state = None
            #done = [False for _ in range(self.env.num_envs)]
            while n_episodes < self.n_test_episodes:
                action, _ = self.model.predict(obs, deterministic=True)
                obs, reward, done, _ = self.test_env.step(action)
                reward_sum += reward

                if done:
                    rewards.append(reward_sum)
                    reward_sum = 0.0
                    n_episodes += 1
                    obs = self.test_env.reset()

            last_reward = np.mean(rewards)
            trial.report(-1 * last_reward, eval_idx)

            if trial.should_prune(eval_idx):
                raise optuna.structs.TrialPruned()

        return -1 * last_reward


    def optimize(self, config):
        self.config = config
        # Earlier study names kept for reference:
        # ppo2_single_ready, ppo2_single_ready_nosltp, ppo2_single_ready_nosltp_all_yeah,
        # ppo2_eur_gbp_op, ppo2_gbp_chf_op, ppo2_gbp_chf_h1_new1, ppo2_gbp_chf_h4_r_new11,
        # ppo2_gbp_chf_h4_r_withvolfixed, ppo2_gbp_chf_h4_r_withvolclosefix212
        study_name = 'ppo2_gbp_chf_h4_loged_sortinonew'
        study = optuna.create_study(
            study_name=study_name, storage=self.params_db_file, load_if_exists=True)

        try:
            study.optimize(self.optimize_agent, n_trials=self.n_trials, n_jobs=self.n_jobs)
        except KeyboardInterrupt:
            pass

        print('Number of finished trials: ', len(study.trials))

        print('Best trial:')
        trial = study.best_trial

        print('Value: ', trial.value)

        print('Params: ')
        for key, value in trial.params.items():
            print('    {}: {}'.format(key, value))

        return study.trials_dataframe()


#if __name__ == '__main__':
    #optimize()
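optimize and optimize_agent above follow the usual Optuna pattern: the objective builds a model from trial.suggest_* values, periodically reports an intermediate score, and raises TrialPruned when the pruner asks it to stop, while create_study with a storage URL and load_if_exists=True lets several processes share one study. The sketch below shows that pattern in isolation with a dummy objective instead of PPO2 training; it is written against the older Optuna API used above (newer releases use trial.should_prune() with no argument and optuna.TrialPruned), and the study name and storage URL are placeholders.

import numpy as np
import optuna


def objective(trial):
    # Hyperparameters sampled the same way optimize_ppo2 does above.
    learning_rate = trial.suggest_loguniform('learning_rate', 1e-5, 1.0)
    gamma = trial.suggest_loguniform('gamma', 0.9, 0.9999)

    score = 0.0
    n_evaluations = 10
    for eval_idx in range(n_evaluations):
        # Stand-in for "train a bit, then evaluate": any intermediate metric works here.
        score = -np.log(learning_rate) * gamma + np.random.normal(scale=0.1)

        # Report the intermediate value so the pruner can act on it.
        trial.report(-score, eval_idx)
        if trial.should_prune(eval_idx):
            raise optuna.structs.TrialPruned()

    # Optuna minimizes by default, so return the negated reward, as above.
    return -score


study = optuna.create_study(
    study_name='demo_study', storage='sqlite:///params.db', load_if_exists=True)
study.optimize(objective, n_trials=20, n_jobs=1)
print('Best params:', study.best_trial.params)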
Example #4
# Imports needed by this example; Template_Gym, the CustomPolicy classes and the
# `pc` config module are project-specific and assumed to be importable.
import datetime
import time

import numpy as np
from stable_baselines import PPO2
from stable_baselines.common import set_global_seeds
from stable_baselines.common.vec_env import SubprocVecEnv, VecNormalize
from stable_baselines.gail import ExpertDataset, generate_expert_traj


class PPO2_SB():
    def __init__(self):
        self.love = 'Ramona'
        self.env_fns = [] 
        self.env_names = []
    
    def make_env(self, env_id, rank, seed=0, eval=False,config=pc.configeurcad4h):
        """
        Utility function for multiprocessed env.
    
        :param env_id: (str) the environment ID
        :param num_env: (int) the number of environment you wish to have in subprocesses
        :param seed: (int) the inital seed for RNG
        :param rank: (int) index of the subprocess
        """
        def _init():
            self.config = config
            self.eval= eval
            env = Template_Gym(config=self.config, eval=self.eval)
            env.seed(seed + rank)
            return env
        set_global_seeds(seed)
        return _init
    
    def train(self, num_e=1, n_timesteps=1000000, save_fraction=0.0125, save='saves/audbuyh4120', config=pc.configgbpchf4h):
        env_id = "default"
        # num_e sets the number of worker processes to use
        # Create the vectorized environment
        #env = DummyVecEnv([lambda: env])
        #Ramona
        self.config = config
        self.env = SubprocVecEnv([self.make_env(env_id, i, eval=False, config=self.config) for i in range(num_e)])
        #env = Template_Gym()
        #self.env = DummyVecEnv([lambda: env])
        self.env = VecNormalize(self.env, norm_obs=True, norm_reward=True)
        #self.model = PPO2(CustomPolicy_4, self.env, verbose=0, nminibatches=1, tensorboard_log="./gbp_chf_4h_r", **self.config.params )
        #self.model = PPO2(CustomPolicy_5, self.env, verbose=0, nminibatches=1, tensorboard_log="./aud_chf", learning_rate=1e-5  )#**self.config.params
        #self.model = PPO2.load('saves/playerdetails39', self.env, policy=CustomPolicy,  tensorboard_log="./playerdetailsex" )
        self.model = PPO2.load(self.config.path+str(79)+'.pkl', self.env, policy=CustomPolicy_5,  tensorboard_log="./default/" )
        #self.model = PPO2.load("default9", self.env, policy=CustomPolicy, tensorboard_log="./test/" )
        # Train in chunks of save_fraction * n_timesteps and checkpoint after each chunk.
        n_timesteps = int(n_timesteps * save_fraction)
        training_loop = int(1 / save_fraction)
        log_dir = "saves"
        #self.env.load_running_average(log_dir)
        for i in range(training_loop):
            self.model.learn(n_timesteps)
            self.model.save(self.config.path+'8'+str(i))
            self.env.save_running_average(log_dir)
        self.env.save_running_average(log_dir)
    
    
    def evaluate(self, num_env=1, num_steps=1461, load='saves/audbuyh1', runs=80, config=pc.configgbpchf4h):
        """
        Evaluate a RL agent
        :param model: (BaseRLModel object) the RL Agent
        :param num_steps: (int) number of timesteps to evaluate it
        :return: (float) Mean reward
        """
        env_id = config.year+config.pair
        num_e = 1
        self.config = config
        log_dir = self.config.log
        
        #log_dir = self.config.norm
        #self.env = SubprocVecEnv([self.make_env(env_id, i, eval=True) for i in range(num_env)])
        self.env = SubprocVecEnv([self.make_env(env_id, i, eval=True, config=self.config) for i in range(num_env)])
        #self.model = PPO2(CustomPolicy, self.env, verbose=1, learning_rate=1e-5, tensorboard_log="./default" )
        self.env = VecNormalize(self.env, norm_obs=True, norm_reward=True)
        try:
            self.env.load_running_average(log_dir)
        except Exception:
            print('could not load running average')
        for i in range(runs):
            #self.model = PPO2(CustomPolicy, self.env, verbose=0, learning_rate=1e-5, tensorboard_log="./moose14" )
            #self.model = PPO2.load(self.config.path, self.env, policy=CustomPolicy_2,  tensorboard_log="./default/" )
            self.model = PPO2.load(self.config.path+'8'+str(i)+'.pkl', self.env, policy=CustomPolicy_5,  tensorboard_log="./default/" )
            #self.env.load_running_average(log_dir)
            episode_rewards = [[0.0] for _ in range(self.env.num_envs)]
            #self.total_pips = []
            obs = self.env.reset()
            state = None
            # When using VecEnv, done is a vector
            done = [False for _ in range(self.env.num_envs)]
            for step in range(num_steps):
                # state is only used by recurrent (LSTM) policies
                action, state = self.model.predict(obs, state=state, mask=done, deterministic=True)
                # action, rewards and dones are arrays because we are using a vectorized env
                obs, rewards, dones, _ = self.env.step(action)
                #self.total_pips.append(self.env.player.placement)

                # Stats: accumulate reward per env and start a new entry when an episode ends
                for env_idx in range(self.env.num_envs):
                    episode_rewards[env_idx][-1] += rewards[env_idx]
                    if dones[env_idx]:
                        episode_rewards[env_idx].append(0.0)
            #self.env.save_running_average(log_dir)
            mean_rewards =  [0.0 for _ in range(self.env.num_envs)]
            n_episodes = 0
            for i in range(self.env.num_envs):
                mean_rewards[i] = np.mean(episode_rewards[i])     
                n_episodes += len(episode_rewards[i])   

        # Compute mean reward
            mean_reward = np.mean(mean_rewards)
            print("Mean reward:", mean_reward, "Num episodes:", n_episodes)
            #self.env.save(log_dir)

        return mean_reward

    def live(self, num_env=1, num_steps=1461, load='saves/gbp_usd_buy', runs=1, config=pc.configgbpcad4h):
        """
        Evaluate a RL agent
        :param model: (BaseRLModel object) the RL Agent
        :param num_steps: (int) number of timesteps to evaluate it
        :return: (float) Mean reward
        """
        self.config = config
        env_id = self.config.pair
        num_e = 1
        log_dir = self.config.log
       
        self.config.live = True
        self.config.load = False

        
        self.env = SubprocVecEnv([self.make_env(env_id, i, eval=True, config=self.config) for i in range(num_env)])
        
        self.env = VecNormalize(self.env, norm_obs=True, norm_reward=True)
        try:
            self.env.load_running_average(self.config.log)
        except Exception:
            print('could not load running average')
            
        self.env.num_envs = 1
        for i in range(runs):
            self.model = PPO2.load(self.config.path+str(self.config.best)+'.pkl', self.env, policy=CustomPolicy_5,  tensorboard_log="./default/" )
            
            episode_rewards = [[0.0] for _ in range(self.env.num_envs)]

            print(datetime.datetime.now())
            print(time.ctime())
            print('Market time check')

            obs = self.env.reset()
            state = None
            # When using VecEnv, done is a vector
            done = [False for _ in range(self.env.num_envs)]

            for step in range(num_steps):
                # state is only used by recurrent (LSTM) policies
                print("live step")
                action, state = self.model.predict(obs, state=state, mask=done, deterministic=True)
                # action, rewards and dones are arrays because we are using a vectorized env
                obs, rewards, dones, _ = self.env.step(action)
                #self.total_pips.append(self.env.player.placement)

                # Stats: accumulate reward per env and start a new entry when an episode ends
                for env_idx in range(self.env.num_envs):
                    episode_rewards[env_idx][-1] += rewards[env_idx]
                    if dones[env_idx]:
                        episode_rewards[env_idx].append(0.0)

                print(datetime.datetime.now())
                print(time.ctime())

            mean_rewards = [0.0 for _ in range(self.env.num_envs)]
            n_episodes = 0
            for i in range(self.env.num_envs):
                mean_rewards[i] = np.mean(episode_rewards[i])
                n_episodes += len(episode_rewards[i])
        # Compute mean reward
            mean_reward = np.mean(mean_rewards)
            print("Mean reward_gbp_buy:", mean_reward, "Num episodes:", n_episodes)
            

        return mean_reward

    # The two functions below are not working at the moment
    def pre_train(self, num_e=1, load="saves/m19"):
        env_id = 'default'
        num_e = 1
        log_dir = "saves"
        # Using only one expert trajectory here;
        # you can specify `traj_limitation=-1` to use the whole dataset
        dataset = ExpertDataset(expert_path='default2.npz',traj_limitation=1, batch_size=128)
        self.env = SubprocVecEnv([self.make_env(env_id, i) for i in range(num_e)])
        #env = Template_Gym()
        #self.env = DummyVecEnv([lambda: env])
        self.env = VecNormalize(self.env, norm_obs=True, norm_reward=True)
        #env = make_env()
        #model = GAIL("MlpPolicy", env=env, expert_dataset=dataset, verbose=1)
        self.env.save_running_average("saves"+self.config.pair)
        self.model = PPO2(CustomPolicy, self.env, verbose=1, nminibatches=1,  learning_rate=1e-5, tensorboard_log="./m1ln4" )
        #self.model = PPO2.load("saves/m19", self.env, policy=CustomPolicy, tensorboard_log="./default/" )
        self.env.save_running_average("saves"+self.config.pair)
        # Pretrain the PPO2 model
        self.model.pretrain(dataset, n_epochs=10000)

        # As an option, you can train the RL agent
        #self.model.learn(int(100000000))

        # Test the pre-trained model
        self.env = self.model.get_env()
        self.env.save_running_average("saves"+self.config.pair)
        obs = self.env.reset()

        reward_sum = 0.0
        for _ in range(1000000):
            action, _ = self.model.predict(obs)
            obs, reward, done, _ = self.env.step(action)
            reward_sum += reward
            #self.env.render()
            if done:
                print(reward_sum)
                reward_sum = 0.0
                obs = self.env.reset()

        self.env.close()

    def gen_pre_train(self, num_e=1, save='default2', episodes=1000):
        #self.create_envs(game_name=game, state_name=state, num_env=num_e)
        #self.env=SubprocVecEnv(self.env_fns)
        env_id = 'default'
        num_e = 1
        self.env = SubprocVecEnv([self.make_env(env_id, i) for i in range(num_e)])
        #env = Template_Gym()
        #self.env = DummyVecEnv([lambda: env])
        self.env = VecNormalize(self.env, norm_obs=True, norm_reward=True)
        #env = make_env()
        #model = GAIL("MlpPolicy", env=env, expert_dataset=dataset, verbose=1)
        self.env.load_running_average("saves")
        self.model = PPO2.load("saves/m19", self.env, policy=CustomPolicy, tensorboard_log="./default/" )
        self.env.load_running_average("saves")
        #env = make_env()
        #self.expert_agent = 
        generate_expert_traj(self.model, save, self.env, n_episodes=episodes)


    def gen_pre_train_2(self, game, state, num_e=1, save='default2', episodes=10):
        self.create_envs(game_name=game, state_name=state, num_env=num_e)
        env=SubprocVecEnv(self.env_fns)
        self.expert_agent = "moose"
        self.generate_expert_traj(self.expert_agent, save, env, n_episodes=episodes)
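A recurring detail in the train/evaluate methods above is that the VecNormalize running statistics must be saved alongside each model checkpoint and reloaded before evaluation; otherwise the evaluation observations are normalized with the wrong mean and variance. The sketch below isolates that checkpoint-and-reload loop on a stock Gym task. It assumes stable-baselines 2.x; save_running_average/load_running_average are the older API used throughout these examples, and the directory name, chunk sizes and environment are placeholders.

import os

import gym
import numpy as np
from stable_baselines import PPO2
from stable_baselines.common.vec_env import DummyVecEnv, VecNormalize

LOG_DIR = "saves_demo"
N_CHUNKS = 4
CHUNK_TIMESTEPS = 5000

os.makedirs(LOG_DIR, exist_ok=True)

# Training: learn in chunks and checkpoint the model plus the VecNormalize stats each time.
train_env = VecNormalize(DummyVecEnv([lambda: gym.make("CartPole-v1")]),
                         norm_obs=True, norm_reward=True)
model = PPO2("MlpPolicy", train_env, verbose=0)
for i in range(N_CHUNKS):
    model.learn(CHUNK_TIMESTEPS)
    model.save(os.path.join(LOG_DIR, "ppo2_chunk" + str(i)))
    train_env.save_running_average(LOG_DIR)

# Evaluation: rebuild the env, reload the normalization stats, then load a checkpoint.
eval_env = VecNormalize(DummyVecEnv([lambda: gym.make("CartPole-v1")]),
                        norm_obs=True, norm_reward=True)
eval_env.load_running_average(LOG_DIR)
model = PPO2.load(os.path.join(LOG_DIR, "ppo2_chunk" + str(N_CHUNKS - 1)), env=eval_env)

obs = eval_env.reset()
episode_rewards, current = [], 0.0
while len(episode_rewards) < 5:
    action, _ = model.predict(obs, deterministic=True)
    obs, rewards, dones, _ = eval_env.step(action)
    current += rewards[0]
    if dones[0]:
        episode_rewards.append(current)
        current = 0.0
print("Mean reward:", np.mean(episode_rewards))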