Example #1
0
            # Record the one-hot encoded action taken at this step.
            # (This is the interior of a training loop; the step/rollout code
            # that produces `action_onehot` and `done` is outside this view.)
            epoche_actions.append(action_onehot)

            if done:
                # Episode finished: total this episode's raw rewards and
                # keep track of the best episode total seen during training.
                episode_rewards_sum = sum(epoche_rewards)
                max_reward = max(episode_rewards_sum, max_reward)

                # A strictly positive episode total is counted as a success.
                # NOTE(review): threshold semantics depend on the environment's
                # reward scale — confirm ">0" is the intended success criterion.
                if episode_rewards_sum > 0:
                    suc_count += 1

                print("-----------------------")
                print("Episode: ", epoch)
                print("Reward: ", episode_rewards_sum)
                print("Max reward during train: ", max_reward)
                print("-----------------------")
                # Transform the raw per-step rewards before storage/training —
                # presumably discounted/normalized returns; see model.calc_reward
                # (defined elsewhere) to confirm.
                epoche_rewards = model.calc_reward(epoche_rewards)
                replBuffer.append(epoche_observations, epoche_actions,
                                  epoche_rewards)

                # Train on this episode's trajectory; the replay buffer is also
                # passed in, so fit() may mix in stored experience — TODO confirm.
                model.fit(epoche_observations, epoche_actions, epoche_rewards,
                          replBuffer)

                # Reset the per-episode accumulators for the next episode.
                epoche_observations = []
                epoche_actions = []
                epoche_rewards = []

                # Checkpoint version: advances once every `save_period` epochs,
                # offset by the version this run was resumed from.
                # NOTE(review): uses (epochs_count - current_epoch), i.e. epochs
                # remaining — verify this counts in the intended direction.
                training_version = load_version + (
                    epochs_count - current_epoch) // save_period

                # Destination path for the next model checkpoint.
                save_path = "res/{}/{}/LunarLander-v2.ckpt".format(
                    train_model_name, training_version)