def __init__(self, config):
    """Set up a DQN-style agent: replay memory, local Q-network, Adam optimizer and
    an epsilon-greedy exploration strategy."""
    Base_Agent.__init__(self, config)
    hp = self.hyperparameters
    # Experience replay buffer sampled during learning updates.
    self.memory = Replay_Buffer(hp["buffer_size"], hp["batch_size"], config.seed)
    # Local Q-network: one output per action.
    self.q_network_local = self.create_NN(input_dim=self.state_size, output_dim=self.action_size)
    self.q_network_optimizer = optim.Adam(self.q_network_local.parameters(), lr=hp["learning_rate"])
    self.exploration_strategy = Epsilon_Greedy_Exploration(config)
    def __init__(self, config):
        """Set up a two-eye DQN agent: local/target Q-networks, SGD optimizer with weight
        decay, replay memory and epsilon-greedy exploration.

        Forces render mode on because the networks consume rendered observations.
        """
        Base_Agent.__init__(self, config)
        # BUG FIX: the original assigned to the undefined name `base_config`
        # (NameError); the flag lives on the `config` object passed in.
        config.no_render_mode = False  ## must be render mode

        self.q_network_local = q_network_2_EYE(n_action=self.get_action_size())
        self.q_network_target = q_network_2_EYE(
            n_action=self.get_action_size())
        self.q_network_optimizer = optim.SGD(
            self.q_network_local.parameters(),
            lr=self.hyperparameters["learning_rate"],
            weight_decay=5e-4)

        self.memory = Replay_Buffer(self.hyperparameters["buffer_size"],
                                    self.hyperparameters["batch_size"],
                                    config.seed)
        self.exploration_strategy = Epsilon_Greedy_Exploration(config)

        # Optionally warm-start the backbone from pretrained weights.
        if config.backbone_pretrain:
            self.load_pretrain()

        # Target network starts as an exact copy of the local network.
        self.copy_model_over(from_model=self.q_network_local,
                             to_model=self.q_network_target)

        # Networks carry their own device attribute; move each onto it.
        self.q_network_local.to(self.q_network_local.device)
        self.q_network_target.to(self.q_network_target.device)
    def __init__(self, config):
        """Initialise a DDPG agent: actor/critic networks with target copies, their
        optimizers, a replay buffer and Ornstein-Uhlenbeck exploration noise."""
        Base_Agent.__init__(self, config)
        self.hyperparameters = config.hyperparameters
        critic_input = self.state_size + self.action_size
        # Critic takes a concatenated (state, action) vector and outputs a scalar Q-value.
        self.critic_local = self.create_NN(input_dim=critic_input, output_dim=1, key_to_use="Critic")
        self.critic_target = self.create_NN(input_dim=critic_input, output_dim=1, key_to_use="Critic")
        Base_Agent.copy_model_over(self.critic_local, self.critic_target)

        self.critic_optimizer = optim.Adam(self.critic_local.parameters(),
                                           lr=self.hyperparameters["Critic"]["learning_rate"],
                                           eps=1e-4)
        self.memory = Replay_Buffer(self.hyperparameters["Critic"]["buffer_size"],
                                    self.hyperparameters["batch_size"],
                                    self.config.seed)
        # Actor maps states directly to actions.
        self.actor_local = self.create_NN(input_dim=self.state_size,
                                          output_dim=self.action_size, key_to_use="Actor")
        self.actor_target = self.create_NN(input_dim=self.state_size,
                                           output_dim=self.action_size, key_to_use="Actor")
        Base_Agent.copy_model_over(self.actor_local, self.actor_target)

        self.actor_optimizer = optim.Adam(self.actor_local.parameters(),
                                          lr=self.hyperparameters["Actor"]["learning_rate"],
                                          eps=1e-4)
        # OU noise drives exploration in the continuous action space.
        self.exploration_strategy = OU_Noise_Exploration(self.config)
    def __init__(self, config):
        """Initialise a discrete-action SAC agent: twin critics with target copies, a
        softmax actor, a replay buffer and an (optionally auto-tuned) entropy weight."""
        Base_Agent.__init__(self, config)
        assert self.action_types == "DISCRETE", "Action types must be discrete. Use SAC instead for continuous actions"
        assert self.config.hyperparameters["Actor"]["final_layer_activation"] == "Softmax", "Final actor layer must be softmax"
        self.hyperparameters = config.hyperparameters
        # Twin critics (clipped double-Q); the second uses a shifted seed so it initialises differently.
        self.critic_local = self.create_NN(input_dim=self.state_size, output_dim=self.action_size, key_to_use="Critic")
        self.critic_local_2 = self.create_NN(input_dim=self.state_size, output_dim=self.action_size,
                                             key_to_use="Critic", override_seed=self.config.seed + 1)
        self.critic_optimizer = torch.optim.Adam(self.critic_local.parameters(),
                                                 lr=self.hyperparameters["Critic"]["learning_rate"])
        self.critic_optimizer_2 = torch.optim.Adam(self.critic_local_2.parameters(),
                                                   lr=self.hyperparameters["Critic"]["learning_rate"])
        self.critic_target = self.create_NN(input_dim=self.state_size, output_dim=self.action_size,
                                            key_to_use="Critic")
        self.critic_target_2 = self.create_NN(input_dim=self.state_size, output_dim=self.action_size,
                                              key_to_use="Critic")
        Base_Agent.copy_model_over(self.critic_local, self.critic_target)
        Base_Agent.copy_model_over(self.critic_local_2, self.critic_target_2)
        self.memory = Replay_Buffer(self.hyperparameters["Critic"]["buffer_size"], self.hyperparameters["batch_size"],
                                    self.config.seed)

        self.actor_local = self.create_NN(input_dim=self.state_size, output_dim=self.action_size, key_to_use="Actor")
        self.actor_optimizer = torch.optim.Adam(self.actor_local.parameters(),
                                                lr=self.hyperparameters["Actor"]["learning_rate"])
        self.automatic_entropy_tuning = self.hyperparameters["automatically_tune_entropy_hyperparameter"]
        if self.automatic_entropy_tuning:
            # BUG FIX: the original used the continuous-action heuristic
            # -prod(action_space.shape), which degenerates for discrete spaces
            # (empty shape -> constant target of -1). For discrete actions the
            # target entropy is a fraction of the maximum entropy log(|A|),
            # matching the discrete SAC initialiser elsewhere in this file.
            self.target_entropy = -torch.log(torch.tensor(1.0 / self.action_size)).item() * 0.98
            self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)
            self.alpha = self.log_alpha.exp()
            self.alpha_optim = Adam([self.log_alpha], lr=self.hyperparameters["Actor"]["learning_rate"])
        else:
            # Fixed entropy weight when auto-tuning is disabled.
            self.alpha = self.hyperparameters["entropy_term_weight"]
        assert not self.hyperparameters["add_extra_noise"], "There is no add extra noise option for the discrete version of SAC at moment"
        self.add_extra_noise = False
        self.do_evaluation_iterations = self.hyperparameters["do_evaluation_iterations"]
 def __init__(self, config):
     """Set up a PPO-style agent with old/new policy networks, optional checkpoint
     loading, and a parallel experience generator."""
     Base_Agent.__init__(self, config)
     self.policy_output_size = self.calculate_policy_output_size()
     self.policy_new = self.create_NN(input_dim=self.state_size,
                                      output_dim=self.policy_output_size)
     # Checkpoint location for the new policy; falls back to a default folder.
     model_path = self.config.model_path if self.config.model_path else 'Models'
     self.policy_new_path = os.path.join(model_path,
                                         "{}_policy_new.pt".format(self.agent_name))
     if self.config.load_model: self.locally_load_policy()
     # The old policy starts as an exact copy of the (possibly just loaded) new policy.
     self.policy_old = self.create_NN(input_dim=self.state_size,
                                      output_dim=self.policy_output_size)
     self.policy_old.load_state_dict(copy.deepcopy(self.policy_new.state_dict()))
     self.policy_new_optimizer = optim.Adam(self.policy_new.parameters(),
                                            lr=self.hyperparameters["learning_rate"],
                                            eps=1e-4)
     self.episode_number = 0
     # Rolling storage for batches of whole episodes.
     self.many_episode_states = []
     self.many_episode_actions = []
     self.many_episode_rewards = []
     self.experience_generator = Parallel_Experience_Generator(self.environment,
                                                               self.policy_new,
                                                               self.config.seed,
                                                               self.hyperparameters,
                                                               self.action_size)
     self.exploration_strategy = Epsilon_Greedy_Exploration(self.config)
Example #6
0
 def __init__(self, config):
     """Initialise a REINFORCE-style agent: one policy network plus per-episode
     reward and log-probability storage."""
     Base_Agent.__init__(self, config)
     self.policy = self.create_NN(input_dim=self.state_size,
                                  output_dim=self.action_size)
     self.optimizer = optim.Adam(self.policy.parameters(),
                                 lr=self.hyperparameters["learning_rate"])
     # Accumulated during each episode, consumed by the policy-gradient update.
     self.episode_rewards = []
     self.episode_log_probabilities = []
    def __init__(self, config):
        """Initialise a DDPG agent (actor/critic networks with targets, replay buffer,
        OU exploration) and, in video mode, prepare a directory for recorded episodes."""
        Base_Agent.__init__(self, config)
        self.hyperparameters = config.hyperparameters
        # Critic consumes concatenated (state, action); outputs a scalar Q-value.
        self.critic_local = self.create_NN(input_dim=self.state_size +
                                           self.action_size,
                                           output_dim=1,
                                           key_to_use="Critic")
        self.critic_target = self.create_NN(input_dim=self.state_size +
                                            self.action_size,
                                            output_dim=1,
                                            key_to_use="Critic")
        Base_Agent.copy_model_over(self.critic_local, self.critic_target)

        self.critic_optimizer = optim.Adam(
            self.critic_local.parameters(),
            lr=self.hyperparameters["Critic"]["learning_rate"],
            eps=1e-4)
        self.memory = Replay_Buffer(
            self.hyperparameters["Critic"]["buffer_size"],
            self.hyperparameters["batch_size"], self.config.seed)
        self.actor_local = self.create_NN(input_dim=self.state_size,
                                          output_dim=self.action_size,
                                          key_to_use="Actor")
        self.actor_target = self.create_NN(input_dim=self.state_size,
                                           output_dim=self.action_size,
                                           key_to_use="Actor")
        Base_Agent.copy_model_over(self.actor_local, self.actor_target)

        self.actor_optimizer = optim.Adam(
            self.actor_local.parameters(),
            lr=self.hyperparameters["Actor"]["learning_rate"],
            eps=1e-4)
        self.exploration_strategy = OU_Noise_Exploration(self.config)

        if self.video_mode:
            # One directory collects all recorded videos for this run.
            self.file_name = self.environment_title + "_" + self.agent_name + "_videos"
            # BUG FIX: the original re-checked and re-created this same directory once
            # per episode in a loop; a single idempotent makedirs call is equivalent.
            os.makedirs(self.file_name, exist_ok=True)
        self.save_max_result_list_list = []
Example #8
0
 def __init__(self, config):
     """Initialise a DQN agent whose Q-network is a custom Policy module placed on the GPU."""
     Base_Agent.__init__(self, config)
     hp = self.hyperparameters
     self.memory = Replay_Buffer(hp["buffer_size"], hp["batch_size"], config.seed)
     # NOTE(review): device is hard-coded to CUDA here, so this fails on CPU-only
     # hosts — confirm whether it should use the agent's configured device instead.
     self.q_network_local = Policy(self.state_size, self.action_size).to("cuda")
     self.q_network_optimizer = optim.Adam(self.q_network_local.parameters(),
                                           lr=hp["learning_rate"],
                                           eps=1e-4)
     self.exploration_strategy = Epsilon_Greedy_Exploration(config)
 def __init__(self, config):
     """Set up a PPO-style agent with matching old/new policy networks and a
     parallel experience generator."""
     Base_Agent.__init__(self, config)
     self.policy_output_size = self.calculate_policy_output_size()
     # Two identical policies: `new` is trained; `old` holds the pre-update snapshot.
     self.policy_new = self.create_NN(input_dim=self.state_size,
                                      output_dim=self.policy_output_size)
     self.policy_old = self.create_NN(input_dim=self.state_size,
                                      output_dim=self.policy_output_size)
     self.policy_old.load_state_dict(copy.deepcopy(self.policy_new.state_dict()))
     self.policy_new_optimizer = optim.Adam(self.policy_new.parameters(),
                                            lr=self.hyperparameters["learning_rate"])
     self.episode_number = 0
     # Rolling storage for batches of whole episodes.
     self.many_episode_states = []
     self.many_episode_actions = []
     self.many_episode_rewards = []
     self.experience_generator = Parallel_Experience_Generator(self.environment,
                                                               self.policy_new,
                                                               self.config.seed,
                                                               self.hyperparameters,
                                                               self.action_size)
     self.exploration_strategy = Epsilon_Greedy_Exploration(self.config)
    def __init__(self, config, agent_name_=agent_name):
        """Initialise a DQN agent (device-aware replay buffer, local Q-network, Adam,
        epsilon-greedy) and register the network with Weights & Biases."""
        Base_Agent.__init__(self, config, agent_name=agent_name_)
        hp = self.hyperparameters
        self.memory = Replay_Buffer(hp["buffer_size"], hp["batch_size"],
                                    config.seed, self.device)
        self.q_network_local = self.create_NN(
            input_dim=self.state_size,
            output_dim=self.action_size)  # TODO: Change NN
        self.q_network_optimizer = optim.Adam(self.q_network_local.parameters(),
                                              lr=hp["learning_rate"],
                                              eps=1e-4)
        self.exploration_strategy = Epsilon_Greedy_Exploration(config)

        # Stream gradients/weights of the Q-network to wandb.
        self.wandb_watch(self.q_network_local,
                         log_freq=self.config.wandb_model_log_freq)
    def __init__(self, config):
        """Initialise a hierarchical-DQN-style agent: one controller DDQN acting on
        (state, goal) pairs plus an ensemble of meta-controller DDQNs proposing goals
        over the observation space."""
        Base_Agent.__init__(self, config)
        self.controller_config = copy.deepcopy(config)
        self.controller_config.hyperparameters = self.controller_config.hyperparameters[
            "CONTROLLER"]
        self.controller = DDQN(self.controller_config)
        # Controller input is the state concatenated with a goal, hence the doubled size.
        self.controller.q_network_local = self.create_NN(
            input_dim=self.state_size * 2,
            output_dim=self.action_size,
            key_to_use="CONTROLLER")
        self.controller.q_network_target = self.create_NN(
            input_dim=self.state_size * 2,
            output_dim=self.action_size,
            key_to_use="CONTROLLER")

        self.meta_controller_config = copy.deepcopy(config)
        self.meta_controller_config.hyperparameters = self.meta_controller_config.hyperparameters[
            "META_CONTROLLER"]

        # Ensemble of meta-controllers (was a bare magic number inline).
        num_meta_controllers = 5
        self.list_meta_controller = [
            DDQN(self.meta_controller_config) for _ in range(num_meta_controllers)
        ]
        # Flat views over the ensemble's local/target networks, kept in ensemble order.
        self.lq_network_local = []
        self.lq_network_target = []
        for meta in self.list_meta_controller:
            meta.q_network_local = self.create_NN(
                input_dim=self.state_size,
                output_dim=config.environment.observation_space.n,
                key_to_use="META_CONTROLLER")
            self.lq_network_local.append(meta.q_network_local)
            meta.q_network_target = self.create_NN(
                input_dim=self.state_size,
                output_dim=config.environment.observation_space.n,
                key_to_use="META_CONTROLLER")
            self.lq_network_target.append(meta.q_network_target)

        self.rolling_intrinsic_rewards = []
        self.goals_seen = []
        self.controller_learnt_enough = False
        self.controller_actions = []
    def __init__(self, config):
        """Initialise a DQN agent with on-disk checkpointing of the local Q-network."""
        Base_Agent.__init__(self, config)
        # Checkpoint folder; falls back to a default when none is configured.
        model_path = self.config.model_path if self.config.model_path else 'Models'
        hp = self.hyperparameters
        self.memory = Replay_Buffer(hp["buffer_size"], hp["batch_size"], config.seed)
        self.q_network_local = self.create_NN(input_dim=self.state_size,
                                              output_dim=self.action_size)
        self.q_network_local_path = os.path.join(
            model_path, "{}_q_network_local.pt".format(self.agent_name))

        if self.config.load_model:
            self.locally_load_policy()
        self.q_network_optimizer = optim.Adam(self.q_network_local.parameters(),
                                              lr=hp["learning_rate"],
                                              eps=1e-4)
        self.exploration_strategy = Epsilon_Greedy_Exploration(config)
Example #13
0
    def __init__(self, config):
        """Initialise a skills-based hierarchical agent: a skill agent that pretrains
        alone, then a manager that switches between learnt skills."""
        Base_Agent.__init__(self, config)
        # BUG FIX: the original called environment.reset() up to three times just to
        # type-check the state; reset once and test that single observation.
        initial_state = self.environment.reset()
        assert isinstance(initial_state, (int, np.int64)) or \
            initial_state.dtype == np.int64, "only works for discrete states currently"
        self.num_skills = self.hyperparameters["SKILL_AGENT"]["num_skills"]
        self.episodes_for_pretraining = self.hyperparameters[
            "SKILL_AGENT"]["episodes_for_pretraining"]
        self.timesteps_before_changing_skill = self.hyperparameters[
            "MANAGER"]["timesteps_before_changing_skill"]

        # Skill agent runs alone for the pretraining phase...
        self.skill_agent_config = copy.deepcopy(config)
        self.skill_agent_config.hyperparameters = self.skill_agent_config.hyperparameters[
            "SKILL_AGENT"]
        self.skill_agent_config.num_episodes_to_run = self.episodes_for_pretraining

        # ...then the manager gets whatever episode budget remains.
        self.manager_config = copy.deepcopy(config)
        self.manager_config.hyperparameters = self.manager_config.hyperparameters["MANAGER"]
        self.manager_config.num_episodes_to_run = self.config.num_episodes_to_run - \
            self.skill_agent_config.num_episodes_to_run
    def __init__(self, config):
        """Initialise a DDPG agent with on-disk checkpointing: actor/critic networks
        plus target copies, their optimizers, a replay buffer and OU exploration noise.
        Checkpoint paths are derived from config.model_path (default 'Models')."""
        Base_Agent.__init__(self, config)
        self.hyperparameters = config.hyperparameters
        # Critic consumes concatenated (state, action); outputs a scalar Q-value.
        self.critic_local = self.create_NN(input_dim=self.state_size +
                                           self.action_size,
                                           output_dim=1,
                                           key_to_use="Critic")
        self.actor_local = self.create_NN(input_dim=self.state_size,
                                          output_dim=self.action_size,
                                          key_to_use="Actor")
        self.critic_target = self.create_NN(input_dim=self.state_size +
                                            self.action_size,
                                            output_dim=1,
                                            key_to_use="Critic")
        # Checkpoint folder; falls back to a default when none is configured.
        model_path = self.config.model_path if self.config.model_path else 'Models'
        self.critic_local_path = os.path.join(
            model_path, "{}_critic_local.pt".format(self.agent_name))
        self.critic_local_2_path = os.path.join(
            model_path, "{}_critic_local_2.pt".format(self.agent_name))
        self.actor_local_path = os.path.join(
            model_path, "{}_actor_local.pt".format(self.agent_name))
        # Load before copying so critic_target receives the loaded weights.
        if self.config.load_model: self.locally_load_policy()
        Base_Agent.copy_model_over(self.critic_local, self.critic_target)

        self.critic_optimizer = optim.Adam(
            self.critic_local.parameters(),
            lr=self.hyperparameters["Critic"]["learning_rate"],
            eps=1e-4)
        self.memory = Replay_Buffer(
            self.hyperparameters["Critic"]["buffer_size"],
            self.hyperparameters["batch_size"], self.config.seed)
        self.actor_target = self.create_NN(input_dim=self.state_size,
                                           output_dim=self.action_size,
                                           key_to_use="Actor")

        # NOTE(review): locally_load_policy() is called a second time here, before the
        # actor copy. If loading is idempotent this repeat is redundant — confirm
        # against locally_load_policy before removing either call.
        if self.config.load_model: self.locally_load_policy()
        Base_Agent.copy_model_over(self.actor_local, self.actor_target)

        self.actor_optimizer = optim.Adam(
            self.actor_local.parameters(),
            lr=self.hyperparameters["Actor"]["learning_rate"],
            eps=1e-4)
        # OU noise drives exploration in the continuous action space.
        self.exploration_strategy = OU_Noise_Exploration(self.config)
    def __init__(self, config, agent_name_=agent_name):
        """Initialise a continuous-action SAC agent: twin critics with target copies, an
        actor with two outputs per action dimension, a replay buffer, optional entropy
        auto-tuning and optional extra OU noise; registers the actor with wandb."""
        Base_Agent.__init__(self, config, agent_name_=agent_name_)
        assert self.action_types == "CONTINUOUS", "Action types must be continuous. Use SAC Discrete instead for discrete actions"
        assert self.config.hyperparameters["Actor"]["final_layer_activation"] != "Softmax", "Final actor layer must not be softmax"
        self.hyperparameters = config.hyperparameters
        critic_lr = self.hyperparameters["Critic"]["learning_rate"]
        actor_lr = self.hyperparameters["Actor"]["learning_rate"]
        critic_input = self.state_size + self.action_size
        # Twin critics (clipped double-Q); the second gets a shifted seed so it initialises differently.
        self.critic_local = self.create_NN(input_dim=critic_input, output_dim=1, key_to_use="Critic")
        self.critic_local_2 = self.create_NN(input_dim=critic_input, output_dim=1,
                                             key_to_use="Critic", override_seed=self.config.seed + 1)
        self.critic_optimizer = torch.optim.Adam(self.critic_local.parameters(),
                                                 lr=critic_lr, eps=1e-4)
        self.critic_optimizer_2 = torch.optim.Adam(self.critic_local_2.parameters(),
                                                   lr=critic_lr, eps=1e-4)
        self.critic_target = self.create_NN(input_dim=critic_input, output_dim=1,
                                            key_to_use="Critic")
        self.critic_target_2 = self.create_NN(input_dim=critic_input, output_dim=1,
                                              key_to_use="Critic")
        Base_Agent.copy_model_over(self.critic_local, self.critic_target)
        Base_Agent.copy_model_over(self.critic_local_2, self.critic_target_2)
        self.memory = Replay_Buffer(self.hyperparameters["Critic"]["buffer_size"],
                                    self.hyperparameters["batch_size"],
                                    self.config.seed)
        # Actor produces two values per action dimension.
        self.actor_local = self.create_NN(input_dim=self.state_size,
                                          output_dim=self.action_size * 2,
                                          key_to_use="Actor")
        self.actor_optimizer = torch.optim.Adam(self.actor_local.parameters(),
                                                lr=actor_lr, eps=1e-4)
        self.automatic_entropy_tuning = self.hyperparameters["automatically_tune_entropy_hyperparameter"]
        if self.automatic_entropy_tuning:
            self.target_entropy = -torch.prod(torch.Tensor(self.environment.action_space.shape).to(self.device)).item() # heuristic value from the paper
            self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)
            self.alpha = self.log_alpha.exp()
            self.alpha_optim = Adam([self.log_alpha], lr=actor_lr, eps=1e-4)
        else:
            # Fixed entropy weight when auto-tuning is disabled.
            self.alpha = self.hyperparameters["entropy_term_weight"]

        self.add_extra_noise = self.hyperparameters["add_extra_noise"]
        if self.add_extra_noise:
            # Extra Ornstein-Uhlenbeck noise on top of the stochastic policy.
            self.noise = OU_Noise(self.action_size, self.config.seed, self.hyperparameters["mu"],
                                  self.hyperparameters["theta"], self.hyperparameters["sigma"])

        self.do_evaluation_iterations = self.hyperparameters["do_evaluation_iterations"]

        self.wandb_watch(self.actor_local, log_freq=self.config.wandb_model_log_freq)
Example #16
0
 def __init__(self, config):
     """Initialise a hierarchical DQN: a controller that acts on (state, goal) pairs
     and a meta-controller that picks goals over the observation space."""
     Base_Agent.__init__(self, config)
     self.controller_config = copy.deepcopy(config)
     self.controller_config.hyperparameters = self.controller_config.hyperparameters["CONTROLLER"]
     self.controller = DDQN(self.controller_config)
     # Controller input is the state concatenated with a goal, hence the doubled size.
     self.controller.q_network_local = self.create_NN(input_dim=self.state_size * 2,
                                                      output_dim=self.action_size,
                                                      key_to_use="CONTROLLER")
     self.meta_controller_config = copy.deepcopy(config)
     self.meta_controller_config.hyperparameters = self.meta_controller_config.hyperparameters["META_CONTROLLER"]
     self.meta_controller = DDQN(self.meta_controller_config)
     # Meta-controller outputs one value per discrete observation (goal candidates).
     self.meta_controller.q_network_local = self.create_NN(input_dim=self.state_size,
                                                           output_dim=config.environment.observation_space.n,
                                                           key_to_use="META_CONTROLLER")
     self.rolling_intrinsic_rewards = []
     self.goals_seen = []
     self.controller_learnt_enough = False
     self.controller_actions = []
    def __init__(self, config, agent_name_=agent_name):
        """Initialise a DQN agent that accepts an optional pre-built model via
        hyperparameters["model"]; otherwise a default network is created. The
        chosen network is registered with Weights & Biases."""
        Base_Agent.__init__(self, config, agent_name=agent_name_)

        self.memory = Replay_Buffer(self.hyperparameters["buffer_size"],
                                    self.hyperparameters["batch_size"],
                                    config.seed, self.device)

        # If model is not provided, create one. TODO Add this mechanism to all agents.
        # (Idiom fix: dict.get collapses the original "key missing or value is None"
        # two-part test into one expression with identical behaviour.)
        if self.hyperparameters.get("model") is None:
            self.q_network_local = self.create_NN(input_dim=self.state_size,
                                                  output_dim=self.action_size)
        else:
            self.q_network_local = self.hyperparameters["model"]

        # Stream gradients/weights of the Q-network to wandb.
        self.wandb_watch(self.q_network_local,
                         log_freq=self.config.wandb_model_log_freq)

        self.q_network_optimizer = optim.Adam(
            self.q_network_local.parameters(),
            lr=self.hyperparameters["learning_rate"],
            eps=1e-4)
        self.exploration_strategy = Epsilon_Greedy_Exploration(config)
Example #18
0
 def __init__(self, config):
     """Initialise a multi-agent wrapper: build the per-agent dictionary and an
     epsilon-greedy exploration strategy."""
     Base_Agent.__init__(self, config)
     # One entry per managed agent, built by the subclass-provided factory.
     self.agent_dic = self.create_agent_dic()
     self.exploration_strategy = Epsilon_Greedy_Exploration(config)
Example #19
0
    def __init__(self, config, agent_name_=agent_name):
        """Initialise a discrete-action SAC agent: twin critics with target copies, a
        softmax actor, a device-aware replay buffer and an (optionally auto-tuned)
        entropy weight; registers the actor with Weights & Biases."""
        Base_Agent.__init__(self, config, agent_name_=agent_name_)
        assert self.action_types == "DISCRETE", "Action types must be discrete. Use SAC instead for continuous actions"
        assert self.config.hyperparameters["Actor"][
            "final_layer_activation"] == "Softmax", "Final actor layer must be softmax"
        self.hyperparameters = config.hyperparameters
        critic_lr = self.hyperparameters["Critic"]["learning_rate"]
        actor_lr = self.hyperparameters["Actor"]["learning_rate"]
        # Twin critics (clipped double-Q); the second gets a shifted seed so it initialises differently.
        self.critic_local = self.create_NN(input_dim=self.state_size,
                                           output_dim=self.action_size,
                                           key_to_use="Critic")
        self.critic_local_2 = self.create_NN(input_dim=self.state_size,
                                             output_dim=self.action_size,
                                             key_to_use="Critic",
                                             override_seed=self.config.seed + 1)
        self.critic_optimizer = torch.optim.Adam(self.critic_local.parameters(),
                                                 lr=critic_lr, eps=1e-4)
        self.critic_optimizer_2 = torch.optim.Adam(self.critic_local_2.parameters(),
                                                   lr=critic_lr, eps=1e-4)
        self.critic_target = self.create_NN(input_dim=self.state_size,
                                            output_dim=self.action_size,
                                            key_to_use="Critic")
        self.critic_target_2 = self.create_NN(input_dim=self.state_size,
                                              output_dim=self.action_size,
                                              key_to_use="Critic")
        Base_Agent.copy_model_over(self.critic_local, self.critic_target)
        Base_Agent.copy_model_over(self.critic_local_2, self.critic_target_2)
        self.memory = Replay_Buffer(self.hyperparameters["Critic"]["buffer_size"],
                                    self.hyperparameters["batch_size"],
                                    self.config.seed,
                                    device=self.device)

        self.actor_local = self.create_NN(input_dim=self.state_size,
                                          output_dim=self.action_size,
                                          key_to_use="Actor")
        self.actor_optimizer = torch.optim.Adam(self.actor_local.parameters(),
                                                lr=actor_lr, eps=1e-4)
        self.automatic_entropy_tuning = self.hyperparameters[
            "automatically_tune_entropy_hyperparameter"]
        if self.automatic_entropy_tuning:
            # we set the max possible entropy as the target entropy
            self.target_entropy = -np.log((1.0 / self.action_size)) * 0.98
            self.log_alpha = torch.zeros(1,
                                         requires_grad=True,
                                         device=self.device)
            self.alpha = self.log_alpha.exp()
            self.alpha_optim = Adam([self.log_alpha], lr=actor_lr, eps=1e-4)
        else:
            # Fixed entropy weight when auto-tuning is disabled.
            self.alpha = self.hyperparameters["entropy_term_weight"]
        assert not self.hyperparameters[
            "add_extra_noise"], "There is no add extra noise option for the discrete version of SAC at moment"
        self.add_extra_noise = False
        self.do_evaluation_iterations = self.hyperparameters[
            "do_evaluation_iterations"]

        self.wandb_watch(self.actor_local,
                         log_freq=self.config.wandb_model_log_freq)