Example #1
    def __init__(
        self,
        model,
        buffer_size=1000,
        learning_rate=1e-3,
        discount_factor=0.99,
        gae_lamda=1,  # lambda = 1 reduces GAE to Monte-Carlo returns
        verbose=False,
        num_episodes=1000,
    ):
        super().__init__()
        self.lr = learning_rate
        self.end_lr = self.lr * 0.1
        self.eps = np.finfo(np.float32).eps.item()

        self._gamma = discount_factor
        self._gae_lamda = gae_lamda  # default: 1, MC
        self._learn_cnt = 0
        self._verbose = verbose
        self.schedule_adam = True
        self.buffer = ReplayBuffer(buffer_size, replay=False)

        self.actor_eval = model.policy_net.to(device).train()
        self.critic_eval = model.value_net.to(device).train()
        self.actor_eval_optim = optim.Adam(self.actor_eval.parameters(),
                                           lr=self.lr)
        self.critic_eval_optim = optim.Adam(self.critic_eval.parameters(),
                                            lr=self.lr)

        self.criterion = nn.SmoothL1Loss()
        self.num_episodes = num_episodes
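The end_lr = lr * 0.1, schedule_adam = True and num_episodes fields suggest a linear learning-rate decay toward end_lr over the training run. A minimal sketch of such a schedule follows; the method name _adjust_lr and the episode argument are assumptions, not part of the original class.

    def _adjust_lr(self, episode):
        # Sketch: linearly anneal the Adam learning rate from self.lr to
        # self.end_lr over self.num_episodes episodes.
        frac = min(episode / self.num_episodes, 1.0)
        new_lr = self.lr + frac * (self.end_lr - self.lr)
        for optimizer in (self.actor_eval_optim, self.critic_eval_optim):
            for group in optimizer.param_groups:
                group['lr'] = new_lr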
Example #2
    def __init__(self,
                 actor_net,
                 critic_net,
                 buffer_size=1000,
                 actor_learn_freq=1,
                 target_update_freq=0,
                 target_update_tau=5e-3,
                 learning_rate=0.0001,
                 discount_factor=0.99,
                 batch_size=100,
                 verbose=False):
        super().__init__()
        self.lr = learning_rate
        self.eps = np.finfo(np.float32).eps.item()
        self.tau = target_update_tau
        self.ratio_clip = 0.2
        self.lam_entropy = 0.01
        self.adv_norm = True
        self.rew_norm = False
        self.schedule_clip = False
        self.schedule_adam = False

        self.actor_learn_freq = actor_learn_freq
        self.target_update_freq = target_update_freq
        self._gamma = discount_factor
        self._target = target_update_freq > 0
        self._update_iteration = 10
        self._sync_cnt = 0
        # self._learn_cnt = 0
        self._learn_critic_cnt = 0
        self._learn_actor_cnt = 0

        self._verbose = verbose
        self._batch_size = batch_size
        self.buffer = ReplayBuffer(buffer_size, replay=False)
        # assert not self.buffer.allow_replay, 'PPO buffer cannot be replay buffer'
        self._normalized = lambda x, e: (x - x.mean()) / (x.std() + e)

        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.actor_eval = actor_net.to(self.device)
        self.critic_eval = critic_net.to(self.device)
        self.actor_eval_optim = optim.Adam(self.actor_eval.parameters(),
                                           lr=self.lr)
        self.critic_eval_optim = optim.Adam(self.critic_eval.parameters(),
                                            lr=self.lr)

        self.actor_eval.train()
        self.critic_eval.train()

        if self._target:
            self.actor_target = deepcopy(self.actor_eval)
            self.critic_target = deepcopy(self.critic_eval)
            self.actor_target.load_state_dict(self.actor_eval.state_dict())
            self.critic_target.load_state_dict(self.critic_eval.state_dict())

            self.actor_target.eval()
            self.critic_target.eval()

        self.criterion = nn.SmoothL1Loss()
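The ratio_clip, lam_entropy and adv_norm fields point at the standard PPO clipped-surrogate objective. Below is a minimal sketch of that loss; the method name _actor_loss and the log_probs_new, log_probs_old, advantages and entropy arguments are assumptions, not names from the original class.

    def _actor_loss(self, log_probs_new, log_probs_old, advantages, entropy):
        # Sketch: PPO clipped surrogate using the fields defined above.
        if self.adv_norm:
            advantages = self._normalized(advantages, self.eps)
        ratio = torch.exp(log_probs_new - log_probs_old)
        surr1 = ratio * advantages
        surr2 = torch.clamp(ratio, 1.0 - self.ratio_clip,
                            1.0 + self.ratio_clip) * advantages
        # Maximize the clipped surrogate plus an entropy bonus.
        return -(torch.min(surr1, surr2).mean()
                 + self.lam_entropy * entropy.mean())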
Example #3
    def __init__(
        self, 
        model,
        buffer_size=1000,
        batch_size=100,
        actor_learn_freq=1,
        target_update_freq=5,
        target_update_tau=1e-2,
        learning_rate=1e-3,
        discount_factor=0.99,
        verbose=False,
        update_iteration=10,
        act_dim=None,
        alpha=None,  # default: auto_entropy_tuning
        ):
        super().__init__()
        self.lr = learning_rate
        self.eps = np.finfo(np.float32).eps.item()
        self.tau = target_update_tau

        self.actor_learn_freq = actor_learn_freq
        self.target_update_freq = target_update_freq
        self._gamma = discount_factor
        self._target = target_update_freq > 0
        self._update_iteration = update_iteration
        self._sync_cnt = 0
        self._learn_critic_cnt = 0
        self._learn_actor_cnt = 0
        self._verbose = verbose
        self._batch_size = batch_size
        self.buffer = ReplayBuffer(buffer_size) # off-policy

        self.actor_eval = model.policy_net.to(device).train()
        self.critic_eval = model.value_net.to(device).train()

        self.actor_target = self.copy_net(self.actor_eval)
        self.critic_target = self.copy_net(self.critic_eval)
        self.actor_eval_optim = optim.Adam(self.actor_eval.parameters(), lr=self.lr)
        self.critic_eval_optim = optim.Adam(self.critic_eval.parameters(), lr=self.lr)
        
        self.criterion = nn.SmoothL1Loss()
        self.act_dim = act_dim
        self.alpha = alpha
        self.auto_entropy_tuning = True

        if self.alpha:
            self.auto_entropy_tuning = False
            self.value_eval = model.v_net.to(device).train()
            self.value_target = self.copy_net(self.value_eval)
            self.value_eval_optim = optim.Adam(self.value_eval.parameters(), lr=self.lr)
        else:
            self.target_entropy = -torch.tensor(1).to(device)
            self.log_alpha = torch.zeros(1, requires_grad=True, device=device)
            self.alpha_optim = optim.Adam([self.log_alpha], lr=self.lr)
            self.alpha = self.log_alpha.exp()
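When alpha is left at None, the log_alpha / target_entropy pair enables automatic temperature tuning in the SAC style. A minimal sketch of the corresponding alpha update follows; the method name _update_alpha and the log_prob argument (log-probability of the sampled action) are assumptions.

    def _update_alpha(self, log_prob):
        # Sketch: tune the entropy temperature so the policy entropy tracks
        # self.target_entropy (only used when auto_entropy_tuning is True).
        alpha_loss = -(self.log_alpha
                       * (log_prob + self.target_entropy).detach()).mean()
        self.alpha_optim.zero_grad()
        alpha_loss.backward()
        self.alpha_optim.step()
        self.alpha = self.log_alpha.exp()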
Example #4
    def __init__(
            self,
            model,
            buffer_size=1000,
            actor_learn_freq=1,
            target_update_freq=0,
            target_update_tau=5e-3,
            learning_rate=0.0001,
            discount_factor=0.99,
            gae_lamda=0.95,  # lambda < 1 biases GAE toward TD estimates
            batch_size=100,
            verbose=False):
        super().__init__()
        self.lr = learning_rate
        self.eps = np.finfo(np.float32).eps.item()
        self.tau = target_update_tau
        self.ratio_clip = 0.2
        self.lam_entropy = 0.01
        self.adv_norm = False  # normalize advantage, default=False
        self.rew_norm = False  # normalize reward, default=False
        self.schedule_clip = False
        self.schedule_adam = False

        self.actor_learn_freq = actor_learn_freq
        self.target_update_freq = target_update_freq
        self._gamma = discount_factor
        self._gae_lam = gae_lamda
        self._target = target_update_freq > 0
        self._update_iteration = 10
        self._sync_cnt = 0
        # self._learn_cnt = 0
        self._learn_critic_cnt = 0
        self._learn_actor_cnt = 0

        self._verbose = verbose
        self._batch_size = batch_size
        self._normalized = lambda x, e: (x - x.mean()) / (x.std() + e)
        self.buffer = ReplayBuffer(buffer_size, replay=False)

        self.actor_eval = model.policy_net.to(device).train()
        self.critic_eval = model.value_net.to(device).train()
        self.actor_eval_optim = optim.Adam(self.actor_eval.parameters(),
                                           lr=self.lr)
        self.critic_eval_optim = optim.Adam(self.critic_eval.parameters(),
                                            lr=self.lr)

        # self.actor_eval.train()
        # self.critic_eval.train()

        if self._target:
            self.actor_target = self.copy_net(self.actor_eval)
            self.critic_target = self.copy_net(self.critic_eval)

        self.criterion = nn.SmoothL1Loss()
Example #5
    def __init__(self,
                 actor_net,
                 critic_net,
                 buffer_size=1000,
                 actor_learn_freq=1,
                 target_update_freq=0,
                 target_update_tau=5e-3,
                 learning_rate=0.01,
                 discount_factor=0.99,
                 batch_size=100,
                 verbose=False):
        super().__init__()
        self.lr = learning_rate
        self.eps = np.finfo(np.float32).eps.item()
        self.tau = target_update_tau

        self.actor_learn_freq = actor_learn_freq
        self.target_update_freq = target_update_freq
        self._gamma = discount_factor
        self._target = target_update_freq > 0
        self._update_iteration = 10
        self._sync_cnt = 0
        # self._learn_cnt = 0
        self._learn_critic_cnt = 0
        self._learn_actor_cnt = 0
        self._verbose = verbose
        self._batch_size = batch_size
        self.replay_buffer = ReplayBuffer(buffer_size)
        # assert buffer.allow_replay, 'DDPG buffer must be replay buffer'

        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.actor_eval = actor_net.to(self.device)  # pi(s)
        self.critic_eval = critic_net.to(self.device)  # Q(s, a)
        self.actor_eval_optim = optim.Adam(self.actor_eval.parameters(),
                                           lr=self.lr)
        self.critic_eval_optim = optim.Adam(self.critic_eval.parameters(),
                                            lr=self.lr)

        self.actor_eval.train()
        self.critic_eval.train()

        if self._target:
            self.actor_target = deepcopy(self.actor_eval)
            self.critic_target = deepcopy(self.critic_eval)
            self.actor_target.load_state_dict(self.actor_eval.state_dict())
            self.critic_target.load_state_dict(self.critic_eval.state_dict())

            self.actor_target.eval()
            self.critic_target.eval()

        self.criterion = nn.MSELoss()  # why mse?
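target_update_tau is the Polyak-averaging coefficient used to have the target networks slowly track the evaluation networks. A minimal sketch of that soft update follows; the method name _soft_update is an assumption.

    def _soft_update(self, target_net, eval_net):
        # Sketch: Polyak-average target parameters toward eval parameters.
        for t_param, e_param in zip(target_net.parameters(),
                                    eval_net.parameters()):
            t_param.data.copy_(self.tau * e_param.data
                               + (1.0 - self.tau) * t_param.data)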
Example #6
    def __init__(self,
                 actor_net,
                 critic_net,
                 buffer_size=1000,
                 actor_learn_freq=1,
                 target_update_freq=0,
                 target_update_tau=5e-3,
                 learning_rate=0.01,
                 discount_factor=0.99,
                 gae_lamda=1,
                 verbose=False):
        super().__init__()
        self.lr = learning_rate
        self.eps = np.finfo(np.float32).eps.item()
        self.tau = target_update_tau
        self.gae_lamda = gae_lamda

        self.actor_learn_freq = actor_learn_freq
        self.target_update_freq = target_update_freq
        self._gamma = discount_factor
        self._target = target_update_freq > 0
        self._sync_cnt = 0
        # self._learn_cnt = 0
        self._learn_critic_cnt = 0
        self._learn_actor_cnt = 0
        self._verbose = verbose
        self.buffer = ReplayBuffer(buffer_size, replay=False)
        # assert not self.buffer.allow_replay, 'PPO buffer cannot be replay buffer'

        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.actor_eval = actor_net.to(self.device)
        self.critic_eval = critic_net.to(self.device)
        self.actor_eval_optim = optim.Adam(self.actor_eval.parameters(),
                                           lr=self.lr)
        self.critic_eval_optim = optim.Adam(self.critic_eval.parameters(),
                                            lr=self.lr)

        self.actor_eval.train()
        self.critic_eval.train()

        if self._target:
            self.actor_target = deepcopy(self.actor_eval)
            self.critic_target = deepcopy(self.critic_eval)
            self.actor_target.load_state_dict(self.actor_eval.state_dict())
            self.critic_target.load_state_dict(self.critic_eval.state_dict())

            self.actor_target.eval()
            self.critic_target.eval()

        self.criterion = nn.SmoothL1Loss()
Example #7
    def __init__(self,
                 critic_net,
                 action_shape=0,
                 buffer_size=1000,
                 batch_size=100,
                 target_update_freq=1,
                 target_update_tau=1,
                 learning_rate=0.01,
                 discount_factor=0.99,
                 verbose=False):
        super().__init__()
        self.lr = learning_rate
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.eps = np.finfo(np.float32).eps.item()
        self.tau = target_update_tau
        self.epsilon = 0.5
        # tricks (Rainbow-style extensions)
        self.double_q = True
        self.dueling_q = True
        self.distributional_q = True
        self.prioritized_replay = True
        self.noisy_q = True
        self.n_step_td = True

        self.target_update_freq = target_update_freq
        self.action_shape = action_shape
        self._gamma = discount_factor
        self._batch_size = batch_size
        self._verbose = verbose
        self._update_iteration = 10
        self._learn_cnt = 0
        self._normalized = lambda x, e: (x - x.mean()) / (x.std() + e)
        self.rew_norm = True
        self.buffer = ReplayBuffer(buffer_size)

        self.critic_eval = critic_net.to(self.device)
        self.critic_target = deepcopy(self.critic_eval)
        self.critic_target.load_state_dict(self.critic_eval.state_dict())
        self.critic_eval.use_dueling = self.critic_target.use_dueling = self.dueling_q  # Dueling DQN

        self.critic_eval_optim = optim.Adam(self.critic_eval.parameters(),
                                            lr=self.lr)
        self.critic_eval.train()

        self.criterion = nn.MSELoss()

        self.random_choose = 0
        self.sum_choose = 0
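The epsilon, random_choose and sum_choose fields suggest epsilon-greedy action selection with bookkeeping of how often a random action was taken. A minimal sketch follows, assuming action_shape is the size of a discrete action space and that critic_eval returns per-action Q-values; the method name choose_action is an assumption.

    def choose_action(self, state):
        # Sketch: epsilon-greedy selection with counters for random picks.
        self.sum_choose += 1
        if np.random.rand() < self.epsilon:
            self.random_choose += 1
            return np.random.randint(self.action_shape)
        state = torch.as_tensor(state, dtype=torch.float32,
                                device=self.device).unsqueeze(0)
        with torch.no_grad():
            q_values = self.critic_eval(state)
        return int(q_values.argmax(dim=-1).item())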
Example #8
    def __init__(
        self,
        model,
        buffer_size=1000,
        actor_learn_freq=1,
        target_update_freq=1,
        target_update_tau=0.01,
        # learning_rate=3e-3,
        actor_lr=1e-4,
        critic_lr=1e-3,
        discount_factor=0.99,
        batch_size=100,
        update_iteration=10,
        verbose=False,
    ):
        super().__init__()
        # self.lr = learning_rate
        self.eps = np.finfo(np.float32).eps.item()
        self.tau = target_update_tau

        self.actor_learn_freq = actor_learn_freq
        self.target_update_freq = target_update_freq
        self._gamma = discount_factor
        self._update_iteration = update_iteration
        self._sync_cnt = 0
        self._learn_critic_cnt = 0
        self._learn_actor_cnt = 0
        self._verbose = verbose
        self._batch_size = batch_size
        self.buffer = ReplayBuffer(buffer_size)

        self.actor_eval = model.policy_net.to(device).train()
        self.critic_eval = model.value_net.to(device).train()

        self.actor_eval_optim = optim.Adam(self.actor_eval.parameters(),
                                           lr=actor_lr)
        self.critic_eval_optim = optim.Adam(self.critic_eval.parameters(),
                                            lr=critic_lr)

        self.actor_target = self.copy_net(self.actor_eval)
        self.critic_target = self.copy_net(self.critic_eval)

        self.criterion = nn.MSELoss()  # why mse?

        self.noise_clip = 0.5
        self.noise_std = 0.2
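noise_std and noise_clip match TD3-style target policy smoothing. A minimal sketch of perturbing the target action is shown below, assuming actions are bounded to [-1, 1]; the method name _smoothed_target_action is an assumption.

    def _smoothed_target_action(self, next_state):
        # Sketch: add clipped Gaussian noise to the target policy's action (TD3).
        next_action = self.actor_target(next_state)
        noise = (torch.randn_like(next_action) * self.noise_std).clamp(
            -self.noise_clip, self.noise_clip)
        return (next_action + noise).clamp(-1.0, 1.0)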
Example #9
    def __init__(
        self, 
        model,
        buffer_size=1000,
        batch_size=100,
        actor_learn_freq=1,
        target_update_freq=5,
        target_update_tau=1e-2,
        learning_rate=1e-3,
        discount_factor=0.99,
        verbose=False,
        update_iteration=10,
        act_dim=None,
        alpha=1.0,
        ):
        super().__init__()
        self.lr = learning_rate
        self.eps = np.finfo(np.float32).eps.item()
        self.tau = target_update_tau

        self.actor_learn_freq = actor_learn_freq
        self.target_update_freq = target_update_freq
        self._gamma = discount_factor
        self._target = target_update_freq > 0
        self._update_iteration = update_iteration
        self._sync_cnt = 0
        self._learn_critic_cnt = 0
        self._learn_actor_cnt = 0
        self._verbose = verbose
        self._batch_size = batch_size
        self.buffer = ReplayBuffer(buffer_size) # off-policy

        self.actor_eval = model.policy_net.to(device).train()
        self.critic_eval = model.value_net.to(device).train()
        self.value_eval = model.v_net.to(device).train()

        self.value_target = self.copy_net(self.value_eval)
        
        self.actor_eval_optim = optim.Adam(self.actor_eval.parameters(), lr=self.lr)
        self.critic_eval_optim = optim.Adam(self.critic_eval.parameters(), lr=self.lr)
        self.value_eval_optim = optim.Adam(self.value_eval.parameters(), lr=self.lr)

        self.criterion = nn.SmoothL1Loss()
        self.act_dim = act_dim
        self.alpha = alpha
Example #10
    def __init__(
        self,
        model,
        buffer_size=1e6,
        batch_size=256,
        policy_freq=2,
        tau=0.005,
        discount=0.99,
        policy_lr=3e-4,
        value_lr=3e-4,
        learn_iteration=1,
        verbose=False,
        act_dim=None,
        alpha=1.0,
    ):
        super().__init__()
        self.tau = tau
        self.gamma = discount
        self.policy_freq = policy_freq
        self.learn_iteration = learn_iteration
        self.verbose = verbose
        self.act_dim = act_dim
        self.batch_size = batch_size
        self.buffer = ReplayBuffer(buffer_size)  # off-policy

        self.actor_eval = model.policy_net.to(device).train()
        self.critic_eval = model.value_net.to(device).train()
        self.value_eval = model.v_net.to(device).train()

        self.value_target = self.copy_net(self.value_eval)

        self.actor_eval_optim = torch.optim.Adam(self.actor_eval.parameters(),
                                                 lr=policy_lr)
        self.critic_eval_optim = torch.optim.Adam(
            self.critic_eval.parameters(), lr=value_lr)
        self.value_eval_optim = torch.optim.Adam(self.value_eval.parameters(),
                                                 lr=value_lr)

        self.criterion = nn.SmoothL1Loss()

        self.alpha = alpha
        self.eps = np.finfo(np.float32).eps.item()
        self._learn_critic_cnt = 0
        self._learn_actor_cnt = 0
Example #11
    def __init__(
        self,
        model,
        buffer_size=1000,
        actor_learn_freq=1,
        target_update_freq=1,
        target_update_tau=0.005,
        learning_rate=1e-4,
        discount_factor=0.99,
        batch_size=100,
        update_iteration=10,
        verbose=False,
        act_dim=None,
        num_episodes=1000,
    ):
        super().__init__()
        self.lr = learning_rate
        self.end_lr = learning_rate * 0.1
        self.eps = np.finfo(np.float32).eps.item()
        self.tau = target_update_tau

        self.actor_learn_freq = actor_learn_freq
        self.target_update_freq = target_update_freq
        self._gamma = discount_factor
        self._update_iteration = update_iteration
        self._sync_cnt = 0
        self._learn_critic_cnt = 0
        self._learn_actor_cnt = 0
        self._verbose = verbose
        self._batch_size = batch_size
        self.schedule_adam = True
        self.buffer = ReplayBuffer(buffer_size)

        self.actor_eval = model.policy_net.to(device).train()  # pi(s)
        self.critic_eval = model.value_net.to(device).train()  # Q(s, a)
        self.actor_eval_optim = optim.Adam(self.actor_eval.parameters(), lr=self.lr)
        self.critic_eval_optim = optim.Adam(self.critic_eval.parameters(), lr=self.lr)

        self.actor_target = self.copy_net(self.actor_eval)
        self.critic_target = self.copy_net(self.critic_eval)

        self.criterion = nn.MSELoss()  # why mse?
        self.act_dim = act_dim
        self.num_episodes = num_episodes
Example #12
    def __init__(self,
                 model,
                 action_shape=0,
                 buffer_size=1000,
                 batch_size=100,
                 target_update_freq=1,
                 target_update_tau=1,
                 learning_rate=0.01,
                 discount_factor=0.99,
                 verbose=False):
        super().__init__()
        self.lr = learning_rate
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.eps = np.finfo(np.float32).eps.item()
        self.tau = target_update_tau
        self.target_update_freq = target_update_freq
        # self.action_shape = action_shape
        self._gamma = discount_factor
        self._batch_size = batch_size
        self._verbose = verbose
        self._update_iteration = 10
        self._learn_cnt = 0
        self._normalized = lambda x, e: (x - x.mean()) / (x.std() + e)
        self.rew_norm = True
        self.buffer = ReplayBuffer(buffer_size)

        # self.declare_networks()
        self.critic_eval = model.value_net.to(self.device).train()
        self.critic_target = self.copy_net(self.critic_eval)

        self.critic_eval_optim = optim.Adam(self.critic_eval.parameters(),
                                            lr=self.lr)
        # self.critic_eval.train()

        self.criterion = nn.MSELoss()

        self.random_choose = 0
        self.sum_choose = 0
Example #13
    def __init__(
        self,
        model,
        action_dim=1,
        buffer_size=1000,
        batch_size=100,
        actor_learn_freq=1,
        target_update_freq=5,
        target_update_tau=0.1,
        # learning_rate=1e-3,
        actor_lr=1e-4,
        critic_lr=1e-3,
        discount_factor=0.99,
        verbose=False,
        update_iteration=10,
        use_priority=False,
        use_m=False,
        n_step=1,
    ):
        super().__init__()
        # self.lr = learning_rate
        self.eps = np.finfo(np.float32).eps.item()
        self.tau = target_update_tau

        self.actor_learn_freq = actor_learn_freq
        self.target_update_freq = target_update_freq
        self._gamma = discount_factor
        self._target = target_update_freq > 0
        self._update_iteration = update_iteration
        self._sync_cnt = 0
        # self._learn_cnt = 0
        self._learn_critic_cnt = 0
        self._learn_actor_cnt = 0
        self._verbose = verbose
        self._batch_size = batch_size
        self.use_priority = use_priority
        self.use_dist = model.value_net.use_dist
        self.use_munchausen = use_m
        self.n_step = n_step

        if self.use_priority:
            self.buffer = PriorityReplayBuffer(buffer_size, n_step=self.n_step)
        else:
            self.buffer = ReplayBuffer(buffer_size)  # off-policy

        if self.use_dist:
            assert model.value_net.num_atoms > 1
            # assert isinstance(model.value_net, CriticModelDist)
            self.v_min = model.value_net.v_min
            self.v_max = model.value_net.v_max
            self.num_atoms = model.value_net.num_atoms
            self.delta_z = (self.v_max - self.v_min) / (self.num_atoms - 1)
            self.support = torch.linspace(self.v_min, self.v_max,
                                          self.num_atoms)

        self.actor_eval = model.policy_net.to(device).train()
        self.critic_eval = model.value_net.to(device).train()

        self.actor_target = self.copy_net(self.actor_eval)
        self.critic_target = self.copy_net(self.critic_eval)

        self.actor_eval_optim = optim.Adam(self.actor_eval.parameters(),
                                           lr=actor_lr)
        self.critic_eval_optim = optim.Adam(self.critic_eval.parameters(),
                                            lr=critic_lr)

        self.criterion = nn.SmoothL1Loss(reduction='none')  # keep batch dim

        self.target_entropy = -torch.tensor(action_dim).to(device)
        self.log_alpha = torch.zeros(1, requires_grad=True, device=device)
        self.alpha_optim = optim.Adam([self.log_alpha], lr=actor_lr)
        self.alpha = self.log_alpha.exp()
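With use_dist, the critic outputs a categorical distribution over num_atoms support points between v_min and v_max. A minimal sketch of turning those atom probabilities back into a scalar Q-value follows; the method name _dist_to_q and the probs argument are assumptions.

    def _dist_to_q(self, probs):
        # Sketch: expected Q-value of a categorical distribution over self.support.
        # probs has shape (batch, num_atoms) and sums to 1 along the last dim.
        return (probs * self.support.to(probs.device)).sum(dim=-1)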
Example #14
    def __init__(
        self,
        model,
        buffer_size=1e6,
        batch_size=256,
        policy_freq=2,
        tau=0.005,
        discount=0.99,
        policy_lr=3e-4,
        value_lr=3e-4,
        learn_iteration=1,
        verbose=False,
        act_dim=None,
        n_step=1,
        use_munchausen=False,
        use_priority=False,
        use_dist_q=False,
        use_PAL=False,
    ):
        super().__init__()
        self.tau = tau
        self.gamma = discount
        self.policy_freq = policy_freq
        self.learn_iteration = learn_iteration
        self.verbose = verbose
        self.act_dim = act_dim
        self.batch_size = batch_size

        self.use_dist_q = use_dist_q
        self.use_priority = use_priority
        self.use_munchausen = use_munchausen
        self.use_PAL = use_PAL

        assert not (self.use_priority and self.use_PAL)

        self.buffer = ReplayBuffer(buffer_size)
        if self.use_priority:
            self.buffer = PriorityReplayBuffer(buffer_size,
                                               gamma=discount,
                                               n_step=n_step)

        self.actor_eval = model.policy_net.to(device).train()
        self.critic_eval = model.value_net.to(device).train()

        self.actor_target = self.copy_net(self.actor_eval)
        self.critic_target = self.copy_net(self.critic_eval)

        self.actor_eval_optim = torch.optim.Adam(self.actor_eval.parameters(),
                                                 lr=policy_lr)
        self.critic_eval_optim = torch.optim.Adam(
            self.critic_eval.parameters(), lr=value_lr)

        self.criterion = nn.SmoothL1Loss(reduction='none')  # keep batch dim
        self.target_entropy = -torch.tensor(1).to(device)
        self.log_alpha = torch.zeros(1, requires_grad=True, device=device)
        self.alpha_optim = torch.optim.Adam([self.log_alpha], lr=policy_lr)
        self.alpha = self.log_alpha.exp()

        self.eps = np.finfo(np.float32).eps.item()
        self._learn_critic_cnt = 0
        self._learn_actor_cnt = 0