Example #1
 def init_nets(self):
     '''Initialize the neural network used to learn the Q function from the spec'''
     if 'Recurrent' in self.net_spec['type']:
         self.net_spec.update(seq_len=self.net_spec['seq_len'])
     in_dim = self.body.state_dim
     out_dim = net_util.get_out_dim(self.body)
     NetClass = getattr(net, self.net_spec['type'])
     self.net = NetClass(self.net_spec, in_dim, out_dim)
     self.net_names = ['net']
     self.post_init_nets()
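For reference, here is a minimal sketch of the kind of net_spec Example #1 reads, with hypothetical values. Only 'type' and 'seq_len' are used directly by the snippet above; 'optim_spec' and 'lr_scheduler_spec' are consumed later by the net_util helpers shown in Examples #6 and #9, and the remaining keys are illustrative assumptions, not required fields.

net_spec = {
    'type': 'RecurrentNet',  # 'Recurrent' in the type triggers the seq_len update above
    'seq_len': 4,            # number of stacked states fed to the recurrent net
    'hid_layers': [64],      # assumed hidden-layer key, for illustration only
    'hid_layers_activation': 'relu',
    'optim_spec': {'name': 'Adam', 'lr': 0.01},
    'lr_scheduler_spec': None,
}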
Example #2
 def init_nets(self):
     '''Initialize the neural network used to learn the Q function from the spec'''
     if self.algorithm_spec['name'] == 'VanillaDQN':
         assert all(k not in self.net_spec for k in ['update_type', 'update_frequency', 'polyak_coef']), 'Network update not available for VanillaDQN; use DQN.'
     in_dim = self.body.state_dim
     out_dim = net_util.get_out_dim(self.body)
     NetClass = getattr(net, self.net_spec['type'])
     self.net = NetClass(self.net_spec, in_dim, out_dim)
     self.net_names = ['net']
     self.post_init_nets()
Example #3
    def init_nets(self, global_nets=None):
        '''
        Initialize the neural networks used to learn the actor and critic from the spec
        Below we automatically select an appropriate net based on three different conditions
        1. If the action space is discrete or continuous action
            - Networks for continuous action spaces have two heads and return two values, the first is a tensor containing the mean of the action policy, the second is a tensor containing the std deviation of the action policy. The distribution is assumed to be a Gaussian (Normal) distribution.
            - Networks for discrete action spaces have a single head and return the logits for a categorical probability distribution over the discrete actions
        2. If the actor and critic are separate or share weights
            - If the networks share weights then the single network returns a list.
            - Continuous action spaces: The return list contains 3 elements: The first element contains the mean output for the actor (policy), the second element the std dev of the policy, and the third element is the state-value estimated by the network.
            - Discrete action spaces: The return list contains 2 elements: the first is a tensor containing the logits for a categorical probability distribution over the actions, and the second is the state-value estimated by the network.
        3. If the network type is feedforward, convolutional, or recurrent
            - Feedforward and convolutional networks take a single state as input and require an OnPolicyReplay or OnPolicyBatchReplay memory
            - Recurrent networks take n states as input and require env spec "frame_op": "concat", "frame_op_len": seq_len
        '''
        assert 'shared' in self.net_spec, 'Specify "shared" for ActorCritic network in net_spec'
        self.shared = self.net_spec['shared']

        # create actor/critic specific specs
        actor_net_spec = self.net_spec.copy()
        critic_net_spec = self.net_spec.copy()
        for k in self.net_spec:
            if 'actor_' in k:
                actor_net_spec[k.replace('actor_', '')] = actor_net_spec.pop(k)
                critic_net_spec.pop(k)
            if 'critic_' in k:
                critic_net_spec[k.replace('critic_',
                                          '')] = critic_net_spec.pop(k)
                actor_net_spec.pop(k)
        if critic_net_spec['use_same_optim']:
            critic_net_spec = actor_net_spec

        in_dim = self.body.state_dim
        out_dim = net_util.get_out_dim(self.body, add_critic=self.shared)
        # main actor network; out_dim also includes the critic output when self.shared == True
        NetClass = getattr(net, actor_net_spec['type'])
        self.net = NetClass(actor_net_spec, in_dim, out_dim)
        self.net_names = ['net']
        if not self.shared:  # add separate network for critic
            critic_out_dim = 1
            CriticNetClass = getattr(net, critic_net_spec['type'])
            self.critic_net = CriticNetClass(critic_net_spec, in_dim,
                                             critic_out_dim)
            self.net_names.append('critic_net')
        # init net optimizer and its lr scheduler
        self.optim = net_util.get_optim(self.net, self.net.optim_spec)
        self.lr_scheduler = net_util.get_lr_scheduler(
            self.optim, self.net.lr_scheduler_spec)
        if not self.shared:
            self.critic_optim = net_util.get_optim(self.critic_net,
                                                   self.critic_net.optim_spec)
            self.critic_lr_scheduler = net_util.get_lr_scheduler(
                self.critic_optim, self.critic_net.lr_scheduler_spec)
        net_util.set_global_nets(self, global_nets)
        self.end_init_nets()
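The actor_/critic_ prefix splitting is the least obvious part of Example #3, so here is the same loop run standalone on a hypothetical net_spec. Only the 'shared', 'use_same_optim' and prefix conventions come from the code above; the concrete keys and values are made up.

net_spec = {
    'type': 'MLPNet',
    'shared': False,
    'use_same_optim': False,
    'actor_optim_spec': {'name': 'Adam', 'lr': 0.001},
    'critic_optim_spec': {'name': 'Adam', 'lr': 0.01},
}
actor_net_spec = net_spec.copy()
critic_net_spec = net_spec.copy()
for k in net_spec:
    if 'actor_' in k:
        # move the key (unprefixed) into the actor spec, drop it from the critic spec
        actor_net_spec[k.replace('actor_', '')] = actor_net_spec.pop(k)
        critic_net_spec.pop(k)
    if 'critic_' in k:
        critic_net_spec[k.replace('critic_', '')] = critic_net_spec.pop(k)
        actor_net_spec.pop(k)
print(actor_net_spec['optim_spec'])   # {'name': 'Adam', 'lr': 0.001}
print(critic_net_spec['optim_spec'])  # {'name': 'Adam', 'lr': 0.01}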
Example #4
 def init_nets(self):
     '''Initialize networks'''
     if self.algorithm_spec['name'] == 'DQNBase':
         assert all(k not in self.net_spec for k in ['update_type', 'update_frequency', 'polyak_coef']), 'Network update not available for DQNBase; use DQN.'
     in_dim = self.body.state_dim
     out_dim = net_util.get_out_dim(self.body)
     NetClass = getattr(net, self.net_spec['type'])
     self.net = NetClass(self.net_spec, in_dim, out_dim)
     self.target_net = NetClass(self.net_spec, in_dim, out_dim)
     self.net_names = ['net', 'target_net']
     self.post_init_nets()
     self.online_net = self.target_net
     self.eval_net = self.target_net
Example #5
 def init_nets(self):
     '''
     Initialize the neural network used to learn the policy function from the spec
     Below we automatically select an appropriate net for a discrete or continuous action space if the net type is of the form 'MLPNet'. Otherwise the correct network type is assumed to be specified in the spec.
     Networks for continuous action spaces have two heads and return two values, the first is a tensor containing the mean of the action policy, the second is a tensor containing the std deviation of the action policy. The distribution is assumed to be a Gaussian (Normal) distribution.
     Networks for discrete action spaces have a single head and return the logits for a categorical probability distribution over the discrete actions
     '''
     in_dim = self.body.state_dim
     out_dim = net_util.get_out_dim(self.body)
     NetClass = getattr(net, self.net_spec['type'])
     self.net = NetClass(self.net_spec, in_dim, out_dim)
     self.net_names = ['net']
     self.post_init_nets()
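As a small self-contained illustration of the two output conventions described in the docstring of Example #5 (mean and std-dev heads for continuous actions, logits for discrete actions), using made-up tensors in place of real network outputs:

import torch
from torch import distributions

# continuous action space: the net is described as returning a mean and a std-dev tensor
mean = torch.zeros(3)
std = torch.ones(3)
continuous_pd = distributions.Normal(mean, std)  # assumed Gaussian, as stated in the docstring

# discrete action space: the net is described as returning logits over the actions
logits = torch.tensor([0.1, 0.5, -0.2])
discrete_pd = distributions.Categorical(logits=logits)

print(continuous_pd.sample().shape, discrete_pd.sample())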
Example #6
 def init_nets(self, global_nets=None):
     '''Initialize the neural network used to learn the Q function from the spec'''
     if 'Recurrent' in self.net_spec['type']:
         self.net_spec.update(seq_len=self.net_spec['seq_len'])
     in_dim = self.body.state_dim
     out_dim = net_util.get_out_dim(self.body)
     NetClass = getattr(net, self.net_spec['type'])
     self.net = NetClass(self.net_spec, in_dim, out_dim)
     self.net_names = ['net']
     # init net optimizer and its lr scheduler
     self.optim = net_util.get_optim(self.net, self.net.optim_spec)
     self.lr_scheduler = net_util.get_lr_scheduler(self.optim, self.net.lr_scheduler_spec)
     net_util.set_global_nets(self, global_nets)
     self.end_init_nets()
Example #7
 def init_nets(self, global_nets=None):
     '''Initialize the neural network used to learn the Q function from the spec'''
     if 'Recurrent' in self.net_spec['type']:
         self.net_spec.update(seq_len=self.net_spec['seq_len'])
     if global_nets is None:
         in_dim = self.body.state_dim
         out_dim = net_util.get_out_dim(self.body)
         NetClass = getattr(net, self.net_spec['type'])
         self.net = NetClass(self.net_spec, in_dim, out_dim)
         self.net_names = ['net']
     else:
         util.set_attr(self, global_nets)
         self.net_names = list(global_nets.keys())
     self.post_init_nets()
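Example #7 adds a second code path for pre-built global networks. Below is a minimal standalone sketch of that else-branch, assuming util.set_attr simply attaches each dict entry as an attribute; DummyAlgo and shared_net are hypothetical stand-ins, not library code.

class DummyAlgo:
    pass

shared_net = object()              # stands in for a previously built network
global_nets = {'net': shared_net}  # e.g. networks shared across asynchronous workers

algo = DummyAlgo()
for attr, val in global_nets.items():  # what util.set_attr is expected to do here
    setattr(algo, attr, val)
algo.net_names = list(global_nets.keys())
print(algo.net is shared_net)  # True: the existing network is reused, not rebuilt
print(algo.net_names)          # ['net']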
Example #8
    def init_nets(self, global_nets=None):
        '''
        Networks: net (actor/policy), q1_net, target_q1_net, q2_net, target_q2_net
        All networks are separate, and have the same hidden layer architectures and optim specs, so tuning is minimal
        '''
        self.shared = False  # SAC does not share networks
        NetClass = getattr(net, self.net_spec['type'])
        # main actor network
        self.net = NetClass(self.net_spec, self.body.state_dim,
                            net_util.get_out_dim(self.body))
        self.net_names = ['net']
        # two critic Q-networks to mitigate positive bias in q_loss and speed up training, uses q_net.py with prefix Q
        QNetClass = getattr(net, 'Q' + self.net_spec['type'])
        q_in_dim = [self.body.state_dim, self.body.action_dim]
        self.q1_net = QNetClass(self.net_spec, q_in_dim, 1)
        self.target_q1_net = QNetClass(self.net_spec, q_in_dim, 1)
        self.q2_net = QNetClass(self.net_spec, q_in_dim, 1)
        self.target_q2_net = QNetClass(self.net_spec, q_in_dim, 1)
        self.net_names += [
            'q1_net', 'target_q1_net', 'q2_net', 'target_q2_net'
        ]
        net_util.copy(self.q1_net, self.target_q1_net)
        net_util.copy(self.q2_net, self.target_q2_net)
        # temperature variable to be learned, and its target entropy
        self.log_alpha = torch.zeros(1,
                                     requires_grad=True,
                                     device=self.net.device)
        self.alpha = self.log_alpha.detach().exp()
        if self.body.is_discrete:
            self.target_entropy = -self.body.action_space.n
        else:
            self.target_entropy = -np.prod(self.body.action_space.shape)

        # init net optimizer and its lr scheduler
        self.optim = net_util.get_optim(self.net, self.net.optim_spec)
        self.lr_scheduler = net_util.get_lr_scheduler(
            self.optim, self.net.lr_scheduler_spec)
        self.q1_optim = net_util.get_optim(self.q1_net, self.q1_net.optim_spec)
        self.q1_lr_scheduler = net_util.get_lr_scheduler(
            self.q1_optim, self.q1_net.lr_scheduler_spec)
        self.q2_optim = net_util.get_optim(self.q2_net, self.q2_net.optim_spec)
        self.q2_lr_scheduler = net_util.get_lr_scheduler(
            self.q2_optim, self.q2_net.lr_scheduler_spec)
        self.alpha_optim = net_util.get_optim(self.log_alpha,
                                              self.net.optim_spec)
        self.alpha_lr_scheduler = net_util.get_lr_scheduler(
            self.alpha_optim, self.net.lr_scheduler_spec)
        net_util.set_global_nets(self, global_nets)
        self.post_init_nets()
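To isolate the temperature setup in Example #8, here is a short self-contained sketch of the log_alpha and target-entropy logic, using made-up action-space sizes:

import numpy as np
import torch

log_alpha = torch.zeros(1, requires_grad=True)  # learned log-temperature
alpha = log_alpha.detach().exp()                # exp(0) = 1.0 initially

# target entropy heuristic from the code above: -|A| for discrete, -dim(A) for continuous
n_discrete_actions = 4                          # hypothetical 4-action discrete space
target_entropy_discrete = -n_discrete_actions   # -4

continuous_action_shape = (3,)                  # hypothetical 3-dim continuous space
target_entropy_continuous = -np.prod(continuous_action_shape)  # -3

print(alpha.item(), target_entropy_discrete, target_entropy_continuous)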
Example #9
 def init_nets(self, global_nets=None):
     '''Initialize the neural network used to learn the Q function from the spec'''
     if self.algorithm_spec['name'] == 'VanillaDQN':
         assert all(
             k not in self.net_spec
             for k in ['update_type', 'update_frequency', 'polyak_coef']
         ), 'Network update not available for VanillaDQN; use DQN.'
     in_dim = self.body.state_dim
     out_dim = net_util.get_out_dim(self.body)
     NetClass = getattr(net, self.net_spec['type'])
     self.net = NetClass(self.net_spec, in_dim, out_dim)
     self.net_names = ['net']
     # init net optimizer and its lr scheduler
     self.optim = net_util.get_optim(self.net, self.net.optim_spec)
     self.lr_scheduler = net_util.get_lr_scheduler(
         self.optim, self.net.lr_scheduler_spec)
     net_util.set_global_nets(self, global_nets)
     self.post_init_nets()
Example #10
 def init_nets(self, global_nets=None):
     '''Initialize networks'''
     if self.algorithm_spec['name'] == 'DQNBase':
         assert all(
             k not in self.net_spec
             for k in ['update_type', 'update_frequency', 'polyak_coef']
         ), 'Network update not available for DQNBase; use DQN.'
     in_dim = self.body.state_dim
     out_dim = net_util.get_out_dim(self.body)
     NetClass = getattr(net, self.net_spec['type'])
     self.net = NetClass(self.net_spec, in_dim, out_dim)
     self.target_net = NetClass(self.net_spec, in_dim, out_dim)
     self.net_names = ['net', 'target_net']
     # init net optimizer and its lr scheduler
     self.optim = net_util.get_optim(self.net, self.net.optim_spec)
     self.lr_scheduler = net_util.get_lr_scheduler(
         self.optim, self.net.lr_scheduler_spec)
     net_util.set_global_nets(self, global_nets)
     self.post_init_nets()
     self.online_net = self.target_net
     self.eval_net = self.target_net
Example #11
    def init_nets(self, global_nets=None):
        '''
        Networks: net (actor/policy), critic (value), target_critic, q1_net, q2_net
        All networks are separate, and have the same hidden layer architectures and optim specs, so tuning is minimal
        '''
        self.shared = False  # SAC does not share networks
        in_dim = self.body.state_dim
        out_dim = net_util.get_out_dim(self.body)
        NetClass = getattr(net, self.net_spec['type'])
        # main actor network
        self.net = NetClass(self.net_spec, in_dim, out_dim)
        self.net_names = ['net']
        # critic network and its target network
        val_out_dim = 1
        self.critic_net = NetClass(self.net_spec, in_dim, val_out_dim)
        self.target_critic_net = NetClass(self.net_spec, in_dim, val_out_dim)
        self.net_names += ['critic_net', 'target_critic_net']
        # two Q-networks to mitigate positive bias in q_loss and speed up training
        q_in_dim = in_dim + self.body.action_dim  # NOTE concat s, a for now
        self.q1_net = NetClass(self.net_spec, q_in_dim, val_out_dim)
        self.q2_net = NetClass(self.net_spec, q_in_dim, val_out_dim)
        self.net_names += ['q1_net', 'q2_net']

        # init net optimizer and its lr scheduler
        self.optim = net_util.get_optim(self.net, self.net.optim_spec)
        self.lr_scheduler = net_util.get_lr_scheduler(
            self.optim, self.net.lr_scheduler_spec)
        self.critic_optim = net_util.get_optim(self.critic_net,
                                               self.critic_net.optim_spec)
        self.critic_lr_scheduler = net_util.get_lr_scheduler(
            self.critic_optim, self.critic_net.lr_scheduler_spec)
        self.q1_optim = net_util.get_optim(self.q1_net, self.q1_net.optim_spec)
        self.q1_lr_scheduler = net_util.get_lr_scheduler(
            self.q1_optim, self.q1_net.lr_scheduler_spec)
        self.q2_optim = net_util.get_optim(self.q2_net, self.q2_net.optim_spec)
        self.q2_lr_scheduler = net_util.get_lr_scheduler(
            self.q2_optim, self.q2_net.lr_scheduler_spec)
        net_util.set_global_nets(self, global_nets)
        self.post_init_nets()
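Finally, a brief sketch of the 'NOTE concat s, a' comment in Example #11: the Q-networks receive the state and action concatenated along the feature axis, so their input width is state_dim + action_dim. The dimensions below are hypothetical.

import torch

state_dim, action_dim = 8, 2
q_in_dim = state_dim + action_dim     # 10, matching the computation in the code above

state = torch.randn(32, state_dim)    # a batch of 32 states
action = torch.randn(32, action_dim)  # the matching batch of actions
q_input = torch.cat([state, action], dim=-1)
print(q_input.shape)                  # torch.Size([32, 10])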