    def __init__(self,
                 num_inputs,
                 recurrent=False,
                 recurrent_type="GRU",
                 hidden_size=64):
        super(MLPBase, self).__init__(recurrent, num_inputs, recurrent_type,
                                      hidden_size)

        if recurrent:
            num_inputs = hidden_size

        init_ = lambda m: init(m, nn.init.orthogonal_,
                               lambda x: nn.init.constant_(x, 0), np.sqrt(2))

        self.actor = nn.Sequential(init_(nn.Linear(num_inputs, hidden_size)),
                                   nn.Tanh(),
                                   init_(nn.Linear(hidden_size, hidden_size)),
                                   nn.Tanh())

        self.critic = nn.Sequential(init_(nn.Linear(num_inputs, hidden_size)),
                                    nn.Tanh(),
                                    init_(nn.Linear(hidden_size, hidden_size)),
                                    nn.Tanh())

        self.critic_linear = init_(nn.Linear(hidden_size, 1))

        self.train()
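    # A hedged sketch of how these actor/critic trunks are typically consumed in a forward
    # pass (non-recurrent case; the exact signature and return layout are assumptions, not
    # taken verbatim from the original class):
    def forward(self, inputs):
        hidden_critic = self.critic(inputs)
        hidden_actor = self.actor(inputs)
        # value estimate from the critic trunk, plus actor features for a downstream policy head
        return self.critic_linear(hidden_critic), hidden_actor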
Example #2
    def __init__(self, num_inputs, recurrent=False, hidden_size=512):
        super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)

        init_ = lambda m: init(m, nn.init.orthogonal_,
                               lambda x: nn.init.constant_(x, 0),
                               nn.init.calculate_gain('relu'))

        self.main = nn.Sequential(
            init_(nn.Conv2d(num_inputs, 32, 8, stride=4)), nn.ReLU(),
            init_(nn.Conv2d(32, 64, 4, stride=2)), nn.ReLU(),
            init_(nn.Conv2d(64, 32, 3, stride=1)), nn.ReLU(), Flatten(),
            init_(nn.Linear(32 * 7 * 7, hidden_size)), nn.ReLU())

        init_ = lambda m: init(m, nn.init.orthogonal_,
                               lambda x: nn.init.constant_(x, 0))

        self.critic_linear = init_(nn.Linear(hidden_size, 1))

        self.train()
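# The CNNBase trunk above inserts a `Flatten()` module between the conv stack and the final
# Linear layer. A minimal sketch of such a module, assuming it simply collapses the feature
# maps to (batch, features):
import torch.nn as nn

class Flatten(nn.Module):
    def forward(self, x):
        # (N, C, H, W) -> (N, C * H * W), e.g. (N, 32, 7, 7) -> (N, 32 * 7 * 7)
        return x.view(x.size(0), -1)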
    def __init__(self, num_inputs, num_outputs):
        super(Categorical, self).__init__()

        init_ = lambda m: init(m,
                               nn.init.orthogonal_,
                               lambda x: nn.init.constant_(x, 0),
                               gain=0.01)

        self.linear = init_(nn.Linear(num_inputs, num_outputs))
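    # A hedged forward sketch for this head: project the features to logits and wrap them in a
    # categorical distribution. torch.distributions.Categorical is used here as a stand-in for
    # whatever distribution wrapper the original code returns:
    def forward(self, x):
        return torch.distributions.Categorical(logits=self.linear(x))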
    def init_weight(self, layer):
        init_ = lambda m: init(m, nn.init.orthogonal_,
                               lambda x: nn.init.constant_(x, 0),
                               nn.init.calculate_gain('relu'))
        if isinstance(layer, (nn.Conv2d, nn.Linear)):
            return init_(layer)
        elif isinstance(layer, nn.BatchNorm2d):
            # affine BatchNorm: weights to 1, biases to 0; guard against affine=False,
            # where weight and bias are None (hasattr would not catch that case)
            if layer.weight is not None:
                layer.weight.data.fill_(1)
            if layer.bias is not None:
                layer.bias.data.zero_()
            return layer
        return layer  # leave layers of any other type untouched
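    # One plausible way to use the helper above (an illustration, not from the original code):
    # nn.Module.apply visits every submodule, so each Conv2d/Linear/BatchNorm2d branch is hit
    # once per matching layer. `build_trunk` and its layer sizes are hypothetical.
    def build_trunk(self, num_inputs, hidden_size):
        trunk = nn.Sequential(
            nn.Conv2d(num_inputs, hidden_size, 3, stride=1),
            nn.BatchNorm2d(hidden_size),
            nn.ReLU())
        trunk.apply(self.init_weight)
        return trunk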
    def __init__(self, num_inputs, num_outputs):
        super(DiagGaussian, self).__init__()

        init_ = lambda m: init(m, nn.init.orthogonal_,
                               lambda x: nn.init.constant_(x, 0))

        self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))
        desired_init_log_std = -0.693471  # exp(-0.693471) ~= 0.5, i.e. an initial std of ~0.5
        self.logstd = AddBias(
            desired_init_log_std *
            torch.ones(num_outputs))  # log-std is a learned bias, so sigma is not state-dependent
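# The DiagGaussian head above stores its log standard deviation in an `AddBias` module, so the
# spread of the policy is learned but not state-dependent. A minimal sketch consistent with how
# it is constructed above (an assumption about its internals, not the verbatim original):
import torch
import torch.nn as nn

class AddBias(nn.Module):
    def __init__(self, bias):
        super(AddBias, self).__init__()
        # the initial bias (here: desired_init_log_std * ones(num_outputs)) becomes learnable
        self._bias = nn.Parameter(bias.clone())

    def forward(self, x):
        # broadcast-add the learned bias; called with zeros shaped like the action mean,
        # this returns the current log-std for every action dimension
        return x + self._bias

# In a typical forward pass the mean would come from self.fc_mean(x) and the log-std from
# self.logstd(torch.zeros_like(mean)), giving a diagonal Gaussian with a single global std.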
def init_(m):
    return init(m, nn.init.orthogonal_,
                lambda x: nn.init.constant_(x, 0))
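# Every init_ wrapper in this listing delegates to a shared init(module, weight_init, bias_init,
# gain) helper. A minimal sketch inferred from how it is called above (an assumption, not the
# verbatim original):
import torch
import torch.nn as nn

def init(module, weight_init, bias_init, gain=1):
    # initialize the weights in place (e.g. orthogonally, scaled by gain) and the biases
    # (e.g. to zero), then return the module so the call can be nested inside nn.Sequential(...)
    weight_init(module.weight.data, gain=gain)
    bias_init(module.bias.data)
    return module

# Quick illustration: an orthogonally initialized square Linear layer satisfies W @ W.T ~= I
# and gets a zero bias.
layer = init(nn.Linear(16, 16), nn.init.orthogonal_,
             lambda x: nn.init.constant_(x, 0))
w = layer.weight.data
print(torch.allclose(w @ w.t(), torch.eye(16), atol=1e-5))  # True
print(layer.bias.abs().sum().item())                        # 0.0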