def __init__(self, num_inputs, use_gru):
    super(CNNBase, self).__init__()

    # Orthogonal init with a ReLU gain for the convolutional trunk;
    # init and Flatten are helpers defined elsewhere in the repo.
    init_ = lambda m: init(m,
                           nn.init.orthogonal_,
                           lambda x: nn.init.constant_(x, 0),
                           nn.init.calculate_gain('relu'))

    self.main = nn.Sequential(
        init_(nn.Conv2d(num_inputs, 32, 8, stride=4)),
        nn.ReLU(),
        init_(nn.Conv2d(32, 64, 4, stride=2)),
        nn.ReLU(),
        init_(nn.Conv2d(64, 32, 3, stride=1)),
        nn.ReLU(),
        Flatten(),
        init_(nn.Linear(32 * 7 * 7, 512)),
        nn.ReLU()
    )

    if use_gru:
        # Optional recurrent layer on top of the 512-d CNN features.
        self.gru = nn.GRUCell(512, 512)
        nn.init.orthogonal_(self.gru.weight_ih.data)
        nn.init.orthogonal_(self.gru.weight_hh.data)
        self.gru.bias_ih.data.fill_(0)
        self.gru.bias_hh.data.fill_(0)

    # Plain orthogonal init (default gain) for the value head.
    init_ = lambda m: init(m,
                           nn.init.orthogonal_,
                           lambda x: nn.init.constant_(x, 0))

    # Receives the 512-d feature vector from the CNN and predicts the state value.
    self.critic_linear = init_(nn.Linear(512, 1))

    self.train()
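# A minimal shape-check sketch (not part of the original module): it rebuilds
# the same conv trunk with plain nn modules and assumes 84x84 Atari-style
# frames with 4 stacked channels, which is where the 32 * 7 * 7 flattened
# size comes from (84 -> 20 -> 9 -> 7 across the three convolutions).
import torch
import torch.nn as nn

_trunk = nn.Sequential(
    nn.Conv2d(4, 32, 8, stride=4),   # (84 - 8) / 4 + 1 = 20
    nn.ReLU(),
    nn.Conv2d(32, 64, 4, stride=2),  # (20 - 4) / 2 + 1 = 9
    nn.ReLU(),
    nn.Conv2d(64, 32, 3, stride=1),  # 9 - 3 + 1 = 7
    nn.ReLU(),
)
_features = _trunk(torch.zeros(1, 4, 84, 84))
assert _features.shape == (1, 32, 7, 7)  # flattens to 32 * 7 * 7 = 1568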
def __init__(self, num_inputs, num_outputs):
    super(DiagGaussian, self).__init__()

    init_ = lambda m: init(m,
                           init_normc_,
                           lambda x: nn.init.constant_(x, 0))

    # State-dependent mean; state-independent log-std stored as a learnable
    # bias (AddBias is a helper defined elsewhere in the repo).
    self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))
    self.logstd = AddBias(torch.zeros(num_outputs))
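# A minimal sketch of how a diagonal-Gaussian head is typically used
# (assumptions: _AddBias below is a hypothetical stand-in for the repo's
# AddBias helper, and x is a batch of 64-d feature vectors). The mean is
# state-dependent; the log-std is a single learnable vector shared across
# states.
import torch
import torch.nn as nn

class _AddBias(nn.Module):
    # Hypothetical equivalent of the repo's AddBias: adds a learnable bias.
    def __init__(self, bias):
        super(_AddBias, self).__init__()
        self._bias = nn.Parameter(bias)

    def forward(self, x):
        return x + self._bias

fc_mean = nn.Linear(64, 6)
logstd = _AddBias(torch.zeros(6))

x = torch.zeros(8, 64)                            # batch of 8 feature vectors
action_mean = fc_mean(x)
action_logstd = logstd(torch.zeros_like(action_mean))
dist = torch.distributions.Normal(action_mean, action_logstd.exp())
actions = dist.sample()                           # shape (8, 6)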
def __init__(self, num_inputs, num_outputs):
    super(Categorical, self).__init__()

    # Small gain (0.01) keeps the initial logits near zero, so the starting
    # policy is close to uniform over the discrete actions.
    init_ = lambda m: init(m,
                           nn.init.orthogonal_,
                           lambda x: nn.init.constant_(x, 0),
                           gain=0.01)

    self.linear = init_(nn.Linear(num_inputs, num_outputs))
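# A minimal usage sketch (assumption: x is a batch of 512-d feature vectors
# from one of the bases, and there are 4 discrete actions). The linear layer
# produces logits that parameterize a categorical action distribution.
import torch
import torch.nn as nn

linear = nn.Linear(512, 4)
x = torch.zeros(8, 512)
dist = torch.distributions.Categorical(logits=linear(x))
actions = dist.sample()                           # shape (8,)
log_probs = dist.log_prob(actions)                # used in the policy-gradient loss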
def __init__(self, num_inputs):
    super(MLPBase, self).__init__()

    init_ = lambda m: init(m,
                           init_normc_,
                           lambda x: nn.init.constant_(x, 0))

    # Separate two-layer tanh towers for the policy and the value function.
    self.actor = nn.Sequential(
        init_(nn.Linear(num_inputs, 64)),
        nn.Tanh(),
        init_(nn.Linear(64, 64)),
        nn.Tanh()
    )

    self.critic = nn.Sequential(
        init_(nn.Linear(num_inputs, 64)),
        nn.Tanh(),
        init_(nn.Linear(64, 64)),
        nn.Tanh()
    )

    # Maps the 64-d critic features to a scalar state-value estimate.
    self.critic_linear = init_(nn.Linear(64, 1))

    self.train()
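# A minimal forward-pass sketch for the MLP base (assumptions: obs is a batch
# of flat observations and num_inputs = 17 is an arbitrary example size).
# The actor tower feeds a distribution head such as Categorical or
# DiagGaussian above, while the critic tower feeds critic_linear to produce
# one value per state.
import torch
import torch.nn as nn

num_inputs = 17
actor = nn.Sequential(nn.Linear(num_inputs, 64), nn.Tanh(),
                      nn.Linear(64, 64), nn.Tanh())
critic = nn.Sequential(nn.Linear(num_inputs, 64), nn.Tanh(),
                       nn.Linear(64, 64), nn.Tanh())
critic_linear = nn.Linear(64, 1)

obs = torch.zeros(8, num_inputs)
hidden_actor = actor(obs)                         # fed to the distribution head
value = critic_linear(critic(obs))                # shape (8, 1), one value per state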