def __init__(
        self,
        state_shape,
        action_size,
        hiddens,
        layer_fn,
        activation_fn=nn.ReLU,
        norm_fn=None,
        bias=True,
        out_activation=nn.Tanh):
    super().__init__()
    # hack to prevent cycle imports
    from catalyst.modules.modules import name2nn

    layer_fn = name2nn(layer_fn)
    activation_fn = name2nn(activation_fn)
    norm_fn = name2nn(norm_fn)
    out_activation = name2nn(out_activation)

    # flatten the observation shape into a single feature dimension
    state_size = reduce(lambda x, y: x * y, state_shape)

    self.feature_net = SequentialNet(
        hiddens=[state_size] + hiddens,
        layer_fn=layer_fn,
        activation_fn=activation_fn,
        norm_fn=norm_fn,
        bias=bias)
    # policy head: single linear layer with the output activation
    self.policy_net = SequentialNet(
        hiddens=[hiddens[-1], action_size],
        layer_fn=nn.Linear,
        activation_fn=out_activation,
        norm_fn=None,
        bias=True)

    inner_init = create_optimal_inner_init(nonlinearity=activation_fn)
    self.feature_net.apply(inner_init)
    self.policy_net.apply(out_init)
def __init__(
        self,
        state_shape,
        action_size,
        hiddens,
        layer_fn,
        concat_at=1,
        n_atoms=1,
        activation_fn=nn.ReLU,
        norm_fn=None,
        bias=True,
        out_activation=None):
    super().__init__()
    # hack to prevent cycle imports
    from catalyst.modules.modules import name2nn

    layer_fn = name2nn(layer_fn)
    activation_fn = name2nn(activation_fn)
    norm_fn = name2nn(norm_fn)
    out_activation = name2nn(out_activation)

    self.n_atoms = n_atoms
    # flatten the observation shape into a single feature dimension
    state_size = reduce(lambda x, y: x * y, state_shape)

    if concat_at > 0:
        # process the observation alone up to layer `concat_at`,
        # then concatenate the action and continue with the remaining layers
        hiddens_ = [state_size] + hiddens[0:concat_at]
        self.observation_net = SequentialNet(
            hiddens=hiddens_,
            layer_fn=layer_fn,
            activation_fn=activation_fn,
            norm_fn=norm_fn,
            bias=bias)
        hiddens_ = \
            [hiddens[concat_at - 1] + action_size] + hiddens[concat_at:]
        self.feature_net = SequentialNet(
            hiddens=hiddens_,
            layer_fn=layer_fn,
            activation_fn=activation_fn,
            norm_fn=norm_fn,
            bias=bias)
    else:
        # concatenate observation and action directly at the input
        self.observation_net = None
        hiddens_ = [state_size + action_size] + hiddens
        self.feature_net = SequentialNet(
            hiddens=hiddens_,
            layer_fn=layer_fn,
            activation_fn=activation_fn,
            norm_fn=norm_fn,
            bias=bias)

    # value head: one output per atom (n_atoms == 1 gives a scalar value)
    self.value_net = SequentialNet(
        hiddens=[hiddens[-1], n_atoms],
        layer_fn=nn.Linear,
        activation_fn=out_activation,
        norm_fn=None,
        bias=True)

    inner_init = create_optimal_inner_init(nonlinearity=activation_fn)
    if self.observation_net is not None:
        self.observation_net.apply(inner_init)
    self.feature_net.apply(inner_init)
    self.value_net.apply(out_init)
def __init__(
        self,
        state_shape,
        action_size,
        hiddens,
        layer_fn,
        activation_fn=nn.ReLU,
        norm_fn=None,
        bias=True,
        out_activation=nn.Sigmoid):
    super().__init__()
    # hack to prevent cycle imports
    from catalyst.modules.modules import name2nn

    self.n_action = action_size

    layer_fn = name2nn(layer_fn)
    activation_fn = name2nn(activation_fn)
    norm_fn = name2nn(norm_fn)
    out_activation = name2nn(out_activation)

    # flatten the observation shape into a single feature dimension
    state_size = reduce(lambda x, y: x * y, state_shape)

    self.feature_net = SequentialNet(
        hiddens=[state_size] + hiddens,
        layer_fn=layer_fn,
        activation_fn=activation_fn,
        norm_fn=norm_fn,
        bias=bias)
    # state embedding of size (action_size * 2), consumed by the coupling layers
    self.embedding_net = SequentialNet(
        hiddens=[hiddens[-1], action_size * 2],
        layer_fn=layer_fn,
        activation_fn=None,
        norm_fn=norm_fn,
        bias=bias)

    # two coupling layers with alternating parity
    self.coupling1 = CouplingLayer(
        action_size=action_size,
        layer_fn=layer_fn,
        activation_fn=activation_fn,
        norm_fn=None,
        bias=bias,
        parity="odd")
    self.coupling2 = CouplingLayer(
        action_size=action_size,
        layer_fn=layer_fn,
        activation_fn=activation_fn,
        norm_fn=None,
        bias=bias,
        parity="even")

    self.squasher = SquashingLayer(out_activation)

    inner_init = create_optimal_inner_init(nonlinearity=activation_fn)
    self.feature_net.apply(inner_init)
    self.embedding_net.apply(inner_init)
def __init__(
        self,
        state_shape,
        action_size,
        hiddens,
        layer_fn,
        activation_fn=nn.ReLU,
        norm_fn=None,
        bias=True,
        out_activation=nn.Tanh):
    super().__init__()
    # hack to prevent cycle imports
    from catalyst.modules.modules import name2nn

    layer_fn = name2nn(layer_fn)
    activation_fn = name2nn(activation_fn)
    norm_fn = name2nn(norm_fn)
    out_activation = name2nn(out_activation)

    # observations are sequences; only the last dimension is the feature size
    state_size = state_shape[-1]

    self.feature_net = SequentialNet(
        hiddens=[state_size] + hiddens,
        layer_fn=layer_fn,
        activation_fn=activation_fn,
        norm_fn=norm_fn,
        bias=bias)
    # attention scores: 1x1 convolution to a single channel, normalized with softmax
    self.attn = nn.Sequential(
        nn.Conv1d(
            in_channels=hiddens[-1],
            out_channels=1,
            kernel_size=1,
            bias=True),
        nn.Softmax(dim=1))
    # maps features of size hiddens[-1] * 4 back down to hiddens[-1]
    self.feature_net2 = SequentialNet(
        hiddens=[hiddens[-1] * 4, hiddens[-1]],
        layer_fn=layer_fn,
        activation_fn=activation_fn,
        norm_fn=norm_fn,
        bias=bias)
    self.policy_net = SequentialNet(
        hiddens=[hiddens[-1], action_size],
        layer_fn=nn.Linear,
        activation_fn=out_activation,
        norm_fn=None,
        bias=True)

    inner_init = create_optimal_inner_init(nonlinearity=activation_fn)
    self.feature_net.apply(inner_init)
    self.attn.apply(out_init)
    self.feature_net2.apply(inner_init)
    self.policy_net.apply(out_init)
def __init__(
        self,
        action_size,
        layer_fn,
        activation_fn=nn.ReLU,
        norm_fn=None,
        bias=True,
        parity="odd"):
    """
    Conditional affine coupling layer used in Real NVP Bijector.
    Original paper: https://arxiv.org/abs/1605.08803
    Adaptation to RL: https://arxiv.org/abs/1804.02808

    Important notes
    ---------------
    1. State embeddings are supposed to have size (action_size * 2).
    2. The scale and translation networks used in the Real NVP Bijector
       both have one hidden layer of (action_size) (activation_fn) units.
    3. Parity ("odd" or "even") determines which part of the input
       is copied and which part is transformed.
    """
    super().__init__()
    # hack to prevent cycle imports
    from catalyst.modules.modules import name2nn

    layer_fn = name2nn(layer_fn)
    activation_fn = name2nn(activation_fn)
    norm_fn = name2nn(norm_fn)

    self.parity = parity
    if self.parity == "odd":
        self.copy_size = action_size // 2
    else:
        self.copy_size = action_size - action_size // 2

    # scale network: [state embedding (action_size * 2) + copied part] ->
    # hidden layer of action_size units -> scale for the transformed part
    self.scale_prenet = SequentialNet(
        hiddens=[action_size * 2 + self.copy_size, action_size],
        layer_fn=layer_fn,
        activation_fn=activation_fn,
        norm_fn=None,
        bias=bias)
    self.scale_net = SequentialNet(
        hiddens=[action_size, action_size - self.copy_size],
        layer_fn=layer_fn,
        activation_fn=None,
        norm_fn=None,
        bias=True)

    # translation network: same structure as the scale network
    self.translation_prenet = SequentialNet(
        hiddens=[action_size * 2 + self.copy_size, action_size],
        layer_fn=layer_fn,
        activation_fn=activation_fn,
        norm_fn=None,
        bias=bias)
    self.translation_net = SequentialNet(
        hiddens=[action_size, action_size - self.copy_size],
        layer_fn=layer_fn,
        activation_fn=None,
        norm_fn=None,
        bias=True)

    inner_init = create_optimal_inner_init(nonlinearity=activation_fn)
    self.scale_prenet.apply(inner_init)
    self.scale_net.apply(out_init)
    self.translation_prenet.apply(inner_init)
    self.translation_net.apply(out_init)
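
# Minimal usage sketch (illustrative only, not part of the module API):
# it mirrors how the Real NVP policy above stacks two CouplingLayers with
# alternating parity. The action size below is an arbitrary example value,
# and nn.Linear is just one valid choice of layer_fn; the layers expect
# state embeddings of size (action_size * 2), as noted in the docstring.
if __name__ == "__main__":
    _action_size = 5
    _coupling_odd = CouplingLayer(
        action_size=_action_size,
        layer_fn=nn.Linear,
        activation_fn=nn.ReLU,
        parity="odd")
    _coupling_even = CouplingLayer(
        action_size=_action_size,
        layer_fn=nn.Linear,
        activation_fn=nn.ReLU,
        parity="even")
    # "odd" copies action_size // 2 components, "even" copies the rest
    print(_coupling_odd.copy_size, _coupling_even.copy_size)  # 2 3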