Beispiel #1
0
    def __init__(
        self,
        configs: ConfigDict,
        hidden_activation: Callable = F.relu,
    ):
        """Initialize the IQN quantile head on top of the base MLP."""
        # Noisy layers explore via their own parameter noise, so they skip
        # the uniform weight init; plain nn.Linear layers get it.
        use_noisy = configs.use_noisy_net
        linear_layer = (
            NoisyLinearConstructor(configs.std_init) if use_noisy else nn.Linear
        )
        init_fn: Callable = identity if use_noisy else init_layer_uniform
        super(IQNMLP, self).__init__(
            configs=configs,
            hidden_activation=hidden_activation,
            linear_layer=linear_layer,
            init_fn=init_fn,
        )

        self.n_quantiles = configs.n_quantile_samples
        self.quantile_embedding_dim = configs.quantile_embedding_dim
        self.input_size = configs.input_size
        self.output_size = configs.output_size

        # Embedding layer mapping quantile samples into the input feature space.
        quantile_fc = self.linear_layer(
            self.quantile_embedding_dim, self.input_size
        )
        self.quantile_fc_layer = init_fn(quantile_fc)
Beispiel #2
0
    def __init__(
        self,
        configs: ConfigDict,
        hidden_activation: Callable = F.relu,
    ):
        """Initialize the dueling C51 head (separate advantage/value streams)."""
        # Noisy layers explore via their own parameter noise, so they skip
        # the uniform weight init; plain nn.Linear layers get it.
        use_noisy = configs.use_noisy_net
        linear_layer = (
            NoisyLinearConstructor(configs.std_init) if use_noisy else nn.Linear
        )
        init_fn: Callable = identity if use_noisy else init_layer_uniform
        # use_output_layer=False: the dueling streams below replace the
        # base network's single output layer.
        super(C51DuelingMLP, self).__init__(
            configs=configs,
            hidden_activation=hidden_activation,
            linear_layer=linear_layer,
            use_output_layer=False,
        )
        in_size = configs.hidden_sizes[-1]
        self.action_size = configs.output_size
        self.atom_size = configs.atom_size
        # One distribution of atom_size atoms per action.
        self.output_size = configs.output_size * configs.atom_size
        self.v_min, self.v_max = configs.v_min, configs.v_max

        # Advantage stream; only the final layer gets the uniform init.
        self.advantage_hidden_layer = self.linear_layer(in_size, in_size)
        self.advantage_layer = init_fn(self.linear_layer(in_size, self.output_size))

        # Value stream; only the final layer gets the uniform init.
        self.value_hidden_layer = self.linear_layer(in_size, in_size)
        self.value_layer = init_fn(self.linear_layer(in_size, self.atom_size))
Beispiel #3
0
def get_fc_model(
    cfg: ConfigDict,
    input_size: int,
    output_size: int,
    hidden_sizes: List[int],
):
    """Build the fully-connected Q-model selected by ``cfg.use_dist_q``.

    Returns a C51DuelingMLP, IQNMLP, or plain DuelingMLP moved to ``device``.
    NOTE: when noisy nets are enabled this mutates ``cfg`` (zeroes the
    epsilon schedule), since NoisyNet supplies its own exploration.
    """
    if cfg.use_noisy_net:
        linear_layer = NoisyLinearConstructor(cfg.std_init)
        init_fn: Callable = identity
        # Disable epsilon-greedy: noisy layers explore on their own.
        cfg.max_epsilon = 0.0
        cfg.min_epsilon = 0.0
    else:
        linear_layer = nn.Linear
        init_fn = init_layer_uniform

    dist_q = cfg.use_dist_q
    if dist_q == "C51":
        fc_model = C51DuelingMLP(
            input_size=input_size,
            action_size=output_size,
            hidden_sizes=hidden_sizes,
            v_min=cfg.v_min,
            v_max=cfg.v_max,
            atom_size=cfg.atoms,
            linear_layer=linear_layer,
            init_fn=init_fn,
        )
    elif dist_q == "IQN":
        fc_model = IQNMLP(
            input_size=input_size,
            output_size=output_size,
            hidden_sizes=hidden_sizes,
            n_quantiles=cfg.n_quantile_samples,
            quantile_embedding_dim=cfg.quantile_embedding_dim,
            linear_layer=linear_layer,
            init_fn=init_fn,
        )
    else:
        # Default: non-distributional dueling network.
        fc_model = DuelingMLP(
            input_size=input_size,
            output_size=output_size,
            hidden_sizes=hidden_sizes,
            linear_layer=linear_layer,
            init_fn=init_fn,
        )

    return fc_model.to(device)