def __init__(self, ensemble_size, in_features, out_features):
        super().__init__()

        self.num_nets = ensemble_size

        self.in_features = in_features
        self.out_features = out_features

        # One set of affine parameters per ensemble member: a fixed 4-layer
        # MLP with 200 hidden units per layer.
        self.lin0_w, self.lin0_b = get_affine_params(ensemble_size,
                                                     in_features, 200)

        self.lin1_w, self.lin1_b = get_affine_params(ensemble_size, 200, 200)

        self.lin2_w, self.lin2_b = get_affine_params(ensemble_size, 200, 200)

        self.lin3_w, self.lin3_b = get_affine_params(ensemble_size, 200,
                                                     out_features)

        # Input-normalization statistics, stored as non-trainable parameters
        # and expected to be filled in from the training data.
        self.inputs_mu = nn.Parameter(torch.zeros(in_features),
                                      requires_grad=False)
        self.inputs_sigma = nn.Parameter(torch.zeros(in_features),
                                         requires_grad=False)

        # Learnable bounds on the predicted log-variance; the network output
        # is split into a mean half and a log-variance half, hence
        # out_features // 2.
        self.max_logvar = nn.Parameter(
            torch.ones(1, out_features // 2, dtype=torch.float32) / 2.0)
        self.min_logvar = nn.Parameter(
            -torch.ones(1, out_features // 2, dtype=torch.float32) * 10.0)
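# `get_affine_params` is used above but not shown in these snippets. Below is
# a minimal sketch of what it is assumed to return: one weight tensor of shape
# (ensemble_size, in_features, out_features) and one bias tensor of shape
# (ensemble_size, 1, out_features), wrapped as nn.Parameter so every ensemble
# member owns its own affine layer and can be applied with a single batched
# matmul (torch.bmm). The initialization scheme is an assumption, not the
# original code.
import numpy as np
import torch
from torch import nn


def get_affine_params(ensemble_size, in_features, out_features):
    # Gaussian init scaled by fan-in (an assumption; PETS-style code commonly
    # uses a truncated normal here); biases start at zero.
    std = 1.0 / (2.0 * np.sqrt(in_features))
    w = nn.Parameter(torch.randn(ensemble_size, in_features, out_features) * std)
    b = nn.Parameter(torch.zeros(ensemble_size, 1, out_features))
    return w, b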
# Example 2
def __init__(self, ensemble_size, in_features, out_features, hidden_size,
             num_layers, weight_decays):
        super().__init__()

        self.num_nets = ensemble_size
        self.in_features = in_features
        self.out_features = out_features
        self.num_layers = num_layers
        self.weight_decays = weight_decays

        # Configurable-depth variant: num_layers affine layers stored flat in
        # a ParameterList as alternating (weight, bias) pairs.
        self.linear_layers = nn.ParameterList()
        self.linear_layers.extend(
            get_affine_params(ensemble_size, in_features, hidden_size))
        for _ in range(num_layers - 2):
            self.linear_layers.extend(
                get_affine_params(ensemble_size, hidden_size, hidden_size))
        self.linear_layers.extend(
            get_affine_params(ensemble_size, hidden_size, out_features))

        # Input-normalization statistics, kept as non-trainable parameters.
        self.inputs_mu = nn.Parameter(torch.zeros(1, in_features),
                                      requires_grad=False)
        self.inputs_sigma = nn.Parameter(torch.zeros(1, in_features),
                                         requires_grad=False)

        # Learnable bounds on the predicted log-variance (the output is again
        # split into a mean half and a log-variance half).
        self.max_logvar = nn.Parameter(
            torch.ones(1, out_features // 2, dtype=torch.float32) / 2.0)
        self.min_logvar = nn.Parameter(
            -torch.ones(1, out_features // 2, dtype=torch.float32) * 10.0)
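# Neither snippet shows the forward pass. Below is a minimal sketch of how
# max_logvar / min_logvar are typically applied (an assumption, following the
# usual PETS-style soft clamping of the predicted log-variance): the network
# output of width out_features is split into a mean half and a log-variance
# half, and the log-variance half is squashed into the learnable bounds with
# softplus.
import torch
import torch.nn.functional as F


def bound_logvar(logvar, max_logvar, min_logvar):
    # Soft upper bound, then soft lower bound; keeps the predicted variance
    # from exploding or collapsing to zero early in training.
    logvar = max_logvar - F.softplus(max_logvar - logvar)
    logvar = min_logvar + F.softplus(logvar - min_logvar)
    return logvar


# Hypothetical usage, assuming `raw_out` has shape (..., out_features):
# mean = raw_out[..., :out_features // 2]
# logvar = bound_logvar(raw_out[..., out_features // 2:],
#                       model.max_logvar, model.min_logvar)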