Example #1
def test_bias_false(self):
    # with bias disabled, the parameter should not be allocated at all
    lc = LocallyConnected(1, 1, 1, bias=False)
    assert lc.bias is None
Example #2
    def __init__(
        self,
        n_features: int,
        dist_types: List[DistTypeBase],
        use_bias: bool = False,
        hidden_layer_units: Iterable[int] = (0,),
        bounds: Optional[List[Tuple[int, int]]] = None,
        lasso_beta: float = 0.0,
        ridge_beta: float = 0.0,
        nonlinear_clamp: float = 1e-2,
    ):
        """
        Constructor for NOTEARS MLP class.

        Args:
            n_features: number of input features.
            dist_types: list of data type objects used to fit the NOTEARS algorithm.
            use_bias: True to add the intercept to the model.
            hidden_layer_units: An iterable whose length determines the number of
                hidden layers and whose values set the number of nodes used in
                each layer, in order.
            bounds: bound constraint for each parameter.
            lasso_beta: Constant that multiplies the lasso term (l1 regularisation).
                It only applies to the dag_layer weight.
            ridge_beta: Constant that multiplies the ridge term (l2 regularisation).
                It applies to both dag_layer and loc_lin_layer weights.
            nonlinear_clamp: Value used to soft-clamp the nonlinear layer
                normalisation; prevents the weights from being scaled above
                1 / nonlinear_clamp.
        """
        super().__init__()
        self.device = torch.device("cpu")
        self.lasso_beta = lasso_beta
        self.ridge_beta = ridge_beta
        self.nonlinear_clamp = nonlinear_clamp

        # hidden_layer_units=(0,) is the sentinel for "no hidden layers";
        # otherwise cast the iterable to a list so it can be concatenated.
        self.dims = (
            [n_features] + list(hidden_layer_units) + [1]
            if hidden_layer_units[0]
            else [n_features, 1]
        )

        # dag_layer: initial linear layer
        self.dag_layer = nn.Linear(self.dims[0],
                                   self.dims[0] * self.dims[1],
                                   bias=use_bias).float()
        nn.init.zeros_(self.dag_layer.weight)
        if use_bias:
            nn.init.zeros_(self.dag_layer.bias)

        # loc_lin_layer: one locally connected layer per consecutive pair of dims
        layers = [
            LocallyConnected(
                self.dims[0], input_features, output_features, bias=use_bias
            ).float()
            for input_features, output_features in zip(self.dims[1:-1], self.dims[2:])
        ]
        self._loc_lin_layer_weights = nn.ModuleList(layers)
        for layer in layers:
            layer.reset_parameters()

        # set the bounds as an attribute on the weights object
        self.dag_layer.weight.bounds = bounds
        # set the dist types
        self.dist_types = dist_types
        # adjacency matrix and mean-effect matrix, populated after fitting
        self.adj = None
        self.adj_mean_effect = None
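
The dims list built in the constructor above drives every layer shape: dag_layer maps the dims[0] input features onto dims[0] * dims[1] hidden units, and each consecutive pair of the remaining dims becomes a LocallyConnected layer. A minimal illustration of that bookkeeping follows; the feature and unit counts are arbitrary, not from the source.

    # Illustrative only: how self.dims expands for two hidden_layer_units settings.
    n_features = 5

    dims = [n_features] + list((10,)) + [1]  # hidden_layer_units=(10,) -> [5, 10, 1]
    print(list(zip(dims[1:-1], dims[2:])))   # [(10, 1)] -> LocallyConnected(5, 10, 1)

    dims = [n_features, 1]                   # hidden_layer_units=(0,) -> [5, 1]
    print(list(zip(dims[1:-1], dims[2:])))   # [] -> no loc_lin_layer at all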
Example #3
def test_bias_true(self):
    # with bias enabled, the bias is allocated as a learnable nn.Parameter
    lc = LocallyConnected(1, 1, 1, bias=True)
    assert isinstance(lc.bias, nn.Parameter)
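
Both bias tests are consistent with the standard PyTorch pattern of allocating the bias as an nn.Parameter when requested and registering it as None otherwise. Below is a minimal sketch of a LocallyConnected module that would satisfy the two tests and the constructor calls in Example #2; the shapes, initialisation, and forward pass are assumptions for illustration, not the library's actual implementation.

    import math

    import torch
    import torch.nn as nn


    class LocallyConnected(nn.Module):
        # one independent linear map per feature (sketch, not the library code)
        def __init__(self, num_linear, input_features, output_features, bias=True):
            super().__init__()
            # one (input_features x output_features) weight per local map
            self.weight = nn.Parameter(
                torch.empty(num_linear, input_features, output_features))
            if bias:
                self.bias = nn.Parameter(torch.empty(num_linear, output_features))
            else:
                # keeps `lc.bias is None`, as asserted in test_bias_false
                self.register_parameter("bias", None)
            self.reset_parameters()

        def reset_parameters(self):
            bound = 1.0 / math.sqrt(self.weight.shape[1])
            nn.init.uniform_(self.weight, -bound, bound)
            if self.bias is not None:
                nn.init.uniform_(self.bias, -bound, bound)

        def forward(self, x):
            # x: [batch, num_linear, input_features] -> [batch, num_linear, output_features]
            out = torch.matmul(x.unsqueeze(2), self.weight.unsqueeze(0)).squeeze(2)
            if self.bias is not None:
                out = out + self.bias
            return out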