def __init__(self, in_features: int, out_channels: int, num_repetitions: int = 1, dropout=0.0):
    """
    Create the leaf layer.

    Args:
        in_features: Number of input features.
        out_channels: Number of parallel representations for each input feature.
        num_repetitions: Number of parallel repetitions of this layer.
        dropout: Dropout probability.
    """
    super().__init__(in_features=in_features, num_repetitions=num_repetitions)
    self.in_features = check_valid(in_features, int, 1)
    self.out_channels = check_valid(out_channels, int, 1)
    self.num_repetitions = check_valid(num_repetitions, int, 1)
    dropout = check_valid(dropout, float, 0.0, 1.0)
    self.dropout = nn.Parameter(torch.tensor(dropout), requires_grad=False)

    self.out_shape = f"(N, {in_features}, {out_channels})"

    # Marginalization constant
    self.marginalization_constant = nn.Parameter(torch.zeros(1), requires_grad=False)

    # Dropout bernoulli
    self._bernoulli_dist = torch.distributions.Bernoulli(probs=self.dropout)
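# Illustrative sketch only (an assumption, not the repository's forward pass): leaf
# dropout in this kind of layer is typically realized by replacing a feature's
# log-likelihood with the marginalization constant (log 1 = 0) with probability
# `dropout`, which marginalizes that feature out.
import torch

x = torch.randn(4, 8, 3)                               # [n, in_features, out_channels] log-likelihoods
bernoulli = torch.distributions.Bernoulli(probs=0.2)
dropout_mask = bernoulli.sample(x.shape).bool()
x = torch.where(dropout_mask, torch.zeros_like(x), x)  # dropped entries contribute log 1 = 0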
def __init__(self, in_features: int, in_channels: int, num_repetitions: int = 1):
    """
    Create a rat product node layer.

    Args:
        in_features (int): Number of input features.
        in_channels (int): Number of input channels. This is only needed for the sampling pass.
        num_repetitions (int): Number of parallel repetitions of this layer.
    """
    super().__init__(in_features, num_repetitions)
    self.in_channels = check_valid(in_channels, int, 1)
    cardinality = 2  # Fixed to binary graphs for now
    self.cardinality = check_valid(cardinality, int, 2, in_features + 1)
    self._out_features = np.ceil(self.in_features / self.cardinality).astype(int)
    self._pad = 0

    # Collect scopes for each product child
    self._scopes = [[] for _ in range(self.cardinality)]

    # Create sequence of scopes
    scopes = np.arange(self.in_features)

    # For two consecutive scopes
    for i in range(0, self.in_features, self.cardinality):
        for j in range(cardinality):
            if i + j < in_features:
                self._scopes[j].append(scopes[i + j])
            else:
                # Case: d mod cardinality != 0 => Create marginalized nodes with prob 1.0
                # Pad x in the forward pass on the right: [n, d, c] -> [n, d+1, c] where the
                # last entry (index "in_features") is the marginalized node
                self._scopes[j].append(self.in_features)

    # Transform into numpy array for easier indexing
    self._scopes = np.array(self._scopes)

    # Create index map from flattened to coordinates (only needed in sampling)
    self.unraveled_channel_indices = nn.Parameter(
        torch.tensor([(i, j) for i in range(self.in_channels) for j in range(self.in_channels)]),
        requires_grad=False,
    )

    self.out_shape = f"(N, {self._out_features}, {self.in_channels ** 2}, {self.num_repetitions})"
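# Hypothetical standalone walk-through (not part of the original code) of the scope
# bookkeeping above for in_features=3, in_channels=2: the odd feature is paired with a
# marginalized pseudo-scope at index `in_features`, and every channel combination is
# enumerated, hence in_channels ** 2 output channels.
import numpy as np

in_features, cardinality, in_channels = 3, 2, 2
scopes = [[] for _ in range(cardinality)]
for i in range(0, in_features, cardinality):
    for j in range(cardinality):
        scopes[j].append(i + j if i + j < in_features else in_features)
print(np.array(scopes))  # [[0 2] [1 3]] -> scope 3 is the padded, marginalized node
print([(i, j) for i in range(in_channels) for j in range(in_channels)])  # 4 = in_channels ** 2 pairs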
def __init__(self, in_features: int, out_channels: int, cardinality: int, num_repetitions: int = 1, dropout=0.0):
    """Create a gaussian layer.

    Args:
        in_features: Number of input features.
        out_channels: Number of parallel representations for each input feature.
        cardinality: Number of features covered by each multivariate gaussian.
        num_repetitions: Number of parallel repetitions of this layer.
        dropout: Dropout probability.
    """
    # TODO: Fix for num_repetitions
    super().__init__(in_features, out_channels, num_repetitions, dropout)
    self.cardinality = check_valid(cardinality, int, 2, in_features + 1)
    self._pad_value = in_features % cardinality
    self._out_features = np.ceil(in_features / cardinality).astype(int)
    self._n_dists = np.ceil(in_features / cardinality).astype(int)

    # Create gaussian means and covs
    self.means = nn.Parameter(torch.randn(out_channels * self._n_dists, cardinality))

    # Generate the covariance matrix via the cholesky decomposition: sigma = A @ A.T where A is
    # lower triangular. Further ensure that diag(A) > 0, so that A has full rank.
    rand = torch.rand(out_channels * self._n_dists, cardinality, cardinality)

    # Make the matrices lower triangular
    for i in range(out_channels * self._n_dists):
        rand[i, :, :].tril_()

    self.triangular = nn.Parameter(rand)
    self._mv = dist.MultivariateNormal(loc=self.means, scale_tril=self.triangular)

    self.out_shape = f"(N, {self._out_features}, {self.out_channels})"
def __init__(self, in_features: int, cardinality: int, num_repetitions: int = 1):
    """
    Create a product node layer.

    Args:
        in_features (int): Number of input features.
        cardinality (int): Number of random children for each product node.
        num_repetitions (int): Number of parallel repetitions of this layer.
    """
    super().__init__(in_features, num_repetitions)
    self.cardinality = check_valid(cardinality, int, 1, in_features + 1)

    # Implement product as convolution
    self._conv_weights = nn.Parameter(torch.ones(1, 1, cardinality, 1, 1), requires_grad=False)
    self._pad = (self.cardinality - self.in_features % self.cardinality) % self.cardinality

    self._out_features = np.ceil(self.in_features / self.cardinality).astype(int)
    self.out_shape = f"(N, {self._out_features}, in_channels, {self.num_repetitions})"
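# Minimal numeric sketch (an illustration, not original code) of the padding arithmetic
# above: with in_features=5 and cardinality=2, one feature is padded so the features
# split evenly into ceil(5 / 2) = 3 product nodes.
import numpy as np

in_features, cardinality = 5, 2
pad = (cardinality - in_features % cardinality) % cardinality   # -> 1
out_features = np.ceil(in_features / cardinality).astype(int)   # -> 3
print(pad, out_features)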
def test_invalid_type(self):
    with self.assertRaises(InvalidTypeException):
        check_valid(0, float, 0, 1)
    with self.assertRaises(InvalidTypeException):
        check_valid(0.0, int, 0, 1)
    with self.assertRaises(InvalidTypeException):
        check_valid(np.int64(0), float, 0, 1)
    with self.assertRaises(InvalidTypeException):
        check_valid(torch.tensor(0).int(), float, 0, 1)
def __init__(
    self,
    in_features: int,
    out_channels: int,
    num_repetitions: int = 1,
    dropout: float = 0.0,
    min_sigma: float = 0.1,
    max_sigma: float = 1.0,
    min_mean: float = None,
    max_mean: float = None,
):
    """Create a gaussian layer.

    Args:
        in_features: Number of input features.
        out_channels: Number of parallel representations for each input feature.
        num_repetitions: Number of parallel repetitions of this layer.
        dropout: Dropout probability.
        min_sigma: Lower bound for the standard deviation.
        max_sigma: Upper bound for the standard deviation.
        min_mean: Lower bound for the mean.
        max_mean: Upper bound for the mean.
    """
    super().__init__(in_features, out_channels, num_repetitions, dropout)

    # Create gaussian means and stds
    self.means = nn.Parameter(torch.randn(1, in_features, out_channels, num_repetitions))

    if min_sigma is not None and max_sigma is not None:
        # Init from normal
        self.stds = nn.Parameter(torch.randn(1, in_features, out_channels, num_repetitions))
    else:
        # Init uniform between 0 and 1
        self.stds = nn.Parameter(torch.rand(1, in_features, out_channels, num_repetitions))

    self.min_sigma = check_valid(min_sigma, float, 0.0, max_sigma)
    self.max_sigma = check_valid(max_sigma, float, min_sigma)
    self.min_mean = check_valid(min_mean, float, upper_bound=max_mean, allow_none=True)
    self.max_mean = check_valid(max_mean, float, min_mean, allow_none=True)
def __init__(self, in_channels: int, in_features: int, out_channels: int, num_repetitions: int = 1, dropout: float = 0.0):
    """
    Create a Sum layer.

    Input is expected to be of shape [n, d, ic, r].
    Output will be of shape [n, d, oc, r].

    Args:
        in_channels (int): Number of output channels from the previous layer.
        in_features (int): Number of input features.
        out_channels (int): Multiplicity of a sum node for a given scope set.
        num_repetitions (int): Number of layer repetitions in parallel.
        dropout (float, optional): Dropout percentage.
    """
    super().__init__(in_features, num_repetitions)

    self.in_channels = check_valid(in_channels, int, 1)
    self.out_channels = check_valid(out_channels, int, 1)
    self.dropout = nn.Parameter(torch.tensor(check_valid(dropout, float, 0.0, 1.0)), requires_grad=False)

    # Weights, such that each sum node has its own weights
    ws = torch.randn(self.in_features, self.in_channels, self.out_channels, self.num_repetitions)
    self.weights = nn.Parameter(ws)
    self._bernoulli_dist = torch.distributions.Bernoulli(probs=self.dropout)

    self.out_shape = f"(N, {self.in_features}, {self.out_channels}, {self.num_repetitions})"

    # Necessary for sampling with evidence: Save input during forward pass.
    self._is_input_cache_enabled = False
    self._input_cache = None
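# Sketch only (an assumption that mirrors the shape contract in the docstring, not the
# library's actual forward implementation): per-scope, per-repetition weights of shape
# [d, ic, oc, r] mix the input channels of a [n, d, ic, r] tensor in log space,
# producing [n, d, oc, r].
import torch
import torch.nn.functional as F

n, d, ic, oc, r = 4, 8, 3, 5, 2
x = torch.randn(n, d, ic, r)                 # log-likelihoods from the previous layer
weights = torch.randn(d, ic, oc, r)
log_weights = F.log_softmax(weights, dim=1)  # normalize over the input-channel axis
out = torch.logsumexp(x.unsqueeze(3) + log_weights.unsqueeze(0), dim=2)
print(out.shape)                             # torch.Size([4, 8, 5, 2])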
def test_invalid_range(self):
    with self.assertRaises(OutOfBoundsException):
        check_valid(0, int, 1, 2)
    with self.assertRaises(OutOfBoundsException):
        check_valid(0.0, float, 1.0, 2.0)
    with self.assertRaises(OutOfBoundsException):
        check_valid(2, int, 0, 1)
def __init__(
    self,
    in_features: int,
    out_channels: int,
    cardinality: int,
    num_repetitions: int = 1,
    dropout: float = 0.0,
    leaf_base_class: Leaf = RatNormal,
    leaf_base_kwargs: Dict = None,
):
    """
    Create a multivariate distribution whose covariance matrix is non-zero only on the diagonal.

    Args:
        in_features: Number of input features.
        out_channels: Number of parallel representations for each input feature.
        cardinality: Number of variables per gauss.
        num_repetitions: Number of parallel repetitions of this layer.
        dropout: Dropout probability.
        leaf_base_class (Leaf): The encapsulating base leaf layer class.
        leaf_base_kwargs (Dict): Keyword arguments passed to the base leaf class.
    """
    super(IndependentMultivariate, self).__init__(in_features, out_channels, num_repetitions, dropout)
    if leaf_base_kwargs is None:
        leaf_base_kwargs = {}

    self.base_leaf = leaf_base_class(
        out_channels=out_channels,
        in_features=in_features,
        dropout=dropout,
        num_repetitions=num_repetitions,
        **leaf_base_kwargs,
    )
    self._pad = (cardinality - self.in_features % cardinality) % cardinality

    # Number of input features for the product needs to be extended depending on the padding applied here
    prod_in_features = in_features + self._pad
    self.prod = Product(in_features=prod_in_features, cardinality=cardinality, num_repetitions=num_repetitions)

    self.cardinality = check_valid(cardinality, int, 1, in_features + 1)
    self.out_shape = f"(N, {self.prod._out_features}, {out_channels}, {self.num_repetitions})"
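# Hypothetical shape walk-through (not original code) of the leaf-plus-product pipeline
# above for in_features=5, cardinality=2, out_channels=3, num_repetitions=2: the base-leaf
# output is padded on the feature axis and then multiplied (summed in log space) over
# groups of `cardinality` features.
import torch
import torch.nn.functional as F

n, in_features, cardinality, oc, r = 4, 5, 2, 3, 2
pad = (cardinality - in_features % cardinality) % cardinality    # -> 1
x = torch.randn(n, in_features, oc, r)                           # base-leaf log-likelihoods
x = F.pad(x, pad=(0, 0, 0, 0, 0, pad), value=0.0)                # padded scope contributes log 1 = 0
x = x.view(n, (in_features + pad) // cardinality, cardinality, oc, r).sum(dim=2)
print(x.shape)                                                    # torch.Size([4, 3, 3, 2])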
def assert_valid(self):
    """Check whether the configuration is valid."""
    self.F = check_valid(self.F, int, 1)
    self.D = check_valid(self.D, int, 1)
    self.C = check_valid(self.C, int, 1)
    self.S = check_valid(self.S, int, 1)
    self.R = check_valid(self.R, int, 1)
    self.I = check_valid(self.I, int, 1)
    self.dropout = check_valid(self.dropout, float, 0.0, 1.0)
    assert self.leaf_base_class is not None, Exception("RatSpnConfig.leaf_base_class parameter was not set!")
    assert isinstance(self.leaf_base_class, type) and issubclass(
        self.leaf_base_class, Leaf
    ), f"Parameter RatSpnConfig.leaf_base_class must be a subclass type of Leaf but was {self.leaf_base_class}."

    if 2 ** self.D > self.F:
        raise Exception(
            f"The tree depth D={self.D} must be <= {np.floor(np.log2(self.F))} (log2(in_features))."
        )
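# Hypothetical numeric check (not original code) of the depth constraint enforced above:
# a region graph of depth D splits the F features in half D times, so 2 ** D must not
# exceed F. With F=16 input features the maximum depth is log2(16) = 4.
import numpy as np

F, D = 16, 4
assert 2 ** D <= F, f"D={D} must be <= {int(np.floor(np.log2(F)))}"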
def test_valid(self):
    # Ints
    check_valid(0, int, 0)
    check_valid(np.int64(0), int, 0)
    check_valid(np.int32(0), int, 0)
    check_valid(np.int16(0), int, 0)
    check_valid(np.int8(0), int, 0)
    check_valid(torch.tensor(0).int(), int, 0)
    check_valid(torch.tensor(0).long(), int, 0)

    # Floats
    check_valid(1.0, float, 0)
    check_valid(np.float64(1.0), float, 0)
    check_valid(np.float32(1.0), float, 0)
    check_valid(np.float16(1.0), float, 0)
    check_valid(torch.tensor(1.0).half(), float, 0)
    check_valid(torch.tensor(1.0).float(), float, 0)
    check_valid(torch.tensor(1.0).double(), float, 0)
def __init__(self, in_features: int, num_repetitions: int = 1):
    super().__init__()
    self.in_features = check_valid(in_features, int, 1)
    self.num_repetitions = check_valid(num_repetitions, int, 1)