def __init__(self, region_spns, C, S, I):
    """
    Initialize the RAT SPN PyTorch module.

    Args:
        region_spns: Internal SPNs, one per random region split.
        C: Number of classes.
        S: Number of sum nodes at each sum layer.
        I: Number of leaf nodes for each leaf region.
    """
    super().__init__()
    self.C = C
    self._priors = nn.Parameter(torch.log(torch.tensor(1 / self.C)), requires_grad=False)
    self.region_spns = nn.ModuleList(region_spns)

    # Root: each region SPN contributes S^2 channels if it recurses more than
    # once, otherwise I^2 channels
    in_channels = 0
    for spn in region_spns:
        if spn.num_recursions > 1:
            in_channels += S**2
        else:
            in_channels += I**2
    self.root = Sum(in_channels=in_channels, in_features=1, out_channels=C)
    self.init_weights()
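# A minimal sketch (not part of the original module) of how the root's
# in_channels accumulates: with S=4, I=3 and three region SPNs, two of which
# recurse more than once, the root receives 2 * 4**2 + 1 * 3**2 = 41 channels.
# The region SPN stub below is a hypothetical stand-in for illustration only.
if __name__ == "__main__":
    class _FakeRegionSpn:
        def __init__(self, num_recursions):
            self.num_recursions = num_recursions

    S, I = 4, 3
    spns = [_FakeRegionSpn(2), _FakeRegionSpn(3), _FakeRegionSpn(1)]
    in_channels = sum(S**2 if s.num_recursions > 1 else I**2 for s in spns)
    assert in_channels == 2 * S**2 + I**2  # 41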
class Mixture(Leaf):
    def __init__(self, distributions, in_features: int, out_channels, num_repetitions, dropout=0.0):
        """
        Create a layer that stacks multiple representations of a feature along the scope dimension.

        Args:
            distributions: List of possible distributions to represent the feature with.
            in_features: Number of input features.
            out_channels: Number of nodes each distribution is assigned to.
            num_repetitions: Number of parallel repetitions of this layer.
            dropout: Dropout probability.
        """
        super().__init__(in_features, out_channels, num_repetitions, dropout)
        # Build a separate leaf layer for each distribution specified
        reprs = [distr(in_features, out_channels, num_repetitions, dropout) for distr in distributions]
        self.representations = nn.ModuleList(reprs)

        # Build sum layer as mixture of distributions
        self.sumlayer = Sum(
            in_features=in_features,
            in_channels=len(distributions) * out_channels,
            out_channels=out_channels,
            num_repetitions=num_repetitions,
        )

    def _get_base_distribution(self):
        raise NotImplementedError("Mixture does not have a single base distribution.")

    def forward(self, x):
        results = [d(x) for d in self.representations]

        # Stack along output channel dimension
        x = torch.cat(results, dim=2)

        # Build mixture of the different leaves per input feature
        x = self.sumlayer(x)
        return x

    def sample(self, n: int = None, context: SamplingContext = None) -> torch.Tensor:
        # Sample from sum mixture layer
        context = self.sumlayer.sample(context=context)

        # Collect samples from the different distribution layers
        samples = []
        for d in self.representations:
            sample_d = d.sample(context=context)
            samples.append(sample_d)

        # Stack along channel dimension
        samples = torch.cat(samples, dim=2)

        # If parent indices into out_channels are given
        if context.parent_indices is not None:
            # Choose only specific samples for each feature/scope
            samples = torch.gather(samples, dim=2, index=context.parent_indices.unsqueeze(-1)).squeeze(-1)

        return samples
def _build(self):
    # Build the SPN bottom up:
    # Definition from RAT Paper
    # Leaf Region: Create I leaf nodes
    # Root Region: Create C sum nodes
    # Internal Region: Create S sum nodes
    # Partition: Cross products of all child-regions

    ### LEAF ###
    # Cardinality is the size of the region in the last partitions
    cardinality = np.ceil(self.in_features / (self.num_parts**self.num_recursions)).astype(int)
    self._leaf = IndependentNormal(
        multiplicity=self.I,
        in_features=self.in_features,
        cardinality=cardinality,
        dropout=self.dropout,
    )

    self._inner_layers = nn.Sequential()
    count = 0
    prod = RatProduct(in_features=self.num_parts**self.num_recursions)
    self._inner_layers.add_module(f"Product-{count}", prod)
    count += 1

    for i in np.arange(start=self.num_recursions - 1, stop=0, step=-1):
        is_lowest_sum_layer = i == self.num_recursions - 1
        if is_lowest_sum_layer:
            # Output channels of the product layer after the leaves
            sum_in_channels = self.I**2
        else:
            # Output channels of the product layer after the sums
            sum_in_channels = self.S**2

        in_features = self.num_parts**i

        # Sum layer
        sumlayer = Sum(in_features=in_features, in_channels=sum_in_channels, out_channels=self.S)

        # Product layer
        prod = RatProduct(in_features=in_features)

        # Collect
        self._inner_layers.add_module(f"Sum-{count}", sumlayer)
        self._inner_layers.add_module(f"Product-{count}", prod)
        count += 1
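# A worked example (illustrative only, not from the original file) of the
# cardinality computation above: with in_features=50, num_parts=2 and
# num_recursions=3, the last partition splits the scope into 2**3 = 8 regions,
# so each leaf region covers ceil(50 / 8) = 7 features.
import numpy as np

in_features, num_parts, num_recursions = 50, 2, 3
cardinality = np.ceil(in_features / (num_parts**num_recursions)).astype(int)
assert cardinality == 7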
if __name__ == "__main__":
    from spn.algorithms.layerwise.layers import Product, Sum

    # Setup
    I = 3
    in_features = 2
    num_repetitions = 1
    batch_size = 1

    # Leaf layer: Mixture of distributions
    # (Gamma, Beta, Chi2, Cauchy and Mixture are defined in the surrounding module)
    dists = [Gamma, Beta, Chi2, Cauchy]
    leaf = Mixture(distributions=dists, in_features=in_features, out_channels=I, num_repetitions=num_repetitions)

    # Add further layers
    prod1 = Product(in_features=in_features, cardinality=in_features, num_repetitions=num_repetitions)
    sum1 = Sum(in_features=1, in_channels=I, out_channels=1, num_repetitions=1)

    # Random input
    x = torch.randn(batch_size, in_features)

    # Pass through leaf mixture layer
    x = leaf(x)

    # Check dimensions
    n, d, c, r = x.shape
    assert n == batch_size
    assert d == in_features
    assert c == I
    assert r == num_repetitions

    # Sample
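    # A sketch (not in the original excerpt) of how the sampling step might
    # continue, mirroring RatSpn.sample further below: descend from the root
    # sum node through the product layer to the leaf mixture. Two assumptions:
    # Product.sample(context=...) exists with the same signature as
    # Sum.sample/CrossProduct.sample, and repetition_indices must be set before
    # descending, as done after root sampling in RatSpn.sample.
    ctx = sum1.sample(n=batch_size)
    ctx.repetition_indices = torch.zeros(batch_size, dtype=torch.long)
    ctx = prod1.sample(context=ctx)
    samples = leaf.sample(context=ctx)
    print(samples.shape)  # expected: (batch_size, in_features)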
AE_tr_err = train_ae(model_f, model_de, train_xs, train_xt, drift_num)


def test_ae(model_f, model_de, test_x):
    """Reconstruction loss of the autoencoder on a test batch."""
    model_f.eval()
    model_de.eval()
    cri = torch.nn.MSELoss()
    test_x = test_x.cuda()
    feature = model_f(test_x)
    output = model_de(feature)
    loss = cri(output, test_x)
    return loss.item()


# Build a small SPN on top of the 50-dimensional feature space
gauss = Normal(multiplicity=5, in_features=50)
prod1 = Product(in_features=50, cardinality=5)
sum1 = Sum(in_features=10, in_channels=5, out_channels=1)
prod2 = Product(in_features=10, cardinality=10)
spn = nn.Sequential(gauss, prod1, sum1, prod2).cuda()

clipper = DistributionClipper()
optimizer_spn = torch.optim.Adam(spn.parameters(), lr=0.001)
optimizer_spn.zero_grad()


def train_spn(model_f, spn, train_x):
    model_f.eval()
    spn.train()
    for t in range(200):
        for i in range(len(train_x)):
            data = train_x[i]
            data = data.cuda()
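            # --- sketch continuation (the excerpt cuts off above; this is an
            # assumption, not the original code) ---
            # The SPN outputs log-likelihoods, so a natural objective is the
            # negative mean log-likelihood of the detached AE features.
            # Applying the clipper via `spn.apply(clipper)` to keep
            # distribution parameters in a valid range is an assumption about
            # the DistributionClipper API.
            feature = model_f(data).detach()
            nll = -spn(feature).mean()
            optimizer_spn.zero_grad()
            nll.backward()
            optimizer_spn.step()
            spn.apply(clipper)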
class RatSpn(nn.Module):
    """
    RAT SPN PyTorch implementation with layer-wise tensors.

    See also: https://arxiv.org/abs/1806.01910
    """

    def __init__(self, config: RatSpnConfig):
        """
        Create a RatSpn based on a configuration object.

        Args:
            config (RatSpnConfig): RatSpn configuration object.
        """
        super().__init__()
        config.assert_valid()
        self.config = config

        # Construct the architecture
        self._build()

        # Initialize weights
        self._init_weights()

        # Obtain permutation indices
        self._make_random_repetition_permutation_indices()

    def _make_random_repetition_permutation_indices(self):
        """Create random permutation indices for each repetition."""
        self.rand_indices = torch.empty(size=(self.config.F, self.config.R))
        for r in range(self.config.R):
            # Each repetition has its own randomization
            self.rand_indices[:, r] = torch.tensor(np.random.permutation(self.config.F))

        self.rand_indices = self.rand_indices.long()

    def _randomize(self, x: torch.Tensor) -> torch.Tensor:
        """
        Randomize the input at each repetition according to `self.rand_indices`.

        Args:
            x: Input.

        Returns:
            torch.Tensor: Randomized input along feature axis. Each repetition has its own permutation.
        """
        # Expand input to the number of repetitions
        x = x.unsqueeze(2)  # Make space for repetition axis
        x = x.repeat((1, 1, self.config.R))  # Repeat R times

        # Random permutation
        for r in range(self.config.R):
            # Get permutation indices for the r-th repetition
            perm_indices = self.rand_indices[:, r]

            # Permute the features of the r-th version of x using the indices
            x[:, :, r] = x[:, perm_indices, r]

        return x

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass through RatSpn. Computes the conditional log-likelihood P(X | C).

        Args:
            x: Input.

        Returns:
            torch.Tensor: Conditional log-likelihood P(X | C) of the input.
        """
        # Apply feature randomization for each repetition
        x = self._randomize(x)

        # Apply leaf distributions
        x = self._leaf(x)

        # Pass through intermediate layers
        x = self._forward_layers(x)

        # Merge results from the different repetitions into the channel dimension
        n, d, c, r = x.size()
        assert d == 1  # number of features should be 1 at this point
        x = x.view(n, d, c * r, 1)

        # Apply C sum node outputs
        x = self.root(x)

        # Remove repetition dimension
        x = x.squeeze(3)

        # Remove in_features dimension
        x = x.squeeze(1)

        return x

    def _forward_layers(self, x):
        """
        Forward pass through the inner sum and product layers.

        Args:
            x: Input.

        Returns:
            torch.Tensor: Output of the last layer before the root layer.
""" # Forward to inner product and sum layers for layer in self._inner_layers: x = layer(x) return x def _build(self): """Construct the internal architecture of the RatSpn.""" # Build the SPN bottom up: # Definition from RAT Paper # Leaf Region: Create I leaf nodes # Root Region: Create C sum nodes # Internal Region: Create S sum nodes # Partition: Cross products of all child-regions # Construct leaf self._leaf = self._build_input_distribution() # First product layer on top of leaf layer prodlayer = CrossProduct(in_features=2**self.config.D, in_channels=self.config.I, num_repetitions=self.config.R) self._inner_layers = nn.ModuleList() self._inner_layers.append(prodlayer) # Sum and product layers sum_in_channels = self.config.I**2 for i in np.arange(start=self.config.D - 1, stop=0, step=-1): # Current in_features in_features = 2**i # Sum layer sumlayer = Sum( in_features=in_features, in_channels=sum_in_channels, out_channels=self.config.S, dropout=self.config.dropout, num_repetitions=self.config.R, ) self._inner_layers.append(sumlayer) # Product layer prodlayer = CrossProduct(in_features=in_features, in_channels=self.config.S, num_repetitions=self.config.R) self._inner_layers.append(prodlayer) # Update sum_in_channels sum_in_channels = self.config.S**2 # Construct root layer self.root = Sum(in_channels=self.config.R * sum_in_channels, in_features=1, num_repetitions=1, out_channels=self.config.C) # Construct sampling root with weights according to priors for sampling self._sampling_root = Sum(in_channels=self.config.C, in_features=1, out_channels=1, num_repetitions=1) self._sampling_root.weights = nn.Parameter( torch.ones(size=(1, self.config.C, 1, 1)) * torch.tensor(1 / self.config.C), requires_grad=False) def _build_input_distribution(self): """Construct the input distribution layer.""" # Cardinality is the size of the region in the last partitions cardinality = np.ceil(self.config.F / (2**self.config.D)).astype(int) return IndependentMultivariate( in_features=self.config.F, out_channels=self.config.I, num_repetitions=self.config.R, cardinality=cardinality, dropout=self.config.dropout, leaf_base_class=self.config.leaf_base_class, leaf_base_kwargs=self.config.leaf_base_kwargs, ) @property def __device(self): """Small hack to obtain the current device.""" return self._sampling_root.weights.device def _init_weights(self): """Initiale the weights. Calls `_init_weights` on all modules that have this method.""" for module in self.modules(): if hasattr(module, "_init_weights") and module != self: module._init_weights() continue if isinstance(module, Sum): truncated_normal_(module.weights, std=0.5) continue def sample(self, n: int = None, class_index=None, evidence: torch.Tensor = None): """ Sample from the distribution represented by this SPN. Possible valid inputs: - `n`: Generates `n` samples. - `n` and `class_index (int)`: Generates `n` samples from P(X | C = class_index). - `class_index (List[int])`: Generates `len(class_index)` samples. Each index `c_i` in `class_index` is mapped to a sample from P(X | C = c_i) - `evidence`: If evidence is given, samples conditionally and fill NaN values. Args: n: Number of samples to generate. class_index: Class index. Can be either an int in combination with a value for `n` which will result in `n` samples from P(X | C = class_index). Or can be a list of ints which will map each index `c_i` in the list to a sample from P(X | C = c_i). evidence: Evidence that can be provided to condition the samples. 
                If evidence is given, `n` and `class_index` must be `None`. Evidence must contain NaN values which
                will be imputed according to the distribution represented by the SPN. The result will contain the
                evidence with all NaNs replaced by the sampled values.

        Returns:
            torch.Tensor: Samples generated according to the distribution specified by the SPN.
        """
        assert class_index is None or evidence is None, "Cannot provide both evidence and class indices."
        assert n is None or evidence is None, "Cannot provide both the number of samples to generate (n) and evidence."

        # Check if evidence contains NaNs
        if evidence is not None:
            assert (evidence != evidence).any(), "Evidence has no NaN values."

            # Set n to the number of samples in the evidence
            n = evidence.shape[0]

        with provide_evidence(self, evidence):  # May be None but that's ok
            # If class is given, use it as base index
            if class_index is not None:
                if isinstance(class_index, list):
                    indices = torch.tensor(class_index, device=self.__device).view(-1, 1)
                    n = indices.shape[0]
                else:
                    indices = torch.empty(size=(n, 1), device=self.__device)
                    indices.fill_(class_index)

                # Create new sampling context
                ctx = SamplingContext(n=n, parent_indices=indices, repetition_indices=None)
            else:
                # Start sampling one of the C root nodes TODO: check what happens if C=1
                ctx = self._sampling_root.sample(n=n)

            # Sample from RatSpn root layer: results are indices into the stacked output channels of all repetitions
            ctx.repetition_indices = torch.zeros(n, dtype=torch.long, device=self.__device)
            ctx = self.root.sample(context=ctx)

            # Indices now point into the stacked channels of all repetitions (R * S^2 if D > 1, else R * I^2)
            root_in_channels = self.root.in_channels // self.config.R

            # Obtain repetition indices
            ctx.repetition_indices = (ctx.parent_indices // root_in_channels).squeeze(1)

            # Shift indices
            ctx.parent_indices = ctx.parent_indices % root_in_channels

            # Now each sample in `parent_indices` belongs to one repetition, indexed in `repetition_indices`

            # Sample inner modules
            for layer in reversed(self._inner_layers):
                if isinstance(layer, (Sum, CrossProduct)):
                    ctx = layer.sample(context=ctx)
                else:
                    raise TypeError("Only Sum or CrossProduct is allowed as an intermediate layer.")

            # Sample leaf
            samples = self._leaf.sample(context=ctx)

            # Invert the feature permutation of each repetition
            for i in range(n):
                rep_index = ctx.repetition_indices[i]
                inv_rand_indices = invert_permutation(self.rand_indices[:, rep_index])
                samples[i, :] = samples[i, inv_rand_indices]

            if evidence is not None:
                # Update NaN entries in evidence with the sampled values
                nan_indices = torch.isnan(evidence)

                # First make a copy such that the original object is not changed
                evidence = evidence.clone()
                evidence[nan_indices] = samples[nan_indices]
                return evidence
            else:
                return samples
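# A minimal usage sketch (not from the original file). The config field names
# F, D, R, I, S, C, dropout, leaf_base_class and leaf_base_kwargs are taken
# from their use in _build and _build_input_distribution above; the leaf class
# name RatNormal is an assumption and may differ in the actual codebase.
if __name__ == "__main__":
    config = RatSpnConfig()
    config.F = 8   # number of input features
    config.D = 2   # depth -> 2**D leaf regions
    config.R = 3   # repetitions
    config.I = 4   # leaf distributions per region
    config.S = 4   # sum nodes per internal region
    config.C = 2   # number of classes (root heads)
    config.dropout = 0.0
    config.leaf_base_class = RatNormal  # assumed leaf class name
    config.leaf_base_kwargs = {}

    model = RatSpn(config)
    x = torch.randn(16, config.F)
    ll = model(x)  # conditional log-likelihoods, shape (16, C)

    samples = model.sample(n=5)                       # unconditional samples
    class_samples = model.sample(class_index=[0, 1])  # one sample per class index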