def create_linear_transform(linear_transform, features):
    """Create a linear transform.

    Parameters
    ----------
    linear_transform : {'permutation', 'lu', 'svd'}
        Linear transform to use.
    features : int
        Number of features.
    """
    if linear_transform.lower() == 'permutation':
        return transforms.RandomPermutation(features=features)
    elif linear_transform.lower() == 'lu':
        return transforms.CompositeTransform([
            transforms.RandomPermutation(features=features),
            transforms.LULinear(features, identity_init=True, using_cache=True)
        ])
    elif linear_transform.lower() == 'svd':
        return transforms.CompositeTransform([
            transforms.RandomPermutation(features=features),
            transforms.SVDLinear(features, num_householder=10, identity_init=True)
        ])
    else:
        raise ValueError(f'Unknown linear transform: {linear_transform}. '
                         'Choose from: {permutation, lu, svd}.')
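
# Usage sketch (assumes `transforms` is nflows.transforms and the function
# above is importable); the feature count is an arbitrary example value.
lu_transform = create_linear_transform('lu', features=8)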
def _make_scalar_linear_transform(transform, features):
    if transform == "permutation":
        return transforms.RandomPermutation(features=features)
    elif transform == "lu":
        return transforms.CompositeTransform(
            [transforms.RandomPermutation(features=features),
             transforms.LULinear(features, identity_init=True)]
        )
    elif transform == "svd":
        return transforms.CompositeTransform(
            [
                transforms.RandomPermutation(features=features),
                transforms.SVDLinear(features, num_householder=10, identity_init=True),
            ]
        )
    else:
        raise ValueError(f"Unknown linear transform: {transform}")
def __init__(
    self,
    x_size: int,
    y_size: int,
    arch: str = 'A',  # one of ['A', 'PRQ', 'UMNN']
    num_transforms: int = 5,
    lu_linear: bool = False,
    moments: Tuple[torch.Tensor, torch.Tensor] = None,
    **kwargs,
):
    kwargs.setdefault('hidden_features', 64)
    kwargs.setdefault('num_blocks', 2)
    kwargs.setdefault('use_residual_blocks', False)
    kwargs.setdefault('use_batch_norm', False)
    kwargs['activation'] = ACTIVATIONS[kwargs.get('activation', 'ReLU')]()

    if arch == 'PRQ':
        kwargs['tails'] = 'linear'
        kwargs.setdefault('num_bins', 8)
        kwargs.setdefault('tail_bound', 1.)
        tfrm = transforms.MaskedPiecewiseRationalQuadraticAutoregressiveTransform
    elif arch == 'UMNN':
        kwargs.setdefault('integrand_net_layers', [64, 64, 64])
        kwargs.setdefault('cond_size', 32)
        kwargs.setdefault('nb_steps', 32)
        tfrm = transforms.MaskedUMNNAutoregressiveTransform
    else:  # arch == 'A'
        tfrm = transforms.MaskedAffineAutoregressiveTransform

    compose = []

    if moments is not None:
        shift, scale = moments
        compose.append(
            transforms.PointwiseAffineTransform(-shift / scale, 1 / scale))

    for _ in range(num_transforms if x_size > 1 else 1):
        compose.extend([
            tfrm(
                features=x_size,
                context_features=y_size,
                **kwargs,
            ),
            transforms.RandomPermutation(features=x_size),
        ])

    if lu_linear:
        compose.append(transforms.LULinear(x_size, identity_init=True))

    transform = transforms.CompositeTransform(compose)
    distribution = distributions.StandardNormal((x_size,))

    super().__init__(transform, distribution)
def create_flow(flow_type):
    distribution = distributions.StandardNormal((3,))
    if flow_type == 'lu_flow':
        transform = transforms.CompositeTransform([
            transforms.RandomPermutation(3),
            transforms.LULinear(3, identity_init=False)
        ])
    elif flow_type == 'qr_flow':
        transform = transforms.QRLinear(3, num_householder=3)
    else:
        raise RuntimeError('Unknown type')
    return flows.Flow(transform, distribution)
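
# Usage sketch (assumes `transforms`, `distributions`, and `flows` are the
# nflows modules used above); samples from the LU flow and scores the samples.
flow = create_flow('lu_flow')
x = flow.sample(16)
log_prob = flow.log_prob(x)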
def create_linear_transform(param_dim):
    """Create the composite linear transform PLU.

    Arguments:
        param_dim {int} -- dimension of the space

    Returns:
        Transform -- nde.Transform object
    """
    return transforms.CompositeTransform([
        transforms.RandomPermutation(features=param_dim),
        transforms.LULinear(param_dim, identity_init=True)
    ])
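
# Usage sketch (assumes `torch` and nflows `transforms` are in scope); applies
# the PLU transform to a toy batch, returning the transformed values and the
# log-abs-determinant of the Jacobian.
plu = create_linear_transform(param_dim=4)
outputs, logabsdet = plu(torch.randn(10, 4))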
def make_flow(latent_dim, n_layers):
    transform_list = [BatchNormTransform(latent_dim)]
    for _ in range(n_layers):
        transform_list.extend(
            [
                transforms.MaskedAffineAutoregressiveTransform(
                    features=latent_dim,
                    hidden_features=64,
                ),
                transforms.RandomPermutation(latent_dim),
            ]
        )
    transform = transforms.CompositeTransform(transform_list)

    # Define a base distribution.
    base_distribution = distributions.StandardNormal(shape=[latent_dim])

    # Combine into a flow.
    return flows.Flow(transform=transform, distribution=base_distribution)
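
# Usage sketch (assumes `torch`, the custom `BatchNormTransform`, and the
# nflows imports used above are available); the sizes are arbitrary examples.
flow = make_flow(latent_dim=4, n_layers=3)
log_prob = flow.log_prob(torch.randn(32, 4))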
def _create_linear_transform(self):
    return transforms.CompositeTransform([
        transforms.RandomPermutation(features=self.dimensions),
        transforms.LULinear(self.dimensions, identity_init=True)
    ])
def neural_net_nsf(self,
                   hidden_features,
                   num_blocks,
                   num_bins,
                   xDim,
                   thetaDim,
                   batch_x=None,
                   batch_theta=None,
                   tail=3.,
                   bounded=False,
                   embedding_net=torch.nn.Identity()) -> torch.nn.Module:
    """Builds an NSF density estimator p(x|theta).

    Args:
        hidden_features: Number of hidden features in each residual net.
        num_blocks: Number of coupling blocks.
        num_bins: Number of spline bins per coupling transform.
        xDim: Dimensionality of x.
        thetaDim: Dimensionality of the conditioning variable theta.
        batch_x: Batch of xs, used for (optional) z-scoring.
        batch_theta: Batch of thetas, used for (optional) z-scoring.
        tail: Tail bound of the rational-quadratic splines.
        bounded: If True, map bounded x to an unbounded space via a logit
            (and an affine rescaling if the support is not [0, 1]) instead
            of z-scoring x.
        embedding_net: Optional embedding network for theta.

    Returns:
        Neural network.
    """
    basic_transform = [
        transforms.CompositeTransform([
            transforms.PiecewiseRationalQuadraticCouplingTransform(
                mask=create_alternating_binary_mask(
                    features=xDim, even=(i % 2 == 0)).to(self.args.device),
                transform_net_create_fn=lambda in_features, out_features:
                nets.ResidualNet(
                    in_features=in_features,
                    out_features=out_features,
                    hidden_features=hidden_features,
                    context_features=thetaDim,
                    num_blocks=2,
                    activation=torch.relu,
                    dropout_probability=0.,
                    use_batch_norm=False,
                ),
                num_bins=num_bins,
                tails='linear',
                tail_bound=tail,
                apply_unconditional_transform=False,
            ),
            transforms.RandomPermutation(features=xDim,
                                         device=self.args.device),
            transforms.LULinear(xDim, identity_init=True),
        ]) for i in range(num_blocks)
    ]
    transform = transforms.CompositeTransform(basic_transform).to(
        self.args.device)

    if batch_theta is not None:
        if bounded:
            transform_bounded = transforms.Logit(self.args.device)
            if self.sim.min[0].item() != 0 or self.sim.max[0].item() != 1:
                transform_affine = transforms.PointwiseAffineTransform(
                    shift=-self.sim.min / (self.sim.max - self.sim.min),
                    scale=1. / (self.sim.max - self.sim.min))
                transform = transforms.CompositeTransform(
                    [transform_affine, transform_bounded, transform])
            else:
                transform = transforms.CompositeTransform(
                    [transform_bounded, transform])
        else:
            transform_zx = standardizing_transform(batch_x)
            transform = transforms.CompositeTransform(
                [transform_zx, transform])
        embedding_net = torch.nn.Sequential(standardizing_net(batch_theta),
                                            embedding_net)
        distribution = distributions_.StandardNormal((xDim, ),
                                                     self.args.device)
        neural_net = flows.Flow(self,
                                transform,
                                distribution,
                                embedding_net=embedding_net).to(
                                    self.args.device)
    else:
        distribution = distributions_.StandardNormal((xDim, ),
                                                     self.args.device)
        neural_net = flows.Flow(self, transform,
                                distribution).to(self.args.device)

    return neural_net
def get_flow(
    model: str,
    dim_distribution: int,
    dim_context: Optional[int] = None,
    embedding: Optional[torch.nn.Module] = None,
    hidden_features: int = 50,
    made_num_mixture_components: int = 10,
    made_num_blocks: int = 4,
    flow_num_transforms: int = 5,
    mean=0.0,
    std=1.0,
) -> torch.nn.Module:
    """Density estimator.

    Args:
        model: Model, one of maf / made / nsf / nsf_bounded
        dim_distribution: Dim of distribution
        dim_context: Dim of context
        embedding: Embedding network
        hidden_features: For all, number of hidden features
        made_num_mixture_components: For MADEs only, number of mixture components
        made_num_blocks: For MADEs only, number of blocks
        flow_num_transforms: For flows only, number of transforms
        mean: For normalization
        std: For normalization

    Returns:
        Neural network
    """
    standardizing_transform = transforms.AffineTransform(shift=-mean / std,
                                                         scale=1 / std)

    features = dim_distribution
    context_features = dim_context

    if model == "made":
        transform = standardizing_transform
        distribution = distributions_.MADEMoG(
            features=features,
            hidden_features=hidden_features,
            context_features=context_features,
            num_blocks=made_num_blocks,
            num_mixture_components=made_num_mixture_components,
            use_residual_blocks=True,
            random_mask=False,
            activation=torch.relu,
            dropout_probability=0.0,
            use_batch_norm=False,
            custom_initialization=True,
        )
        neural_net = flows.Flow(transform, distribution, embedding)

    elif model == "maf":
        transform = transforms.CompositeTransform([
            transforms.CompositeTransform([
                transforms.MaskedAffineAutoregressiveTransform(
                    features=features,
                    hidden_features=hidden_features,
                    context_features=context_features,
                    num_blocks=2,
                    use_residual_blocks=False,
                    random_mask=False,
                    activation=torch.tanh,
                    dropout_probability=0.0,
                    use_batch_norm=True,
                ),
                transforms.RandomPermutation(features=features),
            ]) for _ in range(flow_num_transforms)
        ])
        transform = transforms.CompositeTransform(
            [standardizing_transform, transform])
        distribution = distributions_.StandardNormal((features, ))
        neural_net = flows.Flow(transform, distribution, embedding)

    elif model == "nsf":
        transform = transforms.CompositeTransform([
            transforms.CompositeTransform([
                transforms.PiecewiseRationalQuadraticCouplingTransform(
                    mask=create_alternating_binary_mask(features=features,
                                                        even=(i % 2 == 0)),
                    transform_net_create_fn=lambda in_features, out_features:
                    nets.ResidualNet(
                        in_features=in_features,
                        out_features=out_features,
                        hidden_features=hidden_features,
                        context_features=context_features,
                        num_blocks=2,
                        activation=torch.relu,
                        dropout_probability=0.0,
                        use_batch_norm=False,
                    ),
                    num_bins=10,
                    tails="linear",
                    tail_bound=3.0,
                    apply_unconditional_transform=False,
                ),
                transforms.LULinear(features, identity_init=True),
            ]) for i in range(flow_num_transforms)
        ])
        transform = transforms.CompositeTransform(
            [standardizing_transform, transform])
        distribution = distributions_.StandardNormal((features, ))
        neural_net = flows.Flow(transform, distribution, embedding)

    elif model == "nsf_bounded":
        transform = transforms.CompositeTransform([
            transforms.CompositeTransform([
                transforms.PiecewiseRationalQuadraticCouplingTransform(
                    mask=create_alternating_binary_mask(
                        features=dim_distribution, even=(i % 2 == 0)),
                    transform_net_create_fn=lambda in_features, out_features:
                    nets.ResidualNet(
                        in_features=in_features,
                        out_features=out_features,
                        hidden_features=hidden_features,
                        context_features=context_features,
                        num_blocks=2,
                        activation=F.relu,
                        dropout_probability=0.0,
                        use_batch_norm=False,
                    ),
                    num_bins=10,
                    tails="linear",
                    # A uniform distribution with sqrt(3) bounds has unit variance.
                    tail_bound=np.sqrt(3),
                    apply_unconditional_transform=False,
                ),
                transforms.RandomPermutation(features=dim_distribution),
            ]) for i in range(flow_num_transforms)
        ])
        transform = transforms.CompositeTransform(
            [standardizing_transform, transform])
        distribution = StandardUniform(shape=(dim_distribution, ))
        neural_net = flows.Flow(transform, distribution, embedding)

    else:
        raise ValueError(f"Unknown model: {model}")

    return neural_net
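
# Usage sketch (hypothetical dimensions; assumes `torch` and the nflows-style
# imports used by `get_flow` are in scope); builds a conditional MAF and
# scores toy data against toy contexts.
density_estimator = get_flow("maf", dim_distribution=2, dim_context=3)
x = torch.randn(8, 2)
theta = torch.randn(8, 3)
log_prob = density_estimator.log_prob(x, context=theta)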