Example #1
 def __init__(self, shape_xs, shape_ys,
     activation=hypothesis.default.activation,
     dropout=0.0,
     layers=(128, 128),
     transform_output="normalize"):
     super(MultiLayerPerceptron, self).__init__()
     mappings = []
     dropout = float(dropout)
     # Dimensionality properties
     self.xs_dimensionality = compute_dimensionality(shape_xs)
     self.ys_dimensionality = compute_dimensionality(shape_ys)
     # Allocate input mapping
     mappings.append(torch.nn.Linear(self.xs_dimensionality, layers[0]))
     # Allocate internal network structure
     for index in range(1, len(layers)):
         mappings.append(self._make_layer(activation, dropout,
             layers[index - 1], layers[index]))
     # Allocate tail
     mappings.append(activation(inplace=True))
     mappings.append(torch.nn.Linear(layers[-1], self.ys_dimensionality))
     operation = allocate_output_transform(transform_output, self.ys_dimensionality)
     if operation is not None:
         mappings.append(operation)
     # Allocate sequential mapping
     self.mapping = torch.nn.Sequential(*mappings)
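
Every fragment in this listing leans on compute_dimensionality to flatten a shape tuple into a single integer. The helper itself is never shown; a minimal sketch of the assumed behavior:

import functools
import operator

def compute_dimensionality(shape):
    # Assumed behavior: product of all axis sizes,
    # e.g. (1, 28, 28) -> 784 and (10,) -> 10.
    return functools.reduce(operator.mul, shape, 1)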
Example #2
 def __init__(
         self,
         shape_inputs,
         shape_outputs,
         depth=18,  # Default ResNet depth
         activation=hypothesis.default.activation,
         channels=3,
         batchnorm=True,
         convolution_bias=False,
         dilate=False,
         in_planes=64,
         trunk=hypothesis.default.trunk,
         trunk_dropout=hypothesis.default.dropout):
     # Update dimensionality data
     self.shape_inputs = shape_inputs
     self.dimensionality_inputs = compute_dimensionality(shape_inputs)
     self.shape_outputs = shape_outputs
     self.dimensionality_outputs = compute_dimensionality(shape_outputs)
     ResNet.__init__(self,
                     depth=depth,
                     shape_xs=shape_outputs,
                     shape_ys=(1, ),
                     activation=activation,
                     batchnorm=batchnorm,
                     channels=channels,
                     convolution_bias=convolution_bias,
                     dilate=dilate,
                     in_planes=in_planes,
                     trunk=trunk,
                     trunk_dropout=trunk_dropout,
                     ys_transform=None)
     ConditionalRatioEstimator.__init__(self)
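
This constructor cooperates with two base classes: the ResNet base consumes the observations (shape_xs=shape_outputs) and maps them to a single scalar (shape_ys=(1,)), while ConditionalRatioEstimator contributes the ratio-estimation interface. A hypothetical instantiation, assuming the surrounding class is declared as class ResNetRatioEstimator(ResNet, ConditionalRatioEstimator); the name and shapes below are illustrative only:

estimator = ResNetRatioEstimator(
    shape_inputs=(2,),          # conditioning variable, e.g. a parameter vector
    shape_outputs=(1, 64, 64),  # convolved variable, e.g. a single-channel image
    depth=18)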
Example #3
 def __init__(self,
     shape_inputs,
     shape_outputs,
     activation=hypothesis.default.activation,
     batchnorm=True,
     channels=3,
     dense_dropout=hypothesis.default.dropout,
     depth=121, # Default DenseNet configuration
     trunk=hypothesis.default.trunk,
     trunk_dropout=hypothesis.default.dropout):
     self.shape_inputs = shape_inputs
     self.dimensionality_inputs = compute_dimensionality(shape_inputs)
     self.shape_outputs = shape_outputs
     self.dimensionality_outputs = compute_dimensionality(shape_outputs)
     # Initialize the DenseNet base on the observations; the trunk
     # maps the embedding to a single scalar output (shape_ys=(1,)).
     DenseNet.__init__(
         self,
         activation=activation,
         batchnorm=batchnorm,
         channels=channels,
         dense_dropout=dense_dropout,
         depth=depth,
         shape_xs=shape_outputs,
         shape_ys=(1,),
         trunk=trunk,
         trunk_dropout=trunk_dropout,
         ys_transform=None)
     ConditionalRatioEstimator.__init__(self)
Example #4
 def __init__(self,
              shape_inputs,
              shape_outputs,
              activation=hypothesis.default.activation,
              dropout=hypothesis.default.dropout,
              layers=hypothesis.default.trunk):
     super(MutualInformationRatioEstimatorMLP, self).__init__()
     dimensionality = (compute_dimensionality(shape_inputs) +
                       compute_dimensionality(shape_outputs))
     self.mlp = MultiLayeredPerceptron(shape_xs=(dimensionality, ),
                                       shape_ys=(1, ),
                                       activation=activation,
                                       dropout=dropout,
                                       layers=layers,
                                       transform_output=None)
Example #5
 def __init__(self,
              shape_inputs,
              shape_outputs,
              activation=hypothesis.default.activation,
              dropout=0.0,
              layers=(128, 128)):
     super(ConditionalMLPRatioEstimator, self).__init__()
     self.dimensionality_inputs = compute_dimensionality(shape_inputs)
     self.dimensionality_outputs = compute_dimensionality(shape_outputs)
     shared_dimensionality = self.dimensionality_inputs + self.dimensionality_outputs
     self.mlp = MLP(shape_xs=(shared_dimensionality, ),
                    shape_ys=(1, ),
                    activation=activation,
                    dropout=dropout,
                    layers=layers,
                    transform_output=None)
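
Both constructors only wire up a shared MLP; the forward pass is omitted from the fragments. Judging from the log_ratio in Example #6, the implied method flattens both variables and scores their concatenation through the MLP. A minimal sketch for Example #5's class, with the method and argument names assumed:

    def log_ratio(self, inputs, outputs):
        # Flatten each variable to (batch, features) and concatenate
        # along the feature axis before the shared MLP scores the pair.
        inputs = inputs.view(-1, self.dimensionality_inputs)
        outputs = outputs.view(-1, self.dimensionality_outputs)
        return self.mlp(torch.cat((inputs, outputs), dim=1))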
Example #6
def build_ratio_estimator(random_variables):
    # Flatten the shapes of the random variables
    for k in random_variables.keys():
        shape = random_variables[k]
        flattened_shape = (compute_dimensionality(shape), )
        random_variables[k] = flattened_shape

    class RatioEstimator(BaseRatioEstimator):
        def __init__(self,
                     activation=hypothesis.default.activation,
                     dropout=hypothesis.default.dropout,
                     layers=hypothesis.default.trunk):
            super(RatioEstimator, self).__init__()
            shape_xs = (sum([
                compute_dimensionality(shape)
                for shape in random_variables.values()
            ]), )
            self.mlp = MLP(activation=activation,
                           dropout=dropout,
                           layers=layers,
                           shape_xs=shape_xs,
                           shape_ys=(1, ),
                           transform_output=None)

        def log_ratio(self, **kwargs):
            tensors = [
                kwargs[k].view(-1, random_variables[k][0])
                for k in random_variables
            ]
            z = torch.cat(tensors, dim=1)
            log_ratios = self.mlp(z)

            return log_ratios

    return RatioEstimator
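
A hypothetical call, to make the closure structure concrete: the builder returns a class, not an instance, specialized to the given variables; it is then instantiated like a regular torch.nn.Module. The shapes and tensor names below are illustrative.

RatioEstimator = build_ratio_estimator({
    "inputs": (2,),          # e.g. a parameter vector
    "outputs": (1, 28, 28),  # e.g. an image, flattened to (784,) by the builder
})
estimator = RatioEstimator()
# thetas and xs are batched tensors matching the shapes above.
log_ratios = estimator.log_ratio(inputs=thetas, outputs=xs)

Note that the builder flattens the entries of random_variables in place, so the dictionary should not be reused afterwards.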
Example #7
 def __init__(self,
              activation=hypothesis.default.activation,
              dropout=hypothesis.default.dropout,
              layers=hypothesis.default.trunk):
     super(RatioEstimator, self).__init__()
     # `random_variables` is captured from the enclosing builder's
     # scope (see Example #6).
     shape_xs = (sum([
         compute_dimensionality(shape)
         for shape in random_variables.values()
     ]), )
     self.mlp = MLP(activation=activation,
                    dropout=dropout,
                    layers=layers,
                    shape_xs=shape_xs,
                    shape_ys=(1, ),
                    transform_output=None)
Example #8
 def __init__(self,
              activation=hypothesis.default.activation,
              batchnorm=default_batchnorm,
              channels=default_channels,
              convolution_bias=default_convolution_bias,
              depth=depth,
              dilate=default_dilate,
              groups=default_groups,
              in_planes=default_in_planes,
              trunk_activation=None,
              trunk_dropout=hypothesis.default.dropout,
              trunk_layers=hypothesis.default.trunk,
              width_per_group=default_width_per_group):
     super(RatioEstimator, self).__init__()
     # Construct the convolutional ResNet head. The names `depth`,
     # `random_variables`, `convolve_variable`, and `trunk_random_variables`
     # are captured from the enclosing builder's scope (see Example #12).
     self.head = ResNetHead(
         activation=activation,
         batchnorm=batchnorm,
         channels=channels,
         convolution_bias=convolution_bias,
         depth=depth,
         dilate=dilate,
         groups=groups,
         in_planes=in_planes,
         shape_xs=random_variables[convolve_variable],
         width_per_group=width_per_group)
     # Check if custom trunk settings have been defined.
     if trunk_activation is None:
         trunk_activation = activation
     # Construct the trunk of the network.
     self.embedding_dimensionality = self.head.embedding_dimensionality()
     dimensionality = self.embedding_dimensionality + sum([
         compute_dimensionality(random_variables[k])
         for k in trunk_random_variables
     ])
     self.trunk = MLP(shape_xs=(dimensionality, ),
                      shape_ys=(1, ),
                      activation=trunk_activation,
                      dropout=trunk_dropout,
                      layers=trunk_layers,
                      transform_output=None)
Example #9
    def _build_trunk(self, trunk, dropout, transform_output):
        mappings = []

        # Build trunk
        mappings.append(torch.nn.Linear(self.embedding_dim, trunk[0]))
        for index in range(1, len(trunk)):
            mappings.append(self.module_activation(inplace=True))
            if dropout > 0:
                mappings.append(torch.nn.Dropout(p=dropout))
            mappings.append(torch.nn.Linear(trunk[index - 1], trunk[index]))
        # Compute output dimensionality
        output_shape = compute_dimensionality(self.shape_ys)
        # Add final fully connected mapping
        mappings.append(torch.nn.Linear(trunk[-1], output_shape))
        # Add output normalization
        output_mapping = allocate_output_transform(transform_output, output_shape)
        if output_mapping is not None:
            mappings.append(output_mapping)

        return torch.nn.Sequential(*mappings)
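
Examples #1 and #9 both delegate the optional output layer to allocate_output_transform, which is not shown. A minimal sketch of the assumed contract, mapping a transform name (or None) to an optional torch module; the "normalize" branch is a guess based on the name alone:

def allocate_output_transform(transform_output, dimensionality):
    # Assumed contract: None disables the output transform entirely.
    if transform_output is None:
        return None
    if transform_output == "normalize":
        # Guess: softmax over multi-dimensional outputs,
        # sigmoid for a scalar output.
        if dimensionality > 1:
            return torch.nn.Softmax(dim=1)
        return torch.nn.Sigmoid()
    raise ValueError("Unknown output transform: " + str(transform_output))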
Example #10
 def __init__(self,
     shape_inputs,
     shape_outputs,
     activation=hypothesis.default.activation,
     batchnorm=default_batchnorm,
     channels=default_channels,
     convolution_bias=default_convolution_bias,
     depth=default_depth,
     dilate=default_dilate,
     groups=default_groups,
     in_planes=default_in_planes,
     width_per_group=default_width_per_group,
     trunk_activation=None,
     trunk_dropout=hypothesis.default.dropout,
     trunk_layers=hypothesis.default.trunk):
     super(LikelihoodToEvidenceRatioEstimatorResNet, self).__init__()
     # Construct the convolutional ResNet head.
     self.head = ResNetHead(
         activation=activation,
         batchnorm=batchnorm,
         channels=channels,
         convolution_bias=convolution_bias,
         depth=depth,
         dilate=dilate,
         groups=groups,
         in_planes=in_planes,
         shape_xs=shape_outputs,
         width_per_group=width_per_group)
     # Check if custom trunk settings have been defined.
     if trunk_activation is None:
         trunk_activation = activation
     # Construct the trunk of the network.
     dimensionality = self.head.embedding_dimensionality() + compute_dimensionality(shape_inputs)
     self.trunk = MLP(
         shape_xs=(dimensionality,),
         shape_ys=(1,),
         activation=trunk_activation,
         dropout=trunk_dropout,
         layers=trunk_layers,
         transform_output=None)
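
As in Examples #4 and #5, the forward pass is omitted here. Mirroring the log_ratio of Example #12, the head embeds the convolved variable and the trunk scores the embedding concatenated with the flattened conditioning variable; a sketch under those assumptions:

    def log_ratio(self, inputs, outputs):
        # Embed the observations with the convolutional ResNet head.
        z = self.head(outputs).view(outputs.shape[0], -1)
        # Flatten the conditioning variable and score the concatenation.
        inputs = inputs.view(inputs.shape[0], -1)
        return self.trunk(torch.cat((inputs, z), dim=1))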
Example #11
 def __init__(
         self,
         shape_inputs,
         shape_outputs,
         activation=hypothesis.default.activation,
         batchnorm=hypothesis.nn.densenet.default.batchnorm,
         bottleneck_factor=hypothesis.nn.densenet.default.bottleneck_factor,
         channels=hypothesis.nn.densenet.default.channels,
         convolution_bias=hypothesis.nn.densenet.default.convolution_bias,
         depth=hypothesis.nn.densenet.default.depth,
         dropout=hypothesis.default.dropout,
         trunk_activation=None,
         trunk_dropout=None,
         trunk_layers=hypothesis.default.trunk):
     super(LikelihoodToEvidenceRatioEstimatorDenseNet, self).__init__()
     # Construct the convolutional DenseNet head.
     self.head = DenseNetHead(activation=activation,
                              batchnorm=batchnorm,
                              bottleneck_factor=bottleneck_factor,
                              channels=channels,
                              convolution_bias=convolution_bias,
                              depth=depth,
                              dropout=dropout,
                              shape_xs=shape_outputs)
     # Compute the embedding dimensionality of the head.
     embedding_dim = self.head.embedding_dimensionality()
     # Check if custom trunk settings have been defined.
     if trunk_activation is None:
         trunk_activation = activation
     if trunk_dropout is None:
         trunk_dropout = dropout
     # Allocate the trunk.
     latent_dimensionality = compute_dimensionality(shape_inputs) + embedding_dim
     self.trunk = MultiLayeredPerceptron(shape_xs=(latent_dimensionality, ),
                                         shape_ys=(1, ),
                                         activation=trunk_activation,
                                         dropout=trunk_dropout,
                                         layers=trunk_layers,
                                         transform_output=None)
Example #12
def build_ratio_estimator(random_variables, **kwargs):
    depth = kwargs.get("depth", default_depth)
    convolve_variable = kwargs.get("convolve", "outputs")
    trunk_variables = set(random_variables.keys()) - set([convolve_variable])
    trunk_random_variables = {}
    for k in trunk_variables:
        trunk_random_variables[k] = (
            -1, compute_dimensionality(random_variables[k]))
    if convolve_variable not in random_variables.keys():
        raise ValueError(
            "No convolution random variable specified (default: outputs)!")

    class RatioEstimator(BaseRatioEstimator):
        def __init__(self,
                     activation=hypothesis.default.activation,
                     batchnorm=default_batchnorm,
                     channels=default_channels,
                     convolution_bias=default_convolution_bias,
                     depth=depth,
                     dilate=default_dilate,
                     groups=default_groups,
                     in_planes=default_in_planes,
                     trunk_activation=None,
                     trunk_dropout=hypothesis.default.dropout,
                     trunk_layers=hypothesis.default.trunk,
                     width_per_group=default_width_per_group):
            super(RatioEstimator, self).__init__()
            # Construct the convolutional ResNet head.
            self.head = ResNetHead(
                activation=activation,
                batchnorm=batchnorm,
                channels=channels,
                convolution_bias=convolution_bias,
                depth=depth,
                dilate=dilate,
                groups=groups,
                in_planes=in_planes,
                shape_xs=random_variables[convolve_variable],
                width_per_group=width_per_group)
            # Check if custom trunk settings have been defined.
            if trunk_activation is None:
                trunk_activation = activation
            # Construct the trunk of the network.
            self.embedding_dimensionality = self.head.embedding_dimensionality()
            dimensionality = self.embedding_dimensionality + sum([
                compute_dimensionality(random_variables[k])
                for k in trunk_random_variables
            ])
            self.trunk = MLP(shape_xs=(dimensionality, ),
                             shape_ys=(1, ),
                             activation=trunk_activation,
                             dropout=trunk_dropout,
                             layers=trunk_layers,
                             transform_output=None)

        def log_ratio(self, **kwargs):
            z_head = self.head(kwargs[convolve_variable]).view(
                -1, self.embedding_dimensionality)
            tensors = [
                kwargs[k].view(v) for k, v in trunk_random_variables.items()
            ]
            tensors.append(z_head)
            features = torch.cat(tensors, dim=1)
            log_ratios = self.trunk(features)

            return log_ratios

    return RatioEstimator
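
Usage mirrors the sketch after Example #6, except that the convolve keyword selects which variable is routed through the ResNet head (here the default, "outputs"). Shapes and tensor names are again illustrative:

RatioEstimator = build_ratio_estimator(
    {"inputs": (2,), "outputs": (1, 64, 64)},
    convolve="outputs",
    depth=18)
estimator = RatioEstimator()
log_ratios = estimator.log_ratio(inputs=thetas, outputs=xs)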