Example 1
# Standard-library and PyPI imports used by this example; the Inference base
# class, ConditionalMaskedAutoregressiveFlow, train(), negative_log_likelihood,
# and score_mse come from the surrounding project.
import logging

import numpy as np
import torch
from torch import tensor


class SCANDALInference(Inference):
    """ SCANDAL inference: neural conditional density estimation with a masked
    autoregressive flow, trained on both the likelihood and the joint score. """

    def __init__(self, **params):
        super().__init__()

        filename = params.get('filename', None)

        # New MAF setup
        if filename is None:
            # Parameters for new MAF
            n_parameters = params['n_parameters']
            n_observables = params['n_observables']
            n_mades = params.get('n_mades', 2)
            n_made_hidden_layers = params.get('n_made_hidden_layers', 2)
            n_made_units_per_layer = params.get('n_made_units_per_layer', 20)
            activation = params.get('activation', 'relu')
            batch_norm = params.get('batch_norm', False)

            logging.info('Initialized NDE (MAF) with the following settings:')
            logging.info('  Parameters:    %s', n_parameters)
            logging.info('  Observables:   %s', n_observables)
            logging.info('  MADEs:         %s', n_mades)
            logging.info('  Hidden layers: %s', n_made_hidden_layers)
            logging.info('  Units:         %s', n_made_units_per_layer)
            logging.info('  Activation:    %s', activation)
            logging.info('  Batch norm:    %s', batch_norm)

            # MAF
            self.maf = ConditionalMaskedAutoregressiveFlow(
                n_conditionals=n_parameters,
                n_inputs=n_observables,
                n_hiddens=tuple([n_made_units_per_layer] * n_made_hidden_layers),
                n_mades=n_mades,
                activation=activation,
                batch_norm=batch_norm,
                input_order='sequential',
                mode='sequential',
                alpha=0.1
            )

        # Load trained model from file
        else:
            self.maf = torch.load(filename + '.pt')

            # Have everything on CPU (unless training)
            device = torch.device("cpu")
            self.maf = self.maf.to(device)

            logging.info('Loaded NDE (MAF) from file:')
            logging.info('  Filename:      %s', filename)
            logging.info('  Parameters:    %s', self.maf.n_conditionals)
            logging.info('  Observables:   %s', self.maf.n_inputs)
            logging.info('  MADEs:         %s', self.maf.n_mades)
            logging.info('  Hidden layers: %s', self.maf.n_hiddens)
            logging.info('  Activation:    %s', self.maf.activation)
            logging.info('  Batch norm:    %s', self.maf.batch_norm)

    def requires_class_label(self):
        return False

    def requires_joint_ratio(self):
        return False

    def requires_joint_score(self):
        return True

    def fit(self,
            theta=None,
            x=None,
            y=None,
            r_xz=None,
            t_xz=None,
            batch_size=64,
            initial_learning_rate=0.001,
            final_learning_rate=0.0001,
            n_epochs=50,
            early_stopping=True,
            alpha=0.01,
            learning_curve_folder=None,
            learning_curve_filename=None,
            **params):
        """ Trains MAF """

        logging.info('Training SCANDAL (MAF + score) with settings:')
        logging.info('  alpha:          %s', alpha)
        logging.info('  theta given:    %s', theta is not None)
        logging.info('  x given:        %s', x is not None)
        logging.info('  y given:        %s', y is not None)
        logging.info('  r_xz given:     %s', r_xz is not None)
        logging.info('  t_xz given:     %s', t_xz is not None)
        logging.info('  Samples:        %s', x.shape[0])
        logging.info('  Parameters:     %s', theta.shape[1])
        logging.info('  Observables:    %s', x.shape[1])
        logging.info('  Batch size:     %s', batch_size)
        logging.info('  Learning rate:  %s initially, decaying to %s', initial_learning_rate, final_learning_rate)
        logging.info('  Early stopping: %s', early_stopping)
        logging.info('  Epochs:         %s', n_epochs)

        assert theta is not None
        assert x is not None
        assert t_xz is not None

        train(
            model=self.maf,
            loss_functions=[negative_log_likelihood, score_mse],
            loss_weights=[1., alpha],
            loss_labels=['nll', 'score'],
            thetas=theta,
            xs=x,
            t_xzs=t_xz,
            batch_size=batch_size,
            initial_learning_rate=initial_learning_rate,
            final_learning_rate=final_learning_rate,
            n_epochs=n_epochs,
            early_stopping=early_stopping,
            learning_curve_folder=learning_curve_folder,
            learning_curve_filename=learning_curve_filename
        )

    def save(self, filename):
        torch.save(self.maf, filename + '.pt')

    def predict_density(self, theta, x, log=False):
        log_likelihood = self.maf.predict_log_likelihood(tensor(theta), tensor(x)).detach().numpy()

        if log:
            return log_likelihood
        return np.exp(log_likelihood)

    def predict_ratio(self, theta0, theta1, x, log=False):
        log_likelihood_theta0 = self.maf.predict_log_likelihood(tensor(theta0), tensor(x)).detach().numpy()
        log_likelihood_theta1 = self.maf.predict_log_likelihood(tensor(theta1), tensor(x)).detach().numpy()

        if log:
            return log_likelihood_theta0 - log_likelihood_theta1
        return np.exp(log_likelihood_theta0 - log_likelihood_theta1)

    def predict_score(self, theta, x):
        score = self.maf.predict_score(tensor(theta), tensor(x)).detach().numpy()

        return score

    def generate_samples(self, theta):
        # Convert theta to a tensor first, matching the other predict_* methods
        samples = self.maf.generate_samples(tensor(theta)).detach().numpy()
        return samples
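
Not part of the original listing: a minimal, hypothetical usage sketch for the class above. The constructor keywords and the fit()/predict_*() signatures are taken straight from the example; the data are random toy arrays, and the shape of the joint scores (one column per parameter) is an assumption.

# Hypothetical usage sketch for SCANDALInference, with random toy data.
import numpy as np

n_samples, n_parameters, n_observables = 1000, 2, 3

theta = np.random.randn(n_samples, n_parameters).astype(np.float32)
x = np.random.randn(n_samples, n_observables).astype(np.float32)
t_xz = np.random.randn(n_samples, n_parameters).astype(np.float32)  # joint scores (shape assumed)

inference = SCANDALInference(n_parameters=n_parameters, n_observables=n_observables)
inference.fit(theta=theta, x=x, t_xz=t_xz, n_epochs=10, alpha=0.01)

p_hat = inference.predict_density(theta, x)   # estimated densities
t_hat = inference.predict_score(theta, x)     # estimated scores

inference.save('scandal_model')                        # writes scandal_model.pt
reloaded = SCANDALInference(filename='scandal_model')  # loads it back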
Example 2
# Standard-library and PyPI imports used by this example; the
# CheckpointedInference base class, CheckpointScoreEstimator, the two flow
# classes, train_checkpointed_model(), the loss functions, and
# expand_array_2d() come from the surrounding project.
import logging

import numpy as np
import torch
from torch import tensor


class CheckpointedSCANDALInference(CheckpointedInference):
    """ Checkpointed SCANDAL inference: a conditional masked autoregressive flow
    over the observables, combined with a score estimator evaluated at
    intermediate checkpoints of the latent trajectory. """
    def __init__(self, **params):
        super().__init__()

        filename = params.get('filename', None)

        if filename is None:

            # Parameters
            n_parameters = params['n_parameters']
            n_observables = params['n_observables']
            n_latent = params['n_latent']

            n_components = params.get('n_components', 1)
            n_mades = params.get('n_mades', 2)
            n_made_hidden_layers = params.get('n_made_hidden_layers', 2)
            n_made_units_per_layer = params.get('n_made_units_per_layer', 20)
            activation = params.get('activation', 'relu')
            batch_norm = params.get('batch_norm', False)

            n_step_hidden_layers = params.get('n_step_hidden_layers', 2)
            n_step_units_per_layer = params.get('n_step_units_per_layer', 20)
            step_activation = params.get('step_activation', 'relu')

            logging.info(
                'Initialized checkpointed NDE (MAF) with the following settings:'
            )
            logging.info('  Parameters:        %s', n_parameters)
            logging.info('  Observables:       %s', n_observables)
            logging.info('  Latent vars:       %s', n_latent)
            logging.info('  Checkpoint score estimator:')
            logging.info('    Hidden layers:   %s', n_step_hidden_layers)
            logging.info('    Units:           %s', n_step_units_per_layer)
            logging.info('    Activation:      %s', step_activation)
            logging.info('  Global flow:')
            logging.info('    Base components: %s', n_components)
            logging.info('    MADEs:           %s', n_mades)
            logging.info('    Hidden layers:   %s', n_made_hidden_layers)
            logging.info('    Units:           %s', n_made_units_per_layer)
            logging.info('    Activation:      %s', activation)
            logging.info('    Batch norm:      %s', batch_norm)

            # Step model
            self.step_model = CheckpointScoreEstimator(
                n_parameters=n_parameters,
                n_latent=n_latent,
                n_hidden=tuple([n_step_units_per_layer] *
                               n_step_hidden_layers),
                activation=step_activation)

            # Global model
            if n_components is not None and n_components > 1:
                self.global_model = ConditionalMixtureMaskedAutoregressiveFlow(
                    n_components=n_components,
                    n_conditionals=n_parameters,
                    n_inputs=n_observables,
                    n_hiddens=tuple([n_made_units_per_layer] *
                                    n_made_hidden_layers),
                    n_mades=n_mades,
                    activation=activation,
                    batch_norm=batch_norm,
                    input_order='random',
                    mode='sequential',
                    alpha=0.1)
            else:
                self.global_model = ConditionalMaskedAutoregressiveFlow(
                    n_conditionals=n_parameters,
                    n_inputs=n_observables,
                    n_hiddens=tuple([n_made_units_per_layer] *
                                    n_made_hidden_layers),
                    n_mades=n_mades,
                    activation=activation,
                    batch_norm=batch_norm,
                    input_order='random',
                    mode='sequential',
                    alpha=0.1)

        else:
            self.step_model = torch.load(filename + '_step.pt',
                                         map_location='cpu')
            self.global_model = torch.load(filename + '_global.pt',
                                           map_location='cpu')

            logging.info('Loaded checkpointed SCANDAL from file:')
            logging.info('  Filename:        %s', filename)
            logging.info('  Checkpoint score estimator:')
            logging.info('    Hidden layers: %s', self.step_model.n_hidden)
            logging.info('    Activation:    %s', self.step_model.activation)
            logging.info('    Parameters:    %s', self.step_model.n_parameters)
            logging.info('    Latents:       %s', self.step_model.n_latent)
            logging.info('  Global flow:')
            logging.info('    Parameters:    %s',
                         self.global_model.n_conditionals)
            logging.info('    Observables:   %s', self.global_model.n_inputs)
            try:
                logging.info('    Components:    %s',
                             self.global_model.n_components)
            except AttributeError:
                logging.info('    Components:    1')
            logging.info('    MADEs:         %s', self.global_model.n_mades)
            logging.info('    Hidden layers: %s', self.global_model.n_hiddens)
            logging.info('    Activation:    %s', self.global_model.activation)
            logging.info('    Batch norm:    %s', self.global_model.batch_norm)

        # Have everything on CPU (unless training)
        self.device = torch.device("cpu")
        self.dtype = torch.float

    def requires_class_label(self):
        return False

    def requires_joint_ratio(self):
        return False

    def requires_joint_score(self):
        return True

    def fit(self,
            theta=None,
            x=None,
            y=None,
            r_xz=None,
            t_xz=None,
            theta1=None,
            z_checkpoints=None,
            r_xz_checkpoints=None,
            t_xz_checkpoints=None,
            batch_size=64,
            trainer='adam',
            initial_learning_rate=0.001,
            final_learning_rate=0.0001,
            n_epochs=50,
            validation_split=0.2,
            early_stopping=True,
            alpha=1.,
            beta=1.,
            gamma=1.,
            freeze_flow=False,
            freeze_score_model=False,
            learning_curve_folder=None,
            learning_curve_filename=None,
            **params):
        """ Trains checkpointed flow """

        logging.info('Training checkpointed SCANDAL with settings:')
        logging.info('  alpha:                  %s', alpha)
        logging.info('  beta:                   %s', beta)
        logging.info('  gamma:                  %s', gamma)
        logging.info('  theta given:            %s', theta is not None)
        logging.info('  theta1 given:           %s', theta1 is not None)
        logging.info('  x given:                %s', x is not None)
        logging.info('  y given:                %s', y is not None)
        logging.info('  r_xz given:             %s', r_xz is not None)
        logging.info('  t_xz given:             %s', t_xz is not None)
        logging.info('  z_checkpoints given:    %s', z_checkpoints is not None)
        logging.info('  r_xz_checkpoints given: %s', r_xz_checkpoints is not None)
        logging.info('  t_xz_checkpoints given: %s', t_xz_checkpoints is not None)
        logging.info('  Samples:                %s', x.shape[0])
        logging.info('  Parameters:             %s', theta.shape[1])
        logging.info('  Observables:            %s', x.shape[1])
        logging.info('  Checkpoints:            %s', z_checkpoints.shape[1])
        logging.info('  Latent variables:       %s', z_checkpoints.shape[2])
        logging.info('  Batch size:             %s', batch_size)
        logging.info('  Optimizer:              %s', trainer)
        logging.info('  Freeze score model:     %s', freeze_score_model)
        logging.info('  Freeze flow model:      %s', freeze_flow)
        logging.info('  Learning rate:          %s initially, decaying to %s',
                     initial_learning_rate, final_learning_rate)
        logging.info('  Valid. split:           %s', validation_split)
        logging.info('  Early stopping:         %s', early_stopping)
        logging.info('  Epochs:                 %s', n_epochs)

        train_checkpointed_model(
            model=self.global_model,
            score_model=self.step_model,
            loss_functions=[
                negative_log_likelihood, score_mse, score_checkpoint_mse
            ],
            loss_weights=[gamma, alpha, beta],
            loss_labels=['nll', 'score', 'checkpoint_score'],
            thetas=theta,
            xs=x,
            ys=None,
            r_xz_checkpoints=r_xz_checkpoints,
            t_xz_checkpoints=t_xz_checkpoints,
            z_checkpoints=z_checkpoints,
            batch_size=batch_size,
            trainer=trainer,
            freeze_model=freeze_flow,
            freeze_score_model=freeze_score_model,
            initial_learning_rate=initial_learning_rate,
            final_learning_rate=final_learning_rate,
            n_epochs=n_epochs,
            validation_split=validation_split,
            early_stopping=early_stopping,
            learning_curve_folder=learning_curve_folder,
            learning_curve_filename=learning_curve_filename)

    def save(self, filename):
        # Work around a PyTorch serialization bug, see https://github.com/pytorch/text/issues/350
        self.global_model.to()
        torch.save(self.global_model, filename + '_global.pt')
        self.global_model.to(self.device, self.dtype)

        self.step_model.to()
        torch.save(self.step_model, filename + '_step.pt')
        self.step_model.to(self.device, self.dtype)

    def predict_checkpoint_scores(self, theta, z_checkpoints):
        # If just one theta given, broadcast to number of samples
        theta = expand_array_2d(theta, z_checkpoints.shape[0])

        self.step_model = self.step_model.to(self.device, self.dtype)
        theta_tensor = tensor(theta).to(self.device, self.dtype)
        z_checkpoints = tensor(z_checkpoints).to(self.device, self.dtype)

        that_xv_checkpoints = self.step_model.forward_trajectory(
            theta_tensor, z_checkpoints)
        that_xv_checkpoints = that_xv_checkpoints.detach().numpy()

        return that_xv_checkpoints

    def predict_score_conditional_on_checkpoints(self, theta, z_checkpoints):
        that_xv_checkpoints = self.predict_checkpoint_scores(
            theta, z_checkpoints)
        that_xv = np.sum(that_xv_checkpoints, axis=1)

        return that_xv

    def predict_density(self, theta, x, log=False):
        # If just one theta given, broadcast to number of samples
        theta = expand_array_2d(theta, x.shape[0])

        self.global_model = self.global_model.to(self.device, self.dtype)
        theta_tensor = tensor(theta).to(self.device, self.dtype)
        x_tensor = tensor(x).to(self.device, self.dtype)

        _, log_likelihood = self.global_model.log_likelihood(
            theta_tensor, x_tensor)
        log_likelihood = log_likelihood.detach().numpy()

        if log:
            return log_likelihood
        return np.exp(log_likelihood)

    def predict_ratio(self, theta0, theta1, x, log=False):
        # If just one theta given, broadcast to number of samples
        theta0 = expand_array_2d(theta0, x.shape[0])
        theta1 = expand_array_2d(theta1, x.shape[0])

        self.global_model = self.global_model.to(self.device, self.dtype)
        theta0_tensor = tensor(theta0).to(self.device, self.dtype)
        theta1_tensor = tensor(theta1).to(self.device, self.dtype)
        x_tensor = tensor(x).to(self.device, self.dtype)

        _, log_likelihood_theta0 = self.global_model.log_likelihood(
            theta0_tensor, x_tensor)
        _, log_likelihood_theta1 = self.global_model.log_likelihood(
            theta1_tensor, x_tensor)

        log_likelihood_theta0 = log_likelihood_theta0.detach().numpy()
        log_likelihood_theta1 = log_likelihood_theta1.detach().numpy()

        if log:
            return log_likelihood_theta0 - log_likelihood_theta1
        return np.exp(log_likelihood_theta0 - log_likelihood_theta1)

    def predict_score(self, theta, x):
        # If just one theta given, broadcast to number of samples
        theta = expand_array_2d(theta, x.shape[0])

        self.global_model = self.global_model.to(self.device, self.dtype)
        theta_tensor = tensor(theta).to(self.device, self.dtype)
        x_tensor = tensor(x).to(self.device, self.dtype)

        _, _, score = self.global_model.log_likelihood_and_score(
            theta_tensor, x_tensor)

        score = score.detach().numpy()

        return score

    def generate_samples(self, theta):
        self.global_model = self.global_model.to(self.device, self.dtype)
        theta_tensor = tensor(theta).to(self.device, self.dtype)

        samples = self.global_model.generate_samples(
            theta_tensor).detach().numpy()
        return samples
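
Again a hypothetical sketch, not from the original source. The array shapes follow the logging calls in fit() above (z_checkpoints is indexed as samples x checkpoints x latent variables); the shape assumed for t_xz_checkpoints is a guess, and r_xz_checkpoints is left at its default.

# Hypothetical usage sketch for CheckpointedSCANDALInference, with random toy data.
import numpy as np

n_samples, n_parameters, n_observables = 1000, 2, 3
n_checkpoints, n_latent = 5, 4

theta = np.random.randn(n_samples, n_parameters).astype(np.float32)
x = np.random.randn(n_samples, n_observables).astype(np.float32)
z_checkpoints = np.random.randn(n_samples, n_checkpoints, n_latent).astype(np.float32)
t_xz_checkpoints = np.random.randn(n_samples, n_checkpoints, n_parameters).astype(np.float32)  # shape assumed

inference = CheckpointedSCANDALInference(
    n_parameters=n_parameters,
    n_observables=n_observables,
    n_latent=n_latent,
)
inference.fit(
    theta=theta,
    x=x,
    z_checkpoints=z_checkpoints,
    t_xz_checkpoints=t_xz_checkpoints,
    n_epochs=10,
)

# Per-checkpoint score estimates and their sum over checkpoints
t_hat_checkpoints = inference.predict_checkpoint_scores(theta, z_checkpoints)
t_hat = inference.predict_score_conditional_on_checkpoints(theta, z_checkpoints)

inference.save('checkpointed_scandal')  # writes ..._global.pt and ..._step.pt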
Example 3
# Standard-library and PyPI imports used by this example; the Inference base
# class, the two flow classes, train_model(), negative_log_likelihood, and
# expand_array_2d() come from the surrounding project.
import logging

import numpy as np
import torch
from torch import tensor


class MAFInference(Inference):
    """ Neural conditional density estimation with masked autoregressive flows. """
    def __init__(self, **params):
        super().__init__()

        filename = params.get('filename', None)

        if filename is None:
            # Parameters for new MAF
            n_parameters = params['n_parameters']
            n_observables = params['n_observables']
            n_components = params.get('n_components', 1)
            n_mades = params.get('n_mades', 2)
            n_made_hidden_layers = params.get('n_made_hidden_layers', 2)
            n_made_units_per_layer = params.get('n_made_units_per_layer', 20)
            activation = params.get('activation', 'relu')
            batch_norm = params.get('batch_norm', False)

            logging.info('Initialized NDE (MAF) with the following settings:')
            logging.info('  Parameters:      %s', n_parameters)
            logging.info('  Observables:     %s', n_observables)
            logging.info('  Base components: %s', n_components)
            logging.info('  MADEs:           %s', n_mades)
            logging.info('  Hidden layers:   %s', n_made_hidden_layers)
            logging.info('  Units:           %s', n_made_units_per_layer)
            logging.info('  Activation:      %s', activation)
            logging.info('  Batch norm:      %s', batch_norm)

            # MAF
            if n_components is not None and n_components > 1:
                self.maf = ConditionalMixtureMaskedAutoregressiveFlow(
                    n_components=n_components,
                    n_conditionals=n_parameters,
                    n_inputs=n_observables,
                    n_hiddens=tuple([n_made_units_per_layer] *
                                    n_made_hidden_layers),
                    n_mades=n_mades,
                    activation=activation,
                    batch_norm=batch_norm,
                    input_order='random',
                    mode='sequential',
                    alpha=0.1)
            else:
                self.maf = ConditionalMaskedAutoregressiveFlow(
                    n_conditionals=n_parameters,
                    n_inputs=n_observables,
                    n_hiddens=tuple([n_made_units_per_layer] *
                                    n_made_hidden_layers),
                    n_mades=n_mades,
                    activation=activation,
                    batch_norm=batch_norm,
                    input_order='random',
                    mode='sequential',
                    alpha=0.1)

        else:
            self.maf = torch.load(filename + '.pt', map_location='cpu')

            logging.info('Loaded NDE (MAF) from file:')
            logging.info('  Filename:      %s', filename)
            logging.info('  Parameters:    %s', self.maf.n_conditionals)
            logging.info('  Observables:   %s', self.maf.n_inputs)
            try:
                logging.info('  Components:    %s', self.maf.n_components)
            except AttributeError:
                logging.info('  Components:    1')
            logging.info('  MADEs:         %s', self.maf.n_mades)
            logging.info('  Hidden layers: %s', self.maf.n_hiddens)
            logging.info('  Activation:    %s', self.maf.activation)
            logging.info('  Batch norm:    %s', self.maf.batch_norm)

        # Have everything on CPU (unless training)
        self.device = torch.device("cpu")
        self.dtype = torch.float

    def requires_class_label(self):
        return False

    def requires_joint_ratio(self):
        return False

    def requires_joint_score(self):
        return False

    def fit(self,
            theta=None,
            x=None,
            y=None,
            r_xz=None,
            t_xz=None,
            theta1=None,
            batch_size=64,
            trainer='adam',
            initial_learning_rate=0.001,
            final_learning_rate=0.0001,
            n_epochs=50,
            validation_split=0.2,
            early_stopping=True,
            alpha=None,
            learning_curve_folder=None,
            learning_curve_filename=None,
            **params):
        """ Trains MAF """

        logging.info('Training NDE (MAF) with settings:')
        logging.info('  theta given:    %s', theta is not None)
        logging.info('  theta1 given:   %s', theta1 is not None)
        logging.info('  x given:        %s', x is not None)
        logging.info('  y given:        %s', y is not None)
        logging.info('  r_xz given:     %s', r_xz is not None)
        logging.info('  t_xz given:     %s', t_xz is not None)
        logging.info('  Samples:        %s', x.shape[0])
        logging.info('  Parameters:     %s', theta.shape[1])
        logging.info('  Observables:    %s', x.shape[1])
        logging.info('  Batch size:     %s', batch_size)
        logging.info('  Optimizer:      %s', trainer)
        logging.info('  Learning rate:  %s initially, decaying to %s',
                     initial_learning_rate, final_learning_rate)
        logging.info('  Valid. split:   %s', validation_split)
        logging.info('  Early stopping: %s', early_stopping)
        logging.info('  Epochs:         %s', n_epochs)

        train_model(model=self.maf,
                    loss_functions=[negative_log_likelihood],
                    thetas=theta,
                    xs=x,
                    ys=None,
                    batch_size=batch_size,
                    trainer=trainer,
                    initial_learning_rate=initial_learning_rate,
                    final_learning_rate=final_learning_rate,
                    n_epochs=n_epochs,
                    validation_split=validation_split,
                    early_stopping=early_stopping,
                    learning_curve_folder=learning_curve_folder,
                    learning_curve_filename=learning_curve_filename)

    def save(self, filename):
        # Work around a PyTorch serialization bug, see https://github.com/pytorch/text/issues/350
        self.maf.to()
        torch.save(self.maf, filename + '.pt')
        self.maf.to(self.device, self.dtype)

    def predict_density(self, theta, x, log=False):
        # If just one theta given, broadcast to number of samples
        theta = expand_array_2d(theta, x.shape[0])

        self.maf = self.maf.to(self.device, self.dtype)
        theta_tensor = tensor(theta).to(self.device, self.dtype)
        x_tensor = tensor(x).to(self.device, self.dtype)

        _, log_likelihood = self.maf.log_likelihood(theta_tensor, x_tensor)
        log_likelihood = log_likelihood.detach().numpy()

        if log:
            return log_likelihood
        return np.exp(log_likelihood)

    def predict_ratio(self, theta0, theta1, x, log=False):
        # If just one theta given, broadcast to number of samples
        theta0 = expand_array_2d(theta0, x.shape[0])
        theta1 = expand_array_2d(theta1, x.shape[0])

        self.maf = self.maf.to(self.device, self.dtype)
        theta0_tensor = tensor(theta0).to(self.device, self.dtype)
        theta1_tensor = tensor(theta1).to(self.device, self.dtype)
        x_tensor = tensor(x).to(self.device, self.dtype)

        _, log_likelihood_theta0 = self.maf.log_likelihood(
            theta0_tensor, x_tensor)
        _, log_likelihood_theta1 = self.maf.log_likelihood(
            theta1_tensor, x_tensor)

        log_likelihood_theta0 = log_likelihood_theta0.detach().numpy()
        log_likelihood_theta1 = log_likelihood_theta1.detach().numpy()

        if log:
            return log_likelihood_theta0 - log_likelihood_theta1
        return np.exp(log_likelihood_theta0 - log_likelihood_theta1)

    def predict_score(self, theta, x):
        # If just one theta given, broadcast to number of samples
        theta = expand_array_2d(theta, x.shape[0])

        self.maf = self.maf.to(self.device, self.dtype)
        theta_tensor = tensor(theta).to(self.device, self.dtype)
        x_tensor = tensor(x).to(self.device, self.dtype)

        _, _, score = self.maf.log_likelihood_and_score(theta_tensor, x_tensor)

        score = score.detach().numpy()

        return score

    def generate_samples(self, theta):
        self.maf = self.maf.to(self.device, self.dtype)
        theta_tensor = tensor(theta).to(self.device, self.dtype)

        samples = self.maf.generate_samples(theta_tensor).detach().numpy()
        return samples
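
A final hypothetical sketch for MAFInference, with random toy data. Only theta and x are needed for training (requires_joint_score() returns False); the single-row theta0 and theta1 rely on the broadcasting of expand_array_2d described in the comments above.

# Hypothetical usage sketch for MAFInference, with random toy data.
import numpy as np

n_samples, n_parameters, n_observables = 1000, 2, 3

theta = np.random.randn(n_samples, n_parameters).astype(np.float32)
x = np.random.randn(n_samples, n_observables).astype(np.float32)

inference = MAFInference(
    n_parameters=n_parameters,
    n_observables=n_observables,
    n_components=1,  # > 1 selects the mixture flow instead
)
inference.fit(theta=theta, x=x, n_epochs=10)

# Log likelihood ratio between two parameter points, broadcast over all samples
theta0 = np.zeros((1, n_parameters), dtype=np.float32)
theta1 = np.ones((1, n_parameters), dtype=np.float32)
log_r = inference.predict_ratio(theta0, theta1, x, log=True)

samples = inference.generate_samples(theta)  # one sample per row of theta

inference.save('maf_model')                    # writes maf_model.pt
reloaded = MAFInference(filename='maf_model')  # loads it back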