Exemplo n.º 1
0
def test_getters(laue_inputs, mono_inputs):
    """Smoke-test every BaseModel accessor on both laue and mono inputs."""
    for inputs in (laue_inputs, mono_inputs):
        # Wavelength and harmonic id only exist for Laue (polychromatic) data.
        if BaseModel.is_laue(inputs):
            BaseModel.get_harmonic_id(inputs)
            BaseModel.get_wavelength(inputs)
        # Accessors shared by both experiment types.
        for getter in (
            BaseModel.get_image_id,
            BaseModel.get_intensities,
            BaseModel.get_metadata,
            BaseModel.get_refl_id,
            BaseModel.get_uncertainties,
        ):
            getter(inputs)
Exemplo n.º 2
0
    def split_data_by_image(self, test_fraction=0.5):
        """
        Randomly split the data into train and test sets by image.

        Whole images are designated as belonging to either the train or
        the test set, so no image contributes reflections to both.

        Parameters
        ----------
        test_fraction : float (optional)
            The fraction of images which will be reserved for testing.

        Returns
        -------
        train : tuple
        test  : tuple
        """
        image_id = BaseModel.get_image_id(self.inputs)
        # One random draw per image; an image goes to the test set when its
        # draw falls at or below test_fraction.
        test_idx = np.random.random(image_id.max() + 1) <= test_fraction

        # Low image count edge case (mostly just for testing purposes):
        # force at least one image into each of the two sets.
        if True not in test_idx:
            test_idx[0] = True
        elif False not in test_idx:
            test_idx[0] = False

        # Broadcast the per-image flags to a per-reflection boolean mask.
        test_idx = test_idx[image_id]
        if BaseModel.is_laue(self.inputs):
            train, test = self.split_laue_data_by_mask(test_idx)
        else:
            train, test = self.split_mono_data_by_mask(test_idx)

        return train, test
Exemplo n.º 3
0
def test_laue(likelihood_model, prior_model, scaling_model, laue_inputs,
              mc_samples):
    """End-to-end smoke test of the variational merging model on Laue data.

    Builds a likelihood, prior, scaler, and surrogate posterior from the
    parametrized fixtures, runs one forward pass, and checks the predicted
    intensities are finite. Also checks the model compiles with Adam.
    """
    nrefls = np.max(BaseModel.get_refl_id(laue_inputs)) + 1
    n_images = np.max(BaseModel.get_image_id(laue_inputs)) + 1

    # Degrees of freedom for the Student's t variants.
    dof = 4.
    if likelihood_model == StudentTLikelihood:
        likelihood = likelihood_model(dof)
    else:
        likelihood = likelihood_model()

    if prior_model == WilsonPrior:
        prior = prior_model(
            np.random.choice([True, False], nrefls),
            np.ones(nrefls).astype('float32'),
        )
    elif prior_model == StudentTReferencePrior:
        prior = prior_model(
            np.ones(nrefls).astype('float32'),
            np.ones(nrefls).astype('float32'),
            dof,
        )
    else:
        prior = prior_model(
            np.ones(nrefls).astype('float32'),
            np.ones(nrefls).astype('float32'),
        )

    mlp_scaler = MLPScaler(2, 3)
    if scaling_model == HybridImageScaler:
        scaler = HybridImageScaler(mlp_scaler, ImageScaler(n_images))
    elif scaling_model == MLPScaler:
        scaler = mlp_scaler

    loc = tf.Variable(prior.mean())
    scale = tfp.util.TransformedVariable(
        prior.stddev() / 10.,
        tfb.Softplus(),
    )
    surrogate_posterior = tfd.TruncatedNormal(loc, scale, low=1e-5, high=1e10)

    merger = VariationalMergingModel(surrogate_posterior, prior, likelihood,
                                     scaler, mc_samples)
    ipred = merger(laue_inputs)

    assert np.all(np.isfinite(ipred.numpy()))

    merger = VariationalMergingModel(surrogate_posterior, prior, likelihood,
                                     scaler)
    merger.compile('Adam')
Exemplo n.º 4
0
    def build_model(self,
                    parser=None,
                    surrogate_posterior=None,
                    prior=None,
                    likelihood=None,
                    scaling_model=None,
                    mc_sample_size=None):
        """
        Build the model specified in parser, a careless.parser.parser.parse_args() result.

        Optionally override any of the parameters taken by the
        VariationalMergingModel constructor (surrogate_posterior, prior,
        likelihood, scaling_model, mc_sample_size).
        The `parser` parameter is required if self.parser is not set.

        Raises
        ------
        ValueError
            If no parser is available, or if parser.type is not one of
            'poly' or 'mono'.
        """
        from careless.models.merging.surrogate_posteriors import TruncatedNormal
        from careless.models.merging.variational import VariationalMergingModel
        from careless.models.scaling.image import HybridImageScaler, ImageScaler
        from careless.models.scaling.nn import MLPScaler
        if parser is None:
            parser = self.parser
        if parser is None:
            raise ValueError("No parser supplied, but self.parser is unset")

        # Select the likelihood implementations matching the experiment type.
        if parser.type == 'poly':
            from careless.models.likelihoods.laue import NormalLikelihood, StudentTLikelihood
        elif parser.type == 'mono':
            from careless.models.likelihoods.mono import NormalLikelihood, StudentTLikelihood
        else:
            # Previously an unknown type fell through and surfaced later as a
            # confusing NameError on NormalLikelihood; fail fast instead.
            raise ValueError(f"Unrecognized parser.type: {parser.type!r}")

        if prior is None:
            prior = self.get_wilson_prior(parser.wilson_prior_b)
        loc, scale = prior.mean(), prior.stddev() / 10.
        # Centric reflections get a tiny positive lower bound.
        low = (1e-32 * self.asu_collection.centric).astype('float32')
        if surrogate_posterior is None:
            surrogate_posterior = TruncatedNormal.from_loc_and_scale(
                loc, scale, low)

        if likelihood is None:
            dof = parser.studentt_likelihood_dof
            if dof is None:
                likelihood = NormalLikelihood()
            else:
                likelihood = StudentTLikelihood(dof)

        if scaling_model is None:
            mlp_width = parser.mlp_width
            if mlp_width is None:
                # Default the MLP width to the metadata dimensionality.
                mlp_width = BaseModel.get_metadata(self.inputs).shape[-1]

            mlp_scaler = MLPScaler(parser.mlp_layers, mlp_width)
            if parser.use_image_scales:
                n_images = np.max(BaseModel.get_image_id(self.inputs)) + 1
                image_scaler = ImageScaler(n_images)
                scaling_model = HybridImageScaler(mlp_scaler, image_scaler)
            else:
                scaling_model = mlp_scaler

        # Honor the documented mc_sample_size override; previously this
        # argument was accepted but silently ignored in favor of the parser.
        if mc_sample_size is None:
            mc_sample_size = parser.mc_samples

        model = VariationalMergingModel(surrogate_posterior, prior, likelihood,
                                        scaling_model, mc_sample_size)

        opt = tf.keras.optimizers.Adam(
            parser.learning_rate,
            parser.beta_1,
            parser.beta_2,
        )

        model.compile(opt)
        return model