Example #1
def test_getters(laue_inputs, mono_inputs):
    """Smoke-test the BaseModel static getters on Laue and mono inputs."""
    for inputs in (laue_inputs, mono_inputs):
        # Harmonic IDs and wavelengths are only defined for Laue (polychromatic) data.
        if BaseModel.is_laue(inputs):
            BaseModel.get_harmonic_id(inputs)
            BaseModel.get_wavelength(inputs)
        BaseModel.get_image_id(inputs)
        BaseModel.get_intensities(inputs)
        BaseModel.get_metadata(inputs)
        BaseModel.get_refl_id(inputs)
        BaseModel.get_uncertainties(inputs)
Example #2
def test_mono_formatter(
    intensity_key,
    sigma_key,
    image_id_key,
    metadata_keys,
    separate_outputs,
    anomalous,
    dmin,
    isigi_cutoff,
    positional_encoding_keys,
    encoding_bit_depth,
    mono_data_set,
):
    """Format a mono data set and check shapes and dtypes of the packed tensors."""
    ds = mono_data_set.copy()
    f = MonoFormatter(
        intensity_key,
        sigma_key,
        image_id_key,
        metadata_keys,
        separate_outputs,
        anomalous,
        dmin,
        isigi_cutoff,
        positional_encoding_keys,
        encoding_bit_depth,
    )
    # Format the data set into the packed per-reflection tensors consumed by BaseModel.
    inputs, rac = f([ds])
    length = None
    for v in inputs:
        # Every tensor is rank-2, holds either float data or integer ids,
        # and shares the same leading (reflection) dimension.
        assert v.ndim == 2
        assert v.dtype in ('float32', 'int64')
        if length is None:
            length = v.shape[0]
        assert v.shape[0] == length

    metadata = BaseModel.get_metadata(inputs)
Example #3
    def build_model(self,
                    parser=None,
                    surrogate_posterior=None,
                    prior=None,
                    likelihood=None,
                    scaling_model=None,
                    mc_sample_size=None):
        """
        Build the model specified in parser, a careless.parser.parser.parse_args() result. Optionally override any of the 
        parameters taken by the VariationalMergingModel constructor.
        The `parser` parameter is required if self.parser is not set. 
        """
        from careless.models.merging.surrogate_posteriors import TruncatedNormal
        from careless.models.merging.variational import VariationalMergingModel
        from careless.models.scaling.image import HybridImageScaler, ImageScaler
        from careless.models.scaling.nn import MLPScaler
        if parser is None:
            parser = self.parser
        if parser is None:
            raise ValueError("No parser supplied, but self.parser is unset")

        # Choose the likelihood module matching the experiment type: 'poly' (Laue) or 'mono'.
        if parser.type == 'poly':
            from careless.models.likelihoods.laue import NormalLikelihood, StudentTLikelihood
        elif parser.type == 'mono':
            from careless.models.likelihoods.mono import NormalLikelihood, StudentTLikelihood

        if prior is None:
            prior = self.get_wilson_prior(parser.wilson_prior_b)
        # Default surrogate posterior: a truncated normal initialized from the prior's moments.
        loc, scale = prior.mean(), prior.stddev() / 10.
        low = (1e-32 * self.asu_collection.centric).astype('float32')
        if surrogate_posterior is None:
            surrogate_posterior = TruncatedNormal.from_loc_and_scale(
                loc, scale, low)

        if likelihood is None:
            # Use a Student-t likelihood only when a degrees-of-freedom value is supplied;
            # otherwise fall back to a normal likelihood.
            dof = parser.studentt_likelihood_dof
            if dof is None:
                likelihood = NormalLikelihood()
            else:
                likelihood = StudentTLikelihood(dof)

        if scaling_model is None:
            # Default MLP width: one unit per metadata column unless overridden.
            mlp_width = parser.mlp_width
            if mlp_width is None:
                mlp_width = BaseModel.get_metadata(self.inputs).shape[-1]

            mlp_scaler = MLPScaler(parser.mlp_layers, mlp_width)
            if parser.use_image_scales:
                # Combine the MLP scaler with a learned per-image scale factor.
                n_images = np.max(BaseModel.get_image_id(self.inputs)) + 1
                image_scaler = ImageScaler(n_images)
                scaling_model = HybridImageScaler(mlp_scaler, image_scaler)
            else:
                scaling_model = mlp_scaler

        model = VariationalMergingModel(surrogate_posterior, prior, likelihood,
                                        scaling_model, parser.mc_samples)

        # Adam optimizer configured from the parsed learning-rate and beta settings.
        opt = tf.keras.optimizers.Adam(
            parser.learning_rate,
            parser.beta_1,
            parser.beta_2,
        )

        model.compile(opt)
        return model
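
build_model follows a consistent pattern: every component (prior, surrogate posterior, likelihood, scaling model) may be passed in explicitly, and anything left as None is constructed from the parser, which itself falls back to self.parser. Below is a minimal, self-contained sketch of that override-or-default pattern; the ModelBuilder class and build_likelihood method are hypothetical stand-ins, not part of the careless API.

from argparse import Namespace


class ModelBuilder:
    """Hypothetical stand-in mirroring build_model's fallback logic."""

    def __init__(self, parser=None):
        self.parser = parser

    def build_likelihood(self, parser=None, likelihood=None):
        # An explicit argument wins; otherwise fall back to the stored parser.
        if parser is None:
            parser = self.parser
        if parser is None:
            raise ValueError("No parser supplied, but self.parser is unset")
        # Components left as None are derived from parser settings,
        # mirroring the Normal vs. Student-t switch in build_model.
        if likelihood is None:
            dof = parser.studentt_likelihood_dof
            likelihood = "Normal" if dof is None else f"StudentT(dof={dof})"
        return likelihood


args = Namespace(studentt_likelihood_dof=None)
print(ModelBuilder(args).build_likelihood())                     # Normal
print(ModelBuilder(args).build_likelihood(likelihood="custom"))  # explicit override wins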