Example No. 1
def test_getters(laue_inputs, mono_inputs):
    # Smoke test: every BaseModel getter should accept both Laue and
    # monochromatic inputs; harmonic IDs and wavelengths are Laue-only.
    for inputs in (laue_inputs, mono_inputs):
        if BaseModel.is_laue(inputs):
            BaseModel.get_harmonic_id(inputs)
            BaseModel.get_wavelength(inputs)
        BaseModel.get_image_id(inputs)
        BaseModel.get_intensities(inputs)
        BaseModel.get_metadata(inputs)
        BaseModel.get_refl_id(inputs)
        BaseModel.get_uncertainties(inputs)
Example No. 2
def test_laue_StudentTLikelihood(dof, laue_inputs):
    likelihood = StudentTLikelihood(dof)(laue_inputs)
    iobs = BaseModel.get_intensities(laue_inputs)
    sigiobs = BaseModel.get_uncertainties(laue_inputs)
    ipred = fake_ipred(laue_inputs)

    l_true = tfd.StudentT(dof, iobs, sigiobs)

    # Convolving the predictions over harmonics should run without raising.
    likelihood.convolve(ipred)

    nobs = BaseModel.get_harmonic_id(laue_inputs).max() + 1

    test = likelihood.log_prob(ipred).numpy()
    expected = l_true.log_prob(iobs).numpy().T

    # The zero-padded entries at the end of the input will disagree with the
    # expected values. This is fine, because they will not contribute to the
    # gradient.
    test = test[:, :nobs]
    expected = expected[:, :nobs]

    assert np.array_equal(expected.shape, test.shape)
    assert np.allclose(expected, test)

    # Test batches larger than 1
    ipred = np.concatenate((ipred, ipred, ipred), axis=0)
    likelihood.convolve(ipred).numpy()
    test = likelihood.log_prob(ipred).numpy()
    test = test[:, :nobs]
    assert np.array_equiv(expected, test)
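
Note: np.array_equiv broadcasts before comparing, so the single-row expected
array is checked against every row of the batched predictions. A minimal
standalone illustration (not careless code):

import numpy as np

expected = np.array([[1., 2., 3.]])      # one row of reference log-probs
test = np.tile(expected, (3, 1))         # a batch of three identical rows
assert np.array_equiv(expected, test)    # True: expected broadcasts over the rows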
Example No. 3
def test_mono_LaplaceLikelihood(mono_inputs):
    likelihood = LaplaceLikelihood()(mono_inputs)
    iobs = BaseModel.get_intensities(mono_inputs)
    sigiobs = BaseModel.get_uncertainties(mono_inputs)

    # A Laplace distribution with standard deviation sigma has scale sigma / sqrt(2).
    l_true = tfd.Laplace(
        tf.squeeze(iobs),
        tf.squeeze(sigiobs) / np.sqrt(2.),
    )
    z = l_true.sample()

    assert np.allclose(likelihood.log_prob(z), l_true.log_prob(z))
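
The sqrt(2) above converts a standard deviation into a Laplace scale: a Laplace
distribution with scale b has standard deviation b * sqrt(2). A quick standalone
check (illustrative, not careless code):

import numpy as np
import tensorflow_probability as tfp
tfd = tfp.distributions

sigma = 2.0
dist = tfd.Laplace(0., sigma / np.sqrt(2.))
print(float(dist.stddev()))  # 2.0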
Example No. 4
def test_mono_StudentTLikelihood(dof, mono_inputs):
    likelihood = StudentTLikelihood(dof)(mono_inputs)
    iobs = BaseModel.get_intensities(mono_inputs)
    sigiobs = BaseModel.get_uncertainties(mono_inputs)

    l_true = tfd.StudentT(
        dof, 
        tf.squeeze(iobs), 
        tf.squeeze(sigiobs),
    )
    z = l_true.sample()

    assert np.allclose(likelihood.log_prob(z), l_true.log_prob(z))
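
Unlike the Laplace example, sigiobs is passed directly as the StudentT scale
parameter; for dof > 2 the distribution's standard deviation is
scale * sqrt(dof / (dof - 2)), not the scale itself. A standalone check
(illustrative, not careless code):

import numpy as np
import tensorflow_probability as tfp
tfd = tfp.distributions

dof = 4.
dist = tfd.StudentT(dof, 0., 1.)
print(float(dist.stddev()))  # sqrt(4 / 2) ≈ 1.414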
Example No. 5
    def get_predictions(self, model, inputs=None):
        """ 
        Extract results from a surrogate_posterior.

        Parameters
        ----------
        model : VariationalMergingModel
            A merging model from careless
        inputs : tuple (optional)
            Inputs for which to make predictions. If None, self.inputs is used.

        Returns
        -------
        predictions : tuple
            A tuple of rs.DataSet objects containing the predictions for each
            ReciprocalASU contained in self.asu_collection.
        """
        if inputs is None:
            inputs = self.inputs

        refl_id = BaseModel.get_refl_id(inputs)
        iobs = BaseModel.get_intensities(inputs).flatten()
        sig_iobs = BaseModel.get_uncertainties(inputs).flatten()
        asu_id, H = self.asu_collection.to_asu_id_and_miller_index(refl_id)
        # Posterior predictive mean and standard deviation for every reflection
        ipred, sigipred = model.prediction_mean_stddev(inputs)

        h, k, l = H.T
        results = ()
        for i, asu in enumerate(self.asu_collection):
            idx = asu_id == i
            idx = idx.flatten()
            output = rs.DataSet(
                {
                    'H': h[idx],
                    'K': k[idx],
                    'L': l[idx],
                    'Iobs': iobs[idx],
                    'SigIobs': sig_iobs[idx],
                    'Ipred': ipred[idx],
                    'SigIpred': sigipred[idx],
                },
                cell=asu.cell,
                spacegroup=asu.spacegroup,
                merged=False,
            ).infer_mtz_dtypes().set_index(['H', 'K', 'L'])
            results += (output, )
        return results
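
The loop above partitions reflections by boolean-masking on asu_id. A toy
standalone illustration of the pattern (made-up values, not careless code):

import numpy as np

asu_id = np.array([0, 0, 1, 0, 1])    # which ASU each reflection belongs to
iobs = np.array([10., 11., 20., 12., 21.])
for i in range(asu_id.max() + 1):
    idx = (asu_id == i)
    print(i, iobs[idx])               # reflections belonging to ASU i
# 0 [10. 11. 12.]
# 1 [20. 21.]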
Example No. 6
    def get_tf_dataset(self, inputs=None):
        """
        Pack a dataset in the way that keras and careless expect.

        Parameters
        ----------
        inputs : tuple (optional)
            If None, self.inputs will be used.

        Returns
        -------
        tf.data.Dataset
            The packed (inputs, iobs, sigiobs) data in a single batch.
        """
        if inputs is None:
            inputs = self.inputs

        inputs = tuple(inputs)
        iobs = BaseModel.get_intensities(inputs)
        sigiobs = BaseModel.get_uncertainties(inputs)
        packed = (inputs, iobs, sigiobs)
        tfds = tf.data.Dataset.from_tensor_slices(packed)
        return tfds.batch(len(iobs))
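
Keras treats a dataset that yields 3-tuples as (inputs, targets, sample_weights),
which is why the uncertainties ride along as the third element. A toy standalone
illustration of the packing (made-up arrays, not careless code):

import numpy as np
import tensorflow as tf

iobs = np.random.random((5, 1)).astype('float32')
sigiobs = (0.1 * iobs).astype('float32')
metadata = np.random.random((5, 3)).astype('float32')

packed = ((metadata,), iobs, sigiobs)   # stand-in for the real inputs tuple
tfds = tf.data.Dataset.from_tensor_slices(packed).batch(len(iobs))
x, y, w = next(iter(tfds))              # one batch holding every reflection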
Example No. 7
def test_laue_NormalLikelihood(laue_inputs):
    likelihood = NormalLikelihood()(laue_inputs)
    iobs = BaseModel.get_intensities(laue_inputs)
    sigiobs = BaseModel.get_uncertainties(laue_inputs)
    ipred = fake_ipred(laue_inputs)

    l_true = tfd.Normal(iobs, sigiobs)

    # Convolving the predictions over harmonics should run without raising.
    likelihood.convolve(ipred)

    test = likelihood.log_prob(ipred).numpy()
    expected = l_true.log_prob(iobs).numpy()
    assert np.array_equal(expected.shape, test.T.shape)
    assert np.allclose(expected, test.T)

    # Test batches larger than 1
    ipred = np.concatenate((ipred, ipred, ipred), axis=0)
    likelihood.convolve(ipred).numpy()
    test = likelihood.log_prob(ipred).numpy()
    assert np.array_equiv(expected, test.T)
Example No. 8
def fake_ipred(inputs):
    # Split each observed intensity evenly across its harmonics to produce a
    # plausible predicted-intensity array of shape (1, n_predictions).
    harmonic_id = BaseModel.get_harmonic_id(inputs).flatten()
    intensities = BaseModel.get_intensities(inputs).flatten()
    result = intensities[harmonic_id] / np.bincount(harmonic_id)[harmonic_id]
    return result[None, :].astype('float32')
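
A worked toy example of the arithmetic above (made-up values, not careless
code): with two harmonics mapped to observation 0 and one mapped to
observation 1, each observed intensity is split evenly across its harmonics.

import numpy as np

harmonic_id = np.array([0, 0, 1])   # prediction -> observation mapping
intensities = np.array([6., 3.])    # one observed intensity per reflection
result = intensities[harmonic_id] / np.bincount(harmonic_id)[harmonic_id]
print(result)  # [3. 3. 3.]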