Example no. 1
    def test__unconverged_sample_size__uses_value_unless_fewer_samples(self):
        model = af.ModelMapper(mock_class_1=MockClassx4)

        log_likelihood_list = 4 * [0.0] + [1.0]
        weight_list = 4 * [0.0] + [1.0]

        samples = MockSamples(
            model=model,
            samples=Sample.from_lists(
                model=model,
                parameter_lists=5 * [[]],
                log_likelihood_list=log_likelihood_list,
                log_prior_list=[1.0, 1.0, 1.0, 1.0, 1.0],
                weight_list=weight_list,
            ),
            unconverged_sample_size=2,
        )

        assert samples.pdf_converged is False
        assert samples.unconverged_sample_size == 2

        samples = MockSamples(
            model=model,
            samples=Sample.from_lists(
                model=model,
                parameter_lists=5 * [[]],
                log_likelihood_list=log_likelihood_list,
                log_prior_list=[1.0, 1.0, 1.0, 1.0, 1.0],
                weight_list=weight_list,
            ),
            unconverged_sample_size=6,
        )

        assert samples.pdf_converged is False
        assert samples.unconverged_sample_size == 5
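The capping behaviour asserted above (the requested size is used unless fewer samples exist) can be
captured by a one-line property. A minimal sketch of the assumed logic, not PyAutoFit's actual
implementation:

    @property
    def unconverged_sample_size(self):
        # use the requested size, unless the search produced fewer samples
        return min(self._unconverged_sample_size, len(self.sample_list))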
Example no. 2
def test__search_summary_to_file(model):
    file_search_summary = path.join(text_path, "search.summary")

    parameters = [[1.0, 2.0], [1.2, 2.2]]

    log_likelihood_list = [1.0, 0.0]

    samples = MockSamples(
        model=model,
        samples=Sample.from_lists(
            parameter_lists=parameters,
            log_likelihood_list=log_likelihood_list,
            log_prior_list=[0.0, 0.0],
            weight_list=log_likelihood_list,
            model=model
        ),
        time=None,
    )

    text_util.search_summary_to_file(samples=samples, log_likelihood_function_time=1.0, filename=file_search_summary)

    with open(file_search_summary) as results:
        lines = results.readlines()

    assert lines[0] == "Total Samples = 2\n"

    samples = MockNestSamples(
        model=model,
        samples=Sample.from_lists(
            parameter_lists=parameters,
            log_likelihood_list=log_likelihood_list + [2.0],
            log_prior_list=[1.0, 1.0],
            weight_list=log_likelihood_list,
            model=model
        ),
        total_samples=10,
        time=2,
        number_live_points=1,
        log_evidence=1.0,
    )

    text_util.search_summary_to_file(samples=samples, log_likelihood_function_time=1.0, filename=file_search_summary)

    with open(file_search_summary) as results:
        lines = results.readlines()

    assert lines[0] == "Total Samples = 10\n"
    assert lines[1] == "Total Accepted Samples = 2\n"
    assert lines[2] == "Acceptance Ratio = 0.2\n"
    assert lines[3] == "Time To Run = 2\n"
    assert lines[4] == "Log Likelihood Function Evaluation Time (seconds) = 1.0"
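A minimal sketch (assumed, not the actual `text_util` implementation) of a writer that would produce
the lines asserted above:

    def search_summary_to_file(samples, log_likelihood_function_time, filename):
        lines = [f"Total Samples = {samples.total_samples}\n"]
        # nest samples additionally report accepted samples and the acceptance ratio
        if hasattr(samples, "acceptance_ratio"):
            lines.append(f"Total Accepted Samples = {len(samples.log_likelihood_list)}\n")
            lines.append(f"Acceptance Ratio = {samples.acceptance_ratio}\n")
        if samples.time is not None:
            lines.append(f"Time To Run = {samples.time}\n")
        lines.append(
            f"Log Likelihood Function Evaluation Time (seconds) = {log_likelihood_function_time}"
        )
        with open(filename, "w") as f:
            f.writelines(lines)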
Example no. 3
    def samples(self):
        """
        Create a `Samples` object from this non-linear search's output files on the hard-disk and model.

        For Emcee, all quantities are extracted via the hdf5 backend of results.

        Parameters
        ----------
        model
            The model which generates instances for different points in parameter space. This maps the points from unit
            cube values to physical values via the priors.
        paths : af.Paths
            Manages all paths, e.g. where the search outputs are stored, the `NonLinearSearch` chains,
            etc.
        """

        if self._samples is not None:
            return self._samples

        parameters = self.results["weighted_samples"]["points"]
        log_likelihood_list = self.results["weighted_samples"]["logl"]
        log_prior_list = [
            sum(self.model.log_prior_list_from_vector(vector=vector))
            for vector in parameters
        ]
        weight_list = self.results["weighted_samples"]["weights"]

        self._samples = Sample.from_lists(
            model=self.model,
            parameter_lists=parameters,
            log_likelihood_list=log_likelihood_list,
            log_prior_list=log_prior_list,
            weight_list=weight_list)

        return self._samples
Example no. 4
    def __init__(
            self,
            model,
            sample_list=None,
            total_samples=10,
            log_evidence=0.0,
            number_live_points=5,
            time: Optional[float] = None,
    ):

        self.model = model

        if sample_list is None:

            sample_list = [
                Sample(
                    log_likelihood=log_likelihood,
                    log_prior=0.0,
                    weight=0.0
                )
                for log_likelihood
                in self.log_likelihood_list
            ]

        super().__init__(
            model=model,
            sample_list=sample_list,
            time=time
        )

        self._total_samples = total_samples
        self._log_evidence = log_evidence
        self._number_live_points = number_live_points
Example no. 5
    def test__unconverged__median_pdf_vector(self):
        parameters = [
            [1.0, 2.0],
            [1.0, 2.0],
            [1.0, 2.0],
            [1.0, 2.0],
            [1.0, 2.0],
            [1.0, 2.0],
            [1.0, 2.0],
            [1.0, 2.0],
            [1.1, 2.1],
            [0.9, 1.9],
        ]

        log_likelihood_list = 9 * [0.0] + [1.0]
        weight_list = 9 * [0.0] + [1.0]

        model = af.ModelMapper(mock_class=MockClassx2)
        samples = MockSamples(model=model,
                              samples=Sample.from_lists(
                                  model=model,
                                  parameter_lists=parameters,
                                  log_likelihood_list=log_likelihood_list,
                                  log_prior_list=10 * [0.0],
                                  weight_list=weight_list,
                              ))

        assert samples.pdf_converged is False

        median_pdf_vector = samples.median_pdf_vector

        assert median_pdf_vector[0] == pytest.approx(0.9, 1.0e-4)
        assert median_pdf_vector[1] == pytest.approx(1.9, 1.0e-4)
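The unconverged median lands on the single sample carrying all the weight. A sketch of the assumed
weighted-median calculation (illustrative, not the library code):

    import numpy as np

    def weighted_median(values, weights):
        # sort the values, accumulate normalised weights, read off the 50% point
        values = np.asarray(values)
        weights = np.asarray(weights, dtype=float)
        order = np.argsort(values)
        cdf = np.cumsum(weights[order])
        cdf /= cdf[-1]
        return values[order][np.searchsorted(cdf, 0.5)]

    # first parameter above: all weight sits on 0.9, so the median is 0.9
    print(weighted_median([1.0] * 8 + [1.1, 0.9], 9 * [0.0] + [1.0]))  # 0.9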
Example no. 6
    def test__offset_vector_from_input_vector(self):
        model = af.ModelMapper(mock_class_1=MockClassx4)

        parameters = [
            [1.1, 2.1, 3.1, 4.1],
            [1.0, 2.0, 3.0, 4.0],
            [1.0, 2.0, 3.0, 4.0],
            [1.0, 2.0, 3.0, 4.0],
            [1.0, 2.0, 3.0, 4.1],
        ]

        weight_list = [0.3, 0.2, 0.2, 0.2, 0.1]

        log_likelihood_list = [10.0 * weight for weight in weight_list]

        samples = MockSamples(model=model,
                              samples=Sample.from_lists(
                                  model=model,
                                  parameter_lists=parameters,
                                  log_likelihood_list=log_likelihood_list,
                                  log_prior_list=10 * [0.0],
                                  weight_list=weight_list,
                              ))

        offset_values = samples.offset_vector_from_input_vector(
            input_vector=[1.0, 1.0, 2.0, 3.0])

        assert offset_values == pytest.approx([0.0, 1.0, 1.0, 1.025], 1.0e-4)
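A sketch of the assumed definition: the offset is the median PDF vector minus the input vector,
parameter by parameter (here [1.0, 2.0, 3.0, 4.025] - [1.0, 1.0, 2.0, 3.0]):

    def offset_vector_from_input_vector(self, input_vector):
        # difference between the median PDF model and an input reference model
        return [
            median - value
            for median, value in zip(self.median_pdf_vector, input_vector)
        ]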
Example no. 7
    def test__log_prior_list_and_max_log_posterior_vector_and_instance(self):
        model = af.ModelMapper(mock_class_1=MockClassx4)

        parameters = [
            [0.0, 1.0, 2.0, 3.0],
            [0.0, 1.0, 2.0, 3.0],
            [0.0, 1.0, 2.0, 3.0],
            [0.0, 1.0, 2.0, 3.0],
            [21.0, 22.0, 23.0, 24.0],
        ]

        samples = MockSamples(
            model=model,
            samples=Sample.from_lists(
                model=model,
                parameter_lists=parameters,
                log_likelihood_list=[1.0, 2.0, 3.0, 0.0, 5.0],
                log_prior_list=[1.0, 2.0, 3.0, 10.0, 6.0],
                weight_list=[1.0, 1.0, 1.0, 1.0, 1.0],
            ))

        assert samples.log_posterior_list == [2.0, 4.0, 6.0, 10.0, 11.0]

        assert samples.max_log_posterior_vector == [21.0, 22.0, 23.0, 24.0]

        instance = samples.max_log_posterior_instance

        assert instance.mock_class_1.one == 21.0
        assert instance.mock_class_1.two == 22.0
        assert instance.mock_class_1.three == 23.0
        assert instance.mock_class_1.four == 24.0
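The quantities asserted above follow from log posterior = log likelihood + log prior. A minimal
sketch of the assumed properties:

    @property
    def log_posterior_list(self):
        return [
            lh + prior
            for lh, prior in zip(self.log_likelihood_list, self.log_prior_list)
        ]

    @property
    def max_log_posterior_vector(self):
        # parameters of the sample with the highest log posterior (11.0 above)
        index = self.log_posterior_list.index(max(self.log_posterior_list))
        return self.parameter_lists[index]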
Example no. 8
    def from_results_internal(
        cls,
        results_internal: np.ndarray,
        log_posterior_list: np.ndarray,
        model: AbstractPriorModel,
        total_iterations: int,
        time: Optional[float] = None,
    ):
        """
        The `Samples` classes in **PyAutoFit** provide an interface between the results of a `NonLinearSearch` (e.g.
        as files on your hard-disk) and Python.

        To create a `Samples` object after a `pyswarms` model-fit the results must be converted from the
        native format used by `pyswarms` (which are numpy ndarrays) to lists of values, the format used by
        the **PyAutoFit** `Samples` objects.

        This classmethod performs this conversion before creating a `PySwarmsSamples` object.

        Parameters
        ----------
        results_internal
            The Pyswarms results in their native internal format from which the samples are computed.
        log_posterior_list
            The log posterior of the PySwarms accepted samples.
        model
            Maps input vectors of unit parameter values to physical values and model instances via priors.
        total_iterations
            The total number of PySwarms iterations, which cannot be estimated from the sample list (which contains
            only accepted samples).
        time
            The time taken to perform the model-fit, which is passed around `Samples` objects for outputting
            information on the overall fit.
        """
        parameter_lists = [
            param.tolist() for parameters in results_internal
            for param in parameters
        ]
        log_prior_list = model.log_prior_list_from(
            parameter_lists=parameter_lists)
        log_likelihood_list = [
            lp - prior for lp, prior in zip(log_posterior_list, log_prior_list)
        ]
        weight_list = len(log_likelihood_list) * [1.0]

        sample_list = Sample.from_lists(
            model=model,
            parameter_lists=[
                parameters.tolist()[0] for parameters in results_internal
            ],
            log_likelihood_list=log_likelihood_list,
            log_prior_list=log_prior_list,
            weight_list=weight_list)

        return PySwarmsSamples(
            model=model,
            sample_list=sample_list,
            total_iterations=total_iterations,
            time=time,
            results_internal=results_internal,
        )
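Hypothetical usage, assuming `results_internal` and `log_posterior_list` have been extracted from a
finished pyswarms run (all values here are illustrative):

    samples = PySwarmsSamples.from_results_internal(
        results_internal=results_internal,
        log_posterior_list=log_posterior_list,
        model=model,
        total_iterations=100,
    )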
Example no. 9
    def samples_from(self, model):

        parameter_lists = self.paths.load_object("parameter_lists")

        log_prior_list = [
            sum(model.log_prior_list_from_vector(vector=vector))
            for vector in parameter_lists
        ]
        log_likelihood_list = [
            lp - prior for lp, prior in zip(
                self.paths.load_object("log_posterior_list"), log_prior_list)
        ]

        weight_list = len(log_likelihood_list) * [1.0]

        sample_list = Sample.from_lists(
            model=model,
            parameter_lists=parameter_lists,
            log_likelihood_list=log_likelihood_list,
            log_prior_list=log_prior_list,
            weight_list=weight_list)

        return Samples(model=model,
                       sample_list=sample_list,
                       time=self.timer.time)
Example no. 10
    def samples(self):
        """
        Create a `Samples` object from this non-linear search's output files on the hard-disk and model.

        Parameters
        ----------
        model
            The model which generates instances for different points in parameter space. This maps the points from unit
            cube values to physical values via the priors.
        paths : af.Paths
            Manages all paths, e.g. where the search outputs are stored, the `NonLinearSearch` chains,
            etc.
        """

        if self._samples is not None:
            return self._samples

        parameter_lists = self.zeus_sampler.get_chain(flat=True).tolist()
        log_prior_list = [
            sum(self.model.log_prior_list_from_vector(vector=vector)) for vector in parameter_lists
        ]
        log_posterior_list = self.zeus_sampler.get_log_prob(flat=True).tolist()
        log_likelihood_list = [log_posterior - log_prior for log_posterior, log_prior in zip(log_posterior_list, log_prior_list)]

        weight_list = len(log_likelihood_list) * [1.0]

        self._samples = Sample.from_lists(
            model=self.model,
            parameter_lists=parameter_lists,
            log_likelihood_list=log_likelihood_list,
            log_prior_list=log_prior_list,
            weight_list=weight_list
        )

        return self._samples
Example no. 11
    def test__instance_from_sample_index(self):
        model = af.ModelMapper(mock_class=MockClassx4)

        parameters = [
            [1.0, 2.0, 3.0, 4.0],
            [5.0, 6.0, 7.0, 8.0],
            [1.0, 2.0, 3.0, 4.0],
            [1.0, 2.0, 3.0, 4.0],
            [1.1, 2.1, 3.1, 4.1],
        ]

        samples = MockSamples(
            model=model,
            samples=Sample.from_lists(
                model=model,
                parameter_lists=parameters,
                log_likelihood_list=[0.0, 0.0, 0.0, 0.0, 0.0],
                log_prior_list=[0.0, 0.0, 0.0, 0.0, 0.0],
                weight_list=[1.0, 1.0, 1.0, 1.0, 1.0],
            ))

        instance = samples.instance_from_sample_index(sample_index=0)

        assert instance.mock_class.one == 1.0
        assert instance.mock_class.two == 2.0
        assert instance.mock_class.three == 3.0
        assert instance.mock_class.four == 4.0

        instance = samples.instance_from_sample_index(sample_index=1)

        assert instance.mock_class.one == 5.0
        assert instance.mock_class.two == 6.0
        assert instance.mock_class.three == 7.0
        assert instance.mock_class.four == 8.0
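A one-line sketch of the assumed behaviour, using the model's `instance_from_vector` mapping:

    def instance_from_sample_index(self, sample_index):
        # turn the raw parameter vector of one sample into a model instance
        return self.model.instance_from_vector(
            vector=self.parameter_lists[sample_index]
        )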
Example no. 12
    def test__gaussian_priors(self):
        parameters = [
            [1.0, 2.0, 3.0, 4.0],
            [1.0, 2.0, 3.0, 4.1],
            [1.0, 2.0, 3.0, 4.1],
            [0.88, 1.88, 2.88, 3.88],
            [1.12, 2.12, 3.12, 4.32],
        ]

        model = af.ModelMapper(mock_class=MockClassx4)
        samples = MockSamples(
            model=model,
            samples=Sample.from_lists(
                model=model,
                parameter_lists=parameters,
                log_likelihood_list=[10.0, 0.0, 0.0, 0.0, 0.0],
                log_prior_list=[0.0, 0.0, 0.0, 0.0, 0.0],
                weight_list=[1.0, 1.0, 1.0, 1.0, 1.0],
            ))

        gaussian_priors = samples.gaussian_priors_at_sigma(sigma=1.0)

        assert gaussian_priors[0][0] == 1.0
        assert gaussian_priors[1][0] == 2.0
        assert gaussian_priors[2][0] == 3.0
        assert gaussian_priors[3][0] == 4.0

        assert gaussian_priors[0][1] == pytest.approx(0.12, 1.0e-4)
        assert gaussian_priors[1][1] == pytest.approx(0.12, 1.0e-4)
        assert gaussian_priors[2][1] == pytest.approx(0.12, 1.0e-4)
        assert gaussian_priors[3][1] == pytest.approx(0.32, 1.0e-4)
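The asserted means track the maximum log likelihood sample and the widths track the larger error
bound at the input sigma. A minimal sketch of that assumed rule:

    def gaussian_priors_at_sigma(self, sigma):
        # one (mean, width) tuple per parameter: the mean comes from the maximum
        # log likelihood sample, the width from the larger one-sided error
        means = self.max_log_likelihood_vector
        uppers = self.vector_at_upper_sigma(sigma=sigma)
        lowers = self.vector_at_lower_sigma(sigma=sigma)
        return [
            (mean, max(upper - mean, mean - lower))
            for mean, upper, lower in zip(means, uppers, lowers)
        ]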
Example no. 13
    def from_results_internal(
            cls,
            results_internal: Results,
            model: AbstractPriorModel,
            number_live_points: int,
            unconverged_sample_size: int = 100,
            time: Optional[float] = None,
    ):
        """
        The `Samples` classes in **PyAutoFit** provide an interface between the results of a `NonLinearSearch` (e.g.
        as files on your hard-disk) and Python.

        To create a `Samples` object after a `dynesty` model-fit the results must be converted from the
        native format used by `dynesty` to lists of values, the format used by the **PyAutoFit** `Samples` objects.

        This classmethod performs this conversion before creating a `DynestySamples` object.

        Parameters
        ----------
        results_internal
            The `dynesty` results in their native internal format from which the samples are computed.
        model
            Maps input vectors of unit parameter values to physical values and model instances via priors.
        number_live_points
            The number of live points used by the `dynesty` search.
        unconverged_sample_size
            If the search has not yet converged, a reduced set of samples is used to provide a rough
            estimate of the parameters. The number of samples is set by this parameter.
        time
            The time taken to perform the model-fit, which is passed around `Samples` objects for outputting
            information on the overall fit.
        """
        parameter_lists = results_internal.samples.tolist()
        log_prior_list = model.log_prior_list_from(parameter_lists=parameter_lists)
        log_likelihood_list = list(results_internal.logl)

        try:
            # Nested sampling importance weights: w_i = exp(logwt_i - logz_final).
            weight_list = list(
                np.exp(np.asarray(results_internal.logwt) - results_internal.logz[-1])
            )
        except Exception:
            # Fall back for results formats that store the weights directly.
            weight_list = results_internal["weights"]

        sample_list = Sample.from_lists(
            model=model,
            parameter_lists=parameter_lists,
            log_likelihood_list=log_likelihood_list,
            log_prior_list=log_prior_list,
            weight_list=weight_list,
        )

        return DynestySamples(
            model=model,
            sample_list=sample_list,
            number_live_points=number_live_points,
            unconverged_sample_size=unconverged_sample_size,
            time=time,
            results_internal=results_internal,
        )
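Hypothetical usage, assuming `sampler` is a finished `dynesty` sampler (its `results` attribute is
dynesty's; the remaining values are illustrative):

    samples = DynestySamples.from_results_internal(
        results_internal=sampler.results,
        model=model,
        number_live_points=50,
    )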
Example no. 14
    def samples(self):
        if self._samples is not None:
            return self._samples

        return [
            Sample(log_likelihood=log_likelihood, log_prior=0.0, weight=0.0)
            for log_likelihood in self.log_likelihood_list
        ]
Example no. 15
def make_sample():
    return Sample(
        log_likelihood=1.0,
        log_prior=1.0,
        weight=0.5,
        centre=1.0,
        intensity=2.0,
        sigma=3.0
    )
Example no. 16
def samples_with_log_likelihood_list(
        log_likelihood_list
):
    return [
        Sample(
            log_likelihood=log_likelihood,
            log_prior=0,
            weight=0
        )
        for log_likelihood
        in log_likelihood_list
    ]
Example no. 17
def make_samples(model):
    parameters = [[1.0, 2.0], [1.2, 2.2]]

    log_likelihood_list = [1.0, 0.0]

    return StoredSamples(model=model,
                         samples=Sample.from_lists(
                             parameter_lists=parameters,
                             log_likelihood_list=log_likelihood_list,
                             log_prior_list=[0.0, 0.0],
                             weight_list=log_likelihood_list,
                             model=model))
Example no. 18
    def default_sample_list(self):

        if self._log_likelihood_list is not None:
            log_likelihood_list = self._log_likelihood_list
        else:
            log_likelihood_list = range(3)

        return [
            Sample(
                log_likelihood=log_likelihood,
                log_prior=0.0,
                weight=0.0
            )
            for log_likelihood
            in log_likelihood_list
        ]
Example no. 19
    def test__samples_within_parameter_range(self, samples):
        model = af.ModelMapper(mock_class_1=MockClassx4)

        parameters = [
            [0.0, 1.0, 2.0, 3.0],
            [0.0, 1.0, 2.0, 3.0],
            [0.0, 1.0, 2.0, 3.0],
            [21.0, 22.0, 23.0, 24.0],
            [0.0, 1.0, 2.0, 3.0],
        ]

        samples = MockNestSamples(
            model=model,
            samples=Sample.from_lists(
                model=model,
                parameter_lists=parameters,
                log_likelihood_list=[1.0, 2.0, 3.0, 10.0, 5.0],
                log_prior_list=[0.0, 0.0, 0.0, 0.0, 0.0],
                weight_list=[1.0, 1.0, 1.0, 1.0, 1.0],
            ),
            total_samples=10,
            log_evidence=0.0,
            number_live_points=5,
        )

        samples_range = samples.samples_within_parameter_range(
            parameter_index=0, parameter_range=[-1.0, 100.0])

        assert len(samples_range.parameter_lists) == 5
        assert samples.parameter_lists[0] == samples_range.parameter_lists[0]

        samples_range = samples.samples_within_parameter_range(
            parameter_index=0, parameter_range=[1.0, 100.0])

        assert len(samples_range.parameter_lists) == 1
        assert samples_range.parameter_lists[0] == [21.0, 22.0, 23.0, 24.0]

        samples_range = samples.samples_within_parameter_range(
            parameter_index=2, parameter_range=[1.5, 2.5])

        assert len(samples_range.parameter_lists) == 4
        assert samples_range.parameter_lists[0] == [0.0, 1.0, 2.0, 3.0]
        assert samples_range.parameter_lists[1] == [0.0, 1.0, 2.0, 3.0]
        assert samples_range.parameter_lists[2] == [0.0, 1.0, 2.0, 3.0]
        assert samples_range.parameter_lists[3] == [0.0, 1.0, 2.0, 3.0]
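A sketch of the assumed filtering: keep each sample whose chosen parameter falls inside the range,
then rebuild a `Samples` object from the survivors:

    def samples_within_parameter_range(self, parameter_index, parameter_range):
        lower, upper = parameter_range
        sample_list = [
            sample
            for sample, params in zip(self.sample_list, self.parameter_lists)
            if lower < params[parameter_index] < upper
        ]
        # rebuilding via the samples class keeps the model bookkeeping intact (assumed)
        return Samples(model=self.model, sample_list=sample_list)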
Example no. 20
    def test__acceptance_ratio_is_correct(self):
        model = af.ModelMapper(mock_class_1=MockClassx4)

        samples = MockNestSamples(
            model=model,
            samples=Sample.from_lists(
                model=model,
                parameter_lists=5 * [[]],
                log_likelihood_list=[1.0, 2.0, 3.0, 4.0, 5.0],
                log_prior_list=5 * [0.0],
                weight_list=5 * [0.0],
            ),
            total_samples=10,
            log_evidence=0.0,
            number_live_points=5,
        )

        assert samples.acceptance_ratio == 0.5
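The assumed definition behind the assertion (5 accepted samples out of 10 total draws):

    @property
    def acceptance_ratio(self):
        # accepted (stored) samples divided by the total samples the search drew
        return len(self.log_likelihood_list) / self.total_samples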
Example no. 21
    def samples_via_sampler_from_model(self, model):
        """Create a `Samples` object from this non-linear search's output files on the hard-disk and model.

        For Emcee, all quantities are extracted via the hdf5 backend of results.

        Parameters
        ----------
        model
            The model which generates instances for different points in parameter space. This maps the points from unit
            cube values to physical values via the priors.
        """

        parameters = self.backend.get_chain(flat=True).tolist()
        log_priors = [
            sum(model.log_priors_from_vector(vector=vector))
            for vector in parameters
        ]
        log_likelihoods = self.backend.get_log_prob(flat=True).tolist()
        weights = len(log_likelihoods) * [1.0]
        auto_correlation_time = self.backend.get_autocorr_time(tol=0)
        total_walkers = len(self.backend.get_chain()[0, :, 0])
        total_steps = len(self.backend.get_log_prob())

        return EmceeSamples(
            model=model,
            samples=Sample.from_lists(model=model,
                                      parameters=parameters,
                                      log_likelihoods=log_likelihoods,
                                      log_priors=log_priors,
                                      weights=weights),
            total_walkers=total_walkers,
            total_steps=total_steps,
            auto_correlation_times=auto_correlation_time,
            auto_correlation_check_size=self.auto_correlation_check_size,
            auto_correlation_required_length=self.auto_correlation_required_length,
            auto_correlation_change_threshold=self.auto_correlation_change_threshold,
            backend=self.backend,
            time=self.timer.time,
        )
Example no. 22
def make_samples(model):
    galaxy_0 = ag.Galaxy(redshift=0.5, light=ag.lp.EllSersic(centre=(0.0, 1.0)))
    galaxy_1 = ag.Galaxy(redshift=1.0, light=ag.lp.EllSersic())

    plane = ag.Plane(galaxies=[galaxy_0, galaxy_1])

    parameters = [model.prior_count * [0.0], model.prior_count * [10.0]]

    sample_list = Sample.from_lists(
        model=model,
        parameter_lists=parameters,
        log_likelihood_list=[1.0, 2.0],
        log_prior_list=[0.0, 0.0],
        weight_list=[0.0, 1.0],
    )

    return mock.MockSamples(
        model=model, sample_list=sample_list, max_log_likelihood_instance=plane
    )
Example no. 23
def make_samples():
    model = af.ModelMapper(mock_class_1=MockClassx4)

    parameters = [
        [0.0, 1.0, 2.0, 3.0],
        [0.0, 1.0, 2.0, 3.0],
        [0.0, 1.0, 2.0, 3.0],
        [21.0, 22.0, 23.0, 24.0],
        [0.0, 1.0, 2.0, 3.0],
    ]

    return OptimizerSamples(model=model,
                            samples=Sample.from_lists(
                                model=model,
                                parameter_lists=parameters,
                                log_likelihood_list=[1.0, 2.0, 3.0, 10.0, 5.0],
                                log_prior_list=[0.0, 0.0, 0.0, 0.0, 0.0],
                                weight_list=[1.0, 1.0, 1.0, 1.0, 1.0],
                            ))
Example no. 24
    def __init__(
        self,
        max_log_likelihood_instance=None,
        log_likelihoods=None,
        gaussian_tuples=None,
    ):

        if log_likelihoods is None:
            log_likelihoods = [1.0, 2.0, 3.0]

        super().__init__(model=None,
                         samples=[
                             Sample(log_likelihood=log_likelihood,
                                    log_prior=0.0,
                                    weight=0.0)
                             for log_likelihood in log_likelihoods
                         ])

        self._max_log_likelihood_instance = max_log_likelihood_instance
        self.gaussian_tuples = gaussian_tuples
Example no. 25
    def test__unconverged_vector_at_lower_and_upper_sigma(self):
        parameters = [
            [1.0, 2.0],
            [1.0, 2.0],
            [1.0, 2.0],
            [1.0, 2.0],
            [1.0, 2.0],
            [1.0, 2.0],
            [1.0, 2.0],
            [1.0, 2.0],
            [1.1, 2.1],
            [0.9, 1.9],
        ]

        log_likelihood_list = 9 * [0.0] + [1.0]
        weight_list = 9 * [0.0] + [1.0]

        model = af.ModelMapper(mock_class=MockClassx2)
        samples = MockSamples(model=model,
                              samples=Sample.from_lists(
                                  model=model,
                                  parameter_lists=parameters,
                                  log_likelihood_list=log_likelihood_list,
                                  log_prior_list=10 * [0.0],
                                  weight_list=weight_list,
                              ))

        assert samples.pdf_converged is False

        vector_at_sigma = samples.vector_at_sigma(sigma=1.0)

        assert vector_at_sigma[0] == pytest.approx((0.9, 1.1), 1e-2)
        assert vector_at_sigma[1] == pytest.approx((1.9, 2.1), 1e-2)

        vector_at_sigma = samples.vector_at_sigma(sigma=3.0)

        assert vector_at_sigma[0] == pytest.approx((0.9, 1.1), 1e-2)
        assert vector_at_sigma[1] == pytest.approx((1.9, 2.1), 1e-2)
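For unconverged samples the limits above ignore sigma and span each parameter's full range. A
minimal sketch of that assumed fallback:

    import numpy as np

    def unconverged_vector_at_sigma(parameter_lists):
        # fall back to the (min, max) of each parameter column
        arr = np.asarray(parameter_lists)
        return [(float(col.min()), float(col.max())) for col in arr.T]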
Example no. 26
    def test__converged__median_pdf_vector_and_instance(self):
        parameters = [
            [1.0, 2.0],
            [1.0, 2.0],
            [1.0, 2.0],
            [1.0, 2.0],
            [1.0, 2.0],
            [1.0, 2.0],
            [1.0, 2.0],
            [1.0, 2.0],
            [0.9, 1.9],
            [1.1, 2.1],
        ]

        log_likelihood_list = 10 * [0.1]
        weight_list = 10 * [0.1]

        model = af.ModelMapper(mock_class=MockClassx2)
        samples = MockSamples(model=model,
                              samples=Sample.from_lists(
                                  model=model,
                                  parameter_lists=parameters,
                                  log_likelihood_list=log_likelihood_list,
                                  log_prior_list=10 * [0.0],
                                  weight_list=weight_list,
                              ))

        assert samples.pdf_converged is True

        median_pdf_vector = samples.median_pdf_vector

        assert median_pdf_vector[0] == pytest.approx(1.0, 1.0e-4)
        assert median_pdf_vector[1] == pytest.approx(2.0, 1.0e-4)

        median_pdf_instance = samples.median_pdf_instance

        assert median_pdf_instance.mock_class.one == pytest.approx(1.0, 1e-1)
        assert median_pdf_instance.mock_class.two == pytest.approx(2.0, 1e-1)
Example no. 27
    def from_results_internal(
        cls,
        results_internal,
        model: AbstractPriorModel,
        auto_correlation_settings: AutoCorrelationsSettings,
        unconverged_sample_size: int = 100,
        time: Optional[float] = None,
    ):
        """
        The `Samples` classes in **PyAutoFit** provide an interface between the results of a `NonLinearSearch` (e.g.
        as files on your hard-disk) and Python.

        To create a `Samples` object after a `Zeus` model-fit the results must be converted from the
        native format used by `Zeus` (its sampler object, accessed via `get_chain` and `get_log_prob`)
        to lists of values, the format used by the **PyAutoFit** `Samples` objects.

        This classmethod performs this conversion before creating a `ZeusSamples` object.

        Parameters
        ----------
        results_internal
            The MCMC results in their native internal format from which the samples are computed.
        model
            Maps input vectors of unit parameter values to physical values and model instances via priors.
        auto_correlation_settings
            Customizes and performs auto correlation calculations performed during and after the search.
        unconverged_sample_size
            If the search has not yet converged, a reduced set of samples is used to provide a rough
            estimate of the parameters. The number of samples is set by this parameter.
        time
            The time taken to perform the model-fit, which is passed around `Samples` objects for outputting
            information on the overall fit.
        """

        parameter_lists = results_internal.get_chain(flat=True).tolist()
        log_posterior_list = results_internal.get_log_prob(flat=True).tolist()
        log_prior_list = model.log_prior_list_from(
            parameter_lists=parameter_lists)

        log_likelihood_list = [
            log_posterior - log_prior for log_posterior, log_prior in zip(
                log_posterior_list, log_prior_list)
        ]

        weight_list = len(log_likelihood_list) * [1.0]

        sample_list = Sample.from_lists(
            model=model,
            parameter_lists=parameter_lists,
            log_likelihood_list=log_likelihood_list,
            log_prior_list=log_prior_list,
            weight_list=weight_list)

        return ZeusSamples(
            model=model,
            sample_list=sample_list,
            auto_correlation_settings=auto_correlation_settings,
            unconverged_sample_size=unconverged_sample_size,
            time=time,
            results_internal=results_internal,
        )
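Hypothetical usage, assuming `sampler` is a completed `zeus.EnsembleSampler` (zeus mirrors emcee's
`get_chain`/`get_log_prob` API used above; the settings and timer values are illustrative):

    samples = ZeusSamples.from_results_internal(
        results_internal=sampler,
        model=model,
        auto_correlation_settings=auto_correlation_settings,
        time=timer.time,
    )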
Example no. 28
def samples_with_log_likelihoods(log_likelihoods):
    return [
        Sample(log_likelihood=log_likelihood, log_prior=0, weight=0)
        for log_likelihood in log_likelihoods
    ]
Example no. 29
    def test__converged__errors_vector_and_instance_at_upper_and_lower_sigma(
            self):
        parameters = [
            [0.1, 0.4],
            [0.1, 0.4],
            [0.1, 0.4],
            [0.1, 0.4],
            [0.1, 0.4],
            [0.1, 0.4],
            [0.1, 0.4],
            [0.1, 0.4],
            [0.0, 0.5],
            [0.2, 0.3],
        ]

        log_likelihood_list = list(range(10))

        weight_list = 10 * [0.1]

        model = af.ModelMapper(mock_class=MockClassx2)
        samples = MockSamples(model=model,
                              samples=Sample.from_lists(
                                  model=model,
                                  parameter_lists=parameters,
                                  log_likelihood_list=log_likelihood_list,
                                  log_prior_list=10 * [0.0],
                                  weight_list=weight_list,
                              ))

        assert samples.pdf_converged is True

        errors = samples.error_magnitude_vector_at_sigma(sigma=3.0)

        assert errors == pytest.approx([0.19514, 0.19514], 1e-1)

        errors = samples.error_vector_at_upper_sigma(sigma=3.0)

        assert errors == pytest.approx([0.09757, 0.09757], 1e-1)

        errors = samples.error_vector_at_lower_sigma(sigma=3.0)

        assert errors == pytest.approx([0.09757, 0.09757], 1e-1)

        errors = samples.error_vector_at_sigma(sigma=3.0)
        assert errors[0] == pytest.approx((0.09757, 0.09757), 1e-1)
        assert errors[1] == pytest.approx((0.09757, 0.09757), 1e-1)

        errors = samples.error_magnitude_vector_at_sigma(sigma=1.0)

        assert errors == pytest.approx([0.0, 0.0], 1e-1)

        errors_instance = samples.error_instance_at_sigma(sigma=1.0)

        assert errors_instance.mock_class.one == pytest.approx(0.0, 1e-1)
        assert errors_instance.mock_class.two == pytest.approx(0.0, 1e-1)

        errors_instance = samples.error_instance_at_upper_sigma(sigma=3.0)

        assert errors_instance.mock_class.one == pytest.approx(0.09757, 1e-1)
        assert errors_instance.mock_class.two == pytest.approx(0.09757, 1e-1)

        errors_instance = samples.error_instance_at_lower_sigma(sigma=3.0)

        assert errors_instance.mock_class.one == pytest.approx(0.09757, 1e-1)
        assert errors_instance.mock_class.two == pytest.approx(0.09757, 1e-1)
Example no. 30
    def test__converged__vector_and_instance_at_upper_and_lower_sigma(self):
        parameters = [
            [0.1, 0.4],
            [0.1, 0.4],
            [0.1, 0.4],
            [0.1, 0.4],
            [0.1, 0.4],
            [0.1, 0.4],
            [0.1, 0.4],
            [0.1, 0.4],
            [0.0, 0.5],
            [0.2, 0.3],
        ]

        log_likelihood_list = list(range(10))

        weight_list = 10 * [0.1]

        model = af.ModelMapper(mock_class=MockClassx2)
        samples = MockSamples(model=model,
                              samples=Sample.from_lists(
                                  model=model,
                                  parameter_lists=parameters,
                                  log_likelihood_list=log_likelihood_list,
                                  log_prior_list=10 * [0.0],
                                  weight_list=weight_list,
                              ))

        assert samples.pdf_converged is True

        vector_at_sigma = samples.vector_at_sigma(sigma=3.0)

        assert vector_at_sigma[0] == pytest.approx((0.00242, 0.19757), 1e-1)
        assert vector_at_sigma[1] == pytest.approx((0.30243, 0.49757), 1e-1)

        vector_at_sigma = samples.vector_at_upper_sigma(sigma=3.0)

        assert vector_at_sigma[0] == pytest.approx(0.19757, 1e-1)
        assert vector_at_sigma[1] == pytest.approx(0.49757, 1e-1)

        vector_at_sigma = samples.vector_at_lower_sigma(sigma=3.0)

        assert vector_at_sigma[0] == pytest.approx(0.00242, 1e-1)
        assert vector_at_sigma[1] == pytest.approx(0.30243, 1e-1)

        vector_at_sigma = samples.vector_at_sigma(sigma=1.0)

        assert vector_at_sigma[0] == pytest.approx((0.1, 0.1), 1e-1)
        assert vector_at_sigma[1] == pytest.approx((0.4, 0.4), 1e-1)

        instance_at_sigma = samples.instance_at_sigma(sigma=1.0)

        assert instance_at_sigma.mock_class.one == pytest.approx((0.1, 0.1), 1e-1)
        assert instance_at_sigma.mock_class.two == pytest.approx((0.4, 0.4), 1e-1)

        instance_at_sigma = samples.instance_at_upper_sigma(sigma=3.0)

        assert instance_at_sigma.mock_class.one == pytest.approx(0.19757, 1e-1)
        assert instance_at_sigma.mock_class.two == pytest.approx(0.49757, 1e-1)

        instance_at_sigma = samples.instance_at_lower_sigma(sigma=3.0)

        assert instance_at_sigma.mock_class.one == pytest.approx(0.00242, 1e-1)
        assert instance_at_sigma.mock_class.two == pytest.approx(0.30243, 1e-1)