Code example #1
File: test_analysis_results.py Project: giacomov/3ML
def test_analysis_set_input_output(xy_fitted_joint_likelihood):

    # Collect the same analysis results twice just to check that we can
    # save them to a file as a set of results

    jl, _, _ = xy_fitted_joint_likelihood  # type: JointLikelihood, None, None

    jl.restore_best_fit()

    ar = jl.results  # type: MLEResults

    ar2 = jl.results

    analysis_set = AnalysisResultsSet([ar, ar2])

    analysis_set.set_bins("testing", [-1, 1], [3, 5], unit="s")

    temp_file = "_analysis_set_test"

    analysis_set.write_to(temp_file, overwrite=True)

    analysis_set_reloaded = load_analysis_results(temp_file)

    os.remove(temp_file)

    # Test they are the same
    assert len(analysis_set_reloaded) == len(analysis_set)

    for res1, res2 in zip(analysis_set, analysis_set_reloaded):

        _results_are_same(res1, res2)
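
For context: the test above is an excerpt and relies on names imported elsewhere in test_analysis_results.py. A minimal import block might look like the following (the module paths are assumptions based on the 3ML source layout; xy_fitted_joint_likelihood is a pytest fixture and _results_are_same a private helper defined in the test module itself):

import os

# Assumed imports, not part of the excerpt: load_analysis_results is a
# top-level threeML export; AnalysisResultsSet and MLEResults live in
# threeML.analysis_results in the 3ML source tree.
from threeML import load_analysis_results
from threeML.analysis_results import AnalysisResultsSet, MLEResults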
Code example #2
    def go(self,
           continue_on_failure=True,
           compute_covariance=False,
           verbose=False,
           **options_for_parallel_computation):

        # Generate the data frame which will contain all results

        self._continue_on_failure = continue_on_failure

        self._compute_covariance = compute_covariance

        # let's iterate, perform the fit and fill the data frame

        if threeML_config["parallel"]["use_parallel"]:

            # Parallel computation

            with silence_console_log(and_progress_bars=False):
                client = ParallelClient(**options_for_parallel_computation)

                results = client.execute_with_progress_bar(
                    self.worker, list(range(self._n_iterations)))

        else:

            # Serial computation

            results = []

            with silence_console_log(and_progress_bars=False):

                for i in trange(self._n_iterations,
                                desc="Goodness of fit computation"):

                    results.append(self.worker(i))

        assert len(results) == self._n_iterations, (
            "Something went wrong, I have %s results "
            "for %s intervals" % (len(results), self._n_iterations))

        # Store the results in the data frames

        parameter_frames = pd.concat([x[0] for x in results],
                                     keys=list(range(self._n_iterations)))
        like_frames = pd.concat([x[1] for x in results],
                                keys=list(range(self._n_iterations)))

        # Store a list with all results (this is a list of lists, each list contains the results for the different
        # iterations for the same model)
        self._all_results = []

        for i in range(self._n_models):

            this_model_results = [x[2][i] for x in results]

            self._all_results.append(AnalysisResultsSet(this_model_results))

        return parameter_frames, like_frames
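
Which branch of go() runs is controlled by the threeML configuration. A minimal sketch for switching to the parallel branch (assuming threeML is installed and that the configuration supports item access, as the code above uses):

from threeML import threeML_config

# Flip the switch read at the top of go(): with use_parallel set to True
# the iterations are dispatched through ParallelClient instead of the
# serial trange loop.
threeML_config["parallel"]["use_parallel"] = True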
Code example #3
    def go(self, continue_on_failure=True, compute_covariance=False, verbose=False, **options_for_parallel_computation):

        # Generate the data frame which will contain all results

        if verbose:

            log.setLevel(logging.INFO)

        self._continue_on_failure = continue_on_failure

        self._compute_covariance = compute_covariance

        # let's iterate, perform the fit and fill the data frame

        if threeML_config['parallel']['use-parallel']:

            # Parallel computation

            client = ParallelClient(**options_for_parallel_computation)

            results = client.execute_with_progress_bar(self.worker, range(self._n_iterations))

        else:

            # Serial computation

            results = []

            with progress_bar(self._n_iterations, title='Goodness of fit computation') as p:

                for i in range(self._n_iterations):

                    results.append(self.worker(i))

                    p.increase()

        assert len(results) == self._n_iterations, "Something went wrong, I have %s results " \
                                                   "for %s intervals" % (len(results), self._n_iterations)

        # Store the results in the data frames

        parameter_frames = pd.concat(map(lambda x: x[0], results), keys=range(self._n_iterations))
        like_frames = pd.concat(map(lambda x: x[1], results), keys=range(self._n_iterations))

        # Store a list with all results (this is a list of lists, each list contains the results for the different
        # iterations for the same model)
        self._all_results = []

        for i in range(self._n_models):

            this_model_results = map(lambda x: x[2][i], results)

            self._all_results.append(AnalysisResultsSet(this_model_results))

        return parameter_frames, like_frames
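
This is an older revision of the same go() method shown in example #2: note the hyphenated configuration key 'use-parallel' (versus 'use_parallel' above), the since-replaced progress_bar context manager, and the map()/lambda calls, which returned lists under Python 2. Under Python 3, map() returns a lazy iterator, which is why the newer revision materializes the results with list comprehensions. A small self-contained sketch of that difference, using a made-up stand-in for the worker output:

import pandas as pd

# Hypothetical stand-in for the per-iteration worker output:
# (parameter_frame, like_frame, per_model_results)
results = [
    (pd.DataFrame({"value": [i]}),
     pd.DataFrame({"-log(like)": [0.5 * i]}),
     ["model_%i_result" % i])
    for i in range(3)
]

# Python 2: map() returned a list, so the old code worked as written.
# Python 3: list comprehensions make the materialization explicit,
# matching what example #2 does.
parameter_frames = pd.concat([x[0] for x in results], keys=list(range(3)))
like_frames = pd.concat([x[1] for x in results], keys=list(range(3)))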
Code example #4
def test_conversion_fits2hdf(xy_fitted_joint_likelihood):

    jl, _, _ = xy_fitted_joint_likelihood  # type: JointLikelihood, None, None

    jl.restore_best_fit()

    ar = jl.results  # type: MLEResults

    ar2 = jl.results

    analysis_set = AnalysisResultsSet([ar, ar2])

    analysis_set.set_bins("testing", [-1, 1], [3, 5], unit="s")

    temp_file = "_analysis_set_test.fits"

    analysis_set.write_to(temp_file, overwrite=True)

    convert_fits_analysis_result_to_hdf(temp_file)

    analysis_set_reloaded = load_analysis_results_hdf("_analysis_set_test.h5")

    # Test they are the same
    assert len(analysis_set_reloaded) == len(analysis_set)

    for res1, res2 in zip(analysis_set, analysis_set_reloaded):

        _results_are_same(res1, res2)
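
Unlike example #1, this test leaves its temporary files on disk: the FITS file it writes and the HDF5 file produced by convert_fits_analysis_result_to_hdf. If you adapt it, a cleanup step along these lines (file names taken from the test above) keeps the working directory tidy:

import os

# Remove both artifacts written by the round-trip above.
for path in ("_analysis_set_test.fits", "_analysis_set_test.h5"):
    if os.path.exists(path):
        os.remove(path)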