Example #1
def test_progress_bars():

    # Start from a known state: progress bars enabled.
    threeML_config.interface.progress_bars = 'on'

    toggle_progress_bars()

    # Toggling once turns the bars off ...
    assert not threeML_config.interface.progress_bars

    toggle_progress_bars()

    # ... and toggling again turns them back on.
    assert threeML_config.interface.progress_bars

    silence_progress_bars()

    # While silenced, the wrapped tqdm/trange loops still run, just without bars.
    for i in tqdm(range(10), desc="test"):
        pass

    for i in trange(1, 10, 1, desc="test"):
        pass

    assert not threeML_config.interface.progress_bars

    activate_progress_bars()

    # After re-activating, the bars are shown again.
    for i in tqdm(range(10), desc="test"):
        pass

    for i in trange(1, 10, 1, desc="test"):
        pass

    assert threeML_config.interface.progress_bars
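
The test above relies on threeML's wrapped tqdm/trange honouring the progress_bars configuration flag. A minimal, hypothetical sketch of that pattern (not threeML's actual implementation) can be built on plain tqdm's disable argument:

from tqdm import tqdm as _tqdm

PROGRESS_BARS_ON = True  # stand-in for threeML_config.interface.progress_bars


def tqdm(iterable, **kwargs):
    # tqdm becomes a silent pass-through when disable=True
    return _tqdm(iterable, disable=not PROGRESS_BARS_ON, **kwargs)


def trange(*args, **kwargs):
    return tqdm(range(*args), **kwargs)


for i in trange(1, 10, 1, desc="test"):
    pass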
Example #2
    def go(self,
           continue_on_failure=True,
           compute_covariance=False,
           verbose=False,
           **options_for_parallel_computation):

        # Generate the data frame which will contain all results

        self._continue_on_failure = continue_on_failure

        self._compute_covariance = compute_covariance

        # let's iterate, perform the fit and fill the data frame

        if threeML_config["parallel"]["use_parallel"]:

            # Parallel computation

            with silence_console_log(and_progress_bars=False):
                client = ParallelClient(**options_for_parallel_computation)

                results = client.execute_with_progress_bar(
                    self.worker, list(range(self._n_iterations)))

        else:

            # Serial computation

            results = []

            with silence_console_log(and_progress_bars=False):

                for i in trange(self._n_iterations,
                                desc="Goodness of fit computation"):

                    results.append(self.worker(i))

        assert len(results) == self._n_iterations, (
            "Something went wrong, I have %s results "
            "for %s intervals" % (len(results), self._n_iterations))

        # Store the results in the data frames

        parameter_frames = pd.concat([x[0] for x in results],
                                     keys=list(range(self._n_iterations)))
        like_frames = pd.concat([x[1] for x in results],
                                keys=list(range(self._n_iterations)))

        # Store a list with all results (this is a list of lists, each list contains the results for the different
        # iterations for the same model)
        self._all_results = []

        for i in range(self._n_models):

            this_model_results = [x[2][i] for x in results]

            self._all_results.append(AnalysisResultsSet(this_model_results))

        return parameter_frames, like_frames
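
The go method always follows the same dispatch pattern: run the worker over all iterations, either through the parallel client or through a plain trange loop, and collect one result per iteration. Below is a self-contained sketch of that pattern, with concurrent.futures standing in for threeML's ParallelClient (the names here are placeholders, not the library's API):

from concurrent.futures import ProcessPoolExecutor

from tqdm import trange

USE_PARALLEL = False  # stand-in for threeML_config["parallel"]["use_parallel"]


def worker(i):
    return i * i  # placeholder for the per-iteration fit


def go(n_iterations):

    if USE_PARALLEL:
        # parallel computation
        with ProcessPoolExecutor() as pool:
            results = list(pool.map(worker, range(n_iterations)))

    else:
        # serial computation with a progress bar
        results = [worker(i) for i in trange(n_iterations, desc="Goodness of fit computation")]

    assert len(results) == n_iterations

    return results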
Example #3
    def _minimize(self):

        # Gather the setup
        islands = self._setup_dict["islands"]
        pop_size = self._setup_dict["population_size"]
        evolution_cycles = self._setup_dict["evolution_cycles"]

        # Print some info
        print("\nPAGMO setup:")
        print("------------")
        print("- Number of islands:            %i" % islands)
        print("- Population size per island:   %i" % pop_size)
        print("- Evolutions cycles per island: %i\n" % evolution_cycles)

        Npar = len(self._internal_parameters)

        if is_parallel_computation_active():

            wrapper = PAGMOWrapper(function=self.function,
                                   parameters=self._internal_parameters,
                                   dim=Npar)

            # use the archipelago, which uses the ipyparallel computation

            archi = pg.archipelago(
                udi=pg.ipyparallel_island(),
                n=islands,
                algo=self._setup_dict["algorithm"],
                prob=wrapper,
                pop_size=pop_size,
            )
            archi.wait()

            # Display some info
            print("\nSetup before parallel execution:")
            print("--------------------------------\n")
            print(archi)

            # Evolve populations on islands
            print(
                "Evolving... (progress not available for parallel execution)")

            # For some weird reason, ipyparallel looks for _winreg on Linux (where it does
            # not exist, being a Windows-only module). Let's mock it with an empty module.
            mocked = False
            if not os.path.exists("_winreg.py"):

                with open("_winreg.py", "w+") as f:

                    f.write("pass")

                mocked = True

            archi.evolve()

            # Wait for completion (evolve() is async)

            archi.wait_check()

            # Now remove _winreg.py if needed
            if mocked:

                os.remove("_winreg.py")

            # Find best and worst islands

            fOpts = np.array([x[0] for x in archi.get_champions_f()])
            xOpts = archi.get_champions_x()

        else:

            # do not use ipyparallel. Evolve populations on islands serially

            wrapper = PAGMOWrapper(function=self.function,
                                   parameters=self._internal_parameters,
                                   dim=Npar)

            xOpts = []
            fOpts = np.zeros(islands)

            for island_id in trange(islands, desc="pygmo minimization"):

                pop = pg.population(prob=wrapper, size=pop_size)

                for i in range(evolution_cycles):

                    pop = self._setup_dict["algorithm"].evolve(pop)

                # Gather results

                xOpts.append(pop.champion_x)
                fOpts[island_id] = pop.champion_f[0]

        # Find best and worst islands

        min_idx = fOpts.argmin()
        max_idx = fOpts.argmax()

        fOpt = fOpts[min_idx]
        fWorse = fOpts[max_idx]
        xOpt = np.array(xOpts)[min_idx]

        # Some information
        print("\nSummary of evolution:")
        print("---------------------")
        print("Best population has minimum %.3f" % (fOpt))
        print("Worst population has minimum %.3f" % (fWorse))
        print("")

        # Transform to numpy.array

        best_fit_values = np.array(xOpt)

        return best_fit_values, fOpt
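
The serial branch above amounts to evolving a few independent pygmo populations and keeping the best champion. A toy, self-contained version of the same loop, using pygmo's built-in Rosenbrock problem in place of PAGMOWrapper and a differential-evolution algorithm chosen only for illustration:

import numpy as np
import pygmo as pg

islands, pop_size, evolution_cycles = 4, 20, 2

prob = pg.problem(pg.rosenbrock(dim=5))  # stand-in for PAGMOWrapper
algo = pg.algorithm(pg.de(gen=100))      # stand-in for self._setup_dict["algorithm"]

xOpts = []
fOpts = np.zeros(islands)

for island_id in range(islands):

    pop = pg.population(prob, size=pop_size)

    for _ in range(evolution_cycles):
        pop = algo.evolve(pop)

    xOpts.append(pop.champion_x)
    fOpts[island_id] = pop.champion_f[0]

min_idx = fOpts.argmin()
print("Best population has minimum %.3f" % fOpts[min_idx])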
Example #4
    def __init__(self,
                 pha_file_or_instance: Union[str, Path, PHAII],
                 file_type: str = "observed",
                 rsp_file: Optional[str] = None,
                 arf_file: Optional[str] = None):
        """
        A spectrum with dispersion build from an OGIP-compliant PHA FITS file. Both Type I & II files can be read. Type II
        spectra are selected either by specifying the spectrum_number or via the {spectrum_number} file name convention used
        in XSPEC. If the file_type is background, a 3ML InstrumentResponse or subclass must be passed so that the energy
        bounds can be obtained.


        :param pha_file_or_instance: either a PHA file name or threeML.plugins.OGIP.pha.PHAII instance
        :param spectrum_number: (optional) the spectrum number of the TypeII file to be used
        :param file_type: observed or background
        :param rsp_file: RMF filename or threeML.plugins.OGIP.response.InstrumentResponse instance
        :param arf_file: (optional) and ARF filename
        """

        # check that the input is a valid file name, Path, or PHAII instance

        for t in _valid_input_types:

            if isinstance(pha_file_or_instance, t):
                break

        else:

            log.error(
                f"Must provide a FITS file name or PHAII instance. Got {type(pha_file_or_instance)}"
            )

            raise RuntimeError()

        with fits.open(pha_file_or_instance) as f:

            try:

                HDUidx = f.index_of("SPECTRUM")

            except KeyError:

                raise RuntimeError("The input file %s is not in PHA format" %
                                   (pha_file_or_instance))

            spectrum = f[HDUidx]
            data = spectrum.data

            if "COUNTS" in data.columns.names:

                has_rates = False
                data_column_name = "COUNTS"

            elif "RATE" in data.columns.names:

                has_rates = True
                data_column_name = "RATE"

            else:

                log.error(
                    "This file does not contain a RATE nor a COUNTS column. "
                    "This is not a valid PHA file")

                raise RuntimeError()

            # Determine if this is a PHA I or a PHA II file
            if len(data.field(data_column_name).shape) == 2:

                num_spectra = data.field(data_column_name).shape[0]

            else:

                log.error("This appears to be a PHA I and not PHA II file")

                raise RuntimeError()

        pha_information = _read_pha_or_pha2_file(
            pha_file_or_instance,
            None,
            file_type,
            rsp_file,
            arf_file,
            treat_as_time_series=True,
        )

        # default the grouping to all open bins
        # this will only be altered if the spectrum is rebinned
        self._grouping = np.ones_like(pha_information["counts"])

        # this saves the extra properties to the class

        self._gathered_keywords = pha_information["gathered_keywords"]

        self._file_type = file_type

        # need to see if we have count errors, tstart, tstop
        # if not, we create a list of None

        if pha_information["count_errors"] is None:

            count_errors = [None] * num_spectra

        else:

            count_errors = pha_information["count_errors"]

        if pha_information["tstart"] is None:

            tstart = [None] * num_spectra

        else:

            tstart = pha_information["tstart"]

        if pha_information["tstop"] is None:

            tstop = [None] * num_spectra

        else:

            tstop = pha_information["tstop"]

        # now build the list of binned spectra

        list_of_binned_spectra = []

        for i in trange(num_spectra, desc="Loading PHAII Spectra"):

            list_of_binned_spectra.append(
                BinnedSpectrumWithDispersion(
                    counts=pha_information["counts"][i],
                    exposure=pha_information["exposure"][i, 0],
                    response=pha_information["rsp"],
                    count_errors=count_errors[i],
                    sys_errors=pha_information["sys_errors"][i],
                    is_poisson=pha_information["is_poisson"],
                    quality=pha_information["quality"].get_slice(i),
                    mission=pha_information["gathered_keywords"]["mission"],
                    instrument=pha_information["gathered_keywords"]["instrument"],
                    tstart=tstart[i],
                    tstop=tstop[i],
                ))

        # now get the time intervals

        _allowed_time_keys = (("TIME", "ENDTIME"), ("TSTART", "TSTOP"))

        for keys in _allowed_time_keys:

            try:

                start_times = data.field(keys[0])
                stop_times = data.field(keys[1])
                break

            except KeyError:

                pass

        else:

            log.error(
                f"Could not find times in {pha_file_or_instance}. Tried: {_allowed_time_keys}"
            )

            raise RuntimeError()

        time_intervals = TimeIntervalSet.from_starts_and_stops(
            start_times, stop_times)

        reference_time = 0

        # see if there is a reference time in the file

        if "TRIGTIME" in spectrum.header:
            reference_time = spectrum.header["TRIGTIME"]

        for t_number in range(spectrum.header["TFIELDS"]):

            if "TZERO%d" % t_number in spectrum.header:
                reference_time = spectrum.header["TZERO%d" % t_number]

        super(PHASpectrumSet, self).__init__(
            list_of_binned_spectra,
            reference_time=reference_time,
            time_intervals=time_intervals,
        )
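
Both the input-type check at the top of __init__ and the time-column lookup near the end use Python's for/else construct: the else branch only runs if the loop finishes without hitting break. A tiny standalone illustration, with a plain dict standing in for the FITS data table:

_allowed_time_keys = (("TIME", "ENDTIME"), ("TSTART", "TSTOP"))

row = {"TSTART": 0.0, "TSTOP": 1.0}  # hypothetical stand-in for data.field(...)

for start_key, stop_key in _allowed_time_keys:
    if start_key in row and stop_key in row:
        start_times, stop_times = row[start_key], row[stop_key]
        break
else:
    # reached only if no key pair matched
    raise RuntimeError(f"Could not find times. Tried: {_allowed_time_keys}")

print(start_times, stop_times)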
Example #5
    def _unbinned_fit_global_and_determine_optimum_grade(
            self, events, exposure, bayes=False):
        """
        Provides the ability to find the optimum polynomial grade for *unbinned* events by fitting the
        total (all channels) to 0-2 order polynomials and then comparing them via a likelihood ratio test.


        :param events: an event list
        :param exposure: the exposure per event
        :return: polynomial grade
        """

        # Fit the sum of all the channels to determine the optimal polynomial
        # grade

        min_grade = 0
        max_grade = 2
        log_likelihoods = []

        t_start = self._poly_intervals.start_times
        t_stop = self._poly_intervals.stop_times

        log.debug("attempting to find best fit poly with unbinned")

        if threeML_config["parallel"]["use_parallel"]:

            def worker(grade):

                polynomial, log_like = unbinned_polyfit(events,
                                                        grade,
                                                        t_start,
                                                        t_stop,
                                                        exposure,
                                                        bayes=bayes)

                return log_like

            client = ParallelClient()

            log_likelihoods = client.execute_with_progress_bar(
                worker,
                list(range(min_grade, max_grade + 1)),
                name="Finding best polynomial Order")

        else:

            for grade in trange(min_grade,
                                max_grade + 1,
                                desc="Finding best polynomial Order"):
                polynomial, log_like = unbinned_polyfit(events,
                                                        grade,
                                                        t_start,
                                                        t_stop,
                                                        exposure,
                                                        bayes=bayes)

                log_likelihoods.append(log_like)

        # Find the best grade via a likelihood ratio test between consecutive orders
        delta_loglike = np.array([
            2 * (x[0] - x[1])
            for x in zip(log_likelihoods[:-1], log_likelihoods[1:])
        ])

        log.debug(f"log likes {log_likelihoods}")
        log.debug(f" delta loglikes {delta_loglike}")

        delta_threshold = 9.0

        mask = delta_loglike >= delta_threshold

        if len(mask.nonzero()[0]) == 0:

            # best grade is zero!
            best_grade = 0

        else:

            best_grade = mask.nonzero()[0][-1] + 1

        return best_grade
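
The grade selection at the end can be checked in isolation: with some hypothetical per-grade fit statistics, the highest grade whose improvement over the previous one reaches delta_threshold wins, and grade 0 is kept if no step clears it.

import numpy as np

# hypothetical fit statistics, one per polynomial grade 0..2
log_likelihoods = [120.0, 114.0, 113.5]

# twice the improvement gained by each extra grade, as in the code above
delta_loglike = np.array(
    [2 * (a - b) for a, b in zip(log_likelihoods[:-1], log_likelihoods[1:])]
)  # -> [12.0, 1.0]

delta_threshold = 9.0
mask = delta_loglike >= delta_threshold

best_grade = 0 if len(mask.nonzero()[0]) == 0 else mask.nonzero()[0][-1] + 1
print(best_grade)  # -> 1: grade 1 clears the threshold, grade 2 does not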
Example #6
    def _fit_global_and_determine_optimum_grade(self,
                                                cnts,
                                                bins,
                                                exposure,
                                                bayes=False):
        """
        Provides the ability to find the optimum polynomial grade for *binned* counts by fitting the
        total (all channels) to 0-4 order polynomials and then comparing them via a likelihood ratio test.


        :param cnts: counts per bin
        :param bins: the bins used
        :param exposure: exposure per bin
        :param bayes:
        :return: polynomial grade
        """

        min_grade = 0
        max_grade = 4
        log_likelihoods = []

        log.debug("attempting to find best poly with binned data")

        if threeML_config["parallel"]["use_parallel"]:

            def worker(grade):

                polynomial, log_like = polyfit(bins,
                                               cnts,
                                               grade,
                                               exposure,
                                               bayes=bayes)

                return log_like

            client = ParallelClient()

            log_likelihoods = client.execute_with_progress_bar(
                worker,
                list(range(min_grade, max_grade + 1)),
                name="Finding best polynomial Order")

        else:

            for grade in trange(min_grade,
                                max_grade + 1,
                                desc="Finding best polynomial Order"):
                polynomial, log_like = polyfit(bins,
                                               cnts,
                                               grade,
                                               exposure,
                                               bayes=bayes)

                log_likelihoods.append(log_like)

        # Find the best grade via a likelihood ratio test between consecutive orders
        delta_loglike = np.array([
            2 * (x[0] - x[1])
            for x in zip(log_likelihoods[:-1], log_likelihoods[1:])
        ])

        log.debug(f"log likes {log_likelihoods}")
        log.debug(f" delta loglikes {delta_loglike}")

        delta_threshold = 9.0

        mask = delta_loglike >= delta_threshold

        if len(mask.nonzero()[0]) == 0:

            # best grade is zero!
            best_grade = 0

        else:

            best_grade = mask.nonzero()[0][-1] + 1

        return best_grade