Example #1
    def _test_process(self):
        """
        Test process() function

        The test is skipped silently when the multiprocessing Pool module
        is not available on this platform; any other failure is reported.
        """
        # Import Pool module; if the import fails then the platform does
        # not support multiprocessing, hence skip this test. Only the
        # import is guarded so that real failures in process() are not
        # silently swallowed.
        try:
            from multiprocessing import Pool
        except ImportError:
            return

        # Get test class
        cls = mputils_test()

        # Setup arguments for mpfunc() computation: one '_compute_bin'
        # task per index
        args = [(cls, '_compute_bin', i) for i in range(10)]

        # Run the tasks in a pool of 2 worker processes
        poolresults = mputils.process(2, mputils.mpfunc, args)

        # Check that each task returned its own index
        for i in range(10):
            result = poolresults[i][0]
            self.test_value(result, i, 'Check process for argument %d' % i)

        # Return
        return
Example #2
    def run(self):
        """
        Run the script
        """
        # Enable console logging when running in debug mode
        if self._logDebug():
            self._log.cout(True)

        # Query all task parameters
        self._get_parameters()

        # Attach the test source model for this observation
        self.models(modutils.test_source(self.obs().models(), self._srcname))

        # Log input observation and input models
        self._log_observations(gammalib.NORMAL, self.obs(), 'Input observation')
        self._log_models(gammalib.NORMAL, self.obs().models(), 'Input model')

        # Log section header
        self._log_header1(gammalib.TERSE, 'Generate TS distribution')

        # Number of trials to perform
        ntrials = self['ntrials'].integer()

        # Decide once whether the trials are run in a worker pool
        parallel = self._nthreads > 1

        # If multiprocessing is requested then run all trials in a pool of
        # worker processes
        if parallel:
            args        = [(self, '_trial', trial) for trial in range(ntrials)]
            poolresults = mputils.process(self._nthreads, mputils.mpfunc, args)

        # Collect the outcome of every trial and append it to the CSV file
        for trial in range(ntrials):

            # Recover the pooled result and replay the worker's log ...
            if parallel:
                result = poolresults[trial][0]
                self._log_string(gammalib.TERSE, poolresults[trial][1]['log'],
                                 False)

            # ... or run the trial sequentially now
            else:
                result = self._trial(trial)

            # Write trial result as one CSV row
            ioutils.write_csv_row(self['outfile'].filename().url(), trial,
                                  result['colnames'], result['values'])

        # Return
        return
Example #3
    def run(self):
        """
        Run the script
        """
        # Enable console logging when running in debug mode
        if self._logDebug():
            self._log.cout(True)

        # Query all task parameters
        self._get_parameters()

        # Log input observation
        self._log_observations(gammalib.NORMAL, self.obs(), 'Observation')

        # Container mapping phase string -> fitted models
        self._fitmodels = {}

        # Log section header
        self._log_header1(gammalib.TERSE, 'Generate phase curve')

        # If multiprocessing is requested then fit all phase bins in a
        # pool of worker processes
        if self._nthreads > 1:

            # Run one '_phase_bin' task per phase bin
            args        = [(self, '_phase_bin', phasebin)
                           for phasebin in self._phbins]
            poolresults = mputils.process(self._nthreads, mputils.mpfunc, args)

            # Store the fitted models and replay the worker logs
            for index in range(len(self._phbins)):
                outcome = poolresults[index][0]
                self._fitmodels[outcome['phstr']] = outcome['fitmodels']
                self._log_string(gammalib.TERSE, poolresults[index][1]['log'],
                                 False)

        # ... otherwise process the phase bins sequentially
        else:
            for phasebin in self._phbins:
                outcome = self._phase_bin(phasebin)
                self._fitmodels[outcome['phstr']] = outcome['fitmodels']

        # Write results into a FITS file
        self._create_fits()

        # Publish phase curve if requested
        if self['publish'].boolean():
            self.publish()

        # Return
        return
Example #4
    def _fit_energy_bins(self):
        """
        Fit model to energy bins

        Returns
        -------
        results : list of dict
            List of dictionaries with fit results
        """
        # Write header
        self._log_header1(gammalib.TERSE, 'Generate spectrum')
        self._log_string(gammalib.TERSE, str(self._ebounds))

        # Perform a global maximum-likelihood fit over the full energy range.
        # NOTE(review): the fit result is not used below (the original code
        # only stored the log-likelihood for a commented-out print); the run
        # is kept in case it has side effects relied upon later - confirm
        # whether this global fit can be removed entirely.
        like          = ctools.ctlike(self.obs())
        like['edisp'] = self['edisp'].boolean()
        like.run()

        # Initialise results
        results = []

        # If more than a single thread is requested then use multiprocessing
        if self._nthreads > 1:

            # Fit all energy bins in a pool of worker processes
            args        = [(self, '_fit_energy_bin', i)
                           for i in range(self._ebounds.size())]
            poolresults = mputils.process(self._nthreads, mputils.mpfunc, args)

            # Collect fit results and replay the worker logs
            for i in range(self._ebounds.size()):
                results.append(poolresults[i][0])
                self._log_string(gammalib.TERSE, poolresults[i][1]['log'], False)

        # Otherwise, loop over energy bins sequentially
        else:
            for i in range(self._ebounds.size()):

                # Fit energy bin
                result = self._fit_energy_bin(i)

                # Append results
                results.append(result)

        # Return results
        return results
Example #5
    def _fit_mass_points(self):
        """
        Fit models for all mass points

        Returns
        -------
        results : list of dict
            List of dictionaries with the fit result for every mass point
        """
        # Write header
        self._log_header1(gammalib.TERSE, 'Fitting models for different masses')
        self._log_string(gammalib.TERSE, str(self._masses))

        # Initialise results
        results = []

        # If more than a single thread is requested then use multiprocessing
        if self._nthreads > 1:

            # Fit all mass points in a pool of worker processes
            args        = [(self, '_fit_mass_point', i)
                           for i in range(len(self._masses))]
            poolresults = mputils.process(self._nthreads, mputils.mpfunc, args)

            # Collect fit results and replay the worker logs
            for i in range(len(self._masses)):
                results.append(poolresults[i][0])
                self._log_string(gammalib.TERSE, poolresults[i][1]['log'], False)

        # Otherwise, loop over mass points sequentially
        else:
            for i in range(len(self._masses)):

                # Fit mass point
                result = self._fit_mass_point(i)

                # Append results
                results.append(result)

        # Return results
        return results
Example #6
    def run(self):
        """
        Run the script

        Generates source and background spectra for all observations,
        optionally stacks them into a single On/Off observation, and
        replaces the observation container with the result.
        """
        # Switch screen logging on in debug mode
        if self._logDebug():
            self._log.cout(True)

        # Get parameters
        self._get_parameters()

        # Write observation into logger
        self._log_observations(gammalib.NORMAL, self.obs(), 'Observation')

        # Set true energy bins
        self._etruebounds = self._etrue_ebounds()

        # Write header
        self._log_header1(gammalib.TERSE, 'Spectral binning')

        # Log reconstructed energy bins (1-based numbering for readability)
        for i in range(self._ebounds.size()):
            value = '%s - %s' % (str(
                self._ebounds.emin(i)), str(self._ebounds.emax(i)))
            self._log_value(gammalib.TERSE, 'Bin %d' % (i + 1), value)

        # Write header
        self._log_header1(gammalib.NORMAL,
                          'Generation of source and background spectra')

        # Initialise run variables: output observation container,
        # background regions, and per-observation results
        outobs = gammalib.GObservations()
        self._bkg_regs = []
        results = []

        # If there is more than one observation and we use multiprocessing
        if self._nthreads > 1 and self.obs().size() > 1:

            # Process all observations in a pool of worker processes
            args = [(self, '_process_observation', i)
                    for i in range(self.obs().size())]
            poolresults = mputils.process(self._nthreads, mputils.mpfunc, args)

            # Construct results: unpack each worker result into the output
            # container and replay the worker's log output
            for i in range(self.obs().size()):
                result = poolresults[i][0]
                outobs = self._unpack_result(outobs, result)
                results.append(result)
                self._log_string(gammalib.TERSE, poolresults[i][1]['log'],
                                 False)

        # Otherwise, loop through observations and generate pha, arf, rmf files
        else:
            for i in range(self.obs().size()):
                # Process individual observation
                result = self._process_observation(i)
                outobs = self._unpack_result(outobs, result)
                results.append(result)

        # Stack observations when requested and more than one is present
        if outobs.size() > 1 and self['stack'].boolean():

            # Write header
            self._log_header1(gammalib.NORMAL,
                              'Stacking %d observations' % (outobs.size()))

            # Stack observations
            stacked_obs = gammalib.GCTAOnOffObservation(outobs)

            # Set statistic according to background model usage
            stacked_obs = self._set_statistic(stacked_obs)

            # Put stacked observations in output container, replacing the
            # individual observations
            outobs = gammalib.GObservations()
            outobs.append(stacked_obs)

        # Create models that allow On/Off fitting
        models = self._set_models(results)

        # Set models in output container
        outobs.models(models)

        # Set observation container
        self.obs(outobs)

        # Return
        return
Example #7
    def run(self):
        """
        Run the script
        """
        # Enable console logging when running in debug mode
        if self._logDebug():
            self._log.cout(True)

        # Query all task parameters
        self._get_parameters()

        # Keep a deep copy of each observation's energy boundaries for
        # later use
        for obs in self.obs():
            self._obs_ebounds.append(obs.events().ebounds().copy())

        # Column names of the output CSV file and result accumulator
        colnames = ['loge', 'emin', 'emax', 'crab_flux', 'photon_flux',
                    'energy_flux', 'sensitivity', 'regcoeff', 'nevents',
                    'npred']
        results  = []

        # Attach the test source model for this observation
        self._models = modutils.test_source(self.obs().models(),
                                            self._srcname,
                                            ra=self._ra,
                                            dec=self._dec)

        # Log input observation and input models
        self._log_observations(gammalib.NORMAL, self.obs(),
                               'Input observation')
        self._log_models(gammalib.NORMAL, self._models, 'Input model')

        # Log section header and sensitivity type
        self._log_header1(gammalib.TERSE, 'Sensitivity determination')
        self._log_value(gammalib.TERSE, 'Type', self['type'].string())

        # If multiprocessing is requested and there is more than one bin,
        # determine the sensitivity of all energy bins in a worker pool
        if self._nthreads > 1 and self._ebounds.size() > 1:

            # Run one '_e_bin' task per energy bin
            args = [(self, '_e_bin', ebin)
                    for ebin in range(self._ebounds.size())]
            poolresults = mputils.process(self._nthreads, mputils.mpfunc, args)

            # Collect results and replay the worker logs
            for ebin in range(self._ebounds.size()):
                results.append(poolresults[ebin][0])
                self._log_string(gammalib.TERSE, poolresults[ebin][1]['log'],
                                 False)

        # ... otherwise process the energy bins sequentially
        else:
            for ebin in range(self._ebounds.size()):

                # Run analysis in energy bin and collect the result
                results.append(self._e_bin(ebin))

        # Write one CSV row per energy bin
        for ebin, result in enumerate(results):
            ioutils.write_csv_row(self['outfile'].filename().url(), ebin,
                                  colnames, result)

        # Return
        return