Example no. 1
    def _get_parameters(self):
        """
        Get parameters from parfile
        """
        # If there are no observations in container then get some ...
        if self.obs().is_empty():
            self.obs(self._get_observations())

        # ... otherwise add response information and energy boundaries
        # in case they are missing
        else:
            self._setup_observations(self.obs())

        # Set observation statistic
        self._set_obs_statistic(gammalib.toupper(self['statistic'].string()))

        # Get number of energy bins
        enumbins = self['enumbins'].integer()

        # Query parameters for On/Off observation
        if gammalib.toupper(self['onsrc'].string()) != 'NONE':
            self['onrad'].real()

        # Query parameters for binned if requested
        elif enumbins > 0:
            self['npix'].integer()
            self['binsz'].real()
            self['coordsys'].string()
            self['proj'].string()

        # Set models if we have none
        if self.obs().models().is_empty():
            self.obs().models(self['inmodel'].filename())

        # Query other parameters
        self['ntrials'].integer()
        self['edisp'].boolean()
        self['seed'].integer()
        self['chatter'].integer()

        # Query some parameters
        self['outfile'].filename()
        self['profile'].boolean()

        #  Write input parameters into logger
        self._log_parameters(gammalib.TERSE)

        # Return
        return
Example no. 2
    def _get_parameters(self):
        """
        Get parameters from parfile and setup the observation
        """
        # Query datapath. If the parameter is not NONE then use it, otherwise
        # use the datapath from the VHEFITS environment variable
        datapath = self['datapath'].string()
        if gammalib.toupper(datapath) != 'NONE':
            self._datapath = datapath
        else:
            self._datapath = os.getenv('VHEFITS', '')

        # Expand environment
        self._datapath = gammalib.expand_env(self._datapath)

        # Get filename of master index file
        self._master_indx = self['master_indx'].string()
        self._master_file = os.path.join(self._datapath, self._master_indx)

        # Check for presence of master index file
        if not os.path.isfile(self._master_file):
            raise RuntimeError('Master index file "' + self._master_file +
                               '" not found. Use hidden parameter ' +
                               '"master_indx" to specify a different ' +
                               'filename.')

        #  Write input parameters into logger
        self._log_parameters(gammalib.TERSE)

        # Return
        return
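
For reference, the datapath resolution and master index check above can be reproduced outside the cscript framework. The following is a minimal sketch using only the standard library, where os.path.expandvars() stands in for gammalib.expand_env() and the function name and default index filename are made up for illustration:

import os

def resolve_datapath(datapath_par, master_indx='master.json'):
    """Resolve the IACT data path as in _get_parameters() above.

    A parameter value of 'NONE' falls back to the VHEFITS environment
    variable; the master index file must exist below the data path.
    """
    # Use the parameter unless it is 'NONE', otherwise fall back to VHEFITS
    if datapath_par.upper() != 'NONE':
        datapath = datapath_par
    else:
        datapath = os.getenv('VHEFITS', '')

    # Expand environment variables (stand-in for gammalib.expand_env)
    datapath = os.path.expandvars(datapath)

    # Build the master index file path and check that it exists
    master_file = os.path.join(datapath, master_indx)
    if not os.path.isfile(master_file):
        raise RuntimeError('Master index file "%s" not found.' % master_file)

    # Return resolved path and master index file name
    return datapath, master_file

# Hypothetical call: rely entirely on the VHEFITS environment variable
# datapath, master = resolve_datapath('NONE')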
Example no. 3
    def _get_parameters(self):
        """
        Get parameters from parfile and setup the observation
        """
        # Set observation if not done before
        if self.obs().size() == 0:
            self.obs(self._get_observations())

        # Set observation statistic
        self._set_obs_statistic(gammalib.toupper(self['statistic'].string()))

        # Get source name
        self._srcname = self['srcname'].string()

        # Set models if we have none
        if self.obs().models().size() == 0:
            self.obs().models(self['inmodel'].filename())

        # Query parameters
        self['edisp'].boolean()
        self['ntrials'].integer()
        self['debug'].boolean()
        self['outfile'].filename()

        #  Write input parameters into logger
        self._log_parameters(gammalib.TERSE)

        # Set number of processes for multiprocessing
        self._nthreads = mputils.nthreads(self)

        # Return
        return
Example no. 4
    def check_set_obs_statistic_onoff(self, statistic):
        obs = gammalib.GObservations()
        cta = gammalib.GCTAOnOffObservation()
        obs.append(cta)
        self.obs(obs)
        self._set_obs_statistic(statistic)
        return gammalib.toupper(self.obs()[0].statistic())
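
The helper above is evidently a unit-test utility: it builds a container with a single On/Off observation, hands it to the script, applies _set_obs_statistic() and reports the statistic that ends up attached to the stored observation. The container setup can be reproduced on its own with the gammalib calls already used above; this hedged sketch simply prints the statistic of a freshly constructed On/Off observation and does not involve a cscript, so _set_obs_statistic() is not exercised:

import gammalib

# Build an observation container holding one empty On/Off observation,
# exactly as the test helper does before handing it to the script
obs = gammalib.GObservations()
cta = gammalib.GCTAOnOffObservation()
obs.append(cta)

# Inspect the statistic attached to the stored observation; the helper
# returns this value upper-cased after _set_obs_statistic() has run
print(gammalib.toupper(obs[0].statistic()))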
Example no. 5
    def _get_parameters(self):
        """
        Get parameters from parfile and setup the observation
        """
        # Set observation if not done before
        if self.obs().size() == 0:
            self.obs(self._get_observations())

        # Set observation statistic
        self._set_obs_statistic(gammalib.toupper(self['statistic'].string()))

        # Get source name
        self._srcname = self['srcname'].string()

        # Set models if we have none
        if self.obs().models().size() == 0:
            self.obs().models(self['inmodel'].filename())

        # Query parameters
        self['edisp'].boolean()
        self['ntrials'].integer()
        self['debug'].boolean()

        # Read ahead output parameters
        #if self._read_ahead():
        #    self['outfile'].filename()
        self['outfile'].filename()

        #  Write input parameters into logger
        self._log_parameters(gammalib.TERSE)

        # Return
        return
Example no. 6
    def check_set_obs_statistic_binned(self, statistic):
        obs  = gammalib.GObservations()
        cube = gammalib.GCTAEventCube()
        cta  = gammalib.GCTAObservation()
        cta.events(cube)
        obs.append(cta)
        self.obs(obs)
        self._set_obs_statistic(statistic)
        return gammalib.toupper(self.obs()[0].statistic())
Example no. 7
    def _get_parameters(self):
        """
        Get user parameters from parfile
        """
        # Set observation if not done before
        if self.obs().size() == 0:
            self.obs(self._set_obs(self['emin'].real(), self['emax'].real()))

        # Set observation statistic
        self._set_obs_statistic(gammalib.toupper(self['statistic'].string()))

        # Set models if we have none
        if self.obs().models().size() == 0:
            self.obs().models(self['inmodel'].filename())

        # Get source name
        self._srcname = self['srcname'].string()

        # Read further parameters
        emin = self['emin'].real()
        emax = self['emax'].real()
        bins = self['bins'].integer()

        # Query parameters for binned if requested
        enumbins = self['enumbins'].integer()
        if enumbins != 0:
            self['npix'].integer()
            self['binsz'].real()

        # Query input parameters
        self['sigma'].real()
        self['max_iter'].integer()
        self['type'].string()
        self['outfile'].filename()
        self['edisp'].boolean()
        self['debug'].boolean()

        # Derive some parameters
        self._ebounds = gammalib.GEbounds(bins, gammalib.GEnergy(emin, 'TeV'),
                                          gammalib.GEnergy(emax, 'TeV'))

        #  Write input parameters into logger
        self._log_parameters(gammalib.TERSE)

        # Set number of processes for multiprocessing
        self._nthreads = mputils.nthreads(self)

        # Return
        return
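
The GEbounds object derived near the end defines the energy binning used downstream, with `bins` bins between `emin` and `emax` in TeV. A short hedged sketch, using only gammalib classes that already appear above, shows how such boundaries can be built and inspected; the 0.1-100 TeV range and 10 bins are example values:

import gammalib

# Build energy boundaries as in the script above: 10 bins between
# 0.1 and 100 TeV (GEbounds spaces the bins logarithmically by default)
ebounds = gammalib.GEbounds(10,
                            gammalib.GEnergy(0.1,   'TeV'),
                            gammalib.GEnergy(100.0, 'TeV'))

# Print the lower and upper edge of every bin in TeV
for i in range(ebounds.size()):
    print('Bin %2d: %8.3f - %8.3f TeV' %
          (i, ebounds.emin(i).TeV(), ebounds.emax(i).TeV()))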
Example no. 8
    def _get_parameters(self):
        """
        Get user parameters from parfile
        """
        # Set observation if not done before
        if self.obs().size() == 0:
            self.obs(self._set_obs(self['emin'].real(), self['emax'].real()))

        # Set observation statistic
        self._set_obs_statistic(gammalib.toupper(self['statistic'].string()))

        # Set models if we have none
        if self.obs().models().size() == 0:
            self.obs().models(self['inmodel'].filename())

        # Get source name
        self._srcname = self['srcname'].string()

        # Read further parameters
        emin = self['emin'].real()
        emax = self['emax'].real()
        bins = self['bins'].integer()

        # Query parameters for binned if requested
        enumbins = self['enumbins'].integer()
        if enumbins != 0:
            self['npix'].integer()
            self['binsz'].real()

        # Query input parameters
        self['sigma'].real()
        self['max_iter'].integer()
        self['type'].string()
        self['outfile'].filename()
        self['edisp'].boolean()
        self['debug'].boolean()

        # Derive some parameters
        self._ebounds = gammalib.GEbounds(bins,
                                          gammalib.GEnergy(emin, 'TeV'),
                                          gammalib.GEnergy(emax, 'TeV'))

        #  Write input parameters into logger
        self._log_parameters(gammalib.TERSE)

        # Return
        return
Example no. 9
    def _get_parameters(self):
        """
        Get parameters from parfile
        """
        # Setup observations (require response and allow event list, don't
        # allow counts cube)
        self._setup_observations(self.obs(), True, True, False)

        # Set observation statistic
        self._set_obs_statistic(gammalib.toupper(self['statistic'].string()))

        # Set models if there are none in the container
        if self.obs().models().size() == 0:
            self.obs().models(self['inmodel'].filename())

        # Get phase boundaries
        self._phbins = self._create_tbounds()

        # Set On/Off analysis flag and query relevant user parameters
        self._onoff = self._is_onoff()

        # Get source name
        self._srcname = self['srcname'].string()

        # If cube analysis is selected, set the stacked analysis flag and
        # query the relevant user parameters
        if not self._onoff:
            self._stacked = self._is_stacked()

        # Query the hidden parameters, just in case
        self['edisp'].boolean()

        # Read ahead output parameters
        if self._read_ahead():
            self['outfile'].filename()

        #  Write input parameters into logger
        self._log_parameters(gammalib.TERSE)

        # Set number of processes for multiprocessing
        self._nthreads = mputils.nthreads(self)

        # Return
        return
Example no. 10
    def _get_parameters(self):
        """
        Get parameters from parfile
        """
        # Setup observations (require response and allow event list, don't
        # allow counts cube)
        self._setup_observations(self.obs(), True, True, False)

        # Set observation statistic
        self._set_obs_statistic(gammalib.toupper(self['statistic'].string()))

        # Set models if there are none in the container
        if self.obs().models().size() == 0:
            self.obs().models(self['inmodel'].filename())

        # Get source name
        self._srcname = self['srcname'].string()

        # Get time boundaries
        self._tbins = self._create_tbounds()

        # Set On/Off analysis flag and query relevant user parameters
        self._onoff = self._is_onoff()

        # Set stacked analysis flag and query relevant user parameters
        if not self._onoff:
            self._stacked = self._is_stacked()

        # Query the hidden parameters, just in case
        self['edisp'].boolean()
        self['calc_ulim'].boolean()
        self['calc_ts'].boolean()
        self['fix_bkg'].boolean()
        self['fix_srcs'].boolean()

        # Read ahead output parameters
        if self._read_ahead():
            self['outfile'].filename()

        #  Write input parameters into logger
        self._log_parameters(gammalib.TERSE)

        # Return
        return
Example no. 11
    def _get_parameters(self):
        """
        Get parameters from parfile and setup the observation
        """
        # Set observation if not done before
        if self.obs().is_empty():
            self._require_inobs('csspec::get_parameters()')
            self.obs(self._get_observations())

        # Set observation statistic
        self._set_obs_statistic(gammalib.toupper(self['statistic'].string()))

        # Set models if we have none
        if self.obs().models().is_empty():
            self.obs().models(self['inmodel'].filename())

        # Query source name
        self['srcname'].string()

        # Get spectrum generation method
        self._method = self['method'].string()

        # Collect number of unbinned, binned and On/Off observations in
        # observation container
        n_unbinned = 0
        n_binned = 0
        n_onoff = 0
        for obs in self.obs():
            if obs.classname() == 'GCTAObservation':
                if obs.eventtype() == 'CountsCube':
                    n_binned += 1
                else:
                    n_unbinned += 1
            elif obs.classname() == 'GCTAOnOffObservation':
                n_onoff += 1
        n_cta = n_unbinned + n_binned + n_onoff
        n_other = self.obs().size() - n_cta

        # If spectrum method is not "NODES" then set spectrum method and
        # script mode according to type of observations
        if self._method != 'NODES':
            if n_other > 0:
                self._method = 'NODES'
            else:
                if n_unbinned == 0 and n_binned != 0 and n_onoff == 0:
                    self._binned_mode = True
                    self._method = 'SLICE'
                elif n_unbinned == 0 and n_binned == 0 and n_onoff != 0:
                    self._onoff_mode = True
                    self._method = 'SLICE'
                elif n_unbinned == 0 and n_binned != 0 and n_onoff != 0:
                    msg = 'Mix of binned and On/Off CTA observations found ' \
                          'in observation container. csscript does not support ' \
                          'this mix.'
                    raise RuntimeError(msg)
                elif n_unbinned != 0 and (n_binned != 0 or n_onoff != 0):
                    msg = 'Mix of unbinned and binned or On/Off CTA observations ' \
                          'found in observation container. csscript does not ' \
                          'support this mix.'
                    raise RuntimeError(msg)
                elif n_unbinned != 0:
                    self._method = 'SLICE'

        # Set ebounds
        self._set_ebounds()

        # Query other parameters
        self['edisp'].boolean()
        self['calc_ulim'].boolean()
        self['calc_ts'].boolean()
        self['fix_bkg'].boolean()
        self['fix_srcs'].boolean()

        # Read ahead output parameters
        if self._read_ahead():
            self['outfile'].filename()

        #  Write input parameters into logger
        self._log_parameters(gammalib.TERSE)

        # Set number of processes for multiprocessing
        self._nthreads = mputils.nthreads(self)

        # Write spectrum method header and parameters
        self._log_header1(gammalib.TERSE, 'Spectrum method')
        self._log_value(gammalib.TERSE, 'Unbinned CTA observations',
                        n_unbinned)
        self._log_value(gammalib.TERSE, 'Binned CTA observations', n_binned)
        self._log_value(gammalib.TERSE, 'On/off CTA observations', n_onoff)
        self._log_value(gammalib.TERSE, 'Other observations', n_other)

        # If there is a mix of CTA and non-CTA observations and the method
        # is 'SLICE' then log a warning that non-CTA observations will be
        # ignored
        warning = False
        if n_cta > 0 and n_other > 0 and self._method == 'SLICE':
            warning = True

        # If there are only non-CTA observations and the method is 'SLICE'
        # then stop now
        elif n_other > 0:
            if self._method == 'SLICE':
                msg = 'Selected "SLICE" method but none of the observations ' \
                      'is a CTA observation. Please select "AUTO" or "NODES" ' \
                      'if no CTA observation is provided.'
                raise RuntimeError(msg)
            else:
                self._method = 'NODES'

        # Log selected spectrum method
        self._log_value(gammalib.TERSE, 'Selected spectrum method',
                        self._method)

        # Signal warning
        if warning:
            self._log_string(
                gammalib.TERSE, ' WARNING: Only CTA observations '
                'can be handled with the "SLICE" method, all '
                'non-CTA observations will be ignored.')

        # Return
        return
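
The spectrum-method selection above boils down to a decision table over the observation census. The following is a hedged, pure-Python restatement of those branches (the function name is invented and gammalib is not involved), which may make the supported and rejected observation mixes easier to see at a glance:

def select_spectrum_method(method, n_unbinned, n_binned, n_onoff, n_other):
    """Restate the csspec method selection rules shown above.

    Returns a (method, binned_mode, onoff_mode) tuple.
    """
    binned_mode = False
    onoff_mode  = False
    if method != 'NODES':
        if n_other > 0:
            method = 'NODES'
        elif n_unbinned == 0 and n_binned != 0 and n_onoff == 0:
            binned_mode, method = True, 'SLICE'
        elif n_unbinned == 0 and n_binned == 0 and n_onoff != 0:
            onoff_mode, method = True, 'SLICE'
        elif n_unbinned == 0 and n_binned != 0 and n_onoff != 0:
            raise RuntimeError('Mix of binned and On/Off CTA observations '
                               'is not supported.')
        elif n_unbinned != 0 and (n_binned != 0 or n_onoff != 0):
            raise RuntimeError('Mix of unbinned and binned or On/Off CTA '
                               'observations is not supported.')
        elif n_unbinned != 0:
            method = 'SLICE'
    return method, binned_mode, onoff_mode

# Example: three unbinned CTA observations and no other observations
print(select_spectrum_method('AUTO', 3, 0, 0, 0))   # ('SLICE', False, False)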
Example no. 12
def plot_irf(irf, emin, emax, tmin, tmax, plotfile):
    """
    Plot Instrument Response Function

    Parameters
    ----------
    irf : `~gammalib.GCTAResponseIrf`
        Instrument Response Function
    emin : float
        Minimum energy (TeV)
    emax : float
        Maximum energy (TeV)
    tmin : float
        Minimum offset angle (deg)
    tmax : float
        Maximum offset angle (deg)
    plotfile : str
        Plot filename
    """
    # Build selection string
    selection  = ''
    eselection = ''
    tselection = ''
    if emin is not None and emax is not None:
        eselection += '%.3f-%.1f TeV' % (emin, emax)
    elif emin is not None:
        eselection += ' >%.3f TeV' % (emin)
    elif emax is not None:
        eselection += ' <%.1f TeV' % (emax)
    if tmin is not None and tmax is not None:
        tselection += '%.1f-%.1f deg' % (tmin, tmax)
    elif tmin is not None:
        tselection += ' >%.1f deg' % (tmin)
    elif tmax is not None:
        tselection += ' <%.1f deg' % (tmax)
    if len(eselection) > 0 and len(tselection) > 0:
        selection = ' (%s, %s)' % (eselection, tselection)
    elif len(eselection) > 0:
        selection = ' (%s)' % (eselection)
    elif len(tselection) > 0:
        selection = ' (%s)' % (tselection)

    # Build title
    mission    = irf.caldb().mission()
    instrument = irf.caldb().instrument()
    response   = irf.rspname()
    title      = '%s "%s" Instrument Response Function "%s"%s' % \
                 (gammalib.toupper(mission), instrument, response, selection)

    # Create figure
    fig = plt.figure(figsize=(16,8))

    # Add title
    fig.suptitle(title, fontsize=16)

    # Plot Aeff
    ax1 = fig.add_subplot(231)
    plot_aeff(ax1, irf.aeff(), emin=emin, emax=emax, tmin=tmin, tmax=tmax)

    # Plot Psf
    ax2 = fig.add_subplot(232)
    plot_psf(ax2, irf.psf(), emin=emin, emax=emax, tmin=tmin, tmax=tmax)

    # Plot Background
    ax3 = fig.add_subplot(233)
    plot_bkg(ax3, irf.background(), emin=emin, emax=emax, tmin=tmin, tmax=tmax)

    # Plot Edisp
    fig.add_subplot(234)
    plot_edisp(irf.edisp(), emin=emin, emax=emax, tmin=tmin, tmax=tmax)

    # Show the plots or save them into a file
    if len(plotfile) > 0:
        plt.savefig(plotfile)
    else:
        plt.show()

    # Return
    return
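
A possible way to call plot_irf() is sketched below, assuming the companion plot_aeff(), plot_psf(), plot_bkg() and plot_edisp() helpers are defined alongside it as in the original script; the calibration database name 'prod2' and the response name 'South_0.5h' are placeholders that must exist in the local gammalib calibration database:

import gammalib

# Load an instrument response function from the CTA calibration database
caldb = gammalib.GCaldb('cta', 'prod2')
irf   = gammalib.GCTAResponseIrf('South_0.5h', caldb)

# Show the IRF between 0.03 and 100 TeV and 0-5 deg offset interactively ...
plot_irf(irf, 0.03, 100.0, 0.0, 5.0, '')

# ... or write the figure to a file instead
plot_irf(irf, 0.03, 100.0, 0.0, 5.0, 'irf.png')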
Example no. 13
    def _get_parameters(self):
        """
        Get parameters from parfile and setup the observation
        """
        # Query datapath. If the parameter is not NONE then use it, otherwise
        # use the datapath from the VHEFITS environment variable
        datapath = self['datapath'].string()
        if gammalib.toupper(datapath) != 'NONE':
            self._datapath = datapath
        else:
            self._datapath = os.getenv('VHEFITS', '')

        # Expand environment
        self._datapath = gammalib.expand_env(self._datapath)

        # Query input parameters
        self['inmodel'].query()

        # Read FITS production
        self._prodname = self['prodname'].string()

        # Read runlist file if list not already filled
        if len(self._runlist) == 0:

            # Get file name
            self._runlistfile = self['infile'].filename()

            # Read runlist from file
            runfile = open(self._runlistfile.url())
            for line in runfile.readlines():
                if len(line) == 0:
                    continue
                if line[0] == '#':
                    continue
                if len(line.split()) > 0:
                    self._runlist.append(line.split()[0])
            runfile.close()

        # Read number of background parameters
        self._bkgpars = self['bkgpars'].integer()

        # Query ahead output parameters
        if self._read_ahead():
            self['outmodel'].filename()
            self['outobs'].filename()

        # Master index file name
        self._master_indx = self['master_indx'].string()

        # Read flag for background scaling factor
        self._use_bkg_scale = self['bkg_scale'].boolean()

        # Read hierarchy of file loading
        self._ev_hiera = self['ev_hiera'].string().split('|')
        self._aeff_hiera = self['aeff_hiera'].string().split('|')
        self._psf_hiera = self['psf_hiera'].string().split('|')
        self._bkg_hiera = self['bkg_hiera'].string().split('|')
        self._edisp_hiera = self['edisp_hiera'].string().split('|')
        self._bkg_mod_hiera = self['bkg_mod_hiera'].string().split('|')

        # Read hidden background parameters
        self._bkg_gauss_norm = self['bkg_gauss_norm'].real()
        self._bkg_gauss_index = self['bkg_gauss_index'].real()
        self._bkg_gauss_sigma = self['bkg_gauss_sigma'].real()
        self._bkg_aeff_index = self['bkg_aeff_index'].real()
        self._bkg_aeff_norm = self['bkg_aeff_norm'].real()
        self._bkg_range_factor = self['bkg_range_factor'].real()

        # Open master index file and look for prodname
        master_file = os.path.join(self._datapath, self._master_indx)
        if not os.path.isfile(master_file):
            raise RuntimeError('FITS data store not available. No master '
                               'index file found. Make sure the file is '
                               'copied from the server and your datapath '
                               'is set correctly.')

        # Open and load JSON file
        json_data = open(master_file).read()
        data = json.loads(json_data)
        if 'datasets' not in data:
            raise RuntimeError('Key "datasets" not available in master '
                               'index file.')

        # Get configurations
        configs = data['datasets']

        # Initialise HDUs
        self._hdu_index = self._obs_index = ''

        # Get HDUs
        for config in configs:

            # Check if prodname is present
            if self._prodname == config['name']:
                self._hdu_index = str(
                    os.path.join(self._datapath, config['hduindx']))
                self._obs_index = str(
                    os.path.join(self._datapath, config['obsindx']))

                # Leave loop if index file names were found
                break

        # Check index files
        if self._hdu_index == '' or self._obs_index == '':
            raise RuntimeError('*** ERROR: FITS data store "' +
                               self._prodname +
                               '" not available. Run csiactdata to get a list '
                               'of available storage names')

        # Check HDU names
        filename = gammalib.GFilename(self._hdu_index + '[HDU_INDEX]')
        if not filename.is_fits():
            raise RuntimeError('*** ERROR: HDU index file "' +
                               self._hdu_index +
                               '[HDU_INDEX]" for FITS data store "' +
                               self._prodname + '" not available. Check your '
                               'master index file or run csiactdata to get '
                               'a list of available storage names.')

        # Check for existence of 'BKG_SCALE' in the observation index file if
        # required
        if self._use_bkg_scale:

            # Create filename
            filename = gammalib.GFilename(self._obs_index + '[OBS_INDEX]')

            # Check if it is a FITS file
            if filename.is_fits():

                # Open FITS file
                fits = gammalib.GFits(self._obs_index)

                # Check if column "BKG_SCALE" is found and signal its possible
                # usage
                if not fits['OBS_INDEX'].contains('BKG_SCALE'):
                    self._use_bkg_scale = False

                # Close FITS file
                fits.close()

            else:
                # Signal that there is no background scale
                self._use_bkg_scale = False

        # Create base data directory from hdu index file location
        self._subdir = os.path.dirname(self._hdu_index)
        self._debug = False  # Debugging in client tools

        #  Write input parameters into logger
        self._log_parameters(gammalib.TERSE)

        # Return
        return
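
The runlist block above skips empty and '#'-commented lines and keeps only the first whitespace-separated token of each remaining line. A hedged standalone version of that parsing, with the accepted file layout illustrated in the docstring (the function and file names are made up):

def read_runlist(filename):
    """Read a runlist file into a list of run identifiers.

    The accepted layout mirrors the loop above, e.g.:

        # runlist for the Crab observations
        23523
        23526   anything after the first token is ignored
    """
    runlist = []
    with open(filename) as runfile:
        for line in runfile:
            # Skip empty lines and comment lines
            stripped = line.strip()
            if len(stripped) == 0 or stripped[0] == '#':
                continue
            # Keep only the first whitespace-separated token (the run number)
            runlist.append(stripped.split()[0])
    return runlist

# Hypothetical call
# runs = read_runlist('runlist.lis')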
Example no. 14
    def _get_parameters(self):
        """
        Get parameters and setup the observation
        """

        #   Set observation if not done before
        if self.obs().is_empty():
            self._require_inobs('csdmatter::get_parameters')
            self.obs(self._get_observations())

        #   Set Obs statistic
        self._set_obs_statistic(gammalib.toupper(self['statistic'].string()))

        #   Set Models
        if self.obs().models().is_empty():
            self.obs().models(self['inmodel'].filename())

        #   Query source name
        self['srcname'].string()

        #   Collect number of unbinned, binned and OnOff obs
        #   in observation container
        n_unbinned = 0
        n_binned = 0
        n_onoff = 0

        for obs in self.obs():
            if obs.classname() == 'GCTAObservation':
                if obs.eventtype() == 'CountsCube':
                    n_binned += 1
                else:
                    n_unbinned += 1
            elif obs.classname() == 'GCTAOnOffObservation':
                n_onoff += 1
        n_cta = n_unbinned + n_binned + n_onoff
        n_other = self.obs().size() - n_cta

        #   Set energy bounds
        self._set_ebounds()

        #   Query other parameters
        self['edisp'].boolean()
        self['calc_ulim'].boolean()
        self['calc_ts'].boolean()
        self['fix_bkg'].boolean()
        self['fix_srcs'].boolean()
        # self[ 'dmass' ].real()
        # self[ 'sigmav' ].real()

        #   Read ahead output parameters
        if self._read_ahead():
            self['outfile'].filename()

        #   Write into logger
        self._log_parameters(gammalib.TERSE)

        #   Set number of processes for multiprocessing
        self._nthreads = mputils.nthreads(self)

        self._log_header1(gammalib.TERSE, 'DM analysis')
        self._log_value(gammalib.TERSE, 'Unbinned observations', n_unbinned)
        self._log_value(gammalib.TERSE, 'Binned observations', n_binned)
        self._log_value(gammalib.TERSE, 'OnOff Observations', n_onoff)
        self._log_value(gammalib.TERSE, 'NonCTA Observations', n_other)

        if n_other == 0:

            if n_unbinned == 0 and n_binned != 0 and n_onoff == 0:
                self._binned_mode = True

            elif n_unbinned == 0 and n_binned == 0 and n_onoff != 0:
                self._onoff_mode = True

            elif n_unbinned == 0 and n_binned != 0 and n_onoff != 0:
                msg = 'Mixing of binned and OnOff Observations'
                raise RuntimeError(msg)

            elif n_unbinned != 0 and (n_binned != 0 or n_onoff != 0):
                msg = 'Mixing of different CTA Observations'
                raise RuntimeError(msg)

        else:

            msg = 'csdmatter only supports CTA observations'
            raise RuntimeError(msg)

        return
Example no. 15
    def _get_parameters(self):
        """
        Get parameters from parfile and setup the observation
        """
        # Set observation if not done before
        if self.obs().is_empty():
            self._require_inobs('csspec::get_parameters()')
            self.obs(self._get_observations())

        # Set observation statistic
        self._set_obs_statistic(gammalib.toupper(self['statistic'].string()))

        # Set models if we have none
        if self.obs().models().is_empty():
            self.obs().models(self['inmodel'].filename())

        # Query source name
        self['srcname'].string()

        # Get spectrum generation method
        self._method = self['method'].string()

        # Collect number of unbinned, binned and On/Off observations in
        # observation container
        n_unbinned = 0
        n_binned   = 0
        n_onoff    = 0
        for obs in self.obs():
            if obs.classname() == 'GCTAObservation':
                if obs.eventtype() == 'CountsCube':
                    n_binned += 1
                else:
                    n_unbinned += 1
            elif obs.classname() == 'GCTAOnOffObservation':
                n_onoff += 1
        n_cta   = n_unbinned + n_binned + n_onoff
        n_other = self.obs().size() - n_cta

        # If spectrum method is not "NODES" then set spectrum method and
        # script mode according to type of observations
        if self._method != 'NODES':
            if n_other > 0:
                self._method = 'NODES'
            else:
                if n_unbinned == 0 and n_binned != 0 and n_onoff == 0:
                    self._binned_mode = True
                    self._method      = 'SLICE'
                elif n_unbinned == 0 and n_binned == 0 and n_onoff != 0:
                    self._onoff_mode = True
                    self._method      = 'SLICE'
                elif n_unbinned == 0 and n_binned != 0 and n_onoff != 0:
                    msg = 'Mix of binned and On/Off CTA observations found ' \
                          'in observation container. csscript does not support ' \
                          'this mix.'
                    raise RuntimeError(msg)
                elif n_unbinned != 0 and (n_binned != 0 or n_onoff != 0):
                    msg = 'Mix of unbinned and binned or On/Off CTA observations ' \
                          'found in observation container. csscript does not ' \
                          'support this mix.'
                    raise RuntimeError(msg)
                elif n_unbinned != 0:
                    self._method = 'SLICE'

        # Set ebounds
        self._set_ebounds()

        # Query other parameters
        self['edisp'].boolean()
        self['calc_ulim'].boolean()
        self['calc_ts'].boolean()
        self['fix_bkg'].boolean()
        self['fix_srcs'].boolean()

        # Read ahead output parameters
        if self._read_ahead():
            self['outfile'].filename()

        #  Write input parameters into logger
        self._log_parameters(gammalib.TERSE)

        # Write spectrum method header and parameters
        self._log_header1(gammalib.TERSE, 'Spectrum method')
        self._log_value(gammalib.TERSE, 'Unbinned CTA observations', n_unbinned)
        self._log_value(gammalib.TERSE, 'Binned CTA observations', n_binned)
        self._log_value(gammalib.TERSE, 'On/off CTA observations', n_onoff)
        self._log_value(gammalib.TERSE, 'Other observations', n_other)

        # If there is a mix of CTA and non-CTA observations and the method
        # is 'SLICE' then log a warning that non-CTA observations will be
        # ignored
        warning = False
        if n_cta > 0 and n_other > 0 and self._method == 'SLICE':
            warning = True

        # If there are only non-CTA observations and the method is 'SLICE'
        # then stop now
        elif n_other > 0:
            if self._method == 'SLICE':
                msg = 'Selected "SLICE" method but none of the observations ' \
                      'is a CTA observation. Please select "AUTO" or "NODES" ' \
                      'if no CTA observation is provided.'
                raise RuntimeError(msg)
            else:
                self._method = 'NODES'

        # Log selected spectrum method
        self._log_value(gammalib.TERSE, 'Selected spectrum method', self._method)

        # Signal warning
        if warning:
            self._log_string(gammalib.TERSE, ' WARNING: Only CTA observations '
                             'can be handled with the "SLICE" method, all '
                             'non-CTA observations will be ignored.')

        # Return
        return
Example no. 16
    def _get_parameters(self):
        """
        Get parameters from parfile and setup the observation.
        """
        # Query datapath. If the parameter is not NONE then use it, otherwise
        # use the datapath from the VHEFITS environment variable
        datapath = self['datapath'].string()
        if gammalib.toupper(datapath) != 'NONE':
            self._datapath = datapath
        else:
            self._datapath = os.getenv('VHEFITS', '')

        # Expand environment
        self._datapath = gammalib.expand_env(self._datapath)

        # Get production name
        self._prodname = self['prodname'].string()

        # Master index file name
        master_indx = self['master_indx'].string()

        # Initialise flag if spatial selection is required
        self._select_radec = True

        # Initialise invalid radius
        self._radius = 0.0

        # Check for validity of spatial parameters
        if (self['ra'].is_valid() and self['dec'].is_valid()
                and self['rad'].is_valid()):

            # Read spatial parameters
            self._ra = self['ra'].real()
            self._dec = self['dec'].real()
            self._radius = self['rad'].real()

        # ... otherwise signal that there are no spatial parameters for
        # selection
        else:
            self._select_radec = False

        # Check Radius for validity
        if self._radius <= 0.0:
            self._select_radec = False

        # Query other parameters
        self['min_qual'].integer()
        self['expression'].string()

        # Read ahead output parameters
        if self._read_ahead():
            self['outfile'].filename()

        # Set filename of JSON master file and raise an exception if the file
        # does not exist
        master_file = os.path.join(self._datapath, master_indx)
        if not os.path.isfile(master_file):
            msg = ('FITS data store not available. No master index file found '
                   'at "%s". Make sure the file is copied from the server and '
                   'your datapath is set correctly.' % master_file)
            raise RuntimeError(msg)

        # Open and load JSON master file. If the "datasets" key is not
        # available then raise an exception
        json_data = open(master_file).read()
        data = json.loads(json_data)
        if 'datasets' not in data:
            msg = ('Key "datasets" not available in master index file.')
            raise RuntimeError(msg)

        # Get configurations from JSON master file
        configs = data['datasets']

        # Initialise obs index file
        self._obs_index = ''

        # Get name of observation index file
        for config in configs:
            if self._prodname == config['name']:
                self._obs_index = str(
                    os.path.join(self._datapath, config['obsindx']))
                break

        # If the observation index file name is empty then raise an exception
        if self._obs_index == '':
            msg = ('FITS data store "%s" not available. Run csiactdata to get '
                   'a list of available storage names.' % self._prodname)
            raise RuntimeError(msg)

        # If the observation index file is not a FITS file then raise an
        # exception
        filename = gammalib.GFilename(self._obs_index + '[OBS_INDEX]')
        if not filename.is_fits():
            msg = (
                'Observation index file "%s[OBS_INDEX]" for FITS data store '
                '"%s" not available. Check your master index file or run '
                'csiactdata to get a list of available storage names.' %
                (self._obs_index, self._prodname))
            raise RuntimeError(msg)

        #  Write input parameters into logger
        self._log_parameters(gammalib.TERSE)

        # Return
        return
Example no. 17
def plot_irf(irf, emin, emax, tmin, tmax, plotfile):
    """
    Plot Instrument Response Function

    Parameters
    ----------
    irf : `~gammalib.GCTAResponseIrf`
        Instrument Response Function
    emin : float
        Minimum energy (TeV)
    emax : float
        Maximum energy (TeV)
    tmin : float
        Minimum offset angle (deg)
    tmax : float
        Maximum offset angle (deg)
    plotfile : str
        Plot filename
    """
    # Build selection string
    selection  = ''
    eselection = ''
    tselection = ''
    if emin is not None and emax is not None:
        eselection += '%.3f-%.1f TeV' % (emin, emax)
    elif emin is not None:
        eselection += ' >%.3f TeV' % (emin)
    elif emax is not None:
        eselection += ' <%.1f TeV' % (emax)
    if tmin is not None and tmax is not None:
        tselection += '%.1f-%.1f deg' % (tmin, tmax)
    elif tmin is not None:
        tselection += ' >%.1f deg' % (tmin)
    elif tmax is not None:
        tselection += ' <%.1f deg' % (tmax)
    if len(eselection) > 0 and len(tselection) > 0:
        selection = ' (%s, %s)' % (eselection, tselection)
    elif len(eselection) > 0:
        selection = ' (%s)' % (eselection)
    elif len(tselection) > 0:
        selection = ' (%s)' % (tselection)

    # Build title
    mission    = irf.caldb().mission()
    instrument = irf.caldb().instrument()
    response   = irf.rspname()
    title      = '%s "%s" Instrument Response Function "%s"%s' % \
                 (gammalib.toupper(mission), instrument, response, selection)

    # Create figure
    fig = plt.figure(figsize=(16,8))

    # Add title
    fig.suptitle(title, fontsize=16)

    # Plot Aeff
    ax1 = fig.add_subplot(231)
    plot_aeff(ax1, irf.aeff(), emin=emin, emax=emax, tmin=tmin, tmax=tmax)

    # Plot Psf
    ax2 = fig.add_subplot(232)
    plot_psf(ax2, irf.psf(), emin=emin, emax=emax, tmin=tmin, tmax=tmax)

    # Plot Background
    ax3 = fig.add_subplot(233)
    plot_bkg(ax3, irf.background(), emin=emin, emax=emax, tmin=tmin, tmax=tmax)

    # Plot Edisp
    fig.add_subplot(234)
    plot_edisp(irf.edisp(), emin=emin, emax=emax, tmin=tmin, tmax=tmax)

    # Show the plots or save them into a file
    if len(plotfile) > 0:
        plt.savefig(plotfile)
    else:
        plt.show()

    # Return
    return
Example no. 18
    def _get_parameters(self):
        """
        Get parameters from parfile and setup the observation
        """
        # Setup observations (require response and allow event list as well as
        # counts cube)
        self._setup_observations(self.obs(), True, True, True)

        # Set observation statistic
        self._set_obs_statistic(gammalib.toupper(self['statistic'].string()))

        # Collect number of unbinned, binned and On/Off observations in
        # observation container
        n_unbinned = 0
        n_binned = 0
        n_onoff = 0
        for obs in self.obs():
            if obs.classname() == 'GCTAObservation':
                if obs.eventtype() == 'CountsCube':
                    n_binned += 1
                else:
                    n_unbinned += 1
            elif obs.classname() == 'GCTAOnOffObservation':
                n_onoff += 1
        n_cta = n_unbinned + n_binned + n_onoff
        n_other = self.obs().size() - n_cta

        # Query whether to compute model for individual components
        components = self['components'].boolean()

        # If there is only one binned observation and no model for individual
        # components is required, query for precomputed model file and set
        # use_maps to True
        if self.obs().size() == 1 and n_binned == 1 and not components:
            self._use_maps = self['modcube'].is_valid()

        # If there are unbinned observations query the energy binning parameters
        if n_unbinned != 0:
            self['ebinalg'].string()
            if self['ebinalg'].string() == 'FILE':
                self['ebinfile'].filename()
            else:
                self['emin'].real()
                self['emax'].real()
                self['enumbins'].integer()
            if n_cta > n_unbinned:
                n_notunbin = n_cta - n_unbinned

        # If there is more than one observation, and the observations are all
        # unbinned or all On/Off, query the user whether they want stacked results
        if self.obs().size() > 1 and \
                (n_unbinned == self.obs().size() or n_onoff == self.obs().size()):
            self._stack = self['stack'].boolean()
            # If we are to stack event lists query parameters for cube creation
            if self._stack and n_unbinned == self.obs().size():
                self['coordsys'].string()
                self['proj'].string()
                self['xref'].real()
                self['yref'].real()
                self['nxpix'].integer()
                self['nypix'].integer()
                self['binsz'].real()

        # If we are not using a precomputed model and no models are available
        # in the observation container query input XML model file
        if not self._use_maps and self.obs().models().size() == 0:
            self.obs().models(self['inmodel'].filename())

        # Unless all observations are On/Off query for mask definition
        if n_onoff != n_cta:
            self._mask = self['mask'].boolean()
            if self._mask:
                self['ra'].real()
                self['dec'].real()
                self['rad'].real()
                self['regfile'].query()

        # Unless all observations are On/Off, or we are using precomputed model
        # maps, query whether to use energy dispersion
        if not (n_onoff == n_cta or self._use_maps):
            self['edisp'].boolean()

        # Query algorithm for residual computation
        self['algorithm'].string()

        # Read ahead output parameters
        if self._read_ahead():
            self['outfile'].filename()

        # Write input parameters into logger
        self._log_parameters(gammalib.TERSE)

        # Write header for observation census
        self._log_header1(gammalib.TERSE, 'Observation census')

        # Log census of input observations
        self._log_value(gammalib.NORMAL, 'Unbinned CTA observations',
                        n_unbinned)
        self._log_value(gammalib.NORMAL, 'Binned CTA observations', n_binned)
        self._log_value(gammalib.NORMAL, 'On/off CTA observations', n_onoff)
        self._log_value(gammalib.NORMAL, 'Other observations', n_other)
        if n_other > 0:
            msg = 'WARNING: Only CTA observations can be handled, all non-CTA ' \
                  + 'observations will be ignored.'
            self._log_string(gammalib.TERSE, msg)

        # Log for unbinned observations
        if n_unbinned != 0:
            msg = ' User defined energy binning will be used for %d unbinned ' \
                  'observations.' % (n_unbinned)
            self._log_string(gammalib.TERSE, msg)
            if n_cta > n_unbinned:
                msg = ' The intrinsic binning will be used for the remaining ' \
                      '%d CTA observations.' % (n_notunbin)
                self._log_string(gammalib.TERSE, msg)

        # Signal how energy dispersion is applied
        if n_onoff == n_cta or self._use_maps:
            msg = ' Energy dispersion is applied based on the input data/model ' \
                  + 'and not according to the edisp parameter'
            self._log_string(gammalib.TERSE, msg)

        # Return
        return
Example no. 19
    def _trial(self, seed):
        """
        Compute the pull for a single trial

        Parameters
        ----------
        seed : int
            Random number generator seed

        Returns
        -------
        result : dict
            Dictionary of results
        """
        # Write header
        self._log_header2(gammalib.NORMAL, 'Trial %d' %
                          (seed-self['seed'].integer()+1))

        # Get number of energy bins and On source name and initialise
        # some parameters
        nbins     = self['enumbins'].integer()
        onsrc     = self['onsrc'].string()
        edisp     = self['edisp'].boolean()
        statistic = self['statistic'].string()
        emin      = None
        emax      = None
        binsz     = 0.0
        npix      = 0
        proj      = 'TAN'
        coordsys  = 'CEL'

        # If we have an On source name then set the On region radius
        if gammalib.toupper(onsrc) != 'NONE':
            onrad = self['onrad'].real()
            emin  = self['emin'].real()
            emax  = self['emax'].real()
            edisp = True   # Always use energy dispersion for On/Off
        else:

            # Reset On region source name and radius
            onrad = 0.0
            onsrc = None

            # If we have a binned observation then specify the lower and
            # upper energy limit in TeV
            if nbins > 0:
                emin     = self['emin'].real()
                emax     = self['emax'].real()
                binsz    = self['binsz'].real()
                npix     = self['npix'].integer()
                proj     = self['proj'].string()
                coordsys = self['coordsys'].string()

        # Simulate events
        obs = obsutils.sim(self.obs(),
                           emin=emin, emax=emax, nbins=nbins,
                           onsrc=onsrc, onrad=onrad,
                           addbounds=True, seed=seed,
                           binsz=binsz, npix=npix, proj=proj, coord=coordsys,
                           edisp=edisp, log=False, debug=self._logDebug(),
                           chatter=self['chatter'].integer())

        # Determine number of events in simulation
        nevents = 0.0
        for run in obs:
            nevents += run.nobserved()

        # Write simulation results
        self._log_header3(gammalib.NORMAL, 'Simulation')
        for run in self.obs():
            self._log_value(gammalib.NORMAL, 'Input observation %s' % run.id(),
                            self._obs_string(run))
        for run in obs:
            self._log_value(gammalib.NORMAL, 'Output observation %s' % run.id(),
                            self._obs_string(run))
        self._log_value(gammalib.NORMAL, 'Number of simulated events', nevents)

        # Fit model
        if self['profile'].boolean():
            models = self.obs().models()
            for model in models:
                like = ctools.cterror(obs)
                like['srcname']   = model.name()
                like['edisp']     = edisp
                like['statistic'] = statistic
                like['debug']     = self._logDebug()
                like['chatter']   = self['chatter'].integer()
                like.run()
        else:
            like = ctools.ctlike(obs)
            like['edisp']     = edisp
            like['statistic'] = statistic
            like['debug']     = self._logDebug()
            like['chatter']   = self['chatter'].integer()
            like.run()

        # Store results
        logL   = like.opt().value()
        npred  = like.obs().npred()
        models = like.obs().models()

        # Write result header
        self._log_header3(gammalib.NORMAL, 'Pulls')

        # Gather results in form of a list of result columns and a
        # dictionary containing the results. The result contains the
        # log-likelihood, the number of simulated events, the number of
        # predicted events and for each fitted parameter the fitted value,
        # the pull and the fit error.
        #
        # Note that we do not use the model and parameter iterators
        # because we need the indices to get the true (or real) parameter
        # values from the input models.
        colnames = ['LogL', 'Sim_Events', 'Npred_Events']
        values   = {'LogL': logL, 'Sim_Events': nevents, 'Npred_Events': npred}
        for i in range(models.size()):
            model = models[i]
            for k in range(model.size()):
                par = model[k]
                if par.is_free():

                    # Set name as a combination of model name and parameter
                    # name separated by an underscore. In that way each
                    # parameter has a unique name.
                    name = model.name()+'_'+par.name()

                    # Append parameter, Pull_parameter and e_parameter column
                    # names
                    colnames.append(name)
                    colnames.append('Pull_'+name)
                    colnames.append('e_'+name)

                    # Compute pull for this parameter as the difference
                    #               (fitted - true) / error
                    # In case that the error is 0 the pull is set to 99
                    fitted_value = par.value()
                    real_value   = self.obs().models()[i][k].value()
                    error        = par.error()
                    if error != 0.0:
                        pull = (fitted_value - real_value) / error
                    else:
                        pull = 99.0

                    # Store results in dictionary
                    values[name]         = fitted_value
                    values['Pull_'+name] = pull
                    values['e_'+name]    = error

                    # Write results into logger
                    value = '%.4f (%e +/- %e)' % (pull, fitted_value, error)
                    self._log_value(gammalib.NORMAL, name, value)

        # Bundle together results in a dictionary
        result = {'colnames': colnames, 'values': values}

        # Return
        return result
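
The pull stored for each free parameter is simply (fitted - true) / error, with 99 used as a sentinel when the fit error is zero. A small hedged sketch of that computation with invented numbers:

def pull(fitted_value, true_value, error):
    """Compute the pull of a fitted parameter, as in _trial() above.

    A zero fit error yields the sentinel value 99.0.
    """
    if error != 0.0:
        return (fitted_value - true_value) / error
    return 99.0

# Example: a prefactor fitted at 5.8e-16 with a true value of 5.7e-16 and a
# fit error of 2.0e-17 gives a pull of about 0.5
print(pull(5.8e-16, 5.7e-16, 2.0e-17))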