Example #1
    def make_dirs(self):

        # Create gammalib Caldb        
        caldb = gammalib.GCaldb()
        
        # Set calibration directory 
        self.cal_dir  = "data"
        self.cal_dir += "/"+self.m_mission.lower()
        self.cal_dir += "/"+self.m_instrument.lower()
        self.cal_dir += "/bcf/"+self.m_rspname
        
        # Set absolute path
        self.base_dir = caldb.rootdir() +"/data"
        self.base_dir += "/"+self.m_mission.lower()
        self.base_dir += "/"+self.m_instrument.lower()
        
        # Set directory for irf file
        self.rsp_dir = caldb.rootdir() + "/" + self.cal_dir
        
        # Create directories and log information
        if not os.path.isdir(self.rsp_dir):
            if self.logExplicit():
                self.log(gammalib.parformat("Directory"))
                self.log(self.rsp_dir)
                self.log("\n")
            os.makedirs(self.rsp_dir)
        else:
            if self.logExplicit():
                self.log(gammalib.parformat("Directory (existing)"))
                self.log(self.rsp_dir)
                self.log("\n")

        # Return
        return
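
A note on the path handling: the directories above are assembled by plain string concatenation. An equivalent construction with os.path.join from the standard library is sketched below; the helper name and signature are illustrative only and not part of the original class.

import os

def build_caldb_paths(rootdir, mission, instrument, rspname):
    # Illustrative helper: build the same three paths as make_dirs() above
    cal_dir  = os.path.join('data', mission.lower(), instrument.lower(),
                            'bcf', rspname)
    base_dir = os.path.join(rootdir, 'data', mission.lower(),
                            instrument.lower())
    rsp_dir  = os.path.join(rootdir, cal_dir)
    return cal_dir, base_dir, rsp_dir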
Example #2
    def save(self):
        """
        Save calibration database FITS file.
        """
        # Write header into logger
        if self._logTerse():
            self._log("\n")
            self._log.header1("Save calibration database")

        # Set response filename
        filename = self._rsp_dir + "/" + self._outfile
        
        # Write filenames into logger
        if self._logNormal():
            self._log(gammalib.parformat("CALDB index file"))
            self._log(self._caldb_inx.filename().url())
            self._log("\n")
            self._log(gammalib.parformat("Response file"))
            self._log(filename)
            self._log("\n")

        # Save caldb index file
        self._caldb_inx.save(self._clobber())

        # Save response file
        self._irf_fits.saveto(filename, self._clobber())
        
        # Return
        return
Example #3
    def save(self):
        """ 
        Save TS map and remove slices if requested.
        """
        # Write header
        if self._logTerse():
            self._log("\n")
            self._log.header1("Save TS map")

        # Get output filename in case it was not read ahead
        outmap = self["outmap"].filename()

        # Log filename
        if self._logTerse():
            self._log(gammalib.parformat("TS map file"))
            self._log(outmap.url())
            self._log("\n")

        # Create FITS file
        fits = gammalib.GFits()

        # Write TS map into primary
        self._tsmap.write(fits)

        # Loop over maps and write them to fits
        for i in range(len(self._maps)):
            self._maps[i].write(fits)

        # Set map names as extensions
        for i in range(len(self._mapnames)):
            fits[i + 1].extname(self._mapnames[i])

        # Check if map is fully done
        done = True
        for pix in self._statusmap:
            if pix < 0.5:
                done = False
                break

        # Write status map if we are not done yet
        if not done:
            self._statusmap.write(fits)
            fits[fits.size() - 1].extname("STATUS MAP")

        # Save FITS file
        fits.saveto(outmap, self._clobber())

        # Delete TS input maps if requested
        if self._delete:
            for filename in self._merged_files:
                os.remove(filename)
                if self._logTerse():
                    self._log(gammalib.parformat("Deleted input file"))
                    self._log(filename)
                    self._log("\n")

        # Return
        return
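
The completeness test over the status map above can also be expressed with the built-in all(); a minimal sketch, assuming the status map iterates over pixel values exactly as in the explicit loop:

def is_map_complete(statusmap):
    # A pixel value >= 0.5 marks a computed TS pixel, as in the loop above
    return all(pix >= 0.5 for pix in statusmap)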
Example #4
    def run(self):
        """
        Run the script
        """
        # Switch screen logging on in debug mode
        if self._logDebug():
            self._log.cout(True)

        # Get parameters
        self._get_parameters()

        # Write header
        if self._logTerse():
            self._log('\n')
            self._log.header1('Merge models')

        # Initialise model container
        self._models = gammalib.GModels()

        # Loop over model files
        for f in self._files:

            # Construct container from XML file
            models = gammalib.GModels(f)

            # Log number of models to add
            if self._logTerse():
                nmodels = models.size()
                if nmodels == 0:
                    self._log(gammalib.parformat('Add no model from file'))
                elif nmodels == 1:
                    self._log(gammalib.parformat('Add 1 model from file'))
                else:
                    self._log(
                        gammalib.parformat('Add %d models from file' %
                                           nmodels))
                self._log(f)
                self._log('\n')

            # Extend model container by adding all models in the model file
            self._models.extend(models)

        # Log total number of models
        if self._logTerse():
            self._log(gammalib.parformat('Models after merging'))
            self._log(str(self._models.size()))
            self._log('\n')

        # Return
        return
Example #5
    def save(self):
        """
        Save spectrum.
        """
        # Write header
        if self._logTerse():
            self._log("\n")
            self._log.header1("Save spectrum")

        # Get outfile parameter
        outfile = self["outfile"].filename()

        # Continue only if the FITS file is valid
        if self._fits is not None:

            # Log file name
            if self._logTerse():
                self._log(gammalib.parformat("Spectrum file"))
                self._log(outfile.url())
                self._log("\n")

            # Save spectrum
            self._fits.saveto(outfile, self._clobber())

        # Return
        return
Example #6
    def save(self):
        """ 
        Save model definition XML file
        """
        # Write header
        if self._logTerse():
            self._log('\n')
            self._log.header1('Save models')

        # Get output filename in case it was not read ahead
        outmodel = self['outmodel'].filename()

        # If file exists and clobber flag is false then raise an exception
        if outmodel.exists() and not self._clobber():
            msg = ('Cannot save "'+outmodel.url()+'": File already exists. '
                   'Use parameter clobber=yes to allow overwriting of files.')
            raise RuntimeError(msg)

        else:

            # Log filename
            if self._logTerse():
                self._log(gammalib.parformat('Model definition XML file'))
                self._log(outmodel.url())
                self._log('\n')

            # Save models
            self._models.save(outmodel)

        # Return
        return
Example #7
    def save(self):
        """
        Save runlist.
        """
        # Write header
        if self._logTerse():
            self._log('\n')
            self._log.header1('Save runlist')

        # Get filename
        outfile = self['outfile'].filename()

        # Check for clobber
        if outfile.exists() and not self._clobber():
            if self._logTerse():
                self._log('File ' + outfile.url() + ' already exists. ')
                self._log('Set clobber=yes to overwrite file.')
                self._log('\n')
        else:

            # Log file name
            if self._logTerse():
                self._log(gammalib.parformat('Runlist file'))
                self._log(outfile.url())
                self._log('\n')

            # Write runs to file
            f = open(outfile.url(), 'w')
            for run in self._runs:
                f.write(str(run) + ' \n')
            f.close()

        # Return
        return
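
The runlist write above opens and closes the file by hand; a context manager closes it even if a write fails. A minimal sketch of the same write, with an illustrative helper name:

def write_runlist(url, runs):
    # Write one run identifier per line, as in the loop above
    with open(url, 'w') as f:
        for run in runs:
            f.write(str(run) + ' \n')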
Example #8
    def save(self):
        """
        Save observation definition XML file.
        """
        # Write header and filename into logger
        if self._logTerse():
            self._log('\n')
            self._log.header1('Save observation definition XML file')

        # Get output filename in case it was not read ahead
        outobs = self['outobs'].filename()

        # Check if observation definition XML file is valid
        if outobs.url() != 'NONE':

            # Log filename
            if self._logTerse():
                self._log(gammalib.parformat('Observation XML file'))
                self._log(outobs.url())
                self._log('\n')

            # Save observation definition XML file
            self._obs.save(outobs)
        
        # Return
        return
Example #9
    def save(self):
        """
        Save light curve.
        """
        # Write header
        if self._logTerse():
            self._log('\n')
            self._log.header1('Save light curve')

        # Get light curve filename
        outfile = self["outfile"].filename()

        # Continue only if the FITS file is valid
        if self._fits is not None:

            # Log file name
            if self._logTerse():
                self._log(gammalib.parformat("Light curve file"))
                self._log(outfile.url())
                self._log("\n")

            # Save light curve
            self._fits.saveto(outfile, self._clobber())

        # Return
        return
Example #10
    def save(self):
        """
        Save residual map.
        """
        # Write header
        if self._logTerse():
            self._log("\n")
            self._log.header1("Save residual map")

        # Get outmap parameter
        outmap = self["outmap"].filename()
        
        # Continue only if the residual map is valid
        if self._resmap is not None:

            # Log file name
            if self._logTerse():
                self._log(gammalib.parformat("Residual map file"))
                self._log(outmap.url())
                self._log("\n")

            # Save residual map
            self._resmap.save(outmap, self._clobber())

        # Return
        return
Example #11
    def _make_dirs(self):
        """
        Make CALDB directories.
        """
        # Write header into logger
        if self._logTerse():
            self._log("\n")
            self._log.header2("Creating directory structure")

        # Create calibration database        
        caldb = gammalib.GCaldb(self["rootdir"].string())
        
        # Set calibration directory 
        self._cal_dir  = "data"
        self._cal_dir += "/"+self._mission.lower()
        self._cal_dir += "/"+self._caldb.lower()
        self._cal_dir += "/bcf/"+self["irf"].string()
        
        # Set absolute path
        self._base_dir = caldb.rootdir() +"/data"
        self._base_dir += "/"+self._mission.lower()
        self._base_dir += "/"+self._caldb.lower()
        
        # Set directory for irf file
        self._rsp_dir = caldb.rootdir() + "/" + self._cal_dir

        # Log resulting FITS table
        if self._logNormal():
            self._log(gammalib.parformat("Calibration directory"))
            self._log(self._cal_dir)
            self._log("\n")
            self._log(gammalib.parformat("Base directory"))
            self._log(self._base_dir)
            self._log("\n")
            if not os.path.isdir(self._rsp_dir):
                self._log(gammalib.parformat("IRF directory"))
            else:
                self._log(gammalib.parformat("IRF directory (existing)"))
            self._log(self._rsp_dir)
            self._log("\n")
        
        # Create IRF directory if it does not yet exist
        if not os.path.isdir(self._rsp_dir):
            os.makedirs(self._rsp_dir)

        # Return
        return
Example #12
    def _adjust_model_pars(self):
        """
        Adjust model parameters dependent on user parameters.
        """
        # Write header
        if self._logTerse():
            self._log("\n")
            self._log.header1("Adjust model parameters")

        # Adjust model parameters dependent on input user parameters
        for model in self._obs.models():

            # Set TS flag for all models to false. The source of interest
            # will be set to true later
            model.tscalc(False)

            # Log model name
            if self._logNormal():
                self._log.header3(model.name())

            # Deal with the source of interest
            if model.name() == self._srcname:
                if self["calc_ts"].boolean():
                    model.tscalc(True)

            elif (self["fix_bkg"].boolean()
                  and model.classname() != "GModelSky"):
                for par in model:
                    if par.is_free():
                        par.fix()
                        if self._logNormal():
                            self._log(gammalib.parformat(par.name()))
                            self._log("fixed\n")

            elif (self["fix_srcs"].boolean()
                  and model.classname() == "GModelSky"):
                for par in model:
                    if par.is_free():
                        par.fix()
                        if self._logNormal():
                            self._log(gammalib.parformat(par.name()))
                            self._log("fixed\n")

        # Return
        return
Example #13
    def save(self):
        """ 
        Save models to ds9 region file if required
        """

        # Get output filename in case it was not read ahead
        self._ds9file = self["ds9file"].filename()

        # Check if DS9 file is valid
        if self._ds9file != "NONE":

            # Write header
            if self._logTerse():
                self._log("\n")
                self._log.header1("Save models in DS9 file")

            # Log filename
            if self._logTerse():
                self._log(gammalib.parformat("DS9 filename"))
                self._log(self._ds9file.url())
                self._log("\n")

            # Open file
            f = open(self._ds9file.url(), "w")

            # Write coordinate system
            f.write("fk5\n")

            # Loop over models
            for model in self._models:

                # Continue only if point source or extended source model
                if (model.type() == "PointSource"
                        or model.type() == "ExtendedSource"):
                    line = self._model2ds9string(model)
                    if len(line):
                        f.write(line + "\n")

                # Logging for diffuse components
                elif model.type() == "DiffuseSource":
                    self._log("Skipping diffuse model \"" + model.name() +
                              "\"\n")

                # Logging for background components
                else:
                    if self._logExplicit():
                        self._log("Skipping background model \"" +
                                  model.name() + "\"\n")

            # Close file
            f.close()

        # Return
        return
Example #14
    def save(self):
        """ 
        Save pointings into DS9 region file

        This method saves all pointing directions that are found in the
        observation container into a DS9 region file. If "NONE" is
        specified for the "ds9file" parameter the method does nothing.
        """

        # Get output filename in case it was not read ahead
        ds9file = self['ds9file'].filename()

        # Check if DS9 file is valid
        if ds9file.url() != 'NONE':

            # Write header
            if self._logTerse():
                self._log('\n')
                self._log.header1('Save pointings in DS9 file')

            # Log filename
            if self._logTerse():
                self._log(gammalib.parformat('DS9 filename'))
                self._log(ds9file.url())
                self._log('\n')

            # Open file
            f = open(ds9file.url(), 'w')

            # Write coordinate system
            f.write('fk5\n')

            # Loop over pointings
            for i in range(len(self._pnt_ra)):

                # Create string
                line = 'point('
                line += str(self._pnt_ra[i]) + ',' + str(
                    self._pnt_dec[i]) + ')'
                line += ' # point=cross 20 width=3\n'

                # Write to file
                f.write(line)

            # Close file
            f.close()

        # Return
        return
Example #15
    def save(self):
        """
        Save model cube.
        """
        # Write header
        if self._logTerse():
            self._log("\n")
            self._log.header1("Save model cube")

        # Get outfile parameter
        outfile = self["outfile"].filename()

        # Log file name
        if self._logTerse():
            self._log(gammalib.parformat("Model cube"))
            self._log(outfile.url())
            self._log("\n")

        # Save model cube
        self._cube.save(outfile, self._clobber())

        # Return
        return
Example #16
    def run(self):
        """
        Run the script.
        """
        # Switch screen logging on in debug mode
        if self._logDebug():
            self._log.cout(True)

        # Get parameters
        self._get_parameters()

        # Write header into logger
        if self._logTerse():
            self._log("\n")
            self._log.header1("Merge TS maps")

        # Initialise file to start with
        file0 = ""
        self._merged_files = []

        # Test the files for a status map and use the first file that
        # contains one
        for fitsfile in self._files:

            # Skip file if it's not a FITS file
            if not gammalib.GFilename(fitsfile).is_fits():
                if self._logExplicit():
                    self._log(gammalib.parformat("Skip file"))
                    self._log(fitsfile)
                    self._log(" (not a FITS file)\n")
                continue

            # Open FITS file
            fits = gammalib.GFits(fitsfile)

            # If file contains a status map then use it
            if fits.contains("STATUS MAP"):
                fits.close()
                file0 = fitsfile
                if self._logTerse():
                    self._log(gammalib.parformat("Initial TS map file"))
                    self._log(fitsfile)
                    count = self._get_number_of_ts_pixels(fitsfile)
                    self._log(" (%d TS pixels computed)" % count)
                    self._log("\n")
                break

            # ... otherwise signal that file is useless
            else:
                fits.close()
                if self._logExplicit():
                    self._log(gammalib.parformat("Skip file"))
                    self._log(fitsfile)
                    self._log(" (no \"STATUS MAP\" extension)\n")
                continue

        # Signal if no suitable file was found
        if file0 == "":
            if self._logTerse():
                self._log("None of the provided files seems to be a sliced "
                          "TS map file (none has a \"STATUS MAP\" "
                          "extension).\n")

        # ... otherwise merge files
        else:

            # Copy the file list so that removing the initial file does not
            # modify the original list
            workfiles = list(self._files)

            # Remove the entry which will be used to initialise the map
            workfiles.remove(file0)

            # Initialise map from first file
            self._init_ts_map(file0)

            # Append to added files
            self._merged_files.append(file0)

            # Loop over files
            for fitsfile in workfiles:

                # Skip if file is not FITS
                if not gammalib.GFilename(fitsfile).is_fits():
                    if self._logExplicit():
                        self._log(gammalib.parformat("Skip file"))
                        self._log(fitsfile)
                        self._log(" (not a FITS file)\n")
                    continue

                # Open FITS file
                fits = gammalib.GFits(fitsfile)

                # Skip if file does not contain status map
                if not fits.contains("STATUS MAP"):
                    fits.close()
                    if self._logExplicit():
                        self._log(gammalib.parformat("Skip file"))
                        self._log(fitsfile)
                        self._log(" (no \"STATUS MAP\" extension)\n")
                    continue

                # Close FITS file
                fits.close()

                # Logging
                if self._logTerse():
                    self._log(gammalib.parformat("Merge TS map file"))
                    self._log(fitsfile)
                    count = self._get_number_of_ts_pixels(fitsfile)
                    self._log(" (%d TS pixels computed)" % count)
                    self._log("\n")

                # Merge TS map
                self._merge_ts_map(fitsfile)

                # Append FITS file to merged files
                self._merged_files.append(fitsfile)

        # Return
        return
Example #17
    def _make_irf_file(self):
        """
        Creates an IRF FITS file.
        """
        # Write header into logger
        if self._logTerse():
            self._log("\n")
            self._log.header2("Creating IRF file")

        # Get response for the observation
        rsp = self._observation.response()
        
        # Extract response file names
        fname_aeff  = rsp.aeff().filename()
        fname_psf   = rsp.psf().filename()
        fname_edisp = rsp.edisp().filename()
        fname_bkg   = rsp.background().filename()

        # Log filenames
        if self._logNormal():
            self._log.header3("IRF input files")
            self._log(gammalib.parformat("Effective area"))
            self._log(fname_aeff.url())
            self._log("\n")
            self._log(gammalib.parformat("Point spread function"))
            self._log(fname_psf.url())
            self._log("\n")
            self._log(gammalib.parformat("Energy dispersion"))
            self._log(fname_edisp.url())
            self._log("\n")
            self._log(gammalib.parformat("Background rate"))
            self._log(fname_bkg.url())
            self._log("\n")
    
        # Open FITS files of response components
        fits_aeff  = gammalib.GFits(fname_aeff)
        fits_psf   = gammalib.GFits(fname_psf)
        fits_edisp = gammalib.GFits(fname_edisp)
        fits_bkg   = gammalib.GFits(fname_bkg)

        # Get extension names
        ext_aeff  = fname_aeff.extname("EFFECTIVE AREA")
        ext_psf   = fname_psf.extname("POINT SPREAD FUNCTION")
        ext_edisp = fname_edisp.extname("ENERGY DISPERSION")
        ext_bkg   = fname_bkg.extname("BACKGROUND")
        
        # Create empty FITS file
        fits = gammalib.GFits()
        
        # Append IRF component to FITS file
        fits.append(fits_aeff[ext_aeff])
        fits.append(fits_psf[ext_psf])
        fits.append(fits_edisp[ext_edisp])
        fits.append(fits_bkg[ext_bkg])

        # Log resulting FITS file
        if self._logNormal():
            self._log(str(fits))
            self._log("\n")
        if self._logExplicit():
            self._log(str(fits[ext_aeff].header()))
            self._log("\n")
            self._log(str(fits[ext_psf].header()))
            self._log("\n")
            self._log(str(fits[ext_edisp].header()))
            self._log("\n")
            self._log(str(fits[ext_bkg].header()))
            self._log("\n")
        
        # Return fits file
        return fits
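
Note that _make_irf_file() returns the gammalib.GFits object instead of writing it. A hypothetical caller, assuming the same member names as the save() method in Example #2, might use it like this:

# Hypothetical usage inside the same class: build the IRF FITS file and
# write it to the response directory (filename handling as in Example #2)
irf_fits = self._make_irf_file()
irf_fits.saveto(self._rsp_dir + '/' + self._outfile, self._clobber())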
Example #18
def handle_matrix():
    """
    Illustrates the handling of matrices. Although this example uses the
    GMatrix class, the same operations can be performed on a sparse matrix
    (GMatrixSparse) or a symmetric matrix (GMatrixSymmetric).
    """
    # Create matrix with 4 rows and 5 columns
    matrix = gammalib.GMatrix(4, 5)

    # Get matrix attributes
    number_of_rows = matrix.rows()  # Will be 4
    number_of_columns = matrix.columns()  # Will be 5
    number_of_elements = matrix.size()  # Will be 20=4*5
    matrix_fill = matrix.fill()  # Will be 0
    matrix_minimum = matrix.min()  # Will be 0
    matrix_maximum = matrix.max()  # Will be 0
    matrix_sum = matrix.sum()  # Will be 0

    # Dump some information
    print("Attributes of an empty GMatrix:")
    print(gammalib.parformat("Matrix rows") + str(number_of_rows))
    print(gammalib.parformat("Matrix columns") + str(number_of_columns))
    print(gammalib.parformat("Matrix elements") + str(number_of_elements))
    print(gammalib.parformat("Matrix fill") + str(matrix_fill))
    print(gammalib.parformat("Smallest matrix element") + str(matrix_minimum))
    print(gammalib.parformat("Largest matrix element") + str(matrix_maximum))
    print(gammalib.parformat("Sum of all element") + str(matrix_sum))

    # Set all matrix elements to 2.0
    matrix.set(2.0)

    # Now set all matrix elements to specific values
    for row in range(matrix.rows()):
        for column in range(matrix.columns()):
            matrix[row, column] = row + column * matrix.rows() + 1.5

    # Get matrix attributes
    matrix_fill = matrix.fill()  # Will be 1
    matrix_minimum = matrix.min()  # Will be 1.5
    matrix_maximum = matrix.max()  # Will be 20.5
    matrix_sum = matrix.sum()  # Will be 220.0

    # Dump some information
    print("Attributes of a filled GMatrix:")
    print(gammalib.parformat("Matrix fill") + str(matrix_fill))
    print(gammalib.parformat("Smallest matrix element") + str(matrix_minimum))
    print(gammalib.parformat("Largest matrix element") + str(matrix_maximum))
    print(gammalib.parformat("Sum of all element") + str(matrix_sum))

    # Perform some matrix operations
    other_matrix = matrix.copy()  # Use copy() to create a deep copy
    sum_matrix = matrix + other_matrix  # Add two matrices
    zero_matrix = matrix - other_matrix  # Subtract two matrices
    other_matrix += matrix  # Add-on matrix
    other_matrix -= matrix  # Subtract off matrix
    other_matrix *= 2.0  # Multiply elements by 2
    other_matrix /= 2.0  # Divide elements by 2
    neg_matrix = -matrix  # Negate matrix
    abs_matrix = neg_matrix.abs()  # Absolute value of elements

    # And now some more tricky operations
    transposed = matrix.transpose()  # First transpose matrix
    mult_matrix = matrix * transposed  # Now we can multiply it
    other_matrix *= transposed  # Multiply on matrix
    #inverted      = matrix.invert()           # NOT YET IMPLEMENTED
    vector = gammalib.GVector(5)  # Allocate vector
    multiplied = matrix * vector  # Vector multiplication

    # Compare matrices
    if matrix == abs_matrix:
        print("Yes, got it!")
    if matrix != transposed:
        print("Got it again!")

    # Copy columns one-by-one from one matrix into another
    destination = gammalib.GMatrix(4, 5)
    for column in range(matrix.columns()):
        vector = matrix.column(column)  # Extract column
        destination.column(column, vector)  # Set column

    # Add columns one-by-one from one matrix into another
    destination = matrix.copy()
    for column in range(matrix.columns()):
        vector = matrix.column(column)  # Extract column
        destination.add_to_column(column, vector)  # Add column

    # Copy rows one-by-one from one matrix into another
    destination = gammalib.GMatrix(4, 5)
    for row in range(matrix.rows()):
        vector = matrix.row(row)  # Extract row
        destination.row(row, vector)  # Set row

    # Add rows one-by-one from one matrix into another
    destination = matrix.copy()
    for row in range(matrix.rows()):
        vector = matrix.row(row)  # Extract row
        destination.add_to_row(row, vector)  # Add row

    # Return
    return
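
handle_matrix() is a module-level function, so a minimal driver is enough to run the example; a sketch, assuming the gammalib Python bindings are installed and the function above lives in the same script:

import gammalib

if __name__ == '__main__':
    # Run the matrix handling example
    handle_matrix()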
Example #19
    def run(self):
        """
        Run the script.
        """
        # Switch screen logging on in debug mode
        if self._logDebug():
            self._log.cout(True)

        # Get parameters
        self._get_parameters()

        # Write observation into logger
        if self._logTerse():
            self._log("\n")
            self._log.header1("Observation")
            self._log(str(self._obs))
            self._log("\n")

        # Adjust model parameters dependent on user parameters
        self._adjust_model_pars()

        # Write header
        if self._logTerse():
            self._log("\n")
            self._log.header1("Generate lightcurve")

        # Initialise list of result dictionaries
        results = []

        # Get source parameters
        pars = self._get_free_par_names()

        # Loop over time bins
        for i in range(self._tbins.size()):

            # Get time boundaries
            tmin = self._tbins.tstart(i)
            tmax = self._tbins.tstop(i)

            # Write time bin into header
            if self._logTerse():
                self._log.header2("MJD " + str(tmin.mjd()) + "-" +
                                  str(tmax.mjd()))

            # Compute time bin center and time width
            twidth = 0.5 * (tmax - tmin)  # in seconds
            tmean = tmin + twidth

            # Initialise result dictionary
            result = {
                'mjd': tmean.mjd(),
                'e_mjd': twidth / gammalib.sec_in_day,
                'ts': 0.0,
                'ulimit': 0.0,
                'pars': pars,
                'values': {}
            }

            # Log information
            if self._logExplicit():
                self._log.header3("Selecting events")

            # Select events
            select = ctools.ctselect(self._obs)
            select["emin"] = self["emin"].real()
            select["emax"] = self["emax"].real()
            select["tmin"] = tmin.convert(self._time_reference())
            select["tmax"] = tmax.convert(self._time_reference())
            select["rad"] = "UNDEFINED"
            select["ra"] = "UNDEFINED"
            select["dec"] = "UNDEFINED"
            select.run()

            # Retrieve observation
            obs = select.obs()

            # If a stacked analysis is requested then bin the events
            # and compute the stacked response functions and setup
            # an observation container with a single stacked observation.
            if self._stacked:
                obs = self._bin_observation(obs)

            # Header
            if self._logExplicit():
                self._log.header3("Fitting the data")

            # Do maximum likelihood model fitting
            like = ctools.ctlike(obs)
            like["edisp"] = self["edisp"].boolean()
            like.run()

            # Skip bin if no event was present
            if like.obs().logL() == 0.0:

                # Signal skipping of bin
                if self._logTerse():
                    self._log(gammalib.parformat("Warning"))
                    self._log("No event in this time bin, skip bin.\n")

                # Set all results to 0
                for par in pars:
                    result['values'][par] = 0.0
                    result['values']["e_" + par] = 0.0

                # Append result
                results.append(result)

                # Continue with next time bin
                continue

            # Retrieve model fitting results for source of interest
            source = like.obs().models()[self._srcname]

            # Extract parameter values
            for par in pars:
                result['values'][par] = source.spectral()[par].value()
                result['values']["e_" + par] = source.spectral()[par].error()

            # Calculate upper limit (-1 if not computed)
            ulimit_value = self._compute_ulimit(like.obs())
            if ulimit_value > 0.0:
                result['ulimit'] = ulimit_value

            # Extract Test Statistic value
            if self["calc_ts"].boolean():
                result['ts'] = source.ts()

            # Append result to list of dictionaries
            results.append(result)

            # Log results for this time bin
            if self._logNormal():
                self._log.header3("Results")
                pars = self._get_free_par_names()
                for par in pars:
                    value = source.spectral()[par].value()
                    error = source.spectral()[par].error()
                    unit = source.spectral()[par].unit()
                    self._log(gammalib.parformat(par))
                    self._log(str(value))
                    self._log(" +/- ")
                    self._log(str(error))
                    self._log(" ")
                    self._log(unit)
                    self._log("\n")
                if result['ulimit'] > 0.0:
                    self._log(gammalib.parformat("Upper flux limit"))
                    self._log(str(result['ulimit']) + " ph/cm2/s\n")
                if self["calc_ts"].boolean():
                    self._log(gammalib.parformat("Test Statistic"))
                    self._log(str(result['ts']) + "\n")

        # Create FITS table from results
        table = self._create_fits_table(results)

        # Create FITS file and append FITS table to FITS file
        self._fits = gammalib.GFits()
        self._fits.append(table)

        # Return
        return
Example #20
    def run(self):
        """
        Run the script
        """

        # Switch screen logging on in debug mode
        if self._logDebug():
            self._log.cout(True)

        # Get parameters
        self._get_parameters()

        # Write information into logger
        if self._logTerse():
            self._log('\n')
            self._log.header1('Test source')
            model = self._obs.models()[self._srcname]
            self._log(str(model))
            self._log('\n')

        # Set log-likelihood to zero
        logL0 = 0.0

        # Pre-compute null hypothesis if requested
        if self._compute_null:

            # Write information into logger
            if self._logTerse():
                self._log('\n')
                self._log.header1('Compute null hypothesis')

            # Compute null hypothesis
            logL0 = self._compute_null_hypothesis()

            # Write likelihood into logger
            if self._logTerse():
                self._log(gammalib.parformat('Source removed'))
                self._log(self._srcname)
                self._log('\n')
                self._log(gammalib.parformat('Log-likelihood'))
                self._log(repr(logL0))
                self._log('\n')

        # Get parameters of TS map ctool
        pars = gammalib.GApplicationPars('cttsmap.par')

        # Compute total number of jobs
        nbins = self._map.npix()
        njobs = int(math.ceil(float(nbins) / float(self._bins_per_job)) + 0.1)

        # Set parameters to be skipped now, we will deal with them later
        skip_pars = ['binmin', 'binmax', 'logL0', 'outmap', 'logfile']

        # Set tool name for computation
        base_command = 'cttsmap'

        # Write information into logger
        if self._logTerse():
            self._log('\n')
            self._log.header1('Create commands')
            self._log(gammalib.parformat('Number of cttsmap calls'))
            self._log(str(njobs))
            self._log('\n')

        # Loop over TS map parameters
        for par in pars:

            # Skip if we deal with them later
            if par.name() in skip_pars:
                continue

            # Skip if they need to be queried
            # This way we ensure we pass only parameters
            # that were queried before
            if self[par.name()].is_query():
                continue

            # Set TS map parameter according to input from
            # this script
            par.value(self[par.name()].value())

            # Append command to set parameter
            base_command += ' ' + par.name() + '=' + par.value()

        # Append null hypothesis parameter
        base_command += ' logL0=' + repr(logL0)

        # Set binning to start from zero
        binmin = 0
        binmax = 0

        # Clear command sequence
        self._cmd = []

        # Loop over jobs and create commands
        for job in range(njobs):

            # Set specific outmap file name
            outmap = self._outmap.url().replace('.fits',
                                                '_' + str(job) + '.fits')

            # Set bin numbers to be computed in this job
            binmin = binmax
            binmax = binmin + self._bins_per_job
            if binmax > nbins:
                binmax = nbins

            # Setup sliced command
            sliced_command = '%s binmin=%s binmax=%s outmap=%s logfile=%s' % \
                             (base_command, str(binmin), str(binmax),
                              outmap, outmap.replace('.fits','.log'))

            # Append command to list of commands
            self._cmd.append(sliced_command)

        # Write information into logger
        if self._logExplicit():
            self._log('\n')
            self._log.header2('Commands')
            for cmd in self._cmd:
                self._log(cmd)
                self._log('\n')
            self._log('\n')

        # Return
        return
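
The run() method above only assembles the cttsmap command strings in self._cmd; executing them is left to other parts of the script. One possible way to launch them with the standard library is sketched below (a hypothetical runner, not part of the original class):

import subprocess

def execute_commands(commands):
    # Run each assembled cttsmap command in turn; check_call raises on the
    # first command that exits with a non-zero status
    for cmd in commands:
        subprocess.check_call(cmd, shell=True)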
Example #22
    def run(self):
        """
        Run the script.

        Raises
        ------
        RuntimeError
            Invalid pointing definition file format.
        """
        # Switch screen logging on in debug mode
        if self._logDebug():
            self._log.cout(True)

        # Get parameters
        self._get_parameters()

        # Write header into logger
        if self._logTerse():
            self._log('\n')
            self._log.header1('Creating observation definition XML file')

        # Load pointing definition file if it is not already set
        if self._pntdef.size() == 0:
            self._pntdef = gammalib.GCsv(self['inpnt'].filename(), ',')
        ncols = self._pntdef.ncols()
        npnt  = self._pntdef.nrows()-1

        # Throw an exception if there is no header information
        if self._pntdef.nrows() < 1:
            raise RuntimeError('No header found in pointing definition file.')

        # Clear observation container
        self._obs.clear()
        identifier = 1

        # Extract header from pointing definition file
        header = []
        for col in range(ncols):
            header.append(self._pntdef[0,col])

        # Loop over all pointings
        for pnt in range(npnt):

            # Set row index
            row = pnt + 1

            # Create CTA observation
            obs = gammalib.GCTAObservation()

            # Set observation name
            if 'name' in header:
                name = self._pntdef[row, header.index('name')]
            else:
                name = 'None'
            obs.name(name)

            # Set identifier
            if 'id' in header:
                id_ = self._pntdef[row, header.index('id')]
            else:
                id_ = '%6.6d' % identifier
                identifier += 1
            obs.id(id_)

            # Set pointing
            if 'ra' in header and 'dec' in header:
                ra     = float(self._pntdef[row, header.index('ra')])
                dec    = float(self._pntdef[row, header.index('dec')])
                pntdir = gammalib.GSkyDir()
                pntdir.radec_deg(ra,dec)
            elif 'lon' in header and 'lat' in header:
                lon    = float(self._pntdef[row, header.index('lon')])
                lat    = float(self._pntdef[row, header.index('lat')])
                pntdir = gammalib.GSkyDir()
                pntdir.lb_deg(lon,lat)
            else:
                raise RuntimeError('No (ra,dec) or (lon,lat) columns '
                                   'found in pointing definition file.')
            obs.pointing(gammalib.GCTAPointing(pntdir))

            # Set response function
            if 'caldb' in header:
                caldb = self._pntdef[row, header.index('caldb')]
            else:
                caldb = self['caldb'].string()
            if 'irf' in header:
                irf = self._pntdef[row, header.index('irf')]
            else:
                irf = self['irf'].string()
            if caldb != '' and irf != '':
                obs = self._set_response(obs, caldb, irf)

            # Set deadtime correction factor
            if 'deadc' in header:
                deadc = float(self._pntdef[row, header.index('deadc')])
            else:
                deadc = self['deadc'].real()
            obs.deadc(deadc)

            # Set Good Time Interval
            if 'duration' in header:
                duration = float(self._pntdef[row, header.index('duration')])
            else:
                duration = self['duration'].real()
            tmin       = self._tmin
            tmax       = self._tmin + duration
            gti        = gammalib.GGti(self._time_reference())
            tstart     = gammalib.GTime(tmin, self._time_reference())
            tstop      = gammalib.GTime(tmax, self._time_reference())
            self._tmin = tmax
            gti.append(tstart, tstop)
            obs.ontime(gti.ontime())
            obs.livetime(gti.ontime()*deadc)

            # Set Energy Boundaries
            has_emin = False
            has_emax = False
            if 'emin' in header:
                emin     = float(self._pntdef[row, header.index('emin')])
                has_emin = True
            else:
                if self['emin'].is_valid():
                    emin     = self['emin'].real()
                    has_emin = True
            if 'emax' in header:
                emax     = float(self._pntdef[row, header.index('emax')])
                has_emax = True
            else:
                if self['emax'].is_valid():
                    emax     = self['emax'].real()
                    has_emax = True
            has_ebounds = has_emin and has_emax
            if has_ebounds:
                ebounds = gammalib.GEbounds(gammalib.GEnergy(emin, 'TeV'),
                                            gammalib.GEnergy(emax, 'TeV'))

            # Set ROI
            has_roi = False
            if 'rad' in header:
                rad     = float(self._pntdef[row, header.index('rad')])
                has_roi = True
            else:
                if self['rad'].is_valid():
                    rad     = self['rad'].real()
                    has_roi = True
            if has_roi:
                roi = gammalib.GCTARoi(gammalib.GCTAInstDir(pntdir), rad)

            # Create an empty event list
            list_ = gammalib.GCTAEventList()
            list_.gti(gti)

            # Set optional information
            if has_ebounds:
                list_.ebounds(ebounds)
            if has_roi:
                list_.roi(roi)

            # Attach event list to CTA observation
            obs.events(list_)

            # Write observation into logger
            if self._logExplicit():
                self._log(str(obs))
                self._log('\n')
            elif self._logTerse():
                self._log(gammalib.parformat(obs.instrument()+' observation'))
                self._log('Name="'+obs.name()+'" ')
                self._log('ID="'+obs.id()+'"\n')

            # Append observation
            self._obs.append(obs)

        # Return
        return
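
The pointing definition file read at the top of run() is a comma-separated table whose first row names the columns; 'name', 'id', 'ra', 'dec', 'lon', 'lat', 'caldb', 'irf', 'deadc', 'duration', 'emin', 'emax' and 'rad' are recognised, and missing columns fall back to user parameters. A minimal sketch that writes such a file for two pointings (the file name and values are illustrative only):

# Write a minimal pointing definition file with a subset of the columns
# recognised above; coordinates and durations are illustrative values only
with open('pointings.def', 'w') as f:
    f.write('name,ra,dec,duration\n')
    f.write('Pointing1,83.63,22.01,1800.0\n')
    f.write('Pointing2,84.63,22.51,1800.0\n')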