Example #1
    def create_fit(self,log=False,debug=False):
        self.validate()
        # create ctlike instance with given parameters
        self.info("Fitting Data using ctlike")
        if self.m_obs:
            self.like = ct.ctlike(self.m_obs)
        else:
            self.like = ct.ctlike()
            if self.m_binned:
                self.like["infile"] = self.m_cntfile
            else:
                self.like["infile"] = self.m_evtfile
                
            self.like["stat"].string(self.m_stat)
            self.like["caldb"].string(self.m_caldb)
            self.like["irf"].string(self.m_irf)
        self.like["srcmdl"] = self.m_xml 
        self.like["edisp"].boolean(self.m_edisp)
        self.like["outmdl"] = self.m_xml_out

        # Optionally open the log file
        if log:
            self.like.logFileOpen()
        # Optionally switch on debugging mode
        if debug:
            self.like["debug"].boolean(True)
Example #2
def fit(obs, log=False, debug=False):
    """
    Perform maximum likelihood fitting of observations in the container.
    
    Parameters:
     obs   - Observation container
    Keywords:
     log   - Create log file(s)
     debug - Create screen dump
    """
    # Allocate ctlike application
    like = ctools.ctlike(obs)
    
    # Optionally open the log file
    if log:
        like.logFileOpen()
    
    # Optionally switch on debugging mode
    if debug:
        like["debug"].boolean(True)

    # Run ctlike application.
    like.run()
    
    # Return observations
    return like
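A minimal usage sketch for the fit() helper above, assuming an observation definition XML file and a model definition XML file (the file names obs.xml and models.xml are placeholders, not part of the original example):

import gammalib

# fit() is the helper defined in the example above.
# Load a hypothetical observation definition XML file into an observation
# container and attach a model definition (both file names are placeholders)
obs = gammalib.GObservations('obs.xml')
obs.models(gammalib.GModels('models.xml'))

# Fit the observations, then inspect the optimiser status and fitted models
like = fit(obs, log=True)
print(like.opt())
print(like.obs().models())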
Example #3
def fit(obs, log=False, debug=False, chatter=2, edisp=False):
    """
    Perform maximum likelihood fitting of observations in the container.

    Parameters:
     obs   - Observation container
    Keywords:
     log     - Create log file(s)
     debug   - Create screen dump
     chatter - Chatter level
     edisp   - Apply energy dispersion?
    """
    # Allocate ctlike application
    like = ctools.ctlike(obs)

    # Optionally open the log file
    if log:
        like.logFileOpen()

    # Optionally switch on debugging mode
    if debug:
        like["debug"] = True

    # Set chatter level
    like["chatter"] = chatter

    # Optionally apply energy dispersion
    like["edisp"] = edisp

    # Run ctlike application.
    like.run()

    # Return observations
    return like
Example #4
    def test_functional(self):
        """
        Test ctlike functionality.
        """
        # Set-up ctlike
        like = ctools.ctlike()
        like["inobs"]    = self.events_name
        like["inmodel"]  = self.model_name
        like["caldb"]    = self.caldb
        like["irf"]      = self.irf
        like["outmodel"] = "result.xml"

        # Run tool
        self.test_try("Run ctlike")
        try:
            like.run()
            self.test_try_success()
        except:
            self.test_try_failure("Exception occured in ctlike.")

        # Save results
        self.test_try("Save results")
        try:
            like.save()
            self.test_try_success()
        except:
            self.test_try_failure("Exception occured in saving results.")

        # Return
        return
Example #5
def ctlike_binned(events_name, cntmap_name, emin, emax,
                  enumbins, nxpix, nypix, binsz, ra, dec, IRF, CALDB, outfile):
    """
    Copied and modified from ctools/examples/make_binned_analysis.py
    """
    # Bin the events first
    bin = ctools.ctbin()

    # We need this to explicitly open the log file in Python mode
    bin.logFileOpen()

    bin["evfile"].filename(events_name)
    bin["outfile"].filename(cntmap_name)
    bin["emin"].real(emin)
    bin["emax"].real(emax)
    bin["enumbins"].integer(enumbins)
    bin["nxpix"].integer(nxpix)
    bin["nypix"].integer(nypix)
    bin["binsz"].real(binsz)
    bin["coordsys"].string('GAL')
    bin["xref"].real(ra)
    bin["yref"].real(dec)
    bin["proj"].string('CAR')
    bin.execute()

    # Perform maximum likelihood fitting
    like = ctools.ctlike()
    like.logFileOpen()
    like["infile"].filename(cntmap_name)
    like["srcmdl"].filename('$CTOOLS/share/models/crab.xml')
    like["outmdl"].filename(outfile)
    like["caldb"].string(CALDB)
    like["irf"].string(IRF)
    like.execute()
Example #7
    def _test_python(self):
        """
        Test ctlike from Python
        """
        # Set-up ctlike
        like = ctools.ctlike()
        like['inobs'] = self._events
        like['inmodel'] = self._model
        like['caldb'] = self._caldb
        like['irf'] = self._irf
        like['outmodel'] = 'ctlike_py1.xml'
        like['logfile'] = 'ctlike_py1.log'
        like['chatter'] = 2

        # Run ctlike tool
        like.logFileOpen()  # Make sure we get a log file
        like.run()
        like.save()

        # Check result file
        self._check_result_file('ctlike_py1.xml')

        # Retrieve observation container, save and check model file
        obs = like.obs()
        obs.models().save('ctlike_py2.xml')
        self._check_result_file('ctlike_py2.xml')

        # Retrieve optimizer
        opt = like.opt()

        # Return
        return
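Building on the test above, a brief sketch of how fitted parameters could be read back from the ctlike instance after run(); the source name 'Crab' and the parameter name 'Prefactor' are assumptions about the model definition and not part of the original test:

# Retrieve the fitted model container from the ctlike instance
models = like.obs().models()

# Inspect a fitted source; 'Crab' and 'Prefactor' are hypothetical names that
# depend on the model definition XML used for the fit
source = models['Crab']
print(source.ts())                             # Test statistic (if TS computation was requested)
print(source.spectral()['Prefactor'].value())  # Fitted parameter value
print(source.spectral()['Prefactor'].error())  # Parameter uncertainty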
Example #8
File: obsutils.py  Project: jdevin/ctools
def fit(obs, log=False, debug=False, chatter=2, edisp=False):
    """
    Perform maximum likelihood fitting of observations in the container.

    Parameters:
     obs   - Observation container
    Keywords:
     log     - Create log file(s)
     debug   - Create screen dump
     chatter - Chatter level
     edisp   - Apply energy dispersion?
    """
    # Allocate ctlike application
    like = ctools.ctlike(obs)

    # Optionally open the log file
    if log:
        like.logFileOpen()

    # Optionally switch on debugging mode
    if debug:
        like["debug"] = True

    # Set chatter level
    like["chatter"] = chatter

    # Optionally apply energy dispersion
    like["edisp"] = edisp

    # Run ctlike application.
    like.run()

    # Return observations
    return like
Example #9
 def ctlike_run(self,
                input_obs_list,
                input_models=None,
                output_models='ml_result.xml',
                log_file='ctlike.log',
                force=False,
                save=False):
     like = ctools.ctlike()
     if isinstance(input_obs_list, gammalib.GObservations):
         like.obs(input_obs_list)
         if input_models is not None:
             like.obs().models(input_models)
     elif os.path.isfile(input_obs_list) and os.path.isfile(input_models):
         # observations list from file
         like["inobs"] = input_obs_list
         like["inmodel"] = input_models
     else:
         raise Exception('Cannot understand input obs list for ctlike')
     like["outmodel"] = output_models
     like["logfile"] = log_file
     like["nthreads"] = self.nthreads
     if force or not os.path.isfile(output_models):
         like.logFileOpen()
         like.run()
     elif os.path.isfile(output_models):
         ml_models = gammalib.GModels(output_models)
         like.obs().models(ml_models)
     else:
         raise Exception("Cannot proceed with ctlike")
     saved = False
     if (save and force) or (save and not os.path.isfile(output_models)):
         like.save()
         saved = True
         logger.info("File {} created.".format(output_models))
     return like
Example #10
 def test_unbinned_mem(self):
     """
     Test unbinned in-memory pipeline.
     """
     # Set script parameters
     model_name           = "data/crab.xml"
     caldb                = "irf"
     irf                  = "cta_dummy_irf"
     ra                   =   83.63
     dec                  =   22.01
     rad_sim              =   10.0
     tstart               =    0.0
     tstop                = 1800.0
     emin                 =    0.1
     emax                 =  100.0
     rad_select           =    3.0
 
     # Simulate events
     sim = ctools.ctobssim()
     sim["inmodel"].filename(model_name)
     sim["caldb"].string(caldb)
     sim["irf"].string(irf)
     sim["ra"].real(ra)
     sim["dec"].real(dec)
     sim["rad"].real(rad_sim)
     sim["tmin"].real(tstart)
     sim["tmax"].real(tstop)
     sim["emin"].real(emin)
     sim["emax"].real(emax)
     self.test_try("Run ctobssim")
     try:
         sim.run()
         self.test_try_success()
     except:
         self.test_try_failure("Exception occured in ctobssim.")
 
     # Select events
     select = ctools.ctselect(sim.obs())
     select["ra"].real(ra)
     select["dec"].real(dec)
     select["rad"].real(rad_select)
     select["tmin"].real(tstart)
     select["tmax"].real(tstop)
     select["emin"].real(emin)
     select["emax"].real(emax)
     self.test_try("Run ctselect")
     try:
         select.run()
         self.test_try_success()
     except:
         self.test_try_failure("Exception occured in ctselect.")
 
     # Perform maximum likelihood fitting
     like = ctools.ctlike(select.obs())
     self.test_try("Run ctlike")
     try:
         like.run()
         self.test_try_success()
     except:
         self.test_try_failure("Exception occured in ctlike.")
Example #11
def unbinned_pipeline(duration):
    """
    Unbinned analysis pipeline.
    """
    # Set script parameters
    model_name  = "${CTOOLS}/share/models/crab.xml"
    caldb       = "prod2"
    irf         = "South_50h"
    ra          =   83.63
    dec         =   22.01
    rad_sim     =   10.0
    tstart      =    0.0
    tstop       = duration
    emin        =    0.1
    emax        =  100.0
    rad_select  =    3.0

    # Get start CPU time (use a separate variable so that the simulation
    # start time in tstart is not overwritten)
    cpu_start = time.clock()

    # Simulate events
    sim = ctools.ctobssim()
    sim["inmodel"].filename(model_name)
    sim["caldb"].string(caldb)
    sim["irf"].string(irf)
    sim["ra"].real(ra)
    sim["dec"].real(dec)
    sim["rad"].real(rad_sim)
    sim["tmin"].real(tstart)
    sim["tmax"].real(tstop)
    sim["emin"].real(emin)
    sim["emax"].real(emax)
    sim.run()

    # Select events
    select = ctools.ctselect(sim.obs())
    select["ra"].real(ra)
    select["dec"].real(dec)
    select["rad"].real(rad_select)
    select["tmin"].real(tstart)
    select["tmax"].real(tstop)
    select["emin"].real(emin)
    select["emax"].real(emax)
    select.run()

    # Get ctlike start CPU time
    tctlike = time.clock()

    # Perform maximum likelihood fitting
    like = ctools.ctlike(select.obs())
    like.run()

    # Get stop CPU time and compute elapsed times
    cpu_stop = time.clock()
    telapsed = cpu_stop - cpu_start
    tctlike  = cpu_stop - tctlike
	
    # Return
    return telapsed, tctlike
Example #12
def unbinned_pipeline(model_name, duration):
    """
    Unbinned analysis pipeline.
    """
    # Set script parameters
    caldb = "prod2"
    irf = "South_50h"
    ra = 83.63
    dec = 22.01
    rad_sim = 10.0
    tstart = 0.0
    tstop = duration
    emin = 0.1
    emax = 100.0
    rad_select = 3.0

    # Get start CPU time
    cpu_start = time.clock()

    # Simulate events
    sim = ctools.ctobssim()
    sim["inmodel"] = model_name
    sim["caldb"] = caldb
    sim["irf"] = irf
    sim["ra"] = ra
    sim["dec"] = dec
    sim["rad"] = rad_sim
    sim["tmin"] = tstart
    sim["tmax"] = tstop
    sim["emin"] = emin
    sim["emax"] = emax
    sim.run()

    # Select events
    select = ctools.ctselect(sim.obs())
    select["ra"] = ra
    select["dec"] = dec
    select["rad"] = rad_select
    select["tmin"] = tstart
    select["tmax"] = tstop
    select["emin"] = emin
    select["emax"] = emax
    select.run()

    # Get ctlike start CPU time
    cpu_ctlike = time.clock()

    # Perform maximum likelihood fitting
    like = ctools.ctlike(select.obs())
    like.run()

    # Get stop CPU time and compute elapsed times
    cpu_stop = time.clock()
    cpu_elapsed = cpu_stop - cpu_start
    cpu_ctlike = cpu_stop - cpu_ctlike

    # Return
    return cpu_elapsed, cpu_ctlike
Example #13
def unbinned_pipeline(model_name, duration):
    """
    Unbinned analysis pipeline.
    """
    # Set script parameters
    caldb       = "prod2"
    irf         = "South_50h"
    ra          =   83.63
    dec         =   22.01
    rad_sim     =   10.0
    tstart      =    0.0
    tstop       = duration
    emin        =    0.1
    emax        =  100.0
    rad_select  =    3.0

    # Get start CPU time
    cpu_start = time.clock()

    # Simulate events
    sim = ctools.ctobssim()
    sim["inmodel"] = model_name
    sim["caldb"]   = caldb
    sim["irf"]     = irf
    sim["ra"]      = ra
    sim["dec"]     = dec
    sim["rad"]     = rad_sim
    sim["tmin"]    = tstart
    sim["tmax"]    = tstop
    sim["emin"]    = emin
    sim["emax"]    = emax
    sim.run()

    # Select events
    select = ctools.ctselect(sim.obs())
    select["ra"]   = ra
    select["dec"]  = dec
    select["rad"]  = rad_select
    select["tmin"] = tstart
    select["tmax"] = tstop
    select["emin"] = emin
    select["emax"] = emax
    select.run()

    # Get ctlike start CPU time
    cpu_ctlike = time.clock()

    # Perform maximum likelihood fitting
    like = ctools.ctlike(select.obs())
    like.run()

    # Get stop CPU time and compute elapsed times
    cpu_stop    = time.clock()
    cpu_elapsed = cpu_stop - cpu_start
    cpu_ctlike  = cpu_stop - cpu_ctlike

    # Return
    return cpu_elapsed, cpu_ctlike
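A short driver sketch for the pipeline above, assuming the Crab model definition shipped with ctools (the same path used in the other examples) and a 30-minute observation:

# Hypothetical driver for unbinned_pipeline()
model_name = '${CTOOLS}/share/models/crab.xml'
duration   = 1800.0   # seconds

cpu_elapsed, cpu_ctlike = unbinned_pipeline(model_name, duration)
print('Total CPU time : %.3f s' % cpu_elapsed)
print('ctlike CPU time: %.3f s' % cpu_ctlike)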
Example #14
def run_pipeline(obs,
                 ra=83.63,
                 dec=22.01,
                 rad=3.0,
                 emin=0.1,
                 emax=100.0,
                 tmin=0.0,
                 tmax=0.0,
                 debug=False):
    """
    Simulation and binned analysis pipeline

    Parameters
    ----------
    obs : `~gammalib.GObservations`
        Observation container
    ra : float, optional
        Right Ascension of Region of Interest centre (deg)
    dec : float, optional
        Declination of Region of Interest centre (deg)
    rad : float, optional
        Radius of Region of Interest (deg)
    emin : float, optional
        Minimum energy (TeV)
    emax : float, optional
        Maximum energy (TeV)
    tmin : float, optional
        Start time (s)
    tmax : float, optional
        Stop time (s)
    debug : bool, optional
        Debug function
    """
    # Simulate events
    sim = ctools.ctobssim(obs)
    sim['debug'] = debug
    sim.run()

    # Select events
    select = ctools.ctselect(sim.obs())
    select['ra'] = ra
    select['dec'] = dec
    select['rad'] = rad
    select['emin'] = emin
    select['emax'] = emax
    select['tmin'] = tmin
    select['tmax'] = tmax
    select['debug'] = debug
    select.run()

    # Perform maximum likelihood fitting
    like = ctools.ctlike(select.obs())
    like['debug'] = True  # Switch this always on for results in console
    like.run()

    # Return
    return
Example #15
    def test_unbinned_fits(self):
        """
        Test unbinned pipeline with FITS file saving
        """
        # Set script parameters
        events_name          = 'events.fits'
        selected_events_name = 'selected_events.fits'
        result_name          = 'results.xml'
        ra                   =   83.63
        dec                  =   22.01
        rad_sim              =   10.0
        rad_select           =    3.0
        tstart               =    0.0
        tstop                =  300.0
        emin                 =    0.1
        emax                 =  100.0

        # Simulate events
        sim = ctools.ctobssim()
        sim['inmodel']   = self._model
        sim['outevents'] = events_name
        sim['caldb']     = self._caldb
        sim['irf']       = self._irf
        sim['ra']        = ra
        sim['dec']       = dec
        sim['rad']       = rad_sim
        sim['tmin']      = tstart
        sim['tmax']      = tstop
        sim['emin']      = emin
        sim['emax']      = emax
        sim.execute()

        # Select events
        select = ctools.ctselect()
        select['inobs']  = events_name
        select['outobs'] = selected_events_name
        select['ra']     = ra
        select['dec']    = dec
        select['rad']    = rad_select
        select['tmin']   = tstart
        select['tmax']   = tstop
        select['emin']   = emin
        select['emax']   = emax
        select.execute()

        # Perform maximum likelihood fitting
        like = ctools.ctlike()
        like['inobs']    = selected_events_name
        like['inmodel']  = self._model
        like['outmodel'] = result_name
        like['caldb']    = self._caldb
        like['irf']      = self._irf
        like.execute()

        # Return
        return
Example #16
def run_pipeline(obs, ra=83.63, dec=22.01, rad=3.0,
                 emin=0.1, emax=100.0,
                 tmin=0.0, tmax=0.0,
                 model="${CTOOLS}/share/models/crab.xml",
                 caldb="prod2", irf="South_50h",
                 debug=False):
    """
    Simulation and unbinned analysis pipeline.

    Keywords:
     ra    - RA of cube centre [deg] (default: 83.63)
     dec   - DEC of cube centre [deg] (default: 22.01)
     rad   - Selection radius [deg] (default: 3.0)
     emin  - Minimum energy of cube [TeV] (default: 0.1)
     emax  - Maximum energy of cube [TeV] (default: 100.0)
     tmin  - Start time [MET] (default: 0.0)
     tmax  - Stop time [MET] (default: 0.0)
     model - Model Xml file
     caldb - Calibration database path (default: "prod2")
     irf   - Instrument response function (default: South_50h)
     debug - Enable debugging (default: False)
    """
    # Get model

    # Simulate events
    sim = ctools.ctobssim(obs)
    sim["debug"]     = debug
    sim["outevents"] = "obs.xml"
    sim.execute()

    # Select events
    select = ctools.ctselect()
    select["inobs"]  = "obs.xml"
    select["outobs"] = "obs_selected.xml"
    select["ra"]     = ra
    select["dec"]    = dec
    select["rad"]    = rad
    select["emin"]   = emin
    select["emax"]   = emax
    select["tmin"]   = tmin
    select["tmax"]   = tmax
    select["debug"]  = debug
    select.execute()

    # Perform maximum likelihood fitting
    like = ctools.ctlike()
    like["inobs"]    = "obs_selected.xml"
    like["inmodel"]  = model
    like["outmodel"] = "fit_results.xml"
    like["caldb"]    = caldb
    like["irf"]      = irf
    like["debug"]    = True # Switch this always on for results in console
    like.execute()

    # Return
    return
Example #17
    def test_unbinned_fits(self):
        """
        Test unbinned pipeline with FITS file saving
        """
        # Set script parameters
        events_name          = 'events.fits'
        selected_events_name = 'selected_events.fits'
        result_name          = 'results.xml'
        ra                   =   83.63
        dec                  =   22.01
        rad_sim              =    3.0
        rad_select           =    2.0
        tstart               =    0.0
        tstop                =  300.0
        emin                 =    1.0
        emax                 =  100.0

        # Simulate events
        sim = ctools.ctobssim()
        sim['inmodel']   = self._model
        sim['outevents'] = events_name
        sim['caldb']     = self._caldb
        sim['irf']       = self._irf
        sim['ra']        = ra
        sim['dec']       = dec
        sim['rad']       = rad_sim
        sim['tmin']      = tstart
        sim['tmax']      = tstop
        sim['emin']      = emin
        sim['emax']      = emax
        sim.execute()

        # Select events
        select = ctools.ctselect()
        select['inobs']  = events_name
        select['outobs'] = selected_events_name
        select['ra']     = ra
        select['dec']    = dec
        select['rad']    = rad_select
        select['tmin']   = tstart
        select['tmax']   = tstop
        select['emin']   = emin
        select['emax']   = emax
        select.execute()

        # Perform maximum likelihood fitting
        like = ctools.ctlike()
        like['inobs']    = selected_events_name
        like['inmodel']  = self._model
        like['outmodel'] = result_name
        like['caldb']    = self._caldb
        like['irf']      = self._irf
        like.execute()

        # Return
        return
Example #18
def ctlike(inobs, inmodel, outmodel, caldb='prod2', irf='South_0.5h'):
    """"""
    ctl = ctools.ctlike()
    ctl['inobs'] = inobs
    ctl['caldb'] = caldb
    ctl['irf'] = irf
    ctl['inmodel'] = inmodel
    ctl['outmodel'] = outmodel
    ctl.execute()
    print("Generated " + outmodel)
Example #19
def run_pipeline(obs, emin=0.1, emax=100.0,
                 enumbins=20, nxpix=200, nypix=200, binsz=0.02,
                 coordsys="CEL", proj="CAR", debug=False):
    """
    Simulation and binned analysis pipeline.

    Keywords:
     emin     - Minimum energy of cube [TeV] (default: 0.1)
     emax     - Maximum energy of cube [TeV] (default: 100.0)
     enumbins - Number of energy bins in cube (default: 20)
     nxpix    - Number of RA pixels in cube (default: 200)
     nypix    - Number of DEC pixels in cube (default: 200)
     binsz    - Spatial cube bin size [deg] (default: 0.02)
     coordsys - Cube coordinate system (CEL or GAL)
     proj     - Cube World Coordinate System (WCS) projection
     debug    - Enable debugging (default: False)
    """
    # Simulate events
    sim = ctools.ctobssim(obs)
    sim["debug"] = debug
    sim.run()

    # Bin events by looping over all observations in the container
    obs = gammalib.GObservations()
    obs.models(sim.obs().models())
    for run in sim.obs():

        # Create container with a single observation
        container = gammalib.GObservations()
        container.append(run)

        # Bin events for that observation
        bin = ctools.ctbin(container)
        bin["ebinalg"]  = "LOG"
        bin["emin"]     = emin
        bin["emax"]     = emax
        bin["enumbins"] = enumbins
        bin["nxpix"]    = nxpix
        bin["nypix"]    = nypix
        bin["binsz"]    = binsz
        bin["coordsys"] = coordsys
        bin["usepnt"]   = True
        bin["proj"]     = proj
        bin.run()

        # Append result to observations
        obs.extend(bin.obs())

    # Perform maximum likelihood fitting
    like = ctools.ctlike(obs)
    like["debug"] = True # Switch this always on for results in console
    like.run()

    # Return
    return
Example #20
def fit(obs, srcmodel, spec, bkgname, analysis, pars, alpha=1.0):
    """
    Analyse one observation and determine the background model

    Parameters
    ----------
    obs : `~gammalib.GObservations()`
        Observation container
    srcmodel : str
        Source model
    spec : str
        Spectral model
    bkgname : str
        Background model definition XML file
    analysis : str
        Analysis name
    pars : dict
        Dictionary of analysis parameters
    alpha : float, optional
        Map scaling factor
    """
    # Set file names
    outmodel = 'rx_results_%s.xml' % analysis
    logfile = 'rx_results_%s.log' % analysis

    # Continue only if result file does not exist
    if not os.path.isfile(outmodel):

        # Set models
        models = set_model(srcmodel, spec, bkgname, alpha=alpha)

        # Attach models
        obs.models(models)

        # Perform maximum likelihood fitting with initial models
        like = ctools.ctlike(obs)
        like['edisp'] = pars['edisp']
        like['outmodel'] = outmodel
        like['max_iter'] = 500
        like['logfile'] = logfile
        like['debug'] = True
        like.logFileOpen()

        # Use wstat if no background model is provided
        if bkgname == '':
            like['statistic'] = 'WSTAT'

        # Execute ctlike
        like.execute()

    # Return
    return
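A possible follow-up sketch, assuming the fit above has produced its result file: the fitted spectrum can be integrated with gammalib, mirroring what Example #35 does for PKS 2155-304. The analysis name 'stacked' is hypothetical; the source name 'RX J1713.7-3946' is taken from Example #25, which reads the same result files:

import gammalib

# Load the fit results written by fit() above (the analysis name is hypothetical)
models = gammalib.GModels('rx_results_stacked.xml')

# Integrate the fitted spectral model between 0.3 and 3 TeV
emin  = gammalib.GEnergy(0.3, 'TeV')
emax  = gammalib.GEnergy(3.0, 'TeV')
eflux = models['RX J1713.7-3946'].spectral().eflux(emin, emax)
print('Energy flux (0.3-3 TeV): %e erg/cm2/s' % eflux)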
Example #21
def ctlike_unbinned(selected_events_name, IRF, CALDB, outfile):
    """
    Copied and modified from ctools/test/test_python.py
    """
    # Perform maximum likelihood fitting
    like = ctools.ctlike()
    like.logFileOpen()
    like["infile"].filename(selected_events_name)
    like["srcmdl"].filename('$CTOOLS/share/models/crab.xml')
    like["outmdl"].filename(outfile)
    like["caldb"].string(CALDB)
    like["irf"].string(IRF)
    like.execute()
Example #23
    def create_fit(self, log=False, debug=False):
        '''
        Create ctlike instance with given parameters

        Parameters
        ----------
        log   : whether to save the log file
        debug : whether to run in debug mode (prints a lot of information)
        '''
        self.info("Fitting Data using ctlike")
        if self.m_obs:
            self.like = ct.ctlike(self.m_obs)
        else:
            self.like = ct.ctlike()
            for k in self.config.keys():
                try:
                    for kk in self.config[k].keys():
                        if self.like._has_par(kk):
                            self.like[kk] = self.config[k][kk]
                except:
                    if self.like._has_par(k):
                        self.like[k] = self.config[k]

            if self.config["analysis"]["likelihood"] == "binned":
                self.like["inobs"] = join(self.workdir,
                                          self.config['file']["cube"])

        self.like["outmodel"] = self.config['out'] + "/" + self.config[
            'target']["name"] + "_results.xml"

        # Optionally open the log file
        if log:
            self.like.logFileOpen()
        # Optionally switch on debugging mode
        if debug:
            self.like["debug"].boolean(True)

        if self.verbose:
            print(self.like)
Example #24
    def create_fit(self, log=False, debug=False, **kwargs):
        '''
        Create ctlike instance with given parameters

        Parameters
        ----------
        log   : whether to save the log file
        debug : whether to run in debug mode (prints a lot of information)
        '''
        self.info("Fitting Data using ctlike")
        if self.m_obs:
            self.like = ct.ctlike(self.m_obs)
        else:
            self.like = ct.ctlike()
            self._fill_app(self.like,log=log,debug=debug, **kwargs)

            if self.config["analysis"]["likelihood"] == "binned":
                self.like["inobs"] = join(self.outdir,self.config['file']["cntcube"])

        self.like["outmodel"] = self.config['out']+"/"+self.config['file']["tag"]+"_results.xml"
        self.like["edisp"] = self.config['analysis']["edisp"]

        if self.verbose:
            print(self.like)
Example #25
def npred(obs, analysis, pars):
    """
    Determine Npred of source

    Parameters
    ----------
    obs : `~gammalib.GObservations()`
        Observation container
    analysis : str
        Analysis name
    pars : dict
        Dictionary of analysis parameters
    """
    # Set file names
    inmodel = 'rx_results_%s.xml' % analysis
    outmodel = 'rx_npred_%s.xml' % analysis
    logfile = 'rx_npred_%s.log' % analysis

    # Continue only if result file does not exist
    if not os.path.isfile(outmodel) and os.path.isfile(inmodel):

        # Set models
        models = gammalib.GModels(inmodel)
        model = models['RX J1713.7-3946']
        for par in model:
            par.fix()
        model.tscalc(False)  # Otherwise leads to an error
        npred_models = gammalib.GModels()
        npred_models.append(model)

        # Attach models
        obs.models(npred_models)

        # If statistic is wstat then switch to cstat (since wstat does not
        # correctly compute Npred)
        for o in obs:
            if o.statistic() == 'wstat':
                o.statistic('cstat')

        # Perform maximum likelihood fitting
        like = ctools.ctlike(obs)
        like['edisp'] = pars['edisp']
        like['outmodel'] = outmodel
        like['logfile'] = logfile
        like['debug'] = True
        like.logFileOpen()
        like.execute()

    # Return
    return
Example #26
    def __init__(self,analyser,srcname,parname = "Prefactor"):
        super(UpperLimitComputer,self).__init__()
        self.info("Creating "+self.classname+" object")
        # self.like = analyser.like.copy()
        obs = analyser.like.obs().copy()
        self.like = ct.ctlike(obs)
        self.succes = False

        # get spectral model
        self.model = self.like.obs().models()[srcname]
        self.spec = self.model.spectral()
        self.parname = parname
        self.bestloglike = analyser.like.opt().value()
        self.dloglike = 3.84/2.0 #95% CL now
Example #27
    def _fit_energy_bins(self):
        """
        Fit model to energy bins

        Returns
        -------
        results : list of dict
            List of dictionaries with fit results
        """
        # Write header
        self._log_header1(gammalib.TERSE, 'Generate spectrum')
        self._log_string(gammalib.TERSE, str(self._ebounds))

        like          = ctools.ctlike(self.obs())
        like['edisp'] = self['edisp'].boolean()
        like.run()
        logL          = like.obs().logL()

        # print( 'Total LogLikelihood {:.7e}'.format( logL ) )

        # Initialise results
        results = []

        # If more than a single thread is requested then use multiprocessing
        if self._nthreads > 1:

            # Compute energy bins
            args        = [(self, '_fit_energy_bin', i)
                           for i in range(self._ebounds.size())]
            poolresults = mputils.process(self._nthreads, mputils.mpfunc, args)

            # Construct results
            for i in range(self._ebounds.size()):
                results.append(poolresults[i][0])
                self._log_string(gammalib.TERSE, poolresults[i][1]['log'], False)

        # Otherwise, loop over energy bins
        else:
            for i in range(self._ebounds.size()):

                # Fit energy bin
                result = self._fit_energy_bin( i )

                # Append results
                results.append(result)

        # Return results
        return results
Example #28
def npred(obs, analysis, pars, fitname='pks_results', npredname='pks_npred'):
    """
    Determine Npred of source

    Parameters
    ----------
    obs : `~gammalib.GObservations()`
        Observation container
    analysis : str
        Analysis name
    pars : dict
        Dictionary of analysis parameters
    fitname : str, optional
        Fit result prefix
    npredname : str, optional
        Npred result prefix
    """
    # Set file names
    inmodel  = '%s_%s.xml' % (fitname,  analysis)
    outmodel = '%s_%s.xml' % (npredname, analysis)
    logfile  = '%s_%s.log' % (npredname, analysis)

    # Continue only if result file does not exist
    if not os.path.isfile(outmodel) and os.path.isfile(inmodel):

        # Set models
        models = gammalib.GModels(inmodel)
        model  = models['PKS 2155-304']
        for par in model:
            par.fix()
        model.tscalc(False) # Otherwise leads to an error
        npred_models = gammalib.GModels()
        npred_models.append(model)

        # Attach models
        obs.models(npred_models)

        # Perform maximum likelihood fitting
        like = ctools.ctlike(obs)
        like['edisp']    = pars['edisp']
        like['outmodel'] = outmodel
        like['logfile']  = logfile
        like['debug']    = True
        like.logFileOpen()
        like.execute()

    # Return
    return
Example #29
File: exec.py  Project: simotrone/astro
def ctlike_run(observation_file, input_model, force=0):
    working_dir = os.path.dirname(observation_file)
    result_file = os.path.join(working_dir, "ml_result.xml")
    log_file = os.path.join(working_dir, "ctlike.log")
    if not os.path.isfile(result_file) or force == 1:
        like = ctools.ctlike()
        like.clear()
        like["inobs"] = observation_file
        like["inmodel"] = input_model
        like["outmodel"] = result_file
        like["logfile"] = log_file
        like.logFileOpen()
        like.run()
        like.save()
        sys.stderr.write("File '%s' created.\n" % result_file)
    return result_file
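A hedged usage sketch for ctlike_run() above; the input file names are placeholders for an observation definition XML (with response information) and a model definition XML:

# Hypothetical input files; force=1 re-runs the fit even if a previous
# ml_result.xml already exists in the working directory
result_file = ctlike_run('obs_selected.xml', 'models.xml', force=1)
print('Maximum likelihood results written to %s' % result_file)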
Example #30
    def test_unbinned_mem(self):
        """
        Test unbinned in-memory pipeline
        """
        # Set script parameters
        ra         =   83.63
        dec        =   22.01
        rad_sim    =   10.0
        rad_select =    3.0
        tstart     =    0.0
        tstop      =  300.0
        emin       =    0.1
        emax       =  100.0

        # Simulate events
        sim = ctools.ctobssim()
        sim['inmodel'] = self._model
        sim['caldb']   = self._caldb
        sim['irf']     = self._irf
        sim['ra']      = ra
        sim['dec']     = dec
        sim['rad']     = rad_sim
        sim['tmin']    = tstart
        sim['tmax']    = tstop
        sim['emin']    = emin
        sim['emax']    = emax
        sim.run()

        # Select events
        select = ctools.ctselect(sim.obs())
        select['ra']   = ra
        select['dec']  = dec
        select['rad']  = rad_select
        select['tmin'] = tstart
        select['tmax'] = tstop
        select['emin'] = emin
        select['emax'] = emax
        select.run()

        # Perform maximum likelihood fitting
        like = ctools.ctlike(select.obs())
        like.run()

        # Return
        return
Example #31
    def test_unbinned_mem(self):
        """
        Test unbinned in-memory pipeline
        """
        # Set script parameters
        ra         =   83.63
        dec        =   22.01
        rad_sim    =    3.0
        rad_select =    2.0
        tstart     =    0.0
        tstop      =  300.0
        emin       =    1.0
        emax       =  100.0

        # Simulate events
        sim = ctools.ctobssim()
        sim['inmodel'] = self._model
        sim['caldb']   = self._caldb
        sim['irf']     = self._irf
        sim['ra']      = ra
        sim['dec']     = dec
        sim['rad']     = rad_sim
        sim['tmin']    = tstart
        sim['tmax']    = tstop
        sim['emin']    = emin
        sim['emax']    = emax
        sim.run()

        # Select events
        select = ctools.ctselect(sim.obs())
        select['ra']   = ra
        select['dec']  = dec
        select['rad']  = rad_select
        select['tmin'] = tstart
        select['tmax'] = tstop
        select['emin'] = emin
        select['emax'] = emax
        select.run()

        # Perform maximum likelihood fitting
        like = ctools.ctlike(select.obs())
        like.run()

        # Return
        return
Example #32
def run_pipeline(obs, ra=83.63, dec=22.01, rad=3.0, \
                 emin=0.1, emax=100.0, \
                 tmin=0.0, tmax=0.0, \
                 debug=False):
    """
    Simulation and unbinned analysis pipeline.

    Keywords:
     ra    - RA of cube centre [deg] (default: 83.63)
     dec   - DEC of cube centre [deg] (default: 22.01)
     rad   - Selection radius [deg] (default: 3.0)
     emin  - Minimum energy of cube [TeV] (default: 0.1)
     emax  - Maximum energy of cube [TeV] (default: 100.0)
     tmin  - Start time [MET] (default: 0.0)
     tmax  - Stop time [MET] (default: 0.0)
     debug - Enable debugging (default: False)
    """
    # Simulate events
    sim = ctools.ctobssim(obs)
    sim["debug"].boolean(debug)
    sim.run()

    # Select events
    select = ctools.ctselect(sim.obs())
    select["ra"].real(ra)
    select["dec"].real(dec)
    select["rad"].real(rad)
    select["emin"].real(emin)
    select["emax"].real(emax)
    select["tmin"].real(tmin)
    select["tmax"].real(tmax)
    select["debug"].boolean(debug)
    select.run()

    # Perform maximum likelihood fitting
    like = ctools.ctlike(select.obs())
    like["debug"].boolean(True) # Switch this always on for results in console
    like.run()
	
    # Return
    return
Example #33
def makeFit(cfg):
    """
    makes fit with ctlike
    """
    # Perform maximum likelihood fitting
    outputdir = cfg.getValue('general', 'outputdir')

    like = ctools.ctlike()
    if cfg.getValue('general', 'anatype') == 'unbinned':
        like["inobs"] = outputdir + '/' + cfg.getValue('ctselect', 'output')
        like["inmodel"] = outputdir + '/' + \
            cfg.getValue('csiactobs', 'model_output')
    elif cfg.getValue('general', 'anatype') == 'binned':
        like["inobs"] = outputdir + '/' + cfg.getValue('ctbin', 'output')
        like["inmodel"] = outputdir + '/' + \
            cfg.getValue('ctbkgcube', 'output_model')
        like["expcube"] = outputdir + '/' + cfg.getValue('ctexpcube', 'output')
        like["psfcube"] = outputdir + '/' + cfg.getValue('ctpsfcube', 'output')
        like["bkgcube"] = outputdir + '/' + \
            cfg.getValue('ctbkgcube', 'output_cube')
    else:
        Utilities.warning('Unknown type: {}'.format(
            cfg.getValue('general', 'anatype')))
        sys.exit()

    if cfg.getValue('general', 'edisp'):
        like["edisp"] = True
    else:
        like["edisp"] = False

    like["outmodel"] = outputdir + '/' + cfg.getValue('ctlike', 'output')
    like["chatter"] = 1
    if cfg.getValue('general', 'debug') is True:
        like["debug"] = True

    like.run()
    like.save()
Example #34
    def run(self):
        """
        Run the script.
        """
        # Switch screen logging on in debug mode
        if self.logDebug():
            self.log.cout(True)

        # Get parameters
        self.get_parameters()
        
        #  Write input parameters into logger
        if self.logTerse():
            self.log_parameters()
            self.log("\n")
        
        # Write observation into logger
        if self.logTerse():
            self.log("\n")
            self.log.header1("Observation")
            self.log(str(self.obs))
            self.log("\n")

        # Write header
        if self.logTerse():
            self.log("\n")
            self.log.header1("Adjust model parameters")

        # Adjust model parameters dependent on input user parameters
        for model in self.obs.models():
            
            # Set TS flag for all models to false.
            # Source of interest will be set to true later
            model.tscalc(False)
            
            # Log model name
            if self.logExplicit():
                self.log.header3(model.name())
            
            # Deal with the source of interest    
            if model.name() == self.m_srcname:
                if self.m_calc_ts:
                    model.tscalc(True)
                
            elif self.m_fix_bkg and not model.classname() == "GModelSky":
                for par in model:
                    if par.is_free() and self.logExplicit():
                        self.log(" Fixing \""+par.name()+"\"\n")
                    par.fix()
        
            elif self.m_fix_srcs and model.classname() == "GModelSky":
                for par in model:
                    if par.is_free() and self.logExplicit():
                        self.log(" Fixing \""+par.name()+"\"\n")
                    par.fix()
        
        # Write header
        if self.logTerse():
            self.log("\n")
            self.log.header1("Generate lightcurve")      
        
        # Initialise FITS Table with extension "LIGHTCURVE"
        table = gammalib.GFitsBinTable(self.m_tbins.size())
        table.extname("LIGHTCURVE")
        
        # Add Header for compatibility with gammalib.GMWLSpectrum
        table.card("INSTRUME", "CTA", "Name of Instrument")
        table.card("TELESCOP", "CTA", "Name of Telescope")
             
        # Create FITS table columns        
        MJD = gammalib.GFitsTableDoubleCol("MJD", self.m_tbins.size())
        MJD.unit("days")
        e_MJD = gammalib.GFitsTableDoubleCol("e_MJD", self.m_tbins.size())
        e_MJD.unit("days")
        
        # Create a FITS column for every free parameter
        columns = []
        for par in self.obs.models()[self.m_srcname]:
            if par.is_free():
                col = gammalib.GFitsTableDoubleCol(par.name(), self.m_tbins.size())
                col.unit(par.unit())
                columns.append(col)
                e_col = gammalib.GFitsTableDoubleCol("e_"+par.name(), self.m_tbins.size())
                e_col.unit(par.unit())
                columns.append(e_col)
        
        # Create TS and upper limit columns
        TSvalues    = gammalib.GFitsTableDoubleCol("TS", self.m_tbins.size())
        ulim_values = gammalib.GFitsTableDoubleCol("UpperLimit", self.m_tbins.size())
        ulim_values.unit("ph/cm2/s")

        # Loop over time bins
        for i in range(self.m_tbins.size()):

            # Log information
            if self.logTerse():
                self.log("\n")
                self.log.header2("Time bin "+str(i))

            # Get time boundaries
            tmin = self.m_tbins.tstart(i)
            tmax = self.m_tbins.tstop(i)
            
            # Compute time bin center and time width
            tmean   = (tmin + tmax)
            tmean  *= 0.5
            twidth  = (tmax - tmin)
            twidth *= 0.5 

            # Store time as MJD
            MJD[i] = tmean.mjd()
            e_MJD[i] = twidth.days()
            
            # Log information
            if self.logExplicit():
                self.log.header3("Selecting events")
                     
            # Select events
            select = ctools.ctselect(self.obs)
            select["emin"].real(self.m_emin)    
            select["emax"].real(self.m_emax) 
            select["tmin"].real(tmin.convert(select.time_reference()))
            select["tmax"].real(tmax.convert(select.time_reference()))
            select["rad"].value("UNDEFINED")
            select["ra"].value("UNDEFINED")
            select["dec"].value("UNDEFINED")
            select.run()  

            # Retrieve observation
            obs = select.obs()
             
            # Binned analysis
            if self.m_binned:

                # Header
                if self.logTerse():
                    self.log.header3("Binning events")
                
                # Bin events
                bin = ctools.ctbin(select.obs())
                bin["usepnt"].boolean(False)
                bin["ebinalg"].string("LOG")
                bin["xref"].real(self.m_xref)
                bin["yref"].real(self.m_yref)
                bin["binsz"].real(self.m_binsz)
                bin["nxpix"].integer(self.m_nxpix)
                bin["nypix"].integer(self.m_nypix)
                bin["enumbins"].integer(self.m_ebins)
                bin["emin"].real(self.m_emin)
                bin["emax"].real(self.m_emax)        
                bin["coordsys"].string(self.m_coordsys)
                bin["proj"].string(self.m_proj)            
                bin.run()
                
                # Header
                if self.logTerse():
                    self.log.header3("Creating exposure cube")
                
                # Create exposure cube
                expcube = ctools.ctexpcube(select.obs())
                expcube["incube"].filename("NONE")
                expcube["usepnt"].boolean(False)
                expcube["ebinalg"].string("LOG")
                expcube["xref"].real(self.m_xref)
                expcube["yref"].real(self.m_yref)
                expcube["binsz"].real(self.m_binsz)
                expcube["nxpix"].integer(self.m_nxpix)
                expcube["nypix"].integer(self.m_nypix)
                expcube["enumbins"].integer(self.m_ebins)
                expcube["emin"].real(self.m_emin)
                expcube["emax"].real(self.m_emax)   
                expcube["coordsys"].string(self.m_coordsys)
                expcube["proj"].string(self.m_proj)               
                expcube.run()
                
                # Header
                if self.logTerse():
                    self.log.header3("Creating PSF cube")
                
                # Create psf cube
                psfcube = ctools.ctpsfcube(select.obs())
                psfcube["incube"].filename("NONE")
                psfcube["usepnt"].boolean(False)
                psfcube["ebinalg"].string("LOG")
                psfcube["xref"].real(self.m_xref)
                psfcube["yref"].real(self.m_yref)
                psfcube["binsz"].real(self.m_binsz)
                psfcube["nxpix"].integer(self.m_nxpix)
                psfcube["nypix"].integer(self.m_nypix)
                psfcube["enumbins"].integer(self.m_ebins)
                psfcube["emin"].real(self.m_emin)
                psfcube["emax"].real(self.m_emax)    
                psfcube["coordsys"].string(self.m_coordsys)
                psfcube["proj"].string(self.m_proj)               
                psfcube.run()
                
                # Header
                if self.logTerse():
                    self.log.header3("Creating background cube")
                
                # Create background cube
                bkgcube = ctools.ctbkgcube(select.obs())
                bkgcube["incube"].filename("NONE")
                bkgcube["usepnt"].boolean(False)
                bkgcube["ebinalg"].string("LOG")
                bkgcube["xref"].real(self.m_xref)
                bkgcube["yref"].real(self.m_yref)
                bkgcube["binsz"].real(self.m_binsz)
                bkgcube["nxpix"].integer(self.m_nxpix)
                bkgcube["nypix"].integer(self.m_nypix)
                bkgcube["enumbins"].integer(self.m_ebins)
                bkgcube["emin"].real(self.m_emin)
                bkgcube["emax"].real(self.m_emax)   
                bkgcube["coordsys"].string(self.m_coordsys)
                bkgcube["proj"].string(self.m_proj)                
                bkgcube.run()
                
                # Set new binned observation
                obs = bin.obs()
                
                # Set precomputed binned response
                obs[0].response(expcube.expcube(), psfcube.psfcube(), bkgcube.bkgcube())

                # Get new models
                models = bkgcube.models()
                
                # Fix background models if required
                if self.m_fix_bkg:
                    for model in models:
                        if not model.classname() == "GModelSky":
                            for par in model:
                                par.fix()
                                
                # Set new models to binned observation           
                obs.models(models)
                
            # Header
            if self.logTerse():
                self.log.header3("Performing fit")
                             
            # Likelihood
            like = ctools.ctlike(obs)
            like.run()
            
            # Skip bin if no event was present
            if like.obs().logL() == 0.0:
                
                # Log information
                if self.logTerse():
                    self.log("No event in this time bin. Bin is skipped\n")

                # Set all values to 0
                for col in columns:
                    col[i] = 0.0
                TSvalues[i]    = 0.0
                ulim_values[i] = 0.0
                continue
                         
            # Get results
            fitted_models = like.obs().models()
            source        = fitted_models[self.m_srcname]

            # Calculate Upper Limit            
            ulimit_value = -1.0
            if self.m_calc_ulimit:
                
                # Logging information
                if self.logTerse():
                    self.log.header3("Computing upper limit")
                  
                # Create upper limit object  
                ulimit = ctools.ctulimit(like.obs())
                ulimit["srcname"].string(self.m_srcname)
                ulimit["eref"].real(1.0)
                
                # Try to run upper limit and catch exceptions
                try:
                    ulimit.run()
                    ulimit_value = ulimit.flux_ulimit()
                except:
                    if self.logTerse():
                        self.log("Upper limit calculation failed\n")
                    ulimit_value = -1.0
            
            # Get TS value
            TS = -1.0
            if self.m_calc_ts:
                TS = source.ts() 
            
            # Set values for storage
            TSvalues[i] = TS
            
            # Set FITS column values
            for col in columns:
                if "e_" == col.name()[:2]:
                    col[i] = source.spectral()[col.name()[2:]].error()
                else:
                    col[i] = source.spectral()[col.name()].value()
            
            # Store upper limit value if available
            if ulimit_value > 0.0:
                ulim_values[i] = ulimit_value
         
            # Log information
            if self.logExplicit(): 
                self.log.header3("Results of bin "+str(i)+": MJD "+str(tmin.mjd())+"-"+str(tmax.mjd()))
                for col in columns:
                    if "e_" == col.name()[:2]:
                        continue
                    value = source.spectral()[col.name()].value()
                    error = source.spectral()[col.name()].error()
                    unit = source.spectral()[col.name()].unit()
                    self.log(" > "+col.name()+": "+str(value)+" +- "+str(error)+" "+unit+"\n")
                if self.m_calc_ts and TSvalues[i] > 0.0:
                    self.log(" > TS = "+str(TS)+" \n")
                if self.m_calc_ulimit and ulim_values[i] > 0.0:
                    self.log(" > UL = "+str(ulim_values[i])+" [ph/cm2/s]")
                self.log("\n")

        # Append filled columns to FITS table
        table.append(MJD)
        table.append(e_MJD)
        for col in columns:
            table.append(col)
        table.append(TSvalues)
        table.append(ulim_values)
        
        # Create the FITS file now
        self.fits = gammalib.GFits()
        self.fits.append(table)
            
        # Return
        return
Example #35
def fit(obs, srcmodel, spec, bkgname, analysis, pars, fitname='pks_results'):
    """
    Analyse one observation and determine the background model

    Parameters
    ----------
    obs : `~gammalib.GObservations()`
        Observation container
    srcmodel : str
        Source model
    spec : str
        Spectral model
    bkgname : str
        Background model definition XML file
    analysis : str
        Analysis name
    pars : dict
        Dictionary of analysis parameters
    fitname : str, optional
        Fit result prefix
    """
    # Set file names
    outmodel = '%s_%s.xml' % (fitname, analysis)
    logfile  = '%s_%s.log' % (fitname, analysis)

    # Continue only if result file does not exist
    if not os.path.isfile(outmodel):

        # Set models
        models = set_model(srcmodel, spec, bkgname)

        # Attach models
        obs.models(models)

        # Perform maximum likelihood fitting with initial models
        like = ctools.ctlike(obs)
        like['edisp']    = pars['edisp']
        like['outmodel'] = outmodel
        like['logfile']  = logfile
        like['max_iter'] = 200
        like['debug']    = True
        like.logFileOpen()

        # Use wstat if no background model is provided
        if bkgname == '':
            like['statistic'] = 'WSTAT'

        # Execute ctlike
        like.execute()

    # Compute energy flux and add it to log file
    if os.path.isfile(outmodel):

        # Load result file and compute energy flux
        emin   = gammalib.GEnergy(0.3, 'TeV')
        emax   = gammalib.GEnergy(3.0, 'TeV')
        models = gammalib.GModels(outmodel)
        eflux  = models['PKS 2155-304'].spectral().eflux(emin, emax)

        # Append result to output file
        with open(logfile, "a") as myfile:
            myfile.write('\n')
            myfile.write('Energy flux (0.3-3 TeV): %e' % eflux)

    # Return
    return
Example #36
def run_pipeline(obs, emin=0.1, emax=100.0, \
                 enumbins=20, nxpix=200, nypix=200, binsz=0.02, \
                 coordsys="CEL", proj="CAR", \
                 model="${CTOOLS}/share/models/crab.xml", \
                 caldb="prod2", irf="South_50h", \
                 debug=False):
    """
    Simulation and binned analysis pipeline.

    Keywords:
     emin     - Minimum energy of cube [TeV] (default: 0.1)
     emax     - Maximum energy of cube [TeV] (default: 100.0)
     enumbins - Number of energy bins in cube (default: 20)
     nxpix    - Number of RA pixels in cube (default: 200)
     nypix    - Number of DEC pixels in cube (default: 200)
     binsz    - Spatial cube bin size [deg] (default: 0.02)
     coordsys - Cube coordinate system (CEL or GAL)
     proj     - Cube World Coordinate System (WCS) projection
     model    - Model Xml file
     caldb    - Calibration database path (default: "prod2")
     irf      - Instrument response function (default: South_50h)
     debug    - Enable debugging (default: False)
    """
    # Simulate events
    sim = ctools.ctobssim(obs)
    sim["debug"].boolean(debug)
    sim["outevents"].filename("obs.xml")
    sim.execute()

    # Bin events by looping over all observations in the container
    sim_obs = gammalib.GObservations("obs.xml")
    obs     = gammalib.GObservations()
    for run in sim_obs:

        # Get event filename and set counts cube filename
        eventfile = run.eventfile()
        cubefile  = "cube_"+eventfile

        # Bin events for that observation
        bin = ctools.ctbin()
        bin["inobs"].filename(eventfile)
        bin["outcube"].filename(cubefile)
        bin["ebinalg"].string("LOG")
        bin["emin"].real(emin)
        bin["emax"].real(emax)
        bin["enumbins"].integer(enumbins)
        bin["nxpix"].integer(nxpix)
        bin["nypix"].integer(nypix)
        bin["binsz"].real(binsz)
        bin["coordsys"].string(coordsys)
        bin["usepnt"].boolean(True)
        bin["proj"].string(proj)
        bin.execute()

        # Set observation ID
        bin.obs()[0].id(cubefile)
        bin.obs()[0].eventfile(cubefile)

        # Append result to observations
        obs.extend(bin.obs())

    # Save XML file
    xml = gammalib.GXml()
    obs.write(xml)
    xml.save("obs_cube.xml")

    # Perform maximum likelihood fitting
    like = ctools.ctlike()
    like["inobs"].filename("obs_cube.xml")
    like["inmodel"].filename(model)
    like["outmodel"].filename("fit_results.xml")
    like["expcube"].filename("NONE")
    like["psfcube"].filename("NONE")
    like["bkgcube"].filename("NONE")
    like["caldb"].string(caldb)
    like["irf"].string(irf)
    like["debug"].boolean(True) # Switch this always on for results in console
    like.execute()
	
    # Return
    return
Example #37
    def _generate_bkg(self, obs):
        """
        Generate background models

        Parameters
        ----------
        obs : `~gammalib.GObservations()`
            Observations container

        Returns
        -------
        model : `~gammalib.GModelData()`
            Background model component
        """
        # Write header for event selection
        self._log_header3(gammalib.EXPLICIT, 'Select events from observation')

        # Select events
        obs = self._select_events(obs)

        # Write header for initial background model generation
        self._log_header3(gammalib.EXPLICIT,
                          'Generate initial background model')

        # Generate initial background model
        model = self._generate_initial_model()

        # Attach initial background model
        models = gammalib.GModels()
        models.append(model)
        obs.models(models)

        # Write header for initial model fitting
        self._log_header3(gammalib.EXPLICIT, 'Fit initial background model')

        # Perform maximum likelihood fitting with initial model
        like = ctools.ctlike(obs)
        like.run()

        # Extract optimiser
        opt = like.opt()

        # Extract fitted model
        model = like.obs().models()[0].copy()

        # If a NODES model is requested then refit a node spectrum
        if self['spectral'].string() == 'NODES':

            # Create nodes spectrum from fitted initial model
            model = self._create_nodes(model)

            # Attach node spectrum
            models = gammalib.GModels()
            models.append(model)
            obs.models(models)

            # Write header for node model fitting
            self._log_header3(gammalib.EXPLICIT, 'Fit nodes background model')

            # Perform maximum likelihood fitting with node model
            like = ctools.ctlike(obs)
            like.run()

            # Extract optimiser
            opt = like.opt()

            # Extract fitted model
            model = like.obs().models()[0].copy()

            # Remove nodes with zero errors as they are not constrained
            # by the data and may lead to fitting problems later
            spectral = model.spectral()
            nodes = spectral.nodes()
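            # Note: a GModelSpectralNodes model stores its parameters as
            # interleaved (energy, intensity) pairs, so the intensity of node j
            # is parameter 2*j+1; looping from the last node downwards keeps
            # the remaining node indices valid after each removal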
            for i in range(nodes):
                iint = 2 * (nodes - i) - 1
                if spectral[iint].error() == 0.0:
                    spectral.remove(nodes - i - 1)

        # Write optimizer
        self._log_string(gammalib.EXPLICIT, str(opt))

        # Return model
        return model
Example #38
 def test_unbinned_fits(self):
     """
     Test unbinned pipeline with FITS file saving.
     """
     # Set script parameters
     model_name           = "data/crab.xml"
     events_name          = "events.fits"
     selected_events_name = "selected_events.fits"
     result_name          = "results.xml"
     caldb                = "irf"
     irf                  = "cta_dummy_irf"
     ra                   =   83.63
     dec                  =   22.01
     rad_sim              =   10.0
     tstart               =    0.0
     tstop                = 1800.0
     emin                 =    0.1
     emax                 =  100.0
     rad_select           =    3.0
 
     # Simulate events
     sim = ctools.ctobssim()
     sim["inmodel"].filename(model_name)
     sim["outevents"].filename(events_name)
     sim["caldb"].string(caldb)
     sim["irf"].string(irf)
     sim["ra"].real(ra)
     sim["dec"].real(dec)
     sim["rad"].real(rad_sim)
     sim["tmin"].real(tstart)
     sim["tmax"].real(tstop)
     sim["emin"].real(emin)
     sim["emax"].real(emax)
     self.test_try("Execute ctobssim")
     try:
         sim.execute()
         self.test_try_success()
     except:
         self.test_try_failure("Exception occured in ctobssim.")
 
     # Select events
     select = ctools.ctselect()
     select["inobs"].filename(events_name)
     select["outobs"].filename(selected_events_name)
     select["ra"].real(ra)
     select["dec"].real(dec)
     select["rad"].real(rad_select)
     select["tmin"].real(tstart)
     select["tmax"].real(tstop)
     select["emin"].real(emin)
     select["emax"].real(emax)
     self.test_try("Execute ctselect")
     try:
         select.execute()
         self.test_try_success()
     except:
         self.test_try_failure("Exception occured in ctselect.")
 
     # Perform maximum likelihood fitting
     like = ctools.ctlike()
     like["inobs"].filename(selected_events_name)
     like["inmodel"].filename(model_name)
     like["outmodel"].filename(result_name)
     like["caldb"].string(caldb)
     like["irf"].string(irf)
     self.test_try("Execute ctlike")
     try:
         like.execute()
         self.test_try_success()
     except:
         self.test_try_failure("Exception occured in ctlike.")
Example #39
import os
import sys

import numpy as np
import pandas as pd
from astropy.io import fits
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.time import Time

import gammalib
import ctools
import cscripts

# create_path and Observability are project-local helpers assumed to be
# importable from the surrounding package


def gw_simulation(sim_in, config_in, model_xml, fits_model, counter):
    """
    Simulate CTA follow-up observations of a gravitational-wave counterpart
    and run the detection and likelihood analysis for every scheduled pointing.

    :param sim_in: simulation configuration dictionary (source, ctobssim,
                   IRF, detection and output settings)
    :param config_in: global pipeline configuration dictionary
    :param model_xml: path of the XML sky model used for the simulation
    :param fits_model: path of the FITS model file providing the source RA/DEC
    :param counter: trial counter used to derive the random seed
    :return: None (results are written to a per-seed text file)

    src_name = fits_model.split("/")[-1][:-5]
    run_id, merger_id = src_name.split('_')

    fits_header_0 = fits.open(fits_model)[0].header
    ra_src = fits_header_0['RA']
    dec_src = fits_header_0['DEC']

    coordinate_source = SkyCoord(ra=ra_src * u.deg, dec=dec_src * u.deg, frame="icrs")

    src_yaml = sim_in['source']

    point_path = create_path(src_yaml['pointings_path'])
    opt_point_path = f"{point_path}/optimized_pointings"

    ctools_pipe_path = create_path(config_in['exe']['software_path'])
    ctobss_params = sim_in['ctobssim']

    seed = int(counter)*10

    # Parameters taken from the ctobssim configuration
    sim_e_min = u.Quantity(ctobss_params['energy']['e_min']).to_value(u.TeV)
    sim_e_max = u.Quantity(ctobss_params['energy']['e_max']).to_value(u.TeV)

    sim_rad = ctobss_params['radius']
    output_path = create_path(sim_in['output']['path'] + f"/{src_name}/seed-{seed:03}")

    irf_dict = sim_in['IRF']
    site = irf_dict['site']

    detection = sim_in['detection']
    significance_map = detection['skymap_significance']
    srcdetect_ctlike = detection['srcdetect_likelihood']

    save_simulation = ctobss_params['save_simulation']

    try:
        mergers_data = pd.read_csv(
            f"{point_path}/BNS-GW-Time_onAxis5deg.txt",
            sep=" ")
    except FileNotFoundError:
        print("merger data not present. check that the text file with the correct pointings is in the 'pointings' folder!")
        sys.exit()

    filter_mask = (mergers_data["run"] == run_id) & (mergers_data["MergerID"] == f"Merger{merger_id}")
    merger_onset_data = mergers_data[filter_mask]
    time_onset_merger = merger_onset_data['Time'].values[0]

    with open(f"{output_path}/GW-{src_name}_seed-{seed:03}_site-{site}.txt", "w") as f:
        f.write(f"GW_name\tRA_src\tDEC_src\tseed\tpointing_id\tsrc_to_point\tsrc_in_point\tra_point\tdec_point\tradius\ttime_start\ttime_end\tsignificanceskymap\tsigmasrcdetectctlike\n")
        try:
            file_name = f"{opt_point_path}/{run_id}_Merger{merger_id}_GWOptimisation_v3.txt"
            pointing_data = pd.read_csv(
                file_name,
                header=0,
                sep=",")
        except FileNotFoundError:
            print("File not found\n")
            sys.exit()

        RA_data = pointing_data['RA(deg)']
        DEC_data = pointing_data['DEC(deg)']
        times = pointing_data['Observation Time UTC']
        durations = pointing_data['Duration']

        # LOOP OVER POINTINGS
        for index in range(0, len(pointing_data)):
            RA_point = RA_data[index]
            DEC_point = DEC_data[index]
            coordinate_pointing = SkyCoord(
                ra=RA_point * u.degree,
                dec=DEC_point * u.degree,
                frame="icrs"
            )
            src_from_pointing = coordinate_pointing.separation(coordinate_source)

            t_in_point = Time(times[index])

            obs_condition = Observability(site=site)
            obs_condition.set_irf(irf_dict)
            obs_condition.Proposal_obTime = 10
            obs_condition.TimeOffset = 0
            obs_condition.Steps_observability = 10
            condition_check = obs_condition.check(RA=RA_point, DEC=DEC_point, t_start=t_in_point)

            # Once the IRF has been chosen, shift the times so that they are
            # expressed in seconds since the merger onset (a quick and dirty
            # way of handling times in ctools)
            t_in_point = (Time(times[index]) - Time(time_onset_merger)).to(u.s)
            t_end_point = t_in_point + durations[index] * u.s

            if len(condition_check) == 0:
                print(f"Source Not Visible in pointing {index}")
                f.write(
                    f"{src_name}\t{ra_src}\t{dec_src}\t{seed}\t{index}\t{src_from_pointing.value:.2f}\t{src_from_pointing.value < sim_rad}\t{RA_point}\t{DEC_point}\t{sim_rad}\t{t_in_point.value:.2f}\t{t_end_point.value:.2f}\t -1 \t -1\n")
                continue

            name_irf = condition_check['IRF_name'][0]
            irf = condition_check['IRF'][0]
            # model loading

            if irf.prod_number == "3b" and irf.prod_version == 0:
                caldb = "prod3b"
            else:
                caldb = f'prod{irf.prod_number}-v{irf.prod_version}'

            # simulation
            sim = ctools.ctobssim()
            sim['inmodel'] = model_xml
            sim['caldb'] = caldb
            sim['irf'] = name_irf
            sim['ra'] = RA_point
            sim['dec'] = DEC_point
            sim['rad'] = sim_rad
            sim['tmin'] = t_in_point.value
            sim['tmax'] = t_end_point.value
            sim['emin'] = sim_e_min
            sim['emax'] = sim_e_max
            sim['seed'] = seed

            if save_simulation:
                event_list_path = create_path(f"{ctobss_params['output_path']}/{src_name}/seed-{seed:03}/")
                sim['outevents'] = f"{event_list_path}/event_list_source-{src_name}_seed-{seed:03}_pointingID-{index}.fits"
                sim.execute()
                f.write(
                    f"{src_name}\t{ra_src}\t{dec_src}\t{seed}\t{index}\t{src_from_pointing.value:.2f}\t{src_from_pointing.value < sim_rad}\t{RA_point}\t{DEC_point}\t{sim_rad}\t{t_in_point.value:.2f}\t{t_end_point.value:.2f}\t -1 \t -1\n"
                )
                continue
            else:
                sim.run()

            obs = sim.obs()

            obs.models(gammalib.GModels())

            # ctskymap

            sigma_onoff = -1
            sqrt_ts_like = -1

            if significance_map:
                pars_skymap = detection['parameters_skymap']
                scale = float(pars_skymap['scale'])
                npix = 2 * int(sim_rad / scale)

                fits_temp_title = f"{output_path}/GW-skymap_point-{index}_{seed}.fits"

                skymap = ctools.ctskymap(obs.copy())
                skymap['proj'] = 'CAR'
                skymap['coordsys'] = 'CEL'
                skymap['xref'] = RA_point
                skymap['yref'] = DEC_point
                skymap['binsz'] = scale
                skymap['nxpix'] = npix
                skymap['nypix'] = npix
                skymap['emin'] = sim_e_min
                skymap['emax'] = sim_e_max
                skymap['bkgsubtract'] = 'RING'
                skymap['roiradius'] = pars_skymap['roiradius']
                skymap['inradius'] = pars_skymap['inradius']
                skymap['outradius'] = pars_skymap['outradius']
                skymap['iterations'] = pars_skymap['iterations']
                skymap['threshold'] = pars_skymap['threshold']
                skymap['outmap'] = fits_temp_title
                skymap.execute()

                input_fits = fits.open(fits_temp_title)
                datain = input_fits['SIGNIFICANCE'].data
                datain[np.isnan(datain)] = 0.0
                datain[np.isinf(datain)] = 0.0

                sigma_onoff = np.max(datain)

                if pars_skymap['remove_fits']:
                    os.remove(fits_temp_title)

            if srcdetect_ctlike:
                pars_detect = detection['parameters_detect']
                scale = float(pars_detect['scale'])
                npix = 2 * int(sim_rad / scale)

                skymap = ctools.ctskymap(obs.copy())
                skymap['proj'] = 'TAN'
                skymap['coordsys'] = 'CEL'
                skymap['xref'] = RA_point
                skymap['yref'] = DEC_point
                skymap['binsz'] = scale
                skymap['nxpix'] = npix
                skymap['nypix'] = npix
                skymap['emin'] = sim_e_min
                skymap['emax'] = sim_e_max
                skymap['bkgsubtract'] = 'NONE'
                skymap.run()

                # cssrcdetect
                srcdetect = cscripts.cssrcdetect(skymap.skymap().copy())
                srcdetect['srcmodel'] = 'POINT'
                srcdetect['bkgmodel'] = 'NONE'
                srcdetect['corr_kern'] = 'GAUSSIAN'
                srcdetect['threshold'] = pars_detect['threshold']
                srcdetect['corr_rad'] = pars_detect['correlation']
                srcdetect.run()

                models = srcdetect.models()

                # If something was detected, run the likelihood fit: the
                # spectral model is a power law and the spatial model is the
                # one returned by cssrcdetect
                if len(models) > 0:
                    hotspot = models['Src001']
                    ra_hotspot = hotspot['RA'].value()
                    dec_hotspot = hotspot['DEC'].value()

                    models_ctlike = gammalib.GModels()

                    src_dir = gammalib.GSkyDir()
                    src_dir.radec_deg(ra_hotspot, dec_hotspot)
                    spatial = gammalib.GModelSpatialPointSource(src_dir)

                    spectral = gammalib.GModelSpectralPlaw()
                    spectral['Prefactor'].value(5.5e-16)
                    spectral['Prefactor'].scale(1e-16)
                    spectral['Index'].value(-2.6)
                    spectral['Index'].scale(-1.0)
                    spectral['PivotEnergy'].value(50000)
                    spectral['PivotEnergy'].scale(1e3)

                    model_src = gammalib.GModelSky(spatial, spectral)
                    model_src.name('PL_fit_GW')
                    model_src.tscalc(True)

                    models_ctlike.append(model_src)

                    spectral_back = gammalib.GModelSpectralPlaw()
                    spectral_back['Prefactor'].value(1.0)
                    spectral_back['Prefactor'].scale(1.0)
                    spectral_back['Index'].value(0)
                    spectral_back['PivotEnergy'].value(300000)
                    spectral_back['PivotEnergy'].scale(1e6)

                    back_model = gammalib.GCTAModelIrfBackground()
                    back_model.instruments('CTA')
                    back_model.name('Background')
                    back_model.spectral(spectral_back.copy())
                    models_ctlike.append(back_model)

                    xmlmodel_PL_ctlike_std = f"{output_path}/model_PL_ctlike_std_seed-{seed}_pointing-{index}.xml"
                    models_ctlike.save(xmlmodel_PL_ctlike_std)

                    like_pl = ctools.ctlike(obs.copy())
                    like_pl['inmodel'] = xmlmodel_PL_ctlike_std
                    like_pl['caldb'] = caldb
                    like_pl['irf'] = name_irf
                    like_pl.run()

                    ts = -like_pl.obs().models()[0].ts()
                    if ts > 0:
                        sqrt_ts_like = np.sqrt(ts)
                    else:
                        sqrt_ts_like = 0

                    if pars_detect['remove_xml']:
                        os.remove(xmlmodel_PL_ctlike_std)

            f.write(
                f"{src_name}\t{ra_src}\t{dec_src}\t{seed}\t{index}\t{src_from_pointing.value:.2f}\t{src_from_pointing.value < sim_rad}\t{RA_point:.2f}\t{DEC_point:.2f}\t{sim_rad}\t{t_in_point.value:.2f}\t{t_end_point.value:.2f}\t{sigma_onoff:.2f}\t{sqrt_ts_like}\n")
Example #40
            sim['inmodel'] = 'nu_sources_' + str(i + 1) + '.xml'
            sim['caldb'] = caldb
            sim['irf'] = irf
            sim['ra'] = ra
            sim['dec'] = dec
            sim['rad'] = 5.0
            sim['tmin'] = '2020-05-31T12:00:00'
            sim['tmax'] = '2020-05-31T12:10:00'
            sim['emin'] = 0.02
            sim['emax'] = 199.0
            sim['maxrate'] = 1.0e9
            sim['debug'] = debug
            sim['edisp'] = edisp
            sim.run()

            like = ctools.ctlike(sim.obs())
            like['debug'] = debug
            like['edisp'] = edisp
            like.run()

            nuts = like.obs().models()[sourcename].ts()
            nuspectral = like.obs().models()[sourcename].spectral()
            nunormsp = nuspectral['Normalization'].value()
            nunormsp_error = nuspectral['Normalization'].error()

            if nuts >= 25.:
                if nunormsp > 2. or nunormsp < 0.5:
                    fake = str(i + 1) + ' ' + str(nuts) + ' ' + str(
                        nunormsp) + ' ' + str(nunormsp_error) + ' ' + str(
                            ra) + ' ' + str(dec) + ' ' + str(tsig) + '\n'
Example #41
File: cspull.py  Project: ctools/ctools
    def _trial(self, seed):
        """
        Compute the pull for a single trial

        Parameters
        ----------
        seed : int
            Random number generator seed

        Returns
        -------
        result : dict
            Dictionary of results
        """
        # Write header
        self._log_header2(gammalib.NORMAL, 'Trial %d' %
                          (seed-self['seed'].integer()+1))

        # Get number of energy bins and On source name and initialise
        # some parameters
        nbins     = self['enumbins'].integer()
        onsrc     = self['onsrc'].string()
        edisp     = self['edisp'].boolean()
        statistic = self['statistic'].string()
        emin      = None
        emax      = None
        binsz     = 0.0
        npix      = 0
        proj      = 'TAN'
        coordsys  = 'CEL'

        # If we have an On source name then set the On region radius
        if gammalib.toupper(onsrc) != 'NONE':
            onrad = self['onrad'].real()
            emin  = self['emin'].real()
            emax  = self['emax'].real()
            edisp = True   # Always use energy dispersion for On/Off
        else:

            # Reset On region source name and radius
            onrad = 0.0
            onsrc = None

            # If we have a binned observation then specify the lower and
            # upper energy limits in TeV
            if nbins > 0:
                emin     = self['emin'].real()
                emax     = self['emax'].real()
                binsz    = self['binsz'].real()
                npix     = self['npix'].integer()
                proj     = self['proj'].string()
                coordsys = self['coordsys'].string()

        # Simulate events
        obs = obsutils.sim(self.obs(),
                           emin=emin, emax=emax, nbins=nbins,
                           onsrc=onsrc, onrad=onrad,
                           addbounds=True, seed=seed,
                           binsz=binsz, npix=npix, proj=proj, coord=coordsys,
                           edisp=edisp, log=False, debug=self._logDebug(),
                           chatter=self['chatter'].integer())

        # Determine number of events in simulation
        nevents = 0.0
        for run in obs:
            nevents += run.nobserved()

        # Write simulation results
        self._log_header3(gammalib.NORMAL, 'Simulation')
        for run in self.obs():
            self._log_value(gammalib.NORMAL, 'Input observation %s' % run.id(),
                            self._obs_string(run))
        for run in obs:
            self._log_value(gammalib.NORMAL, 'Output observation %s' % run.id(),
                            self._obs_string(run))
        self._log_value(gammalib.NORMAL, 'Number of simulated events', nevents)

        # Fit model
        if self['profile'].boolean():
            models = self.obs().models()
            for model in models:
                like = ctools.cterror(obs)
                like['srcname']   = model.name()
                like['edisp']     = edisp
                like['statistic'] = statistic
                like['debug']     = self._logDebug()
                like['chatter']   = self['chatter'].integer()
                like.run()
        else:
            like = ctools.ctlike(obs)
            like['edisp']     = edisp
            like['statistic'] = statistic
            like['debug']     = self._logDebug()
            like['chatter']   = self['chatter'].integer()
            like.run()

        # Store results
        logL   = like.opt().value()
        npred  = like.obs().npred()
        models = like.obs().models()

        # Write result header
        self._log_header3(gammalib.NORMAL, 'Pulls')

        # Gather results in form of a list of result columns and a
        # dictionary containing the results. The result contains the
        # log-likelihood, the number of simulated events, the number of
        # predicted events and for each fitted parameter the fitted value,
        # the pull and the fit error.
        #
        # Note that we do not use the model and parameter iterators
        # because we need the indices to get the true (or real) parameter
        # values from the input models.
        colnames = ['LogL', 'Sim_Events', 'Npred_Events']
        values   = {'LogL': logL, 'Sim_Events': nevents, 'Npred_Events': npred}
        for i in range(models.size()):
            model = models[i]
            for k in range(model.size()):
                par = model[k]
                if par.is_free():

                    # Set name as a combination of model name and parameter
                    # name separated by an underscore. In that way each
                    # parameter has a unique name.
                    name = model.name()+'_'+par.name()

                    # Append parameter, Pull_parameter and e_parameter column
                    # names
                    colnames.append(name)
                    colnames.append('Pull_'+name)
                    colnames.append('e_'+name)

                    # Compute pull for this parameter as the difference
                    #               (fitted - true) / error
                    # In case that the error is 0 the pull is set to 99
                    fitted_value = par.value()
                    real_value   = self.obs().models()[i][k].value()
                    error        = par.error()
                    if error != 0.0:
                        pull = (fitted_value - real_value) / error
                    else:
                        pull = 99.0

                    # Store results in dictionary
                    values[name]         = fitted_value
                    values['Pull_'+name] = pull
                    values['e_'+name]    = error

                    # Write results into logger
                    value = '%.4f (%e +/- %e)' % (pull, fitted_value, error)
                    self._log_value(gammalib.NORMAL, name, value)

        # Bundle together results in a dictionary
        result = {'colnames': colnames, 'values': values}

        # Return
        return result
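
The pull convention used in this trial is compact enough to isolate. A minimal standalone sketch, mirroring the code above where a zero fit error yields the sentinel value 99:

def compute_pull(fitted_value, true_value, error):
    # Pull = (fitted - true) / error; 99.0 flags an unconstrained parameter
    if error != 0.0:
        return (fitted_value - true_value) / error
    return 99.0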
Example #42
File: csspec.py  Project: jdevin/ctools
    def run(self):
        """
        Run the script.
        """
        # Switch screen logging on in debug mode
        if self.logDebug():
            self.log.cout(True)

        # Get parameters
        self.get_parameters()

        #  Write input parameters into logger
        if self.logTerse():
            self.log_parameters()
            self.log("\n")

        # Write spectral binning into header
        if self.logTerse():
            self.log("\n")
            self.log.header1("Spectral binning")
            if self.m_binned_mode:
                cube_ebounds = self.obs[0].events().ebounds()
                self.log.parformat("Counts cube energy range")
                self.log(str(cube_ebounds.emin()))
                self.log(" - ")
                self.log(str(cube_ebounds.emax()))
                self.log("\n")
            for i in range(self.m_ebounds.size()):
                self.log.parformat("Bin "+str(i+1))
                self.log(str(self.m_ebounds.emin(i)))
                self.log(" - ")
                self.log(str(self.m_ebounds.emax(i)))
                self.log("\n")

        # Write observation into logger
        if self.logTerse():
            self.log("\n")
            self.log.header1("Observation")
            self.log(str(self.obs))
            self.log("\n")

        # Write header
        if self.logTerse():
            self.log("\n")
            self.log.header1("Adjust model parameters")

        # Adjust model parameters dependent on input user parameters
        for model in self.obs.models():

            # Set TS flag for all models to false.
            # Source of interest will be set to true later
            model.tscalc(False)

            # Log model name
            if self.logExplicit():
                self.log.header3(model.name())

            # Deal with the source of interest    
            if model.name() == self.m_srcname:
                for par in model:
                    if par.is_free() and self.logExplicit():
                        self.log(" Fixing \""+par.name()+"\"\n")
                    par.fix()
                normpar = model.spectral()[0]
                if normpar.is_fixed() and self.logExplicit():
                    self.log(" Freeing \""+normpar.name()+"\"\n")
                normpar.free()
                if self.m_calc_ts:
                    model.tscalc(True)

            elif self.m_fix_bkg and not model.classname() == "GModelSky":
                for par in model:
                    if par.is_free() and self.logExplicit():
                        self.log(" Fixing \""+par.name()+"\"\n")
                    par.fix()

            elif self.m_fix_srcs and model.classname() == "GModelSky":
                for par in model:
                    if par.is_free() and self.logExplicit():
                        self.log(" Fixing \""+par.name()+"\"\n")
                    par.fix()

        # Write header
        if self.logTerse():
            self.log("\n")
            self.log.header1("Generate spectrum")  
            self.log(str(self.m_ebounds))    

        # Initialise FITS Table with extension "SPECTRUM"
        table = gammalib.GFitsBinTable(self.m_ebounds.size())
        table.extname("SPECTRUM")

        # Add Header for compatibility with gammalib.GMWLSpectrum
        table.card("INSTRUME", "CTA", "Name of Instrument")
        table.card("TELESCOP", "CTA", "Name of Telescope")

        # Create FITS table columns
        energy       = gammalib.GFitsTableDoubleCol("Energy", self.m_ebounds.size())
        energy_low   = gammalib.GFitsTableDoubleCol("ed_Energy", self.m_ebounds.size())
        energy_high  = gammalib.GFitsTableDoubleCol("eu_Energy", self.m_ebounds.size())
        flux         = gammalib.GFitsTableDoubleCol("Flux", self.m_ebounds.size())
        flux_err     = gammalib.GFitsTableDoubleCol("e_Flux", self.m_ebounds.size())
        TSvalues     = gammalib.GFitsTableDoubleCol("TS", self.m_ebounds.size())
        ulim_values  = gammalib.GFitsTableDoubleCol("UpperLimit", self.m_ebounds.size())
        Npred_values = gammalib.GFitsTableDoubleCol("Npred", self.m_ebounds.size())
        energy.unit("TeV")
        energy_low.unit("TeV")
        energy_high.unit("TeV")
        flux.unit("erg/cm2/s")
        flux_err.unit("erg/cm2/s")
        ulim_values.unit("erg/cm2/s")

        # Loop over energy bins
        for i in range(self.m_ebounds.size()):

            # Log information
            if self.logExplicit():
                self.log("\n")
                self.log.header2("Energy bin "+str(i+1))

            # Get energy boundaries
            emin      = self.m_ebounds.emin(i)
            emax      = self.m_ebounds.emax(i)
            elogmean  = self.m_ebounds.elogmean(i)
            elogmean2 = elogmean.MeV() * elogmean.MeV()    

            # Store energy as TeV
            energy[i] = elogmean.TeV()

            # Store energy errors
            energy_low[i]  = (elogmean - emin).TeV()
            energy_high[i] = (emax - elogmean).TeV()

            # use ctselect for unbinned analysis
            if not self.m_binned_mode:
                
                # Log information
                if self.logExplicit():
                    self.log.header3("Selecting events")
    
                # Select events
                select = ctools.ctselect(self.obs)
                select["emin"] = emin.TeV()    
                select["emax"] = emax.TeV() 
                select["tmin"] = "UNDEFINED"
                select["tmax"] = "UNDEFINED"
                select["rad"]  = "UNDEFINED"
                select["ra"]   = "UNDEFINED"
                select["dec"]  = "UNDEFINED"
                select.run()  
    
                # Retrieve observation
                obs = select.obs()

            # use ctcubemask for binned analysis
            else:

                # Header
                if self.logExplicit():
                    self.log.header3("Filtering cube")

                # Select layers
                cubemask            = ctools.ctcubemask(self.obs)
                cubemask["regfile"] = "NONE"
                cubemask["ra"]      = "UNDEFINED"
                cubemask["dec"]     = "UNDEFINED"
                cubemask["rad"]     = "UNDEFINED"
                cubemask["emin"]    = emin.TeV() 
                cubemask["emax"]    = emax.TeV()
                cubemask.run() 
                
                # Set new binned observation
                obs = cubemask.obs()

            # Header
            if self.logExplicit():
                self.log.header3("Performing fit")

            # Likelihood
            like          = ctools.ctlike(obs)
            like["edisp"] = self.m_edisp
            like.run()

            # Skip bin if no event was present
            if like.obs().logL() == 0.0:

                # Log information
                if self.logExplicit():
                    self.log("No event in this bin. ")
                    self.log("Likelihood is zero. ")
                    self.log("Bin is skipped.")

                # Set all values to 0
                flux[i]         = 0.0
                flux_err[i]     = 0.0
                TSvalues[i]     = 0.0
                ulim_values[i]  = 0.0
                Npred_values[i] = 0.0
                continue

            # Get results
            fitted_models = like.obs().models()
            source        = fitted_models[self.m_srcname]

            # Calculate Upper Limit            
            ulimit_value = -1.0
            if self.m_calc_ulimit:

                # Logging information
                if self.logExplicit():
                    self.log.header3("Computing upper limit")

                # Create upper limit object  
                ulimit = ctools.ctulimit(like.obs())
                ulimit["srcname"] = self.m_srcname
                ulimit["eref"]    = elogmean.TeV()

                # Try to run upper limit and catch exceptions
                try:
                    ulimit.run()
                    ulimit_value = ulimit.diff_ulimit()
                except:
                    if self.logExplicit():
                        self.log("Upper limit calculation failed.")
                    ulimit_value = -1.0

            # Get TS value
            TS = -1.0
            if self.m_calc_ts:
                TS = source.ts() 

            # Compute Npred value (only works for unbinned analysis)
            Npred = 0.0
            if not self.m_binned_mode:
                for observation in like.obs():
                    Npred += observation.npred(source)  

            # Get differential flux    
            fitted_flux = source.spectral().eval(elogmean,gammalib.GTime())

            # Compute flux error
            parvalue  = source.spectral()[0].value()
            rel_error = source.spectral()[0].error() / parvalue        
            e_flux    = fitted_flux * rel_error

            # Set values for storage
            TSvalues[i] = TS

            # Set npred values 
            Npred_values[i] = Npred

            # Convert fluxes to nuFnu
            flux[i]     = fitted_flux * elogmean2 * gammalib.MeV2erg
            flux_err[i] = e_flux      * elogmean2 * gammalib.MeV2erg
            if ulimit_value > 0.0:
                ulim_values[i] = ulimit_value * elogmean2 * gammalib.MeV2erg

            # Log information
            if self.logTerse(): 
                self.log("\n")
                self.log.parformat("Bin "+str(i+1))
                self.log(str(flux[i]))
                self.log(" +/- ")
                self.log(str(flux_err[i]))
                if self.m_calc_ulimit and ulim_values[i] > 0.0:
                    self.log(" [< "+str(ulim_values[i])+"]")
                self.log(" erg/cm2/s")
                if self.m_calc_ts and TSvalues[i] > 0.0:
                    self.log(" (TS = "+str(TS)+")")

        # Append filled columns to fits table    
        table.append(energy)
        table.append(energy_low)
        table.append(energy_high)
        table.append(flux)
        table.append(flux_err)
        table.append(TSvalues)
        table.append(ulim_values)
        table.append(Npred_values)

        # Create the FITS file now
        self.fits = gammalib.GFits()
        self.fits.append(table)

        # Return
        return
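
The Flux and e_Flux columns above hold E^2 dN/dE values. A minimal sketch of that conversion, assuming the spectral model returns a differential flux in ph/cm2/s/MeV and the energy is given in MeV (gammalib.MeV2erg is the same conversion factor used in the script):

import gammalib

def to_nufnu(dnde_per_mev, energy_mev):
    # Convert dN/dE [ph/cm2/s/MeV] at energy_mev [MeV] into E^2 dN/dE [erg/cm2/s]
    return dnde_per_mev * energy_mev * energy_mev * gammalib.MeV2erg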
Example #43
def run_pipeline(obs, ra=83.63, dec=22.01, emin=0.1, emax=100.0, \
                 enumbins=20, nxpix=200, nypix=200, binsz=0.02, \
                 coordsys="CEL", proj="CAR", \
                 model="${CTOOLS}/share/models/crab.xml", \
                 caldb="prod2", irf="South_50h", \
                 debug=False):
    """
    Simulation and stacked analysis pipeline.

    Keywords:
     ra       - RA of cube centre [deg] (default: 83.63)
     dec      - DEC of cube centre [deg] (default: 22.01)
     emin     - Minimum energy of cube [TeV] (default: 0.1)
     emax     - Maximum energy of cube [TeV] (default: 100.0)
     enumbins - Number of energy bins in cube (default: 20)
     nxpix    - Number of RA pixels in cube (default: 200)
     nypix    - Number of DEC pixels in cube (default: 200)
     binsz    - Spatial cube bin size [deg] (default: 0.02)
     coordsys - Cube coordinate system (CEL or GAL)
     proj     - Cube World Coordinate System (WCS) projection
     model    - Model XML file
     caldb    - Calibration database (default: "prod2")
     irf      - Instrument response function (default: "South_50h")
     debug    - Enable debugging (default: False)
    """
    # Simulate events
    sim = ctools.ctobssim(obs)
    sim["debug"].boolean(debug)
    sim["outevents"].filename("obs.xml")
    sim.execute()

    # Bin events into counts map
    bin = ctools.ctbin()
    bin["inobs"].filename("obs.xml")
    bin["outcube"].filename("cntcube.fits")
    bin["ebinalg"].string("LOG")
    bin["emin"].real(emin)
    bin["emax"].real(emax)
    bin["enumbins"].integer(enumbins)
    bin["nxpix"].integer(nxpix)
    bin["nypix"].integer(nypix)
    bin["binsz"].real(binsz)
    bin["coordsys"].string(coordsys)
    bin["proj"].string(proj)
    bin["xref"].real(ra)
    bin["yref"].real(dec)
    bin["debug"].boolean(debug)
    bin.execute()

    # Create exposure cube
    expcube = ctools.ctexpcube()
    expcube["inobs"].filename("obs.xml")
    expcube["incube"].filename("cntcube.fits")
    expcube["outcube"].filename("expcube.fits")
    expcube["caldb"].string(caldb)
    expcube["irf"].string(irf)
    expcube["ebinalg"].string("LOG")
    expcube["emin"].real(emin)
    expcube["emax"].real(emax)
    expcube["enumbins"].integer(enumbins)
    expcube["nxpix"].integer(nxpix)
    expcube["nypix"].integer(nypix)
    expcube["binsz"].real(binsz)
    expcube["coordsys"].string(coordsys)
    expcube["proj"].string(proj)
    expcube["xref"].real(ra)
    expcube["yref"].real(dec)
    expcube["debug"].boolean(debug)
    expcube.execute()

    # Create PSF cube
    psfcube = ctools.ctpsfcube()
    psfcube["inobs"].filename("obs.xml")
    psfcube["incube"].filename("NONE")
    psfcube["outcube"].filename("psfcube.fits")
    psfcube["caldb"].string(caldb)
    psfcube["irf"].string(irf)
    psfcube["ebinalg"].string("LOG")
    psfcube["emin"].real(emin)
    psfcube["emax"].real(emax)
    psfcube["enumbins"].integer(enumbins)
    psfcube["nxpix"].integer(10)
    psfcube["nypix"].integer(10)
    psfcube["binsz"].real(1.0)
    psfcube["coordsys"].string(coordsys)
    psfcube["proj"].string(proj)
    psfcube["xref"].real(ra)
    psfcube["yref"].real(dec)
    psfcube["debug"].boolean(debug)
    psfcube.execute()

    # Create background cube
    bkgcube = ctools.ctbkgcube()
    bkgcube["inobs"].filename("obs.xml")
    bkgcube["inmodel"].filename(model)
    bkgcube["incube"].filename("cntcube.fits")
    bkgcube["outcube"].filename("bkgcube.fits")
    bkgcube["outmodel"].filename("model_bkg.xml")
    bkgcube["caldb"].string(caldb)
    bkgcube["irf"].string(irf)
    bkgcube["ebinalg"].string("LOG")
    bkgcube["emin"].real(emin)
    bkgcube["emax"].real(emax)
    bkgcube["enumbins"].integer(enumbins)
    bkgcube["nxpix"].integer(10)
    bkgcube["nypix"].integer(10)
    bkgcube["binsz"].real(1.0)
    bkgcube["coordsys"].string(coordsys)
    bkgcube["proj"].string(proj)
    bkgcube["xref"].real(ra)
    bkgcube["yref"].real(dec)
    bkgcube["debug"].boolean(debug)
    bkgcube.execute()

    # Perform maximum likelihood fitting
    like = ctools.ctlike()
    like["inobs"].filename("cntcube.fits")
    like["inmodel"].filename("model_bkg.xml")
    like["outmodel"].filename("fit_results.xml")
    like["expcube"].filename("expcube.fits")
    like["psfcube"].filename("psfcube.fits")
    like["bkgcube"].filename("bkgcube.fits")
    like["caldb"].string(caldb)
    like["irf"].string(irf)
    like["debug"].boolean(True) # Switch this always on for results in console
    like.execute()
	
    # Return
    return
Example #44
File: cssens.py  Project: ctools/ctools
    def _get_sensitivity(self, emin, emax, test_model):
        """
        Determine sensitivity for given observations

        Parameters
        ----------
        emin : `~gammalib.GEnergy`
            Minimum energy for fitting and flux computation
        emax : `~gammalib.GEnergy`
            Maximum energy for fitting and flux computation
        test_model : `~gammalib.GModels`
            Test source model

        Returns
        -------
        result : dict
            Result dictionary
        """
        # Set TeV->erg conversion factor
        tev2erg = 1.6021764

        # Set parameters
        ts_thres = self['sigma'].real() * self['sigma'].real()
        max_iter = self['max_iter'].integer()
        enumbins = self['enumbins'].integer()
        if not enumbins == 0:
            npix  = self['npix'].integer()
            binsz = self['binsz'].real()
        else:
            npix  = 200
            binsz = 0.05

        # Set flux ratio precision required for convergence to 5%
        ratio_precision = 0.05

        # Set energy boundaries
        self._set_obs_ebounds(emin, emax)

        # Determine mean energy for energy boundary
        e_mean   = math.sqrt(emin.TeV()*emax.TeV())
        loge     = math.log10(e_mean)
        erg_mean = e_mean * tev2erg

        # Compute Crab unit. This is the factor with which the Prefactor needs
        # to be multiplied to get 1 Crab.
        crab_flux = self._get_crab_flux(emin, emax)
        src_flux  = test_model[self._srcname].spectral().flux(emin, emax)
        crab_unit = crab_flux/src_flux

        # Initialise regression coefficient
        regcoeff = 0.0

        # Write header for energy bin
        self._log_string(gammalib.TERSE, '')
        self._log_header2(gammalib.TERSE, 'Energies: '+str(emin)+' - '+str(emax))

        # Write initial parameters
        self._log_header3(gammalib.TERSE, 'Initial parameters')
        self._log_value(gammalib.TERSE, 'Crab flux', str(crab_flux)+' ph/cm2/s')
        self._log_value(gammalib.TERSE, 'Source model flux', str(src_flux)+' ph/cm2/s')
        self._log_value(gammalib.TERSE, 'Crab unit factor', crab_unit)

        # Initialise loop
        results        = []
        iterations     = 0
        test_crab_flux = 0.1 # Initial test flux in Crab units (100 mCrab)

        # Write header for iterations for terse chatter level
        if self._logTerse():
            self._log_header3(gammalib.TERSE, 'Iterations')

        # Loop until we break
        while True:

            # Update iteration counter
            iterations += 1

            # Write header for iteration into logger
            self._log_header2(gammalib.EXPLICIT, 'Iteration '+str(iterations))

            # Create a copy of the test models, set the prefactor of the test
            # source in the models, and append the models to the observation.
            # "crab_prefactor" is the Prefactor that corresponds to a flux of
            # 1 Crab.
            models         = test_model.copy()
            crab_prefactor = models[self._srcname]['Prefactor'].value() * crab_unit
            models[self._srcname]['Prefactor'].value(crab_prefactor * test_crab_flux)
            self.obs().models(models)

            # Simulate events for the models. "sim" holds an observation
            # container with observations containing the simulated events.
            sim = obsutils.sim(self.obs(), nbins=enumbins, seed=iterations,
                               binsz=binsz, npix=npix,
                               log=self._log_clients,
                               debug=self['debug'].boolean(),
                               edisp=self['edisp'].boolean())

            # Determine number of events in simulation by summing the events
            # over all observations in the observation container
            nevents = 0.0
            for run in sim:
                nevents += run.events().number()

            # Write simulation results into logger
            self._log_header3(gammalib.EXPLICIT, 'Simulation')
            self._log_value(gammalib.EXPLICIT, 'Number of simulated events', nevents)

            # Fit test source to the simulated events in the observation
            # container
            fit = ctools.ctlike(sim)
            fit['edisp']   = self['edisp'].boolean()
            fit['debug']   = self['debug'].boolean()
            fit['chatter'] = self['chatter'].integer()
            fit.run()

            # Get model fitting results
            logL   = fit.opt().value()
            npred  = fit.obs().npred()
            models = fit.obs().models()
            source = models[self._srcname]
            ts     = source.ts()

            # Get fitted Crab, photon and energy fluxes
            crab_flux   = source['Prefactor'].value() / crab_prefactor
            photon_flux = source.spectral().flux(emin, emax)
            energy_flux = source.spectral().eflux(emin, emax)

            # Compute differential sensitivity in unit erg/cm2/s by evaluating
            # the spectral model at the "e_mean" energy and by multipling the
            # result with the energy squared. Since the "eval()" method returns
            # an intensity in units of ph/cm2/s/MeV we multiply by 1.0e6 to
            # convert into ph/cm2/s/TeV, by "e_mean" to convert into ph/cm2/s,
            # and finally by "erg_mean" to convert to erg/cm2/s.
            energy      = gammalib.GEnergy(e_mean, 'TeV')
            sensitivity = source.spectral().eval(energy) * e_mean*erg_mean*1.0e6

            # Write fit results into logger
            name  = 'Iteration %d' % iterations
            value = ('TS=%10.4f  Sim=%9.4f mCrab  Fit=%9.4f mCrab  '
                     'Sens=%e erg/cm2/s' %
                     (ts, test_crab_flux*1000.0, crab_flux*1000.0, sensitivity))
            self._log_value(gammalib.TERSE, name, value)

            # If TS was non-positive then increase the test flux and start over
            if ts <= 0.0:

                # If the number of iterations was exceeded then stop
                if (iterations >= max_iter):
                    self._log_string(gammalib.TERSE,
                         ' Test ended after %d iterations.' % max_iter)
                    break

                # Increase test flux
                test_crab_flux *= 3.0

                # Signal that we start over
                self._log_string(gammalib.EXPLICIT,
                     'Non positive TS, increase test flux and start over.')

                # ... and start over
                continue

            # Append result entry to result list
            result = {'ts': ts, 'crab_flux': crab_flux,
                                'photon_flux': photon_flux,
                                'energy_flux': energy_flux}
            results.append(result)

            # Predict Crab flux at threshold TS using a linear regression of
            # the log(TS) and log(crab_flux) values that have so far been
            # computed. If not enough results are available then use a simple
            # TS scaling relation.
            if len(results) > 1:
                pred_crab_flux, regcoeff = self._predict_flux(results, ts_thres)
                correct                  = pred_crab_flux / crab_flux
            else:
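                # With a single result, assume TS grows roughly with the
                # square of the source flux and scale the flux by
                # sqrt(ts_thres/ts) to reach the threshold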
                correct = math.sqrt(ts_thres/ts)

            # Compute extrapolated fluxes based on the flux correction factor
            crab_flux   = correct * crab_flux
            photon_flux = correct * photon_flux
            energy_flux = correct * energy_flux
            sensitivity = correct * sensitivity

            # If we have at least 3 results then check if the flux determination
            # at the TS threshold has converged
            if len(results) > 3:
                if test_crab_flux > 0:

                    # Compute fractional change in the Crab flux between two
                    # iterations
                    ratio = crab_flux/test_crab_flux

                    # If the fractional change is smaller than the required
                    # precision then the iterations are stopped
                    if ratio > 1.0-ratio_precision and \
                       ratio < 1.0+ratio_precision:
                        value = ('TS=%10.4f  Sim=%9.4f mCrab                  '
                                 '     Sens=%e erg/cm2/s' %
                                 (ts, crab_flux*1000.0, sensitivity))
                        self._log_value(gammalib.TERSE, 'Converged result', value)
                        self._log_value(gammalib.TERSE, 'Converged flux ratio', ratio)
                        self._log_value(gammalib.TERSE, 'Regression coefficient',
                                        regcoeff)
                        break
                else:
                    self._log_value(gammalib.TERSE, 'Not converged', 'Flux is zero')
                    break

            # Set test flux for next iteration
            test_crab_flux = crab_flux

            # Exit loop if number of trials exhausted
            if (iterations >= max_iter):
                self._log_string(gammalib.TERSE,
                                 ' Test ended after %d iterations.' % max_iter)
                break

        # Write fit results into logger
        self._log_header3(gammalib.TERSE, 'Fit results')
        self._log_value(gammalib.TERSE, 'Photon flux',
                        str(photon_flux)+' ph/cm2/s')
        self._log_value(gammalib.TERSE, 'Energy flux',
                        str(energy_flux)+' erg/cm2/s')
        self._log_value(gammalib.TERSE, 'Crab flux',
                        str(crab_flux*1000.0)+' mCrab')
        self._log_value(gammalib.TERSE, 'Differential sensitivity',
                        str(sensitivity)+' erg/cm2/s')
        self._log_value(gammalib.TERSE, 'Number of simulated events', nevents)
        self._log_header3(gammalib.TERSE, 'Test source model fitting')
        self._log_value(gammalib.TERSE, 'log likelihood', logL)
        self._log_value(gammalib.TERSE, 'Number of predicted events', npred)
        for model in models:
            self._log_value(gammalib.TERSE, 'Model', model.name())
            for par in model:
                self._log_string(gammalib.TERSE, str(par))

        # Restore energy boundaries of observation container
        for i, obs in enumerate(self.obs()):
            obs.events().ebounds(self._obs_ebounds[i])

        # Store result
        result = {'loge': loge, 'emin': emin.TeV(), 'emax': emax.TeV(), \
                  'crab_flux': crab_flux, 'photon_flux': photon_flux, \
                  'energy_flux': energy_flux, \
                  'sensitivity': sensitivity, 'regcoeff': regcoeff, \
                  'nevents': nevents, 'npred': npred}

        # Return result
        return result
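
The unit bookkeeping in the sensitivity expression above is easy to lose track of. A minimal sketch of the same conversion, assuming dN/dE comes from the spectral model in ph/cm2/s/MeV and the reference energy is given in TeV (1 TeV = 1.6021764 erg, the factor used in the script):

def differential_sensitivity(dnde_per_mev, e_tev, tev2erg=1.6021764):
    # E^2 dN/dE in erg/cm2/s:
    #   ph/cm2/s/MeV * 1.0e6     -> ph/cm2/s/TeV
    #   * e_tev                  -> ph/cm2/s
    #   * (e_tev * tev2erg)      -> erg/cm2/s
    return dnde_per_mev * 1.0e6 * e_tev * (e_tev * tev2erg)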
Example #45
File: csspec.py  Project: ctools/ctools
    def _fit_energy_bin(self, i):
        """
        Fit data for one energy bin

        Parameters
        ----------
        i : int
            Energy bin index

        Returns
        -------
        result : dict
            Dictionary with fit results
        """
        # Get energy boundaries
        emin      = self._ebounds.emin(i)
        emax      = self._ebounds.emax(i)
        elogmean  = self._ebounds.elogmean(i)

        # Select observations for energy bin
        obs = self._select_obs(emin, emax)

        # Initialise dictionary
        result = {'energy':      elogmean.TeV(),
                  'energy_low':  (elogmean - emin).TeV(),
                  'energy_high': (emax - elogmean).TeV(),
                  'flux':        0.0,
                  'flux_err':    0.0,
                  'TS':          0.0,
                  'ulimit':      0.0,
                  'Npred':       0.0}

        # Write header for fitting
        self._log_header3(gammalib.EXPLICIT, 'Performing fit')

        # Perform maximum likelihood fit
        like          = ctools.ctlike(obs)
        like['edisp'] = self['edisp'].boolean()
        like.run()

        # Continue only if log-likelihood is non-zero
        if like.obs().logL() != 0.0:

            # Get results
            fitted_models = like.obs().models()
            source        = fitted_models[self['srcname'].string()]

            # Extract Test Statistic value
            if self['calc_ts'].boolean():
                result['TS'] = source.ts()

            # Compute Npred value (only works for unbinned analysis)
            if not self._binned_mode and not self._onoff_mode:
                for observation in like.obs():
                    result['Npred'] += observation.npred(source)

            # Compute upper flux limit
            ulimit_value = -1.0
            if self['calc_ulim'].boolean():

                # Logging information
                self._log_header3(gammalib.EXPLICIT, 'Computing upper limit')

                # Create upper limit object  
                ulimit = ctools.ctulimit(like.obs())
                ulimit['srcname'] = self['srcname'].string()
                ulimit['eref']    = elogmean.TeV()

                # Try to run upper limit and catch exceptions
                try:
                    ulimit.run()
                    ulimit_value = ulimit.diff_ulimit()
                except:
                    self._log_string(gammalib.EXPLICIT, 'Upper limit '
                                     'calculation failed.')
                    ulimit_value = -1.0

                # Compute upper limit
                if ulimit_value > 0.0:
                    result['ulimit'] = ulimit_value * elogmean.MeV() * \
                                       elogmean.MeV() * gammalib.MeV2erg

            # Compute differential flux and flux error
            fitted_flux = source.spectral().eval(elogmean)
            parvalue    = source.spectral()[0].value()
            if parvalue != 0.0:
                rel_error = source.spectral()[0].error() / parvalue
                e_flux    = fitted_flux * rel_error
            else:
                e_flux = 0.0

            # Convert differential flux and flux error to nuFnu
            elogmean2          = elogmean.MeV() * elogmean.MeV()
            result['flux']     = fitted_flux * elogmean2 * gammalib.MeV2erg
            result['flux_err'] = e_flux      * elogmean2 * gammalib.MeV2erg

            # Log information
            value = '%e +/- %e' % (result['flux'], result['flux_err'])
            if self['calc_ulim'].boolean() and result['ulimit'] > 0.0:
                value += ' [< %e]' % (result['ulimit'])
            value += ' erg/cm2/s'
            if self['calc_ts'].boolean() and result['TS'] > 0.0:
                value += ' (TS = %.3f)' % (result['TS'])
            self._log_value(gammalib.TERSE, 'Bin '+str(i+1), value)

        # ... otherwise if logL is zero then signal that bin is
        # skipped
        else:
            value = 'No event in this bin. Likelihood is zero. Bin is skipped.'
            self._log_value(gammalib.TERSE, 'Bin '+str(i+1), value)

        # Return result
        return result
Example #46
    def _fit_energy_bin(self, i):
        """
        Fit data for one energy bin

        Parameters
        ----------
        i : int
            Energy bin index

        Returns
        -------
        result : dict
            Dictionary with fit results
        """

        # Write header for energy bin
        self._log_header2(gammalib.EXPLICIT, 'Energy bin ' + str(i + 1))

        # Get energy boundaries
        emin = self._ebounds.emin(i)
        emax = self._ebounds.emax(i)
        elogmean = self._ebounds.elogmean(i)

        # Select observations for energy bin
        obs = self._select_obs(emin, emax)

        # Initialise dictionary
        result = {
            'energy': elogmean.TeV(),
            'energy_low': (elogmean - emin).TeV(),
            'energy_high': (emax - elogmean).TeV(),
            'flux': 0.0,
            'flux_err': 0.0,
            'TS': 0.0,
            'ulimit': 0.0,
            'Npred': 0.0
        }

        # Write header for fitting
        self._log_header3(gammalib.EXPLICIT, 'Performing fit in energy bin')

        # Setup maximum likelihood fit
        like = ctools.ctlike(obs)
        like['edisp'] = self['edisp'].boolean()
        like['nthreads'] = 1  # Avoids OpenMP conflict

        # If the chatter level is verbose and debugging is requested then
        # also switch on the debug mode in ctlike
        if self._logVerbose() and self._logDebug():
            like['debug'] = True

        # Perform maximum likelihood fit
        like.run()

        # Write model results for explicit chatter level
        self._log_string(gammalib.EXPLICIT, str(like.obs().models()))

        # Continue only if log-likelihood is non-zero
        if like.obs().logL() != 0.0:

            # Get results
            fitted_models = like.obs().models()
            source = fitted_models[self['srcname'].string()]

            # Extract Test Statistic value
            if self['calc_ts'].boolean():
                result['TS'] = source.ts()

            # Compute Npred value (only works for unbinned analysis)
            if not self._binned_mode and not self._onoff_mode:
                for observation in like.obs():
                    result['Npred'] += observation.npred(source)

            # Compute upper flux limit
            ulimit_value = -1.0
            if self['calc_ulim'].boolean():

                # Logging information
                self._log_header3(gammalib.EXPLICIT,
                                  'Computing upper limit for energy bin')

                # Create upper limit object
                ulimit = ctools.ctulimit(like.obs())
                ulimit['srcname'] = self['srcname'].string()
                ulimit['eref'] = elogmean.TeV()

                # If the chatter level is verbose and debugging is requested
                # then also switch on the debug mode in ctulimit
                if self._logVerbose() and self._logDebug():
                    ulimit['debug'] = True

                # Try to run upper limit and catch exceptions
                try:
                    ulimit.run()
                    ulimit_value = ulimit.diff_ulimit()
                except:
                    self._log_string(gammalib.EXPLICIT, 'Upper limit '
                                     'calculation failed.')
                    ulimit_value = -1.0

                # Compute upper limit
                if ulimit_value > 0.0:
                    result['ulimit'] = ulimit_value * elogmean.MeV() * \
                                       elogmean.MeV() * gammalib.MeV2erg

            # Compute differential flux and flux error
            fitted_flux = source.spectral().eval(elogmean)
            parvalue = source.spectral()[0].value()
            if parvalue != 0.0:
                rel_error = source.spectral()[0].error() / parvalue
                e_flux = fitted_flux * rel_error
            else:
                e_flux = 0.0

            # If the source model is a cube then multiply-in the cube
            # spectrum
            if source.spatial().classname() == 'GModelSpatialDiffuseCube':
                dir = gammalib.GSkyDir()
                source.spatial().set_mc_cone(dir, 180.0)
                norm = source.spatial().spectrum().eval(elogmean)
                fitted_flux *= norm
                e_flux *= norm

            # Convert differential flux and flux error to nuFnu
            elogmean2 = elogmean.MeV() * elogmean.MeV()
            result['flux'] = fitted_flux * elogmean2 * gammalib.MeV2erg
            result['flux_err'] = e_flux * elogmean2 * gammalib.MeV2erg

            # Log information
            value = '%e +/- %e' % (result['flux'], result['flux_err'])
            if self['calc_ulim'].boolean() and result['ulimit'] > 0.0:
                value += ' [< %e]' % (result['ulimit'])
            value += ' erg/cm2/s'
            if self['calc_ts'].boolean() and result['TS'] > 0.0:
                value += ' (TS = %.3f)' % (result['TS'])
            self._log_value(gammalib.TERSE, 'Bin ' + str(i + 1), value)

        # ... otherwise if logL is zero then signal that bin is
        # skipped
        else:
            value = 'Likelihood is zero. Bin is skipped.'
            self._log_value(gammalib.TERSE, 'Bin ' + str(i + 1), value)

        # Return result
        return result
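
The nuFnu conversion used above, written out as a standalone sketch. The energy and differential-flux values below are illustrative assumptions, not values taken from the example:

import gammalib

# Convert a differential flux (ph/cm2/s/MeV) at the bin's log-mean energy
# into E^2 dN/dE in erg/cm2/s, as done for 'flux' and 'flux_err' above.
elogmean    = gammalib.GEnergy(1.0, 'TeV')   # assumed log-mean bin energy
fitted_flux = 3.0e-17                        # assumed flux in ph/cm2/s/MeV
nufnu       = fitted_flux * elogmean.MeV() * elogmean.MeV() * gammalib.MeV2erg
print('%e erg/cm2/s' % nufnu)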
Example #47
    def _fit_model(self):
        """
        Fit model to observations

        Returns
        -------
        results : list of dict
            List of dictionaries with fit results
        """
        # Write header
        self._log_header1(gammalib.TERSE, 'Generate spectrum')

        # Write header for fitting
        self._log_header3(gammalib.EXPLICIT, 'Performing model fit')

        # Perform maximum likelihood fit
        like = ctools.ctlike(self.obs())
        like['edisp'] = self['edisp'].boolean()
        like.run()

        # Initialise fit results
        results = []

        # Extract fit results
        model = like.obs().models()[self['srcname'].string()]
        spectrum = model.spectral()
        logL0 = like.obs().logL()

        # Write model results for explicit chatter level
        self._log_string(gammalib.EXPLICIT, str(like.obs().models()))

        # Loop over all nodes
        for i in range(spectrum.nodes()):

            # Get energy boundaries
            emin = self._ebounds.emin(i)
            emax = self._ebounds.emax(i)
            elogmean = self._ebounds.elogmean(i)

            # Initialise dictionary
            result = {
                'energy': elogmean.TeV(),
                'energy_low': (elogmean - emin).TeV(),
                'energy_high': (emax - elogmean).TeV(),
                'flux': 0.0,
                'flux_err': 0.0,
                'TS': 0.0,
                'ulimit': 0.0,
                'Npred': 0.0
            }

            # Convert differential flux and flux error to nuFnu
            norm = elogmean.MeV() * elogmean.MeV() * gammalib.MeV2erg
            result['flux'] = spectrum[i * 2 + 1].value() * norm
            result['flux_err'] = spectrum[i * 2 + 1].error() * norm

            # Compute upper flux limit
            ulimit_value = -1.0
            if self['calc_ulim'].boolean():

                # Logging information
                self._log_header3(
                    gammalib.EXPLICIT,
                    'Computing upper limit for node energy %f TeV' %
                    result['energy'])

                # Copy observation container
                obs = like.obs().copy()

                # Fix intensities of all nodes
                spectral = obs.models()[self['srcname'].string()].spectral()
                for par in spectral:
                    par.fix()

                # Create upper limit object
                ulimit = ctools.ctulimit(obs)
                ulimit['srcname'] = self['srcname'].string()
                ulimit['parname'] = 'Intensity%d' % i
                ulimit['eref'] = elogmean.TeV()
                ulimit['tol'] = 1.0e-3

                # Try to run upper limit and catch exceptions
                try:
                    ulimit.run()
                    ulimit_value = ulimit.diff_ulimit()
                except:
                    self._log_string(gammalib.EXPLICIT, 'Upper limit '
                                     'calculation failed.')
                    ulimit_value = -1.0

                # Compute upper limit
                if ulimit_value > 0.0:
                    result['ulimit'] = ulimit_value * elogmean.MeV() * \
                                       elogmean.MeV() * gammalib.MeV2erg

            # Compute TS
            if self['calc_ts'].boolean():

                # Copy observation container
                obs = like.obs().copy()

                # Set intensity of node to tiny value by scaling the value
                # by a factor 1e-8.
                par = obs.models()[self['srcname'].string()].spectral()[i * 2 + 1]
                par.autoscale()
                par.factor_min(1.0e-8)
                par.factor_value(1.0e-8)
                par.autoscale()
                par.fix()

                # Perform maximum likelihood fit
                tslike = ctools.ctlike(obs)
                tslike['edisp'] = self['edisp'].boolean()
                tslike.run()

                # Store Test Statistic
                model = tslike.obs().models()[self['srcname'].string()]
                logL1 = tslike.obs().logL()
                result['TS'] = 2.0 * (logL1 - logL0)

            # Log information
            value = '%e +/- %e' % (result['flux'], result['flux_err'])
            if self['calc_ulim'].boolean() and result['ulimit'] > 0.0:
                value += ' [< %e]' % (result['ulimit'])
            value += ' erg/cm2/s'
            if self['calc_ts'].boolean() and result['TS'] > 0.0:
                value += ' (TS = %.3f)' % (result['TS'])
            self._log_value(gammalib.TERSE, 'Bin ' + str(i + 1), value)

            # Append results
            results.append(result)

        # Return results
        return results
Example #48
def binned_pipeline(model_name, duration):
    """
    Binned analysis pipeline.
    """
    # Set script parameters
    caldb       = "prod2"
    irf         = "South_50h"
    ra          =   83.63
    dec         =   22.01
    rad_sim     =   10.0
    tstart      =    0.0
    tstop       = duration
    emin        =    0.1
    emax        =  100.0
    enumbins    =   40
    nxpix       =  200
    nypix       =  200
    binsz       =    0.02
    coordsys    = "CEL"
    proj        = "CAR"

    # Get start CPU time
    cpu_start = time.process_time()   # time.clock() was removed in Python 3.8

    # Simulate events
    sim = ctools.ctobssim()
    sim["inmodel"] = model_name
    sim["caldb"]   = caldb
    sim["irf"]     = irf
    sim["ra"]      = ra
    sim["dec"]     = dec
    sim["rad"]     = rad_sim
    sim["tmin"]    = tstart
    sim["tmax"]    = tstop
    sim["emin"]    = emin
    sim["emax"]    = emax
    sim.run()

    # Bin events into counts map
    bin = ctools.ctbin(sim.obs())
    bin["ebinalg"]  = "LOG"
    bin["emin"]     = emin
    bin["emax"]     = emax
    bin["enumbins"] = enumbins
    bin["nxpix"]    = nxpix
    bin["nypix"]    = nypix
    bin["binsz"]    = binsz
    bin["coordsys"] = coordsys
    bin["xref"]     = ra
    bin["yref"]     = dec
    bin["proj"]     = proj
    bin.run()

    # Get ctlike start CPU time
    cpu_ctlike = time.process_time()

    # Perform maximum likelihood fitting
    like = ctools.ctlike(bin.obs())
    like.run()

    # Get stop CPU time and compute elapsed times
    cpu_stop    = time.process_time()
    cpu_elapsed = cpu_stop - cpu_start
    cpu_ctlike  = cpu_stop - cpu_ctlike

    # Return
    return cpu_elapsed, cpu_ctlike
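
A minimal driver sketch for the binned pipeline above. The Crab model path and the 1800 s duration are assumptions chosen for illustration; the function itself additionally needs the time and ctools imports shown here:

import time
import ctools

# Hypothetical driver: run the binned pipeline on the bundled Crab model
# for a 30-minute observation and print the returned CPU times.
if __name__ == '__main__':
    elapsed, fit_time = binned_pipeline('${CTOOLS}/share/models/crab.xml', 1800.0)
    print('Total CPU time : %.3f s' % elapsed)
    print('ctlike CPU time: %.3f s' % fit_time)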
Example #49
def grb_simulation(sim_in, config_in, model_xml, fits_header_0, counter):
    """
    Function to handle the GRB simulation.
    :param sim_in: the yaml file for the simulation (unpacked as a dict of dicts)
    :param config_in: the yaml file for the job handling (unpacked as a dict of dicts)
    :param model_xml: the XML model name for the source under analysis
    :param fits_header_0: header for the fits file of the GRB model to use. Used in the visibility calculation
    :param counter: integer number. counts the id of the source realization
    :return: significance obtained with the activated detection methods
    """

    src_name = model_xml.split('/')[-1].split('model_')[1][:-4]
    print(src_name, counter)

    ctools_pipe_path = create_path(config_in['exe']['software_path'])
    ctobss_params = sim_in['ctobssim']

    seed = int(counter)*10

    # PARAMETERS FROM THE CTOBSSIM
    sim_t_min = u.Quantity(ctobss_params['time']['t_min']).to_value(u.s)
    sim_t_max = u.Quantity(ctobss_params['time']['t_max']).to_value(u.s)
    sim_e_min = u.Quantity(ctobss_params['energy']['e_min']).to_value(u.TeV)
    sim_e_max = u.Quantity(ctobss_params['energy']['e_max']).to_value(u.TeV)
    sim_rad = ctobss_params['radius']

    models = sim_in['source']
    source_type = models['type']

    if source_type == "GRB":
        phase_path = "/" + models['phase']
    elif source_type == "GW":
        phase_path = ""

    output_path = create_path(sim_in['output']['path'] + phase_path + '/' + src_name)

    save_simulation = ctobss_params['save_simulation']

    with open(f"{output_path}/GRB-{src_name}_seed-{seed}.txt", "w") as f:
        f.write(f"GRB,seed,time_start,time_end,sigma_lima,sqrt_TS_onoff,sqrt_TS_std\n")
        # VISIBILITY PART
        # choose between AUTO mode (use visibility) and MANUAL mode (manually insert IRF)
        simulation_mode = sim_in['IRF']['mode']

        if simulation_mode == "auto":
            print("using visibility to get IRFs")

            # GRB information from the fits header
            ra = fits_header_0['RA']
            dec = fits_header_0['DEC']
            t0 = Time(fits_header_0['GRBJD'])

            irf_dict = sim_in['IRF']
            site = irf_dict['site']
            obs_condition = Observability(site=site)
            obs_condition.set_irf(irf_dict)

            t_zero_mode = ctobss_params['time']['t_zero'].lower()

            if t_zero_mode == "vis":  # t_zero_mode has been lower-cased above
                # check if the source is visible one day after the onset of the source
                print("time starts when source becomes visible")
                obs_condition.Proposal_obTime = 86400
                condition_check = obs_condition.check(RA=ra, DEC=dec, t_start=t0)

            elif t_zero_mode == "onset":
                print("time starts from the onset of the GRB")
                condition_check = obs_condition.check(RA=ra, DEC=dec, t_start=t0, t_min=sim_t_min, t_max=sim_t_max)

            else:
                print(f"Choose some proper mode between 'VIS' and 'ONSET'. {t_zero_mode} is not a valid one.")
                sys.exit()

            # NO IRF in AUTO mode ==> No simulation! == EXIT!
            if len(condition_check) == 0:
                f.write(f"{src_name},{seed}, -1, -1, -1, -1, -1\n")
                sys.exit()

        elif simulation_mode == "manual":
            print("manual picking IRF")

            # find proper IRF name
            irf = IRFPicker(sim_in, ctools_pipe_path)
            name_irf = irf.irf_pick()

            backgrounds_path = create_path(ctobss_params['bckgrnd_path'])
            fits_background_list = glob.glob(
                f"{backgrounds_path}/{irf.prod_number}_{irf.prod_version}_{name_irf}/background*.fits")

            if len(fits_background_list) == 0:
                print(f"No background for IRF {name_irf}")
                sys.exit()

            fits_background_list = sorted(fits_background_list, key=sort_background)
            background_fits = fits_background_list[int(counter) - 1]
            obs_back = gammalib.GCTAObservation(background_fits)

        else:
            print(f"wrong input for IRF - mode. Input is {simulation_mode}. Use 'auto' or 'manual' instead")
            sys.exit()

        if irf.prod_number == "3b" and irf.prod_version == 0:
            caldb = "prod3b"
        else:
            caldb = f'prod{irf.prod_number}-v{irf.prod_version}'

        # source simulation
        sim = ctools.ctobssim()
        sim['inmodel'] = model_xml
        sim['caldb'] = caldb
        sim['irf'] = name_irf
        sim['ra'] = 0.0
        sim['dec'] = 0.0
        sim['rad'] = sim_rad
        sim['tmin'] = sim_t_min
        sim['tmax'] = sim_t_max
        sim['emin'] = sim_e_min
        sim['emax'] = sim_e_max
        sim['seed'] = seed
        sim.run()

        obs = sim.obs()

        # # move the source photons from closer to (RA,DEC)=(0,0), where the background is located
        # for event in obs[0].events():
        #     # ra_evt = event.dir().dir().ra()
        #     dec_evt = event.dir().dir().dec()
        #     ra_evt_deg = event.dir().dir().ra_deg()
        #     dec_evt_deg = event.dir().dir().dec_deg()
        #
        #     ra_corrected = (ra_evt_deg - ra_pointing)*np.cos(dec_evt)
        #     dec_corrected = dec_evt_deg - dec_pointing
        #     event.dir().dir().radec_deg(ra_corrected, dec_corrected)

        # append all background events to GRB ones ==> there's just one observation and not two
        for event in obs_back.events():
            obs[0].events().append(event)

        # ctselect to save data on disk
        if save_simulation:
            event_list_path = create_path(f"{ctobss_params['output_path']}/{src_name}/")
            #obs.save(f"{event_list_path}/event_list_source-{src_name}_seed-{seed:03}.fits")

            select_time = ctools.ctselect(obs)
            select_time['rad'] = sim_rad
            select_time['tmin'] = sim_t_min
            select_time['tmax'] = sim_t_max
            select_time['emin'] = sim_e_min
            select_time['emax'] = sim_e_max
            select_time['outobs'] = f"{event_list_path}/event_list_source-{src_name}_{seed:03}.fits"
            select_time.run()
            sys.exit()

        # delete all 70+ models from the obs def file...not needed any more
        obs.models(gammalib.GModels())

        # CTSELECT
        select_time = sim_in['ctselect']['time_cut']
        slices = int(select_time['t_slices'])

        if slices == 0:
            times = [sim_t_min, sim_t_max]
            times_start = times[:-1]
            times_end = times[1:]
        elif slices > 0:
            time_mode = select_time['mode']
            if time_mode == "log":
                times = np.logspace(np.log10(sim_t_min), np.log10(sim_t_max), slices + 1, endpoint=True)
            elif time_mode == "lin":
                times = np.linspace(sim_t_min, sim_t_max, slices + 1, endpoint=True)
            else:
                print(f"{time_mode} not valid. Use 'log' or 'lin' ")
                sys.exit()

            if select_time['obs_mode'] == "iter":
                times_start = times[:-1]
                times_end = times[1:]
            elif select_time['obs_mode'] == "cumul":
                times_start = np.repeat(times[0], slices)         # this is to use the same array structure for the loop
                times_end = times[1:]
            elif select_time['obs_mode'] == "all":
                begins, ends = np.meshgrid(times[:-1], times[1:])
                mask_times = begins < ends
                times_start = begins[mask_times].ravel()
                times_end = ends[mask_times].ravel()
            else:
                print(f"obs_mode: {select_time['obs_mode']} not supported")
                sys.exit()

        else:
            print(f"value {slices} not supported...check yaml file")
            sys.exit()

        # ------------------------------------
        # ----- TIME LOOP STARTS HERE --------
        # ------------------------------------

        ctlike_mode = sim_in['detection']
        mode_1 = ctlike_mode['counts']
        mode_2 = ctlike_mode['ctlike-onoff']
        mode_3 = ctlike_mode['ctlike-std']

        for t_in, t_end in zip(times_start, times_end):
            sigma_onoff = 0
            sqrt_ts_like_onoff = 0
            sqrt_ts_like_std = 0
            print("-----------------------------")
            print(f"t_in: {t_in:.2f}, t_end: {t_end:.2f}")

            # different ctlikes (onoff or std) need different files.
            # will be appended here and used later on for the final likelihood
            dict_obs_select_time = {}

            # perform time selection for this specific time bin
            select_time = ctools.ctselect(obs)
            select_time['rad'] = sim_rad
            select_time['tmin'] = t_in
            select_time['tmax'] = t_end
            select_time['emin'] = sim_e_min
            select_time['emax'] = sim_e_max
            select_time.run()

            if mode_1:
                fits_temp_title = f"skymap_{seed}_{t_in:.2f}_{t_end:.2f}.fits"
                pars_counts = ctlike_mode['pars_counts']
                scale = float(pars_counts['scale'])
                npix = 2*int(sim_rad/scale)
                skymap = ctools.ctskymap(select_time.obs().copy())
                skymap['emin'] = sim_e_min
                skymap['emax'] = sim_e_max
                skymap['nxpix'] = npix
                skymap['nypix'] = npix
                skymap['binsz'] = scale
                skymap['proj'] = 'TAN'
                skymap['coordsys'] = 'CEL'
                skymap['xref'] = 0
                skymap['yref'] = 0
                skymap['bkgsubtract'] = 'RING'
                skymap['roiradius'] = pars_counts['roiradius']
                skymap['inradius'] = pars_counts['inradius']
                skymap['outradius'] = pars_counts['outradius']
                skymap['iterations'] = pars_counts['iterations']
                skymap['threshold'] = pars_counts['threshold']
                skymap['outmap'] = fits_temp_title
                skymap.execute()

                input_fits = fits.open(fits_temp_title)
                datain = input_fits[2].data
                datain[np.isnan(datain)] = 0.0
                datain[np.isinf(datain)] = 0.0

                sigma_onoff = np.max(datain)
                os.remove(fits_temp_title)

            if mode_3:
                dict_obs_select_time['std'] = select_time.obs().copy()

            if mode_2:
                onoff_time_sel = cscripts.csphagen(select_time.obs().copy())
                onoff_time_sel['inmodel'] = 'NONE'
                onoff_time_sel['ebinalg'] = 'LOG'
                onoff_time_sel['emin'] = sim_e_min
                onoff_time_sel['emax'] = sim_e_max
                onoff_time_sel['enumbins'] = 30
                onoff_time_sel['coordsys'] = 'CEL'
                onoff_time_sel['ra'] = 0.0
                onoff_time_sel['dec'] = 0.5
                onoff_time_sel['rad'] = 0.2
                onoff_time_sel['bkgmethod'] = 'REFLECTED'
                onoff_time_sel['use_model_bkg'] = False
                onoff_time_sel['stack'] = False
                onoff_time_sel.run()

                dict_obs_select_time['onoff'] = onoff_time_sel.obs().copy()

                del onoff_time_sel

                # print(f"sigma ON/OFF: {sigma_onoff:.2f}")

            if mode_2 or mode_3:

                # Low Energy PL fitting
                # to be saved in this dict
                dict_pl_ctlike_out = {}

                e_min_pl_ctlike = 0.030
                e_max_pl_ctlike = 0.080

                # simple ctobssim copy and select for ctlike-std
                select_pl_ctlike = ctools.ctselect(select_time.obs().copy())
                select_pl_ctlike['rad'] = 3
                select_pl_ctlike['tmin'] = t_in
                select_pl_ctlike['tmax'] = t_end
                select_pl_ctlike['emin'] = e_min_pl_ctlike
                select_pl_ctlike['emax'] = e_max_pl_ctlike
                select_pl_ctlike.run()

                # create test source
                src_dir = gammalib.GSkyDir()
                src_dir.radec_deg(0, 0.5)
                spatial = gammalib.GModelSpatialPointSource(src_dir)

                # create and append source spectral model
                spectral = gammalib.GModelSpectralPlaw()
                spectral['Prefactor'].value(5.5e-16)
                spectral['Prefactor'].scale(1e-16)
                spectral['Index'].value(-2.6)
                spectral['Index'].scale(-1.0)
                spectral['PivotEnergy'].value(50000)
                spectral['PivotEnergy'].scale(1e3)
                model_src = gammalib.GModelSky(spatial, spectral)
                model_src.name('PL_fit_temp')
                model_src.tscalc(True)

                spectral_back = gammalib.GModelSpectralPlaw()
                spectral_back['Prefactor'].value(1.0)
                spectral_back['Prefactor'].scale(1.0)
                spectral_back['Index'].value(0)
                spectral_back['PivotEnergy'].value(300000)
                spectral_back['PivotEnergy'].scale(1e6)

                if mode_2:
                    back_model = gammalib.GCTAModelIrfBackground()
                    back_model.instruments('CTAOnOff')
                    back_model.name('Background')
                    back_model.spectral(spectral_back.copy())

                    onoff_pl_ctlike_lima = cscripts.csphagen(select_pl_ctlike.obs().copy())
                    onoff_pl_ctlike_lima['inmodel'] = 'NONE'
                    onoff_pl_ctlike_lima['ebinalg'] = 'LOG'
                    onoff_pl_ctlike_lima['emin'] = e_min_pl_ctlike
                    onoff_pl_ctlike_lima['emax'] = e_max_pl_ctlike
                    onoff_pl_ctlike_lima['enumbins'] = 30
                    onoff_pl_ctlike_lima['coordsys'] = 'CEL'
                    onoff_pl_ctlike_lima['ra'] = 0.0
                    onoff_pl_ctlike_lima['dec'] = 0.5
                    onoff_pl_ctlike_lima['rad'] = 0.2
                    onoff_pl_ctlike_lima['bkgmethod'] = 'REFLECTED'
                    onoff_pl_ctlike_lima['use_model_bkg'] = False
                    onoff_pl_ctlike_lima['stack'] = False
                    onoff_pl_ctlike_lima.run()

                    onoff_pl_ctlike_lima.obs().models(gammalib.GModels())
                    onoff_pl_ctlike_lima.obs().models().append(model_src.copy())
                    onoff_pl_ctlike_lima.obs().models().append(back_model.copy())

                    like_pl = ctools.ctlike(onoff_pl_ctlike_lima.obs())
                    like_pl['refit'] = True
                    like_pl.run()
                    dict_pl_ctlike_out['onoff'] = like_pl.obs().copy()
                    del onoff_pl_ctlike_lima
                    del like_pl

                if mode_3:
                    models_ctlike_std = gammalib.GModels()
                    models_ctlike_std.append(model_src.copy())
                    back_model = gammalib.GCTAModelIrfBackground()
                    back_model.instruments('CTA')
                    back_model.name('Background')
                    back_model.spectral(spectral_back.copy())
                    models_ctlike_std.append(back_model)

                    # save models
                    xmlmodel_PL_ctlike_std = 'test_model_PL_ctlike_std.xml'
                    models_ctlike_std.save(xmlmodel_PL_ctlike_std)
                    del models_ctlike_std

                    like_pl = ctools.ctlike(select_pl_ctlike.obs().copy())
                    like_pl['inmodel'] = xmlmodel_PL_ctlike_std
                    like_pl['refit'] = True
                    like_pl.run()
                    dict_pl_ctlike_out['std'] = like_pl.obs().copy()
                    del like_pl

                del spatial
                del spectral
                del model_src
                del select_pl_ctlike

                # EXTENDED CTLIKE
                for key in dict_obs_select_time.keys():
                    likelihood_pl_out = dict_pl_ctlike_out[key]
                    selected_data = dict_obs_select_time[key]

                    pref_out_pl = likelihood_pl_out.models()[0]['Prefactor'].value()
                    index_out_pl = likelihood_pl_out.models()[0]['Index'].value()
                    pivot_out_pl = likelihood_pl_out.models()[0]['PivotEnergy'].value()

                    expplaw = gammalib.GModelSpectralExpPlaw()
                    expplaw['Prefactor'].value(pref_out_pl)
                    expplaw['Index'].value(index_out_pl)
                    expplaw['PivotEnergy'].value(pivot_out_pl)
                    expplaw['CutoffEnergy'].value(80e3)

                    if key == "onoff":
                        selected_data.models()[0].name(src_name)
                        selected_data.models()[0].tscalc(True)
                        selected_data.models()[0].spectral(expplaw.copy())

                        like = ctools.ctlike(selected_data)
                        like['refit'] = True
                        like.run()
                        ts = like.obs().models()[0].ts()
                        if ts > 0:
                            sqrt_ts_like_onoff = np.sqrt(like.obs().models()[0].ts())
                        else:
                            sqrt_ts_like_onoff = 0

                        del like

                    if key == "std":
                        models_fit_ctlike = gammalib.GModels()

                        # create test source
                        src_dir = gammalib.GSkyDir()
                        src_dir.radec_deg(0, 0.5)
                        spatial = gammalib.GModelSpatialPointSource(src_dir)

                        # append spatial and spectral models
                        model_src = gammalib.GModelSky(spatial, expplaw.copy())
                        model_src.name('Source_fit')
                        model_src.tscalc(True)
                        models_fit_ctlike.append(model_src)

                        # create and append background
                        back_model = gammalib.GCTAModelIrfBackground()
                        back_model.instruments('CTA')
                        back_model.name('Background')
                        spectral_back = gammalib.GModelSpectralPlaw()
                        spectral_back['Prefactor'].value(1.0)
                        spectral_back['Prefactor'].scale(1.0)
                        spectral_back['Index'].value(0)
                        spectral_back['PivotEnergy'].value(300000)
                        spectral_back['PivotEnergy'].scale(1e6)
                        back_model.spectral(spectral_back)
                        models_fit_ctlike.append(back_model)

                        # save models
                        input_ctlike_xml = "model_GRB_fit_ctlike_in.xml"
                        models_fit_ctlike.save(input_ctlike_xml)
                        del models_fit_ctlike

                        like = ctools.ctlike(selected_data)
                        like['inmodel'] = input_ctlike_xml
                        like['refit'] = True
                        like.run()
                        ts = like.obs().models()[0].ts()
                        if ts > 0:
                            sqrt_ts_like_std = np.sqrt(like.obs().models()[0].ts())
                        else:
                            sqrt_ts_like_std = 0

                        del like

                    # E_cut_off = like.obs().models()[0]['CutoffEnergy'].value()
                    # E_cut_off_error = like.obs().models()[0]['CutoffEnergy'].error()

                    # print(f"sqrt(TS) {key}: {np.sqrt(ts_like):.2f}")
                    # print(f"E_cut_off {key}: {E_cut_off:.2f} +- {E_cut_off_error:.2f}")
                del dict_pl_ctlike_out

            f.write(f"{src_name},{seed},{t_in:.2f},{t_end:.2f},{sigma_onoff:.2f},{sqrt_ts_like_onoff:.2f},{sqrt_ts_like_std:.2f}\n")
            del dict_obs_select_time
            del select_time
Example #50
def run_pipeline(obs, ra=83.63, dec=22.01, emin=0.1, emax=100.0,
                 enumbins=20, nxpix=200, nypix=200, binsz=0.02,
                 coordsys="CEL", proj="CAR",
                 model="${CTOOLS}/share/models/crab.xml",
                 caldb="prod2", irf="South_50h",
                 debug=False):
    """
    Simulation and stacked analysis pipeline.

    Keywords:
     ra       - RA of cube centre [deg] (default: 83.63)
     dec      - DEC of cube centre [deg] (default: 22.01)
     emin     - Minimum energy of cube [TeV] (default: 0.1)
     emax     - Maximum energy of cube [TeV] (default: 100.0)
     enumbins - Number of energy bins in cube (default: 20)
     nxpix    - Number of RA pixels in cube (default: 200)
     nypix    - Number of DEC pixels in cube (default: 200)
     binsz    - Spatial cube bin size [deg] (default: 0.02)
     coordsys - Cube coordinate system (CEL or GAL)
     proj     - Cube World Coordinate System (WCS) projection
     debug    - Enable debugging (default: False)
    """
    # Simulate events
    sim = ctools.ctobssim(obs)
    sim["debug"]     = debug
    sim["outevents"] = "obs.xml"
    sim.execute()

    # Bin events into counts map
    bin = ctools.ctbin()
    bin["inobs"]    = "obs.xml"
    bin["outcube"]  = "cntcube.fits"
    bin["ebinalg"]  = "LOG"
    bin["emin"]     = emin
    bin["emax"]     = emax
    bin["enumbins"] = enumbins
    bin["nxpix"]    = nxpix
    bin["nypix"]    = nypix
    bin["binsz"]    = binsz
    bin["coordsys"] = coordsys
    bin["proj"]     = proj
    bin["xref"]     = ra
    bin["yref"]     = dec
    bin["debug"]    = debug
    bin.execute()

    # Create exposure cube
    expcube = ctools.ctexpcube()
    expcube["inobs"]    = "obs.xml"
    expcube["incube"]   = "cntcube.fits"
    expcube["outcube"]  = "expcube.fits"
    expcube["caldb"]    = caldb
    expcube["irf"]      = irf
    expcube["ebinalg"]  = "LOG"
    expcube["emin"]     = emin
    expcube["emax"]     = emax
    expcube["enumbins"] = enumbins
    expcube["nxpix"]    = nxpix
    expcube["nypix"]    = nypix
    expcube["binsz"]    = binsz
    expcube["coordsys"] = coordsys
    expcube["proj"]     = proj
    expcube["xref"]     = ra
    expcube["yref"]     = dec
    expcube["debug"]    = debug
    expcube.execute()

    # Create PSF cube
    psfcube = ctools.ctpsfcube()
    psfcube["inobs"]    = "obs.xml"
    psfcube["incube"]   = "NONE"
    psfcube["outcube"]  = "psfcube.fits"
    psfcube["caldb"]    = caldb
    psfcube["irf"]      = irf
    psfcube["ebinalg"]  = "LOG"
    psfcube["emin"]     = emin
    psfcube["emax"]     = emax
    psfcube["enumbins"] = enumbins
    psfcube["nxpix"]    = 10
    psfcube["nypix"]    = 10
    psfcube["binsz"]    = 1.0
    psfcube["coordsys"] = coordsys
    psfcube["proj"]     = proj
    psfcube["xref"]     = ra
    psfcube["yref"]     = dec
    psfcube["debug"]    = debug
    psfcube.execute()

    # Create background cube
    bkgcube = ctools.ctbkgcube()
    bkgcube["inobs"]    = "obs.xml"
    bkgcube["inmodel"]  = model
    bkgcube["incube"]   = "cntcube.fits"
    bkgcube["outcube"]  = "bkgcube.fits"
    bkgcube["outmodel"] = "model_bkg.xml"
    bkgcube["caldb"]    = caldb
    bkgcube["irf"]      = irf
    bkgcube["ebinalg"]  = "LOG"
    bkgcube["emin"]     = emin
    bkgcube["emax"]     = emax
    bkgcube["enumbins"] = enumbins
    bkgcube["nxpix"]    = 10
    bkgcube["nypix"]    = 10
    bkgcube["binsz"]    = 1.0
    bkgcube["coordsys"] = coordsys
    bkgcube["proj"]     = proj
    bkgcube["xref"]     = ra
    bkgcube["yref"]     = dec
    bkgcube["debug"]    = debug
    bkgcube.execute()

    # Perform maximum likelihood fitting
    like = ctools.ctlike()
    like["inobs"]    = "cntcube.fits"
    like["inmodel"]  = "model_bkg.xml"
    like["outmodel"] = "fit_results.xml"
    like["expcube"]  = "expcube.fits"
    like["psfcube"]  = "psfcube.fits"
    like["bkgcube"]  = "bkgcube.fits"
    like["caldb"]    = caldb
    like["irf"]      = irf
    like["debug"]    = True # Switch this always on for results in console
    like.execute()

    # Return
    return
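
A short usage sketch for the stacked pipeline above. The observation definition file name is an assumption; it must point to observations with attached models so that ctobssim can simulate events:

import gammalib

# Hypothetical call: load an observation container and run the full
# simulation + stacked likelihood pipeline defined above.
obs = gammalib.GObservations('obs_definition.xml')
run_pipeline(obs, ra=83.63, dec=22.01, emin=0.1, emax=100.0, debug=True)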
Example #51
def stacked_pipeline(duration):
    """
    Cube-style analysis pipeline.
    """
    # Set script parameters
    model_name  = "${CTOOLS}/share/models/crab.xml"
    caldb       = "prod2"
    irf         = "South_50h"
    ra          =   83.63
    dec         =   22.01
    rad_sim     =   10.0
    tstart      =    0.0
    tstop       = duration
    emin        =    0.1
    emax        =  100.0
    enumbins    =   20
    nxpix       =  200
    nypix       =  200
    binsz       =    0.02
    coordsys    = "CEL"
    proj        = "CAR"

    # Get start CPU time (time.clock() was removed in Python 3.8; a separate
    # name is used so the simulation start time 'tstart' is not overwritten)
    cpu_start = time.process_time()

    # Simulate events
    sim = ctools.ctobssim()
    sim["inmodel"].filename(model_name)
    sim["caldb"].string(caldb)
    sim["irf"].string(irf)
    sim["ra"].real(ra)
    sim["dec"].real(dec)
    sim["rad"].real(rad_sim)
    sim["tmin"].real(tstart)
    sim["tmax"].real(tstop)
    sim["emin"].real(emin)
    sim["emax"].real(emax)
    sim.run()

    # Bin events into counts map
    bin = ctools.ctbin(sim.obs())
    bin["ebinalg"].string("LOG")
    bin["emin"].real(emin)
    bin["emax"].real(emax)
    bin["enumbins"].integer(enumbins)
    bin["nxpix"].integer(nxpix)
    bin["nypix"].integer(nypix)
    bin["binsz"].real(binsz)
    bin["coordsys"].string(coordsys)
    bin["proj"].string(proj)
    bin["xref"].real(ra)
    bin["yref"].real(dec)
    bin.run()

    # Create exposure cube
    expcube = ctools.ctexpcube(sim.obs())
    expcube["incube"].filename("NONE")
    expcube["caldb"].string(caldb)
    expcube["irf"].string(irf)
    expcube["ebinalg"].string("LOG")
    expcube["emin"].real(emin)
    expcube["emax"].real(emax)
    expcube["enumbins"].integer(enumbins)
    expcube["nxpix"].integer(nxpix)
    expcube["nypix"].integer(nypix)
    expcube["binsz"].real(binsz)
    expcube["coordsys"].string(coordsys)
    expcube["proj"].string(proj)
    expcube["xref"].real(ra)
    expcube["yref"].real(dec)
    expcube.run()

    # Create PSF cube
    psfcube = ctools.ctpsfcube(sim.obs())
    psfcube["incube"].filename("NONE")
    psfcube["caldb"].string(caldb)
    psfcube["irf"].string(irf)
    psfcube["ebinalg"].string("LOG")
    psfcube["emin"].real(emin)
    psfcube["emax"].real(emax)
    psfcube["enumbins"].integer(enumbins)
    psfcube["nxpix"].integer(10)
    psfcube["nypix"].integer(10)
    psfcube["binsz"].real(1.0)
    psfcube["coordsys"].string(coordsys)
    psfcube["proj"].string(proj)
    psfcube["xref"].real(ra)
    psfcube["yref"].real(dec)
    psfcube.run()

    # Create background cube
    bkgcube = ctools.ctbkgcube(sim.obs())
    bkgcube["incube"].filename("NONE")
    bkgcube["ebinalg"].string("LOG")
    bkgcube["emin"].real(emin)
    bkgcube["emax"].real(emax)
    bkgcube["enumbins"].integer(enumbins)
    bkgcube["nxpix"].integer(10)
    bkgcube["nypix"].integer(10)
    bkgcube["binsz"].real(1.0)
    bkgcube["coordsys"].string(coordsys)
    bkgcube["proj"].string(proj)
    bkgcube["xref"].real(ra)
    bkgcube["yref"].real(dec)
    bkgcube.run()

    # Attach background model to observation container
    bin.obs().models(bkgcube.models())

    # Set Exposure and Psf cube for first CTA observation
    # (ctbin will create an observation with a single container)
    bin.obs()[0].response(expcube.expcube(), psfcube.psfcube(), bkgcube.bkgcube())

    # Get ctlike start CPU time
    cpu_ctlike = time.process_time()

    # Perform maximum likelihood fitting
    like = ctools.ctlike(bin.obs())
    like.run()

    # Get stop CPU time and compute elapsed times
    cpu_stop = time.process_time()
    telapsed = cpu_stop - cpu_start
    tctlike  = cpu_stop - cpu_ctlike

    # Return
    return telapsed, tctlike
Example #52
File: csspec.py  Project: ctools/ctools
    def _fit_model(self):
        """
        Fit model to observations

        Returns
        -------
        results : list of dict
            List of dictionaries with fit results
        """
        # Write header
        self._log_header1(gammalib.TERSE, 'Generate spectrum')

        # Write header for fitting
        self._log_header3(gammalib.EXPLICIT, 'Performing fit')

        # Perform maximum likelihood fit
        like          = ctools.ctlike(self.obs())
        like['edisp'] = self['edisp'].boolean()
        like.run()

        # Initialise fit results
        results = []

        # Extract fit results
        model    = like.obs().models()[self['srcname'].string()]
        spectrum = model.spectral()
        ts       = model.ts()

        # Loop over all nodes
        for i in range(spectrum.nodes()):

            # Get energy boundaries
            emin      = self._ebounds.emin(i)
            emax      = self._ebounds.emax(i)
            elogmean  = self._ebounds.elogmean(i)

            # Initialise dictionary
            result = {'energy':      elogmean.TeV(),
                      'energy_low':  (elogmean - emin).TeV(),
                      'energy_high': (emax - elogmean).TeV(),
                      'flux':        0.0,
                      'flux_err':    0.0,
                      'TS':          0.0,
                      'ulimit':      0.0,
                      'Npred':       0.0}

            # Convert differential flux and flux error to nuFnu
            norm               = elogmean.MeV() * elogmean.MeV()  * gammalib.MeV2erg
            result['flux']     = spectrum[i*2+1].value() * norm
            result['flux_err'] = spectrum[i*2+1].error() * norm

            # Compute TS
            if self['calc_ts'].boolean():

                # Copy observation container
                obs = like.obs().copy()

                # Set intensity of node to tiny value
                par = obs.models()[self['srcname'].string()].spectral()[i*2+1]
                value = 1.0e-30 * par.value()
                if par.min() > value:
                    par.min(value)
                par.value(value)
                par.fix()

                # Perform maximum likelihood fit
                tslike          = ctools.ctlike(obs)
                tslike['edisp'] = self['edisp'].boolean()
                tslike.run()

                # Store Test Statistic
                model        = tslike.obs().models()[self['srcname'].string()]
                result['TS'] = ts - model.ts()

            # Log information
            value = '%e +/- %e' % (result['flux'], result['flux_err'])
            if self['calc_ulim'].boolean() and result['ulimit'] > 0.0:
                value += ' [< %e]' % (result['ulimit'])
            value += ' erg/cm2/s'
            if self['calc_ts'].boolean() and result['TS'] > 0.0:
                value += ' (TS = %.3f)' % (result['TS'])
            self._log_value(gammalib.TERSE, 'Bin '+str(i+1), value)

            # Append results
            results.append(result)

        # Return results
        return results
Example #53
def stackedPipeline(
    name="Crab",
    obsfile="index.xml",
    l=0.01,
    b=0.01,
    emin=0.1,
    emax=100.0,
    enumbins=20,
    nxpix=200,
    nypix=200,
    binsz=0.02,
    coordsys="CEL",
    proj="CAR",
    caldb="prod2",
    irf="acdc1a",
    debug=False,
    inmodel="Crab",
    outmodel="results",
):
    """
    Simulation and stacked analysis pipeline
    
    Parameters
    ----------
    obs : `~gammalib.GObservations`
    Observation container
    ra : float, optional
    Right Ascension of counts cube centre (deg)
    dec : float, optional
    Declination of Region of counts cube centre (deg)
    emin : float, optional
    Minimum energy (TeV)
    emax : float, optional
    Maximum energy (TeV)
    enumbins : int, optional
    Number of energy bins
    nxpix : int, optional
    Number of pixels in X axis
    nypix : int, optional
    Number of pixels in Y axis
    binsz : float, optional
    Pixel size (deg)
    coordsys : str, optional
    Coordinate system
    proj : str, optional
    Coordinate projection
    debug : bool, optional
    Debug function
    """

    # Bin events into counts map
    bin = ctools.ctbin()
    bin["inobs"] = obsfile
    bin["ebinalg"] = "LOG"
    bin["emin"] = emin
    bin["emax"] = emax
    bin["enumbins"] = enumbins
    bin["nxpix"] = nxpix
    bin["nypix"] = nypix
    bin["binsz"] = binsz
    bin["coordsys"] = coordsys
    bin["proj"] = proj
    bin["xref"] = l
    bin["yref"] = b
    bin["debug"] = debug
    bin["outobs"] = "cntcube.fits"
    bin.execute()
    print("Datacube : done!")

    # Create exposure cube
    expcube = ctools.ctexpcube()
    # expcube['incube']=bin.obs()
    expcube["inobs"] = obsfile
    expcube["incube"] = "NONE"
    expcube["ebinalg"] = "LOG"
    expcube["caldb"] = caldb
    expcube["irf"] = irf
    expcube["emin"] = emin
    expcube["emax"] = emax
    expcube["enumbins"] = enumbins
    expcube["nxpix"] = nxpix
    expcube["nypix"] = nypix
    expcube["binsz"] = binsz
    expcube["coordsys"] = coordsys
    expcube["proj"] = proj
    expcube["xref"] = l
    expcube["yref"] = b
    expcube["debug"] = debug
    expcube["outcube"] = "cube_exp.fits"
    expcube.execute()
    print("Expcube : done!")

    # Create PSF cube
    psfcube = ctools.ctpsfcube()
    psfcube["inobs"] = obsfile
    psfcube["incube"] = "NONE"
    psfcube["ebinalg"] = "LOG"
    psfcube["caldb"] = caldb
    psfcube["irf"] = irf
    psfcube["emin"] = emin
    psfcube["emax"] = emax
    psfcube["enumbins"] = enumbins
    psfcube["nxpix"] = 10
    psfcube["nypix"] = 10
    psfcube["binsz"] = 1.0
    psfcube["coordsys"] = coordsys
    psfcube["proj"] = proj
    psfcube["xref"] = l
    psfcube["yref"] = b
    psfcube["debug"] = debug
    psfcube["outcube"] = "psf_cube.fits"
    psfcube.execute()
    print("Psfcube : done!")

    edispcube = ctools.ctedispcube()
    edispcube["inobs"] = obsfile
    edispcube["ebinalg"] = "LOG"
    edispcube["incube"] = "NONE"
    edispcube["caldb"] = caldb
    edispcube["irf"] = irf
    edispcube["xref"] = l
    edispcube["yref"] = b
    edispcube["proj"] = proj
    edispcube["coordsys"] = coordsys
    edispcube["binsz"] = 1.0
    edispcube["nxpix"] = 10
    edispcube["nypix"] = 10
    edispcube["emin"] = emin
    edispcube["emax"] = emax
    edispcube["enumbins"] = enumbins
    edispcube["outcube"] = "edisp_cube.fits"
    edispcube["debug"] = debug
    edispcube.execute()
    print("Edispcube : done!")

    # Create background cube
    bkgcube = ctools.ctbkgcube()
    bkgcube["inobs"] = obsfile
    bkgcube["incube"] = "cntcube.fits"
    bkgcube["caldb"] = caldb
    bkgcube["irf"] = irf
    bkgcube["debug"] = debug
    bkgcube["inmodel"] = str(inmodel)
    bkgcube["outcube"] = "bkg_cube.fits"
    bkgcube["outmodel"] = "bkg_cube.xml"
    bkgcube.execute()
    print("Bkgcube : done!")

    # Fix the instrumental background parameters
    bkgcube.models()["BackgroundModel"]["Prefactor"].fix()
    bkgcube.models()["BackgroundModel"]["Index"].fix()

    # Attach background model to observation container
    bin.obs().models(bkgcube.models())

    # Set the exposure, PSF, energy dispersion and background cubes for the
    # first CTA observation (ctbin creates a container with a single
    # stacked observation)
    bin.obs()[0].response(expcube.expcube(), psfcube.psfcube(),
                          edispcube.edispcube(), bkgcube.bkgcube())

    # Perform maximum likelihood fitting
    like = ctools.ctlike(bin.obs())
    #    like['inmodel']='bkg_cube.xml'
    like["edisp"] = True
    #    like['edispcube'] = 'edisp_cube.fits'
    #    like['expcube'] = 'cube_exp.fits'
    #    like['psfcube'] = 'psf_cube.fits'
    #    like['bkgcube'] = 'bkg_cube.fits'
    like["outmodel"] = str(outmodel)
    #    like['outcovmat']=inmodel+'_covmat.txt'
    like["debug"] = debug  # Switch this always on for results in console
    # like['statistic']='CSTAT'
    like.execute()
    print("Likelihood : done!")

    # Set the best-fit models (from ctlike) for the counts cube
    bin.obs().models(like.obs().models())

    # Obtain the best-fit butterfly
    try:
        butterfly = ctools.ctbutterfly(bin.obs())
        butterfly["srcname"] = name
        butterfly["inmodel"] = str(outmodel)
        butterfly["edisp"] = True
        butterfly["emin"] = emin
        butterfly["emax"] = emax
        butterfly["outfile"] = str(outmodel.parent) + "/" + str(
            outmodel.stem) + ".txt"
        butterfly[
            "debug"] = debug  # Switch this always on for results in console
        # like['statistic']='CSTAT'
        butterfly.execute()
        print("Butterfly : done!")
    except:
        print("I COULDN'T CALCULATE THE BUTTERFLY....")

    # Extract the spectrum
    try:
        csspec = cscripts.csspec(bin.obs())
        csspec["srcname"] = name
        csspec["inmodel"] = str(outmodel)
        csspec["method"] = "AUTO"
        csspec["ebinalg"] = "LOG"
        csspec["emin"] = emin
        csspec["emax"] = emax
        csspec["enumbins"] = 10
        csspec["edisp"] = True
        csspec["outfile"] = str(outmodel.parent) + "/" + str(
            outmodel.stem) + ".fits"
        csspec["debug"] = debug  # Switch this always on for results in console
        csspec.execute()
        print("Csspec : done!")
    except:
        print("I COULDN'T CALCULATE THE SPECTRUM....")

    # Return
    return
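
Note that stackedPipeline uses outmodel.parent and outmodel.stem to build the butterfly and spectrum output file names, so outmodel must be a pathlib.Path rather than a plain string. A hedged usage sketch, with all file names being assumptions:

from pathlib import Path

# outmodel must be a Path object; obsfile and inmodel are plain file names.
stackedPipeline(name='Crab',
                obsfile='index.xml',
                inmodel='crab_models.xml',
                outmodel=Path('results/crab_fit.xml'),
                caldb='prod2',
                irf='acdc1a',
                debug=False)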
Example #54
def binned_pipeline(duration):
    """
    Binned analysis pipeline.
    """
    # Set script parameters
    model_name  = "${CTOOLS}/share/models/crab.xml"
    caldb       = "prod2"
    irf         = "South_50h"
    ra          =   83.63
    dec         =   22.01
    rad_sim     =   10.0
    tstart      =    0.0
    tstop       = duration
    emin        =    0.1
    emax        =  100.0
    enumbins    =   20
    nxpix       =  200
    nypix       =  200
    binsz       =    0.02
    coordsys    = "CEL"
    proj        = "CAR"

    # Get start CPU time (time.clock() was removed in Python 3.8; a separate
    # name is used so the simulation start time 'tstart' is not overwritten)
    cpu_start = time.process_time()

    # Simulate events
    sim = ctools.ctobssim()
    sim["inmodel"].filename(model_name)
    sim["caldb"].string(caldb)
    sim["irf"].string(irf)
    sim["ra"].real(ra)
    sim["dec"].real(dec)
    sim["rad"].real(rad_sim)
    sim["tmin"].real(tstart)
    sim["tmax"].real(tstop)
    sim["emin"].real(emin)
    sim["emax"].real(emax)
    sim.run()

    # Bin events into counts map
    bin = ctools.ctbin(sim.obs())
    bin["ebinalg"].string("LOG")
    bin["emin"].real(emin)
    bin["emax"].real(emax)
    bin["enumbins"].integer(enumbins)
    bin["nxpix"].integer(nxpix)
    bin["nypix"].integer(nypix)
    bin["binsz"].real(binsz)
    bin["coordsys"].string(coordsys)
    bin["xref"].real(ra)
    bin["yref"].real(dec)
    bin["proj"].string(proj)
    bin.run()

    # Get ctlike start CPU time
    cpu_ctlike = time.process_time()

    # Perform maximum likelihood fitting
    like = ctools.ctlike(bin.obs())
    like.run()

    # Get stop CPU time and compute elapsed times
    cpu_stop = time.process_time()
    telapsed = cpu_stop - cpu_start
    tctlike  = cpu_stop - cpu_ctlike

    # Return
    return telapsed, tctlike
Example #55
import gammalib
import ctools
import cscripts

debug = True

# Stacked maximum likelihood fit using the precomputed counts, exposure,
# PSF and background cubes
like = ctools.ctlike()
like['inobs'] = 'cntcube.fits'
like['expcube'] = 'expcube.fits'
like['psfcube'] = 'psfcube.fits'
like['bkgcube'] = 'bkgcube.fits'
like['inmodel'] = 'models_cat_iem+fb+ts.xml'
like['outmodel'] = 'results_cat_iem+fb+ts.xml'
like["debug"] = debug
like.execute()

# Compute a residual significance map from the best-fit model
resmap = cscripts.csresmap()
resmap['inobs'] = 'cntcube.fits'
resmap['expcube'] = 'expcube.fits'
resmap['psfcube'] = 'psfcube.fits'
resmap['bkgcube'] = 'bkgcube.fits'
resmap['inmodel'] = 'results_cat_iem+fb+ts.xml'
resmap['modcube'] = 'NONE'
resmap['outmap'] = 'resmap_cat_iem+fb+ts.fits'
resmap['algorithm'] = 'SIGNIFICANCE'
resmap["debug"] = debug
resmap.execute()
Example #56
    def _test_python(self):
        """
        Test ctulimit from Python
        """
        # Allocate ctulimit
        ulimit = ctools.ctulimit()

        # Check that empty ctulimit tool holds zero upper limits
        self.test_value(ulimit.diff_ulimit(), 0.0, 1.0e-21,
                        'Check differential upper limit')
        self.test_value(ulimit.flux_ulimit(), 0.0, 1.0e-16,
                        'Check upper limit on photon flux')
        self.test_value(ulimit.eflux_ulimit(), 0.0, 1.0e-16,
                        'Check upper limit on energy flux')

        # Check that saving an empty ctulimit tool does nothing
        ulimit['logfile'] = 'ctulimit_py0.log'
        ulimit.logFileOpen()
        ulimit.save()

        # Check that clearing does not lead to an exception or segfault
        ulimit.clear()

        # Now set ctulimit parameters
        ulimit['inobs']   = self._events
        ulimit['inmodel'] = self._model
        ulimit['srcname'] = 'Crab'
        ulimit['caldb']   = self._caldb
        ulimit['irf']     = self._irf
        ulimit['tol']     = 0.1
        ulimit['logfile'] = 'ctulimit_py1.log'
        ulimit['chatter'] = 2

        # Run ctulimit tool
        ulimit.logFileOpen()   # Make sure we get a log file
        ulimit.run()
        ulimit.save()

        # Set reference value
        ref_diff  = 2.75359655874408e-17
        ref_flux  = 1.66942609234064e-11
        ref_eflux = 6.4588860064421e-11

        # Check results
        self.test_value(ulimit.diff_ulimit(), ref_diff, 1.0e-21,
                        'Check differential upper limit')
        self.test_value(ulimit.flux_ulimit(), ref_flux, 1.0e-16,
                        'Check upper limit on photon flux')
        self.test_value(ulimit.eflux_ulimit(), ref_eflux, 1.0e-16,
                        'Check upper limit on energy flux')

        # Check obs() method
        self.test_value(ulimit.obs().size(), 1,
                        'Check number of observations in container')

        # Check opt() method
        self.test_value(ulimit.opt().status(), 0, 'Check optimizer status')

        # Copy ctulimit tool
        cpy_ulimit = ulimit.copy()

        # Check results of copy
        self.test_value(cpy_ulimit.diff_ulimit(), ref_diff, 1.0e-21,
                        'Check differential upper limit')
        self.test_value(cpy_ulimit.flux_ulimit(), ref_flux, 1.0e-16,
                        'Check upper limit on photon flux')
        self.test_value(cpy_ulimit.eflux_ulimit(), ref_eflux, 1.0e-16,
                        'Check upper limit on energy flux')

        # Check results
        self.test_value(cpy_ulimit.diff_ulimit(), ref_diff, 1.0e-21,
                        'Check differential upper limit')
        self.test_value(cpy_ulimit.flux_ulimit(), ref_flux, 1.0e-16,
                        'Check upper limit on photon flux')
        self.test_value(cpy_ulimit.eflux_ulimit(), ref_eflux, 1.0e-16,
                        'Check upper limit on energy flux')

        # Now clear copy of ctulimit tool
        cpy_ulimit.clear()

        # Check that empty ctulimit tool holds zero upper limits
        self.test_value(cpy_ulimit.diff_ulimit(), 0.0, 1.0e-21,
                        'Check differential upper limit')
        self.test_value(cpy_ulimit.flux_ulimit(), 0.0, 1.0e-16,
                        'Check upper limit on photon flux')
        self.test_value(cpy_ulimit.eflux_ulimit(), 0.0, 1.0e-16,
                        'Check upper limit on energy flux')

        # Run ctlike to get an initial log-likelihood solution
        like = ctools.ctlike()
        like['inobs']    = self._events
        like['inmodel']  = self._model
        like['caldb']    = self._caldb
        like['irf']      = self._irf
        like.run()

        # Now set ctulimit tool using the observation container from the
        # previous run. This should avoid the necessity to recompute the
        # maximum likelihood
        ulimit = ctools.ctulimit(like.obs())
        ulimit['srcname'] = 'Crab'
        ulimit['tol']     = 0.1
        ulimit['logfile'] = 'ctulimit_py2.log'
        ulimit['chatter'] = 3

        # Execute ctulimit tool
        ulimit.logFileOpen()  # Needed to get a new log file
        ulimit.execute()

        # Check results
        self.test_value(ulimit.diff_ulimit(), ref_diff, 1.0e-21,
                        'Check differential upper limit')
        self.test_value(ulimit.flux_ulimit(), ref_flux, 1.0e-16,
                        'Check upper limit on photon flux')
        self.test_value(ulimit.eflux_ulimit(), ref_eflux, 1.0e-16,
                        'Check upper limit on energy flux')

        # Test invalid model name
        ulimit['srcname'] = 'Weihnachtsstern'
        ulimit['logfile'] = 'ctulimit_py3.log'
        ulimit.logFileOpen()
        self.test_try('Test invalid model name')
        try:
            ulimit.execute()
            self.test_try_failure('Exception not thrown')
        except ValueError:
            self.test_try_success()

        # Test specification of background model
        ulimit['srcname'] = 'Background'
        ulimit['logfile'] = 'ctulimit_py4.log'
        ulimit.logFileOpen()
        self.test_try('Test invalid model name')
        try:
            ulimit.execute()
            self.test_try_failure('Exception not thrown')
        except ValueError:
            self.test_try_success()

        # Test run with too few iterations
        ulimit['srcname']  = 'Crab'
        ulimit['max_iter'] = 1
        ulimit['logfile']  = 'ctulimit_py5.log'
        ulimit.logFileOpen()
        self.test_try('Test ctulimit with too few iterations')
        try:
            ulimit.execute()
            self.test_try_failure('Exception not thrown')
        except ValueError:
            self.test_try_success()

        # Return
        return
Example #57
def stacked_pipeline(model_name, duration):
    """
    Stacked analysis pipeline.
    """
    # Set script parameters
    caldb       = "prod2"
    irf         = "South_50h"
    ra          =   83.63
    dec         =   22.01
    rad_sim     =   10.0
    tstart      =    0.0
    tstop       = duration
    emin        =    0.1
    emax        =  100.0
    enumbins    =   40
    nxpix       =  200
    nypix       =  200
    binsz       =    0.02
    coordsys    = "CEL"
    proj        = "CAR"

    # Get start CPU time
    cpu_start = time.process_time()   # time.clock() was removed in Python 3.8

    # Simulate events
    sim = ctools.ctobssim()
    sim["inmodel"] = model_name
    sim["caldb"]   = caldb
    sim["irf"]     = irf
    sim["ra"]      = ra
    sim["dec"]     = dec
    sim["rad"]     = rad_sim
    sim["tmin"]    = tstart
    sim["tmax"]    = tstop
    sim["emin"]    = emin
    sim["emax"]    = emax
    sim.run()

    # Bin events into counts map
    bin = ctools.ctbin(sim.obs())
    bin["ebinalg"]  = "LOG"
    bin["emin"]     = emin
    bin["emax"]     = emax
    bin["enumbins"] = enumbins
    bin["nxpix"]    = nxpix
    bin["nypix"]    = nypix
    bin["binsz"]    = binsz
    bin["coordsys"] = coordsys
    bin["proj"]     = proj
    bin["xref"]     = ra
    bin["yref"]     = dec
    bin.run()

    # Create exposure cube
    expcube = ctools.ctexpcube(sim.obs())
    expcube["incube"]   = "NONE"
    expcube["caldb"]    = caldb
    expcube["irf"]      = irf
    expcube["ebinalg"]  = "LOG"
    expcube["emin"]     = emin
    expcube["emax"]     = emax
    expcube["enumbins"] = enumbins
    expcube["nxpix"]    = nxpix
    expcube["nypix"]    = nypix
    expcube["binsz"]    = binsz
    expcube["coordsys"] = coordsys
    expcube["proj"]     = proj
    expcube["xref"]     = ra
    expcube["yref"]     = dec
    expcube.run()

    # Create PSF cube
    psfcube = ctools.ctpsfcube(sim.obs())
    psfcube["incube"]   = "NONE"
    psfcube["caldb"]    = caldb
    psfcube["irf"]      = irf
    psfcube["ebinalg"]  = "LOG"
    psfcube["emin"]     = emin
    psfcube["emax"]     = emax
    psfcube["enumbins"] = enumbins
    psfcube["nxpix"]    = 10
    psfcube["nypix"]    = 10
    psfcube["binsz"]    = 1.0
    psfcube["coordsys"] = coordsys
    psfcube["proj"]     = proj
    psfcube["xref"]     = ra
    psfcube["yref"]     = dec
    psfcube.run()

    # Create background cube
    bkgcube = ctools.ctbkgcube(sim.obs())
    bkgcube["incube"]   = "NONE"
    bkgcube["ebinalg"]  = "LOG"
    bkgcube["emin"]     = emin
    bkgcube["emax"]     = emax
    bkgcube["enumbins"] = enumbins
    bkgcube["nxpix"]    = 10
    bkgcube["nypix"]    = 10
    bkgcube["binsz"]    = 1.0
    bkgcube["coordsys"] = coordsys
    bkgcube["proj"]     = proj
    bkgcube["xref"]     = ra
    bkgcube["yref"]     = dec
    bkgcube.run()

    # Attach background model to observation container
    bin.obs().models(bkgcube.models())

    # Set Exposure and Psf cube for first CTA observation
    # (ctbin will create an observation with a single container)
    bin.obs()[0].response(expcube.expcube(), psfcube.psfcube(), bkgcube.bkgcube())

    # Get ctlike start CPU time
    cpu_ctlike = time.process_time()

    # Perform maximum likelihood fitting
    like = ctools.ctlike(bin.obs())
    like.run()

    # Get stop CPU time and compute elapsed times
    cpu_stop    = time.process_time()
    cpu_elapsed = cpu_stop - cpu_start
    cpu_ctlike  = cpu_stop - cpu_ctlike

    # Return
    return cpu_elapsed, cpu_ctlike
Example #58
    def _phase_bin(self, phbin):

        # Write time bin into header
        self._log_header2(gammalib.TERSE,
                          'PHASE %f - %f' % (phbin[0], phbin[1]))

        # Select events
        select = ctools.ctselect(self.obs().copy())
        select['emin'] = self['emin'].real()
        select['emax'] = self['emax'].real()
        select['tmin'] = 'UNDEFINED'
        select['tmax'] = 'UNDEFINED'
        select['rad'] = 'UNDEFINED'
        select['ra'] = 'UNDEFINED'
        select['dec'] = 'UNDEFINED'
        select['expr'] = 'PHASE>' + str(phbin[0]) + ' && PHASE<' + str(
            phbin[1])
        select.run()

        # Set phase string
        phstr = str(phbin[0]) + '-' + str(phbin[1])

        # Add phase to observation id
        for i in range(0, select.obs().size()):
            oldid = select.obs()[i].id()
            select.obs()[i].id(oldid + '_' + phstr)
        obs = select.obs()

        # If an On/Off analysis is requested generate the On/Off observations
        if self._onoff:
            obs = obsutils.get_onoff_obs(self, select.obs(), nthreads=1)

        # ... otherwise, if stacked analysis is requested then bin the
        # events and compute the stacked response functions and setup
        # an observation container with a single stacked observation.
        elif self._stacked:
            obs = obsutils.get_stacked_obs(self, select.obs())

        # Header
        self._log_header3(gammalib.EXPLICIT, 'Fitting the data')

        # The On/Off analysis can produce empty observation containers,
        # e.g. when on-axis observations are used. To avoid ctlike asking
        # for a new observation container (or hanging, if run interactively),
        # run ctlike only if the container size is larger than zero
        if obs.size() > 0:

            # Do maximum likelihood model fitting
            like = ctools.ctlike(obs)
            like['edisp'] = self['edisp'].boolean()
            like['nthreads'] = 1  # Avoids OpenMP conflict
            like.run()

            # Renormalize models to phase selection
            # TODO move the scaling from the temporal to the spectral component
            for model in like.obs().models():
                scaled_norm = model['Normalization'].value() / (phbin[1] -
                                                                phbin[0])
                model['Normalization'].value(scaled_norm)

            # Store fit model
            fitmodels = like.obs().models().copy()

        # ... otherwise we set an empty model container
        else:
            self._log_string(
                gammalib.TERSE, 'PHASE %f - %f: no observations available'
                ' for fitting' % (phbin[0], phbin[1]))

            # Set empty models container
            fitmodels = gammalib.GModels()

        # Set results
        result = {'phstr': phstr, 'fitmodels': fitmodels}

        # Return results
        return result
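The PHASE expression built in _phase_bin can also be applied with a standalone ctselect run. The sketch below is a hedged illustration: the event file events_phased.fits and the output selected_phase.fits are hypothetical names, and the events are assumed to carry a PHASE column.

import ctools

# Standalone phase cut mirroring the expression built above (hypothetical file names)
select = ctools.ctselect()
select['inobs']  = 'events_phased.fits'
select['outobs'] = 'selected_phase.fits'
select['emin']   = 0.1
select['emax']   = 100.0
select['tmin']   = 'UNDEFINED'
select['tmax']   = 'UNDEFINED'
select['rad']    = 'UNDEFINED'
select['ra']     = 'UNDEFINED'
select['dec']    = 'UNDEFINED'
select['expr']   = 'PHASE>0.2 && PHASE<0.4'
select.execute()

The renormalisation step in the snippet then compensates for keeping only a fraction of the phase: for a 0.2-0.4 bin, a fitted 'Normalization' of 1.0 is rescaled to 1.0 / (0.4 - 0.2) = 5.0.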
Example #59
def run_pipeline(obs, emin=0.1, emax=100.0,
                 enumbins=20, nxpix=200, nypix=200, binsz=0.02,
                 coordsys='CEL', proj='CAR',
                 model='data/crab.xml',
                 caldb='prod2', irf='South_0.5h',
                 debug=False):
    """
    Simulation and binned analysis pipeline

    Parameters
    ----------
    obs : `~gammalib.GObservations`
        Observation container
    emin : float, optional
        Minimum energy (TeV)
    emax : float, optional
        Maximum energy (TeV)
    enumbins : int, optional
        Number of energy bins
    nxpix : int, optional
        Number of pixels in X axis
    nypix : int, optional
        Number of pixels in Y axis
    binsz : float, optional
        Pixel size (deg)
    coordsys : str, optional
        Coordinate system
    proj : str, optional
        Coordinate projection
    model : str, optional
        Model definition XML file
    caldb : str, optional
        Calibration database path
    irf : str, optional
        Instrument response function
    debug : bool, optional
        Enable debug output
    """
    # Simulate events
    sim = ctools.ctobssim(obs)
    sim['debug']     = debug
    sim['outevents'] = 'obs.xml'
    sim.execute()

    # Bin events by looping over all observations in the container
    sim_obs = gammalib.GObservations('obs.xml')
    obs     = gammalib.GObservations()
    for run in sim_obs:

        # Get event filename and set counts cube filename
        eventfile = run.eventfile().url()
        cubefile  = 'cube_'+eventfile

        # Bin events for that observation
        bin = ctools.ctbin()
        bin['inobs']    = eventfile
        bin['outcube']  = cubefile
        bin['ebinalg']  = 'LOG'
        bin['emin']     = emin
        bin['emax']     = emax
        bin['enumbins'] = enumbins
        bin['nxpix']    = nxpix
        bin['nypix']    = nypix
        bin['binsz']    = binsz
        bin['coordsys'] = coordsys
        bin['usepnt']   = True
        bin['proj']     = proj
        bin.execute()

        # Set observation ID
        bin.obs()[0].id(cubefile)
        bin.obs()[0].eventfile(cubefile)

        # Append result to observations
        obs.extend(bin.obs())

    # Save XML file
    xml = gammalib.GXml()
    obs.write(xml)
    xml.save('obs_cube.xml')

    # Perform maximum likelihood fitting
    like = ctools.ctlike()
    like['inobs']    = 'obs_cube.xml'
    like['inmodel']  = model
    like['outmodel'] = 'fit_results.xml'
    like['expcube']  = 'NONE'
    like['psfcube']  = 'NONE'
    like['bkgcube']  = 'NONE'
    like['caldb']    = caldb
    like['irf']      = irf
    like['debug']    = True # Always keep this on so the fit results appear in the console
    like.execute()

    # Return
    return
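As a brief hedged check, the fit results written by the pipeline can be read back with gammalib using the fit_results.xml file name from run_pipeline; printing the container relies on the readable string representation that gammalib objects provide.

import gammalib

# Read back the model definition written by ctlike and print a summary
models = gammalib.GModels('fit_results.xml')
print(models)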
Example #60
def run_pipeline(obs, ra=83.63, dec=22.01, emin=0.1, emax=100.0,
                 enumbins=20, nxpix=200, nypix=200, binsz=0.02,
                 coordsys="CEL", proj="CAR", debug=False):
    """
    Simulation and stacked analysis pipeline.

    Keywords:
     ra       - RA of cube centre [deg] (default: 83.6331)
     dec      - DEC of cube centre [deg] (default: 22.0145)
     emin     - Minimum energy of cube [TeV] (default: 0.1)
     emax     - Maximum energy of cube [TeV] (default: 100.0)
     enumbins - Number of energy bins in cube (default: 20)
     nxpix    - Number of RA pixels in cube (default: 200)
     nypix    - Number of DEC pixels in cube (default: 200)
     binsz    - Spatial cube bin size [deg] (default: 0.02)
     coordsys - Cube coordinate system (CEL or GAL)
     proj     - Cube World Coordinate System (WCS) projection
     debug    - Enable debugging (default: False)
    """
    # Simulate events
    sim = ctools.ctobssim(obs)
    sim["debug"] = debug
    sim.run()

    # Bin events into counts map
    bin = ctools.ctbin(sim.obs())
    bin["ebinalg"]  = "LOG"
    bin["emin"]     = emin
    bin["emax"]     = emax
    bin["enumbins"] = enumbins
    bin["nxpix"]    = nxpix
    bin["nypix"]    = nypix
    bin["binsz"]    = binsz
    bin["coordsys"] = coordsys
    bin["proj"]     = proj
    bin["xref"]     = ra
    bin["yref"]     = dec
    bin["debug"]    = debug
    bin.run()

    # Create exposure cube
    expcube = ctools.ctexpcube(sim.obs())
    expcube["incube"]   = "NONE"
    expcube["ebinalg"]  = "LOG"
    expcube["emin"]     = emin
    expcube["emax"]     = emax
    expcube["enumbins"] = enumbins
    expcube["nxpix"]    = nxpix
    expcube["nypix"]    = nypix
    expcube["binsz"]    = binsz
    expcube["coordsys"] = coordsys
    expcube["proj"]     = proj
    expcube["xref"]     = ra
    expcube["yref"]     = dec
    expcube["debug"]    = debug
    expcube.run()

    # Create PSF cube
    psfcube = ctools.ctpsfcube(sim.obs())
    psfcube["incube"]   = "NONE"
    psfcube["ebinalg"]  = "LOG"
    psfcube["emin"]     = emin
    psfcube["emax"]     = emax
    psfcube["enumbins"] = enumbins
    psfcube["nxpix"]    = 10
    psfcube["nypix"]    = 10
    psfcube["binsz"]    = 1.0
    psfcube["coordsys"] = coordsys
    psfcube["proj"]     = proj
    psfcube["xref"]     = ra
    psfcube["yref"]     = dec
    psfcube["debug"]    = debug
    psfcube.run()

    # Create background cube
    bkgcube = ctools.ctbkgcube(sim.obs())
    bkgcube["incube"]   = "NONE"
    bkgcube["ebinalg"]  = "LOG"
    bkgcube["emin"]     = emin
    bkgcube["emax"]     = emax
    bkgcube["enumbins"] = enumbins
    bkgcube["nxpix"]    = 10
    bkgcube["nypix"]    = 10
    bkgcube["binsz"]    = 1.0
    bkgcube["coordsys"] = coordsys
    bkgcube["proj"]     = proj
    bkgcube["xref"]     = ra
    bkgcube["yref"]     = dec
    bkgcube["debug"]    = debug
    bkgcube.run()

    # Attach background model to observation container
    bin.obs().models(bkgcube.models())

    # Set exposure, PSF and background cubes for the first CTA observation
    # (ctbin creates an observation container holding a single observation)
    bin.obs()[0].response(expcube.expcube(), psfcube.psfcube(), bkgcube.bkgcube())

    # Perform maximum likelihood fitting
    like = ctools.ctlike(bin.obs())
    like["debug"] = True # Switch this always on for results in console
    like.run()

    # Return
    return
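This stacked variant never writes its results to disk. As a hedged sketch, the two lines below could be added just before the final return to record the fit; the output file name stacked_results.xml is a hypothetical choice, not taken from the snippet.

    # Print the optimised log-likelihood and save the fitted models (hypothetical file name)
    print('log-likelihood: %f' % like.obs().logL())
    like.obs().models().save('stacked_results.xml')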