Example #1
def loaded_response():

    rsp = OGIPResponse(
        os.path.join(get_test_datasets_directory(), 'bn090217206',
                     'bn090217206_n6_weightedrsp.rsp'))

    return rsp
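These examples are excerpts and omit their imports. A minimal, hedged sketch of what they assume follows; the threeML module paths are assumptions based on the package layout, and get_test_datasets_directory in Example #1 is a test helper whose definition is not shown in this listing.

# Hedged import sketch for the response examples below; the module
# paths are assumptions, not confirmed by this listing.
import os

import numpy as np

from threeML.io.package_data import get_path_of_data_file
from threeML.utils.OGIP.response import (InstrumentResponse,
                                         InstrumentResponseSet, OGIPResponse)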
Example #2
def test_OGIP_response_first_channel():

    # Get path of response file
    rsp_file = get_path_of_data_file("ogip_test_gbm_n6.rsp")

    rsp = OGIPResponse(rsp_file)

    assert rsp.first_channel == 1
Example #3
def test_response_write_to_fits2():

    # Now do the same for a response read from a file

    rsp_file = get_path_of_data_file("ogip_test_gbm_n6.rsp")

    rsp = OGIPResponse(rsp_file)

    temp_file = "__test.rsp"

    rsp.to_fits(temp_file, "TEST", "TEST", overwrite=True)

    rsp_reloaded = OGIPResponse(temp_file)

    assert np.allclose(rsp_reloaded.matrix, rsp.matrix)
    assert np.allclose(rsp_reloaded.ebounds, rsp.ebounds)
    assert np.allclose(rsp_reloaded.monte_carlo_energies,
                       rsp.monte_carlo_energies)

    os.remove(temp_file)
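The fixed temp-file name "__test.rsp" used in these round-trip tests can collide when tests run in parallel; a hedged variant of the same round-trip using the standard library's tempfile module:

# Hedged sketch: write the round-trip file into a unique temporary
# directory instead of a fixed name in the working directory.
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp_dir:

    temp_file = os.path.join(tmp_dir, "test.rsp")

    rsp.to_fits(temp_file, "TEST", "TEST", overwrite=True)

    rsp_reloaded = OGIPResponse(temp_file)

    # the directory and file are removed automatically on exit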
Example #4
def test_OGIP_response_arf_rsp_accessors():

    # Then load rsp and arf in XSpec

    rsp_file = get_path_of_data_file("ogip_test_xmm_pn.rmf")

    arf_file = get_path_of_data_file("ogip_test_xmm_pn.arf")

    rsp = OGIPResponse(rsp_file, arf_file=arf_file)

    assert rsp.arf_filename == arf_file
    assert rsp.rsp_filename == rsp_file
Example #5
def test_response_write_to_fits3():

    # Now do the same for a file with a ARF

    rsp_file = get_path_of_data_file("ogip_test_xmm_pn.rmf")

    arf_file = get_path_of_data_file("ogip_test_xmm_pn.arf")

    rsp = OGIPResponse(rsp_file, arf_file=arf_file)

    temp_file = "__test.rsp"

    rsp.to_fits(temp_file, "TEST", "TEST", overwrite=True)

    rsp_reloaded = OGIPResponse(temp_file)

    assert np.allclose(rsp_reloaded.matrix, rsp.matrix)
    assert np.allclose(rsp_reloaded.ebounds, rsp.ebounds)
    assert np.allclose(rsp_reloaded.monte_carlo_energies,
                       rsp.monte_carlo_energies)

    os.remove(temp_file)
Example #6
def test_response_write_to_fits1():

    matrix, mc_energies, ebounds = get_matrix_elements()

    rsp = InstrumentResponse(matrix, ebounds, mc_energies)

    temp_file = "__test.rsp"

    rsp.to_fits(temp_file, "TEST", "TEST", overwrite=True)

    # Now check that reloading gives back the same matrix
    rsp_reloaded = OGIPResponse(temp_file)

    assert np.allclose(rsp_reloaded.matrix, rsp.matrix)
    assert np.allclose(rsp_reloaded.ebounds, rsp.ebounds)
    assert np.allclose(rsp_reloaded.monte_carlo_energies,
                       rsp.monte_carlo_energies)

    os.remove(temp_file)
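get_matrix_elements is a test helper not shown in this listing; a hedged stand-in that builds a tiny diagonal response, assuming the InstrumentResponse convention of a (channels x monte carlo energies) matrix with bin-edge arrays:

# Hedged stand-in for the get_matrix_elements() helper used above.
import numpy as np

def get_matrix_elements():

    # 4 monte carlo bins and 4 channels (5 edges each), in keV
    mc_energies = np.array([1.0, 2.0, 4.0, 8.0, 16.0])
    ebounds = np.array([1.0, 2.0, 4.0, 8.0, 16.0])

    # perfect redistribution: each monte carlo bin maps onto its channel
    matrix = np.eye(4)

    return matrix, mc_energies, ebounds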
Example #7
def test_dispersionspectrumlike_fit():

    response = OGIPResponse(
        get_path_of_data_file("datasets/ogip_powerlaw.rsp"))

    sim_K = 1e-1
    sim_kT = 20.0

    # get a blackbody source function
    source_function = Blackbody(K=sim_K, kT=sim_kT)

    # power law background function
    background_function = Powerlaw(K=1, index=-1.5, piv=100.0)

    spectrum_generator = DispersionSpectrumLike.from_function(
        "test",
        source_function=source_function,
        response=response,
        background_function=background_function,
    )

    bb = Blackbody()

    pts = PointSource("mysource", 0, 0, spectral_shape=bb)

    model = Model(pts)

    # MLE fitting

    jl = JointLikelihood(model, DataList(spectrum_generator))

    result = jl.fit()

    K_variates = jl.results.get_variates("mysource.spectrum.main.Blackbody.K")

    kT_variates = jl.results.get_variates(
        "mysource.spectrum.main.Blackbody.kT")

    assert np.all(
        np.isclose([K_variates.average, kT_variates.average], [sim_K, sim_kT],
                   atol=1))
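The MLE example above and the Bayesian example below also omit their imports. A hedged sketch; the top-level threeML exports are assumptions, and the import path of compute_ppc is not shown in this listing:

# Hedged import sketch for the two fitting examples.
import numpy as np

from astromodels import Blackbody, Log_normal, Model, PointSource, Powerlaw
from threeML import (BayesianAnalysis, DataList, DispersionSpectrumLike,
                     JointLikelihood)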
Example #8
def test_all():
    response = OGIPResponse(
        get_path_of_data_file("datasets/ogip_powerlaw.rsp"))

    np.random.seed(1234)

    # rescale the functions for the response
    source_function = Blackbody(K=1e-7, kT=500.0)
    background_function = Powerlaw(K=1, index=-1.5, piv=1.0e3)
    spectrum_generator = DispersionSpectrumLike.from_function(
        "fake",
        source_function=source_function,
        background_function=background_function,
        response=response)

    source_function.K.prior = Log_normal(mu=np.log(1e-7), sigma=1)
    source_function.kT.prior = Log_normal(mu=np.log(300), sigma=2)

    ps = PointSource("demo", 0, 0, spectral_shape=source_function)

    model = Model(ps)

    ba = BayesianAnalysis(model, DataList(spectrum_generator))

    ba.set_sampler()

    ba.sample(quiet=True)

    ppc = compute_ppc(ba,
                      ba.results,
                      n_sims=500,
                      file_name="my_ppc.h5",
                      overwrite=True,
                      return_ppc=True)

    ppc.fake.plot(bkg_subtract=True)

    ppc.fake.plot(bkg_subtract=False)
Example #9
def test_OGIP_response_against_xspec():

    # Test for various photon indexes
    for index in [-0.5, 0.0, 0.5, 1.5, 2.0, 3.0, 4.0]:

        print("Processing index %s" % index)

        # First reset xspec
        xspec.AllData.clear()

        # Create a model in XSpec

        mo = xspec.Model("po")

        # Change the default value for the photon index
        # (remember that in XSpec the power law is defined as norm * E^(-PhoIndex),
        # so PhoIndex is normally positive. This is the opposite of astromodels.)
        mo.powerlaw.PhoIndex = index
        mo.powerlaw.norm = 12.2

        # Now repeat the same in 3ML

        # Generate the astromodels function and set it to the same values as the XSpec power law
        # (the pivot in XSpec is set to 1). Remember also that the definition in xspec has the
        # sign of the photon index opposite
        powerlaw = Powerlaw()
        powerlaw.piv = 1.0
        powerlaw.index = -mo.powerlaw.PhoIndex.values[0]
        powerlaw.K = mo.powerlaw.norm.values[0]

        # Exploit the fact that the power law integral is analytic
        powerlaw_integral = Powerlaw()
        # Remove transformation
        powerlaw_integral.K._transformation = None
        powerlaw_integral.K.bounds = (None, None)
        powerlaw_integral.index = powerlaw.index.value + 1
        powerlaw_integral.K = old_div(powerlaw.K.value, (powerlaw.index.value + 1))

        powerlaw_integral.display()

        integral_function = lambda e1, e2: powerlaw_integral(e2) - powerlaw_integral(e1)

        # Now check that the two convolved models give the same number of counts in each channel

        # Fake a spectrum so we can actually compute the convoluted model

        # Get path of response file
        rsp_file = str(get_path_of_data_file("ogip_test_gbm_n6.rsp"))

        fs1 = xspec.FakeitSettings(
            rsp_file, exposure=1.0, fileName="_fake_spectrum.pha"
        )

        xspec.AllData.fakeit(noWrite=True, applyStats=False, settings=fs1)

        # Get the expected counts
        xspec_counts = mo.folded(1)

        # Now get the convolution from 3ML
        rsp = OGIPResponse(rsp_file)

        rsp.set_function(integral_function)

        threeML_counts = rsp.convolve()

        # Compare them
        assert np.allclose(xspec_counts, threeML_counts)

        # Now do the same with a matrix with a ARF

        # First reset xspec
        xspec.AllData.clear()

        # Then load rsp and arf in XSpec

        rsp_file = str(get_path_of_data_file("ogip_test_xmm_pn.rmf"))

        arf_file = str(get_path_of_data_file("ogip_test_xmm_pn.arf"))

        fs1 = xspec.FakeitSettings(
            rsp_file, arf_file, exposure=1.0, fileName="_fake_spectrum.pha"
        )

        xspec.AllData.fakeit(noWrite=True, applyStats=False, settings=fs1)

        # Get the expected counts
        xspec_counts = mo.folded(1)

        # Now get the convolution from 3ML
        rsp = OGIPResponse(rsp_file, arf_file=arf_file)

        rsp.set_function(integral_function)

        threeML_counts = rsp.convolve()

        # Compare them
        assert np.allclose(xspec_counts, threeML_counts)
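powerlaw_integral works because, for piv = 1 and index g != -1, the antiderivative of K * E**g is K * E**(g + 1) / (g + 1). A hedged numerical check of that identity (the 10-100 keV bounds are arbitrary illustration values):

# Hedged check of the analytic power-law integral used above.
import numpy as np
from scipy.integrate import quad

K, g = 12.2, -1.5

analytic = K * (100.0 ** (g + 1) - 10.0 ** (g + 1)) / (g + 1)
numeric, _ = quad(lambda e: K * e ** g, 10.0, 100.0)

assert np.isclose(analytic, numeric)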
Example #10
def _read_pha_or_pha2_file(pha_file_or_instance,
                           spectrum_number=None,
                           file_type='observed',
                           rsp_file=None,
                           arf_file=None,
                           treat_as_time_series=False):
    """
    A function to extract information from pha and pha2 files. It is kept separate because the same method is
    used for reading time series (MUCH faster than building a lot of individual spectra) and single spectra.


    :param pha_file_or_instance: either a PHA file name or threeML.plugins.OGIP.pha.PHAII instance
    :param spectrum_number: (optional) the spectrum number of the TypeII file to be used
    :param file_type: observed or background
    :param rsp_file: RMF filename or threeML.plugins.OGIP.response.InstrumentResponse instance
    :param arf_file: (optional) and ARF filename
    :param treat_as_time_series:
    :return:
    """

    assert isinstance(pha_file_or_instance, (str, PHAII)), \
        'Must provide a FITS file name or PHAII instance'

    if isinstance(pha_file_or_instance, str):

        ext = os.path.splitext(pha_file_or_instance)[-1]

        if '{' in ext:
            spectrum_number = int(ext.split('{')[-1].replace('}', ''))

            pha_file_or_instance = pha_file_or_instance.split('{')[0]

        # Read the data

        filename = pha_file_or_instance

        # create a FITS_FILE instance

        pha_file_or_instance = PHAII.from_fits_file(pha_file_or_instance)

    # If this is already a FITS_FILE instance,

    elif isinstance(pha_file_or_instance, PHAII):

        # we simply create a dummy filename

        filename = 'pha_instance'

    else:

        raise RuntimeError('This is a bug')

    file_name = filename

    assert file_type.lower() in ['observed', 'background'], \
        "Unrecognized filetype keyword value"

    file_type = file_type.lower()

    try:

        HDUidx = pha_file_or_instance.index_of("SPECTRUM")

    except Exception:

        raise RuntimeError("The input file %s is not in PHA format" %
                           filename)


    spectrum = pha_file_or_instance[HDUidx]

    data = spectrum.data
    header = spectrum.header

    # We don't support the rescaling yet

    if "CORRFILE" in header:

        if (header.get("CORRFILE").upper().strip() !=
                "NONE") and (header.get("CORRFILE").upper().strip() != ''):
            raise RuntimeError("CORRFILE is not yet supported")

    # See if there is a QUALITY==0 in the header

    if "QUALITY" in header:

        has_quality_column = False

        if header["QUALITY"] == 0:

            is_all_data_good = True

        else:

            is_all_data_good = False

    else:

        if "QUALITY" in data.columns.names:

            has_quality_column = True

            is_all_data_good = False

        else:

            has_quality_column = False

            is_all_data_good = True

            warnings.warn(
                'Could not find QUALITY in columns or header of PHA file. This is not a valid OGIP file. Assuming QUALITY=0 (good)'
            )

    # looking for tstart and tstop

    tstart = None
    tstop = None

    has_tstart = False
    has_tstop = False
    has_telapse = False

    if "TSTART" in header:

        has_tstart_column = False

        has_tstart = True

    else:

        if "TSTART" in data.columns.names:

            has_tstart_column = True

            has_tstart = True

    if "TELAPSE" in header:

        has_telapse_column = False

        has_telapse = True

    else:

        if "TELAPSE" in data.columns.names:
            has_telapse_column = True

            has_telapse = True

    if "TSTOP" in header:

        has_tstop_column = False

        has_tstop = True

    else:

        if "TSTOP" in data.columns.names:
            has_tstop_column = True

            has_tstop = True

    if has_tstop and has_telapse:

        warnings.warn(
            'Found TSTOP and TELAPSE. This file is invalid. Using TSTOP.')

        has_telapse = False

    # Determine if this file contains COUNTS or RATES

    if "COUNTS" in data.columns.names:

        has_rates = False
        data_column_name = "COUNTS"

    elif "RATE" in data.columns.names:

        has_rates = True
        data_column_name = "RATE"

    else:

        raise RuntimeError(
            "This file does not contain a RATE nor a COUNTS column. "
            "This is not a valid PHA file")

    # Determine if this is a PHA I or PHA II
    if len(data.field(data_column_name).shape) == 2:

        typeII = True

        if spectrum_number is None and not treat_as_time_series:
            raise RuntimeError(
                "This is a PHA Type II file. You have to provide a spectrum number"
            )

    else:

        typeII = False

    # Collect information from mandatory keywords

    keys = _required_keywords[file_type]

    gathered_keywords = {}

    for k in keys:

        internal_name, keyname = k.split(":")

        key_has_been_collected = False

        if keyname in header:
            if keyname in _required_keyword_types and type(header.get(
                    keyname)) is not _required_keyword_types[keyname]:
                warnings.warn(
                    "unexpected type of %(keyname)s, expected %(expected_type)s\n found %(found_type)s: %(found_value)s"
                    % dict(
                        keyname=keyname,
                        expected_type=_required_keyword_types[keyname],
                        found_type=type(header.get(keyname)),
                        found_value=header.get(keyname),
                    ))
            else:
                gathered_keywords[internal_name] = header.get(keyname)

                # Fix "NONE" in None
                if gathered_keywords[internal_name] == "NONE" or \
                                gathered_keywords[internal_name] == 'none':
                    gathered_keywords[internal_name] = None

                key_has_been_collected = True

        # Note that we check again because the content of the column can override the content of the header

        if keyname in _might_be_columns[file_type] and typeII:

            # Check if there is a column with this name

            if keyname in data.columns.names:
                # This will set the exposure, among other things

                if not treat_as_time_series:

                    # if we just want a single spectrum

                    gathered_keywords[internal_name] = data[keyname][
                        spectrum_number - 1]

                else:

                    # else get all the columns

                    gathered_keywords[internal_name] = data[keyname]

                # Fix "NONE" in None
                if gathered_keywords[internal_name] == "NONE" or \
                                gathered_keywords[internal_name] == 'none':
                    gathered_keywords[internal_name] = None

                key_has_been_collected = True

        if not key_has_been_collected:

            # The keyword POISSERR is a special case, because even if it is missing,
            # it is assumed to be False if there is a STAT_ERR column in the file

            if keyname == "POISSERR" and "STAT_ERR" in data.columns.names:

                warnings.warn(
                    "POISSERR is not set. Assuming non-poisson errors as given in the "
                    "STAT_ERR column")

                gathered_keywords['poisserr'] = False

            elif keyname == "ANCRFILE":

                # Some non-compliant files have no ARF because they don't need one. Don't fail, but issue a
                # warning

                warnings.warn(
                    "ANCRFILE is not set. This is not a compliant OGIP file. Assuming no ARF."
                )

                gathered_keywords['ancrfile'] = None

            elif keyname == "FILTER":

                # Some non-compliant files have no FILTER because they don't need one. Don't fail, but issue a
                # warning

                warnings.warn(
                    "FILTER is not set. This is not a compliant OGIP file. Assuming no FILTER."
                )

                gathered_keywords['filter'] = None

            else:

                raise RuntimeError(
                    "Keyword %s not found. File %s is not a proper PHA "
                    "file" % (keyname, filename))

    is_poisson = gathered_keywords['poisserr']

    exposure = gathered_keywords['exposure']

    # now we need to get the response file so that we can extract the EBOUNDS

    if file_type == 'observed':

        if rsp_file is None:

            # this means it should be specified in the header
            rsp_file = gathered_keywords['respfile']

            if arf_file is None:
                arf_file = gathered_keywords['ancrfile']

                # Read in the response

        if isinstance(rsp_file, str):
            rsp = OGIPResponse(rsp_file, arf_file=arf_file)

        else:

            # assume a fully formed OGIPResponse
            rsp = rsp_file

    if file_type == 'background':
        # we need the rsp ebounds from response to build the histogram

        assert isinstance(
            rsp_file, InstrumentResponse
        ), 'You must supply an InstrumentResponse to extract the energy bounds'

        rsp = rsp_file

    # Now get the data (counts or rates) and their errors. If counts, transform them into rates

    if typeII:

        # PHA II file
        if has_rates:

            if not treat_as_time_series:

                rates = data.field(data_column_name)[spectrum_number - 1, :]

                rate_errors = None

                if not is_poisson:
                    rate_errors = data.field("STAT_ERR")[spectrum_number -
                                                         1, :]

            else:

                rates = data.field(data_column_name)

                rate_errors = None

                if not is_poisson:
                    rate_errors = data.field("STAT_ERR")

        else:

            if not treat_as_time_series:

                rates = old_div(
                    data.field(data_column_name)[spectrum_number - 1, :],
                    exposure)

                rate_errors = None

                if not is_poisson:
                    rate_errors = old_div(
                        data.field("STAT_ERR")[spectrum_number - 1, :],
                        exposure)

            else:

                rates = old_div(data.field(data_column_name),
                                np.atleast_2d(exposure).T)

                rate_errors = None

                if not is_poisson:
                    rate_errors = old_div(data.field("STAT_ERR"),
                                          np.atleast_2d(exposure).T)

        if "SYS_ERR" in data.columns.names:

            if not treat_as_time_series:

                sys_errors = data.field("SYS_ERR")[spectrum_number - 1, :]

            else:

                sys_errors = data.field("SYS_ERR")

        else:

            sys_errors = np.zeros(rates.shape)

        if has_quality_column:

            if not treat_as_time_series:

                try:

                    quality = data.field("QUALITY")[spectrum_number - 1, :]

                except IndexError:

                    # GBM CSPEC files do not follow OGIP conventions and instead
                    # list simply QUALITY=0 for each spectrum
                    # so we have to read them differently

                    quality_element = data.field("QUALITY")[spectrum_number -
                                                            1]

                    warnings.warn(
                        'The QUALITY column has the wrong shape. This PHAII file does not follow OGIP standards'
                    )

                    if quality_element == 0:

                        quality = np.zeros_like(rates, dtype=int)

                    else:

                        quality = np.zeros_like(rates, dtype=int) + 5

            else:

                # we need to be careful again because the QUALITY column is not always the correct shape

                quality_element = data.field("QUALITY")

                if quality_element.shape == rates.shape:

                    # This is the proper way for the quality to be stored

                    quality = quality_element

                else:

                    quality = np.zeros_like(rates, dtype=int)

                    for i, q in enumerate(quality_element):

                        if q != 0:
                            quality[i, :] = 5

        else:

            if is_all_data_good:

                quality = np.zeros_like(rates, dtype=int)

            else:

                quality = np.zeros_like(rates, dtype=int) + 5

        if has_tstart:

            if has_tstart_column:

                if not treat_as_time_series:

                    tstart = data.field("TSTART")[spectrum_number - 1]

                else:

                    tstart = data.field("TSTART")

        if has_tstop:

            if has_tstop_column:

                if not treat_as_time_series:

                    tstop = data.field("TSTOP")[spectrum_number - 1]

                else:

                    tstop = data.field("TSTOP")

        if has_telapse:

            if has_telapse_column:

                if not treat_as_time_series:

                    tstop = tstart + data.field("TELAPSE")[spectrum_number - 1]

                else:

                    tstop = tstart + data.field("TELAPSE")

    else:

        assert not treat_as_time_series, 'This is not a PHAII file but you specified to treat it as a time series'

        # PHA 1 file
        if has_rates:

            rates = data.field(data_column_name)

            rate_errors = None

            if not is_poisson:
                rate_errors = data.field("STAT_ERR")

        else:

            rates = old_div(data.field(data_column_name), exposure)

            rate_errors = None

            if not is_poisson:
                rate_errors = old_div(data.field("STAT_ERR"), exposure)

        if "SYS_ERR" in data.columns.names:

            sys_errors = data.field("SYS_ERR")

        else:

            sys_errors = np.zeros(rates.shape)

        if has_quality_column:

            quality = data.field("QUALITY")

        else:

            if is_all_data_good:

                quality = np.zeros_like(rates, dtype=int)

            else:

                quality = np.zeros_like(rates, dtype=int) + 5

        # read start and stop times if needed

        if has_tstart:

            if has_tstart_column:

                tstart = data.field("TSTART")

            else:

                tstart = header['TSTART']

        if has_tstop:

            if has_tstop_column:

                tstop = data.field("TSTOP")

            else:

                tstop = header['TSTOP']

        if has_telapse:

            if has_telapse_column:

                tstop = tstart + data.field("TELAPSE")

            else:

                tstop = tstart + header['TELAPSE']

        # Now that we have read it, some safety checks

        assert rates.shape[0] == gathered_keywords['detchans'], \
            "The data column (RATES or COUNTS) has a different number of entries than the " \
            "DETCHANS declared in the header"

    quality = Quality.from_ogip(quality)

    if not treat_as_time_series:

        counts = rates * exposure

        if not is_poisson:

            count_errors = rate_errors * exposure

        else:

            count_errors = None

    else:

        exposure = np.atleast_2d(exposure).T

        counts = rates * exposure

        if not is_poisson:

            count_errors = rate_errors * exposure

        else:

            count_errors = None

    out = collections.OrderedDict(counts=counts,
                                  count_errors=count_errors,
                                  rates=rates,
                                  rate_errors=rate_errors,
                                  sys_errors=sys_errors,
                                  exposure=exposure,
                                  is_poisson=is_poisson,
                                  rsp=rsp,
                                  gathered_keywords=gathered_keywords,
                                  quality=quality,
                                  file_name=file_name,
                                  tstart=tstart,
                                  tstop=tstop)

    return out
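A hedged usage sketch for the reader above; the file names are hypothetical. Both calls select the same spectrum, because the XSPEC-style "{n}" suffix is stripped from the filename and parsed into spectrum_number at the top of the function:

# Hedged usage sketch (hypothetical file names).
explicit = _read_pha_or_pha2_file("spectra.pha2", spectrum_number=2,
                                  file_type="observed", rsp_file="det.rmf")

suffixed = _read_pha_or_pha2_file("spectra.pha2{2}", file_type="observed",
                                  rsp_file="det.rmf")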
Example #11
    def from_lat_lle(cls,
                     name,
                     lle_file,
                     ft2_file,
                     rsp_file,
                     restore_background=None,
                     trigger_time=None,
                     poly_order=-1,
                     unbinned=False,
                     verbose=True):
        """
               A plugin to natively bin, view, and handle Fermi LAT LLE data.
               An LLE event file and FT2 (1 sec) are required as well as the associated response



               Background selections are specified as
               a comma separated string e.g. "-10-0,10-20"

               Initial source selection is input as a string e.g. "0-5"

               One can choose a background polynomial order by hand (up to 4th order)
               or leave it as the default polyorder=-1 to decide by LRT test

               :param name: name of the plugin
               :param lle_file: lle event file
               :param ft2_file: fermi FT2 file
               :param rsp_file: lle response file
               :param trigger_time: trigger time if needed
               :param poly_order: 0-4 or -1 for auto
               :param unbinned: unbinned likelihood fit (bool)
               :param verbose: verbose (bool)


               """

        lat_lle_file = LLEFile(lle_file, ft2_file, rsp_file)

        if trigger_time is not None:
            lat_lle_file.trigger_time = trigger_time

        # Mark channels below 30 MeV (30000 keV) as bad

        channel_30MeV = np.searchsorted(lat_lle_file.energy_edges[0],
                                        30000.) - 1

        native_quality = np.zeros(lat_lle_file.n_channels, dtype=int)

        idx = np.arange(lat_lle_file.n_channels) < channel_30MeV

        native_quality[idx] = 5

        event_list = EventListWithLiveTime(
            arrival_times=lat_lle_file.arrival_times -
            lat_lle_file.trigger_time,
            measurement=lat_lle_file.energies,
            n_channels=lat_lle_file.n_channels,
            live_time=lat_lle_file.livetime,
            live_time_starts=lat_lle_file.livetime_start -
            lat_lle_file.trigger_time,
            live_time_stops=lat_lle_file.livetime_stop -
            lat_lle_file.trigger_time,
            start_time=lat_lle_file.tstart - lat_lle_file.trigger_time,
            stop_time=lat_lle_file.tstop - lat_lle_file.trigger_time,
            quality=native_quality,
            first_channel=1,
            # rsp_file=rsp_file,
            instrument=lat_lle_file.instrument,
            mission=lat_lle_file.mission,
            verbose=verbose)

        # pass to the super class

        rsp = OGIPResponse(rsp_file)

        return cls(name,
                   event_list,
                   response=rsp,
                   poly_order=poly_order,
                   unbinned=unbinned,
                   verbose=verbose,
                   restore_poly_fit=restore_background,
                   container_type=BinnedSpectrumWithDispersion)
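In threeML these classmethods live on a builder class; TimeSeriesBuilder is assumed here, and the file names are hypothetical. A hedged usage sketch:

# Hedged usage sketch; the owning class and file names are assumptions.
from threeML import TimeSeriesBuilder

lle = TimeSeriesBuilder.from_lat_lle("lle",
                                     lle_file="gll_lle_bn.fit",
                                     ft2_file="gll_pt_bn.fit",
                                     rsp_file="gll_cspec_bn.rsp",
                                     trigger_time=0.0)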
Example #12
    def from_gbm_cspec_or_ctime(cls,
                                name,
                                cspec_or_ctime_file,
                                rsp_file,
                                restore_background=None,
                                trigger_time=None,
                                poly_order=-1,
                                verbose=True):
        """
               A plugin to natively bin, view, and handle Fermi GBM TTE data.
               A TTE event file are required as well as the associated response



               Background selections are specified as
               a comma separated string e.g. "-10-0,10-20"

               Initial source selection is input as a string e.g. "0-5"

               One can choose a background polynomial order by hand (up to 4th order)
               or leave it as the default polyorder=-1 to decide by LRT test

               :param name: name for your choosing
               :param tte_file: GBM tte event file
               :param rsp_file: Associated TTE CSPEC response file
               :param trigger_time: trigger time if needed
               :param poly_order: 0-4 or -1 for auto
               :param unbinned: unbinned likelihood fit (bool)
               :param verbose: verbose (bool)



               """

        # self._default_unbinned = unbinned

        # Load the relevant information from the TTE file

        cdata = GBMCdata(cspec_or_ctime_file, rsp_file)

        # Set a trigger time if one has not been set

        if trigger_time is not None:
            cdata.trigger_time = trigger_time

        # Create the event list

        event_list = BinnedSpectrumSeries(cdata.spectrum_set,
                                          first_channel=0,
                                          mission='Fermi',
                                          instrument=cdata.det_name,
                                          verbose=verbose)

        # we need to see if this is an RSP2

        if isinstance(rsp_file, str):

            test = re.match(r'^.*\.rsp2$', rsp_file)

            # some GBM RSPs that are not marked RSP2 are in fact RSP2s
            # we need to check

            if test is None:

                with fits.open(rsp_file) as f:

                    # there should only be a header, ebounds and one spec rsp extension

                    if len(f) > 3:
                        # make test a dummy value to trigger the RSP2 branch below

                        test = -1

                        custom_warnings.warn(
                            'The RSP file is marked as a single response but in fact has multiple matrices. We will treat it as an RSP2'
                        )

            if test is not None:

                rsp = InstrumentResponseSet.from_rsp2_file(
                    rsp2_file=rsp_file,
                    counts_getter=event_list.counts_over_interval,
                    exposure_getter=event_list.exposure_over_interval,
                    reference_time=cdata.trigger_time)

            else:

                rsp = OGIPResponse(rsp_file)

        else:

            assert isinstance(
                rsp_file, InstrumentResponse
            ), 'The provided response is not a 3ML InstrumentResponse'
            rsp = rsp_file

        # pass to the super class

        return cls(name,
                   event_list,
                   response=rsp,
                   poly_order=poly_order,
                   unbinned=False,
                   verbose=verbose,
                   restore_poly_fit=restore_background,
                   container_type=BinnedSpectrumWithDispersion)
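The RSP2 detection used above combines a filename match with a count of FITS extensions; a hedged standalone version of that heuristic:

# Hedged sketch: a response file is treated as an RSP2 if its name ends
# in ".rsp2" or if it holds more than one matrix extension.
import re

from astropy.io import fits

def looks_like_rsp2(rsp_file):

    if re.match(r"^.*\.rsp2$", rsp_file) is not None:
        return True

    with fits.open(rsp_file) as f:

        # a single RSP should hold only a primary HDU, an EBOUNDS
        # extension and one matrix extension
        return len(f) > 3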
Example #13
def test_OGIP_response_against_xspec():

    # Test for various photon indexes
    for index in [-0.5, 0.0, 0.5, 1.5, 2.0, 3.0, 4.0]:

        print("Processing index %s" % index)

        # First reset xspec
        xspec.AllData.clear()

        # Create a model in XSpec

        mo = xspec.Model("po")

        # Change the default value for the photon index
        # (remember that in XSpec the power law is defined as norm * E^(-PhoIndex),
        # so PhoIndex is normally positive. This is the opposite of astromodels.)
        mo.powerlaw.PhoIndex = index
        mo.powerlaw.norm = 12.2

        # Now repeat the same in 3ML

        # Generate the astromodels function and set it to the same values as the XSpec power law
        # (the pivot in XSpec is set to 1). Remember also that the definition in xspec has the
        # sign of the photon index opposite
        powerlaw = Powerlaw()
        powerlaw.piv = 1.0
        powerlaw.index = -mo.powerlaw.PhoIndex.values[0]
        powerlaw.K = mo.powerlaw.norm.values[0]

        # Exploit the fact that the power law integral is analytic
        powerlaw_integral = Powerlaw()
        # Remove transformation
        powerlaw_integral.K._transformation = None
        powerlaw_integral.K.bounds = (None, None)
        powerlaw_integral.index = powerlaw.index.value + 1
        powerlaw_integral.K = powerlaw.K.value / (powerlaw.index.value + 1)

        powerlaw_integral.display()

        integral_function = lambda e1, e2: powerlaw_integral(e2) - powerlaw_integral(e1)

        # Now check that the two convolved models give the same number of counts in each channel

        # Fake a spectrum so we can actually compute the convoluted model

        # Get path of response file
        rsp_file = get_path_of_data_file("ogip_test_gbm_n6.rsp")

        fs1 = xspec.FakeitSettings(rsp_file, exposure=1.0, fileName="_fake_spectrum.pha")

        xspec.AllData.fakeit(noWrite=True, applyStats=False, settings=fs1)

        # Get the expected counts
        xspec_counts = mo.folded(1)

        # Now get the convolution from 3ML
        rsp = OGIPResponse(rsp_file)

        rsp.set_function(integral_function)

        threeML_counts = rsp.convolve()

        # Compare them
        assert np.allclose(xspec_counts, threeML_counts)

        # Now do the same with a matrix with a ARF

        # First reset xspec
        xspec.AllData.clear()

        # Then load rsp and arf in XSpec

        rsp_file = get_path_of_data_file("ogip_test_xmm_pn.rmf")

        arf_file = get_path_of_data_file("ogip_test_xmm_pn.arf")

        fs1 = xspec.FakeitSettings(rsp_file, arf_file, exposure=1.0, fileName="_fake_spectrum.pha")

        xspec.AllData.fakeit(noWrite=True, applyStats=False, settings=fs1)

        # Get the expected counts
        xspec_counts = mo.folded(1)

        # Now get the convolution from 3ML
        rsp = OGIPResponse(rsp_file, arf_file=arf_file)

        rsp.set_function(integral_function)

        threeML_counts = rsp.convolve()

        # Compare them
        assert np.allclose(xspec_counts, threeML_counts)
Example #14
    def from_gbm_tte(
        cls,
        name,
        tte_file,
        rsp_file=None,
        restore_background=None,
        trigger_time=None,
        poly_order=-1,
        unbinned=True,
        verbose=True,
        use_balrog=False,
        trigdat_file=None,
        poshist_file=None,
        cspec_file=None,
    ):
        """
        A plugin to natively bin, view, and handle Fermi GBM TTE data.
        A TTE event file is required, as well as the associated response.

        Background selections are specified as
        a comma separated string e.g. "-10-0,10-20"

        Initial source selection is input as a string e.g. "0-5"

        One can choose a background polynomial order by hand (up to 4th order)
        or leave it as the default polyorder=-1 to decide by LRT test

        :param name: name for your choosing
        :param tte_file: GBM tte event file
        :param rsp_file: Associated TTE CSPEC response file
        :param trigger_time: trigger time if needed
        :param poly_order: 0-4 or -1 for auto
        :param unbinned: unbinned likelihood fit (bool)
        :param verbose: verbose (bool)
        :param use_balrog: (bool) if you have gbm_drm_gen installed, will build a BALROGLike plugin
        :param trigdat_file: the trigdat file to use for location
        :param poshist_file: the poshist file to use for location
        :param cspec_file: the cspec file to use for location
        """

        # self._default_unbinned = unbinned

        # Load the relevant information from the TTE file

        gbm_tte_file = GBMTTEFile(tte_file)

        # Set a trigger time if one has not been set

        if trigger_time is not None:
            gbm_tte_file.trigger_time = trigger_time

        # Create the event list

        event_list = EventListWithDeadTime(
            arrival_times=gbm_tte_file.arrival_times -
            gbm_tte_file.trigger_time,
            measurement=gbm_tte_file.energies,
            n_channels=gbm_tte_file.n_channels,
            start_time=gbm_tte_file.tstart - gbm_tte_file.trigger_time,
            stop_time=gbm_tte_file.tstop - gbm_tte_file.trigger_time,
            dead_time=gbm_tte_file.deadtime,
            first_channel=0,
            instrument=gbm_tte_file.det_name,
            mission=gbm_tte_file.mission,
            verbose=verbose,
        )

        if use_balrog:

            assert has_balrog, "you must install the gbm_drm_gen package to use balrog"

            assert cspec_file is not None, "must include a cspecfile"

            if poshist_file is not None:

                drm_gen = gbm_drm_gen.DRMGenTTE(
                    tte_file,
                    poshist=poshist_file,
                    cspecfile=cspec_file,
                    T0=trigger_time,
                    mat_type=2,
                    occult=True,
                )

            elif trigdat_file is not None:

                drm_gen = gbm_drm_gen.DRMGenTTE(
                    tte_file,
                    trigdat=trigdat_file,
                    cspecfile=cspec_file,
                    mat_type=2,
                    occult=True,
                )

            else:

                raise RuntimeError("No poshist or trigdat file supplied")

            rsp = gbm_drm_gen.BALROG_DRM(drm_gen, 0, 0)

        elif isinstance(rsp_file, str):

            # we need to see if this is an RSP2

            test = re.match(r"^.*\.rsp2$", rsp_file)

            # some GBM RSPs that are not marked RSP2 are in fact RSP2s
            # we need to check

            if test is None:

                with fits.open(rsp_file) as f:

                    # there should only be a header, ebounds and one spec rsp extension

                    if len(f) > 3:

                        # make test a dummy value to trigger the RSP2 branch below

                        test = -1

                        custom_warnings.warn(
                            "The RSP file is marked as a single response but in fact has multiple matrices. We will treat it as an RSP2"
                        )

            if test is not None:

                rsp = InstrumentResponseSet.from_rsp2_file(
                    rsp2_file=rsp_file,
                    counts_getter=event_list.counts_over_interval,
                    exposure_getter=event_list.exposure_over_interval,
                    reference_time=gbm_tte_file.trigger_time,
                )

            else:

                rsp = OGIPResponse(rsp_file)

        else:

            assert isinstance(
                rsp_file, InstrumentResponse
            ), "The provided response is not a 3ML InstrumentResponse"
            rsp = rsp_file

        # pass to the super class

        return cls(
            name,
            event_list,
            response=rsp,
            poly_order=poly_order,
            unbinned=unbinned,
            verbose=verbose,
            restore_poly_fit=restore_background,
            container_type=BinnedSpectrumWithDispersion,
            use_balrog=use_balrog,
        )
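A hedged usage sketch for from_gbm_tte; TimeSeriesBuilder as the owning class is an assumption, and the file names are hypothetical:

# Hedged usage sketch; the owning class and file names are assumptions.
from threeML import TimeSeriesBuilder

nai0 = TimeSeriesBuilder.from_gbm_tte("n0",
                                      tte_file="glg_tte_n0_bn.fit",
                                      rsp_file="glg_cspec_n0_bn.rsp",
                                      trigger_time=0.0)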