Code example #1
File: test_spectrumlike.py Project: Husky22/threeML
def test_spectrum_like_with_background_model():
    energies = np.logspace(1, 3, 51)

    low_edge = energies[:-1]
    high_edge = energies[1:]

    sim_K = 1E-1
    sim_kT = 20.

    # get a blackbody source function
    source_function = Blackbody(K=sim_K, kT=sim_kT)

    # power law background function
    background_function = Powerlaw(K=5, index=-1.5, piv=100.)

    spectrum_generator = SpectrumLike.from_function(
        'fake',
        source_function=source_function,
        background_function=background_function,
        energy_min=low_edge,
        energy_max=high_edge)

    background_plugin = SpectrumLike.from_background('background',
                                                     spectrum_generator)

    bb = Blackbody()

    pl = Powerlaw()
    pl.piv = 100

    bkg_ps = PointSource('bkg', 0, 0, spectral_shape=pl)

    bkg_model = Model(bkg_ps)

    jl_bkg = JointLikelihood(bkg_model, DataList(background_plugin))

    _ = jl_bkg.fit()

    plugin_bkg_model = SpectrumLike('full',
                                    spectrum_generator.observed_spectrum,
                                    background=background_plugin)

    pts = PointSource('mysource', 0, 0, spectral_shape=bb)

    model = Model(pts)

    # MLE fitting

    jl = JointLikelihood(model, DataList(plugin_bkg_model))

    result = jl.fit()

    K_variates = jl.results.get_variates('mysource.spectrum.main.Blackbody.K')

    kT_variates = jl.results.get_variates(
        'mysource.spectrum.main.Blackbody.kT')

    assert np.all(
        np.isclose([K_variates.average, kT_variates.average], [sim_K, sim_kT],
                   rtol=0.5))
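
As a quick visual check, 3ML can overlay the fitted model on the observed counts; a minimal sketch using the jl object above (keyword defaults assumed):

from threeML import display_spectrum_model_counts

# plot observed counts against the fitted source-plus-background model
# for every plugin attached to jl
fig = display_spectrum_model_counts(jl)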
Code example #2
def test_spectrumlike_fit():

    energies = np.logspace(1, 3, 51)

    low_edge = energies[:-1]
    high_edge = energies[1:]

    sim_K = 1e-1
    sim_kT = 20.0

    # get a blackbody source function
    source_function = Blackbody(K=sim_K, kT=sim_kT)

    # power law background function
    background_function = Powerlaw(K=1, index=-1.5, piv=100.0)

    spectrum_generator = SpectrumLike.from_function(
        "fake",
        source_function=source_function,
        background_function=background_function,
        energy_min=low_edge,
        energy_max=high_edge,
    )

    bb = Blackbody()

    pts = PointSource("mysource", 0, 0, spectral_shape=bb)

    model = Model(pts)

    # MLE fitting

    jl = JointLikelihood(model, DataList(spectrum_generator))

    result = jl.fit()

    K_variates = jl.results.get_variates("mysource.spectrum.main.Blackbody.K")

    kT_variates = jl.results.get_variates(
        "mysource.spectrum.main.Blackbody.kT")

    assert np.all(
        np.isclose([K_variates.average, kT_variates.average], [sim_K, sim_kT],
                   atol=1))
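
A minimal follow-up sketch: the MLE results can be written to disk and reloaded with 3ML's write_to and load_analysis_results; the file name "bb_fit.fits" is a placeholder:

from threeML import load_analysis_results

# persist the results to a FITS file and read them back;
# "bb_fit.fits" is a hypothetical file name
jl.results.write_to("bb_fit.fits", overwrite=True)

reloaded = load_analysis_results("bb_fit.fits")

# the reloaded object exposes the same variates used in the assertion above
print(reloaded.get_variates("mysource.spectrum.main.Blackbody.K").average)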
Code example #3
def test_dispersionspectrumlike_fit():

    response = OGIPResponse(
        get_path_of_data_file("datasets/ogip_powerlaw.rsp"))

    sim_K = 1e-1
    sim_kT = 20.0

    # get a blackbody source function
    source_function = Blackbody(K=sim_K, kT=sim_kT)

    # power law background function
    background_function = Powerlaw(K=1, index=-1.5, piv=100.0)

    spectrum_generator = DispersionSpectrumLike.from_function(
        "test",
        source_function=source_function,
        response=response,
        background_function=background_function,
    )

    bb = Blackbody()

    pts = PointSource("mysource", 0, 0, spectral_shape=bb)

    model = Model(pts)

    # MLE fitting

    jl = JointLikelihood(model, DataList(spectrum_generator))

    result = jl.fit()

    K_variates = jl.results.get_variates("mysource.spectrum.main.Blackbody.K")

    kT_variates = jl.results.get_variates(
        "mysource.spectrum.main.Blackbody.kT")

    assert np.all(
        np.isclose([K_variates.average, kT_variates.average], [sim_K, sim_kT],
                   atol=1))
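
A minimal sketch, assuming the fit above has completed so the plugin carries a model: any SpectrumLike-style plugin can draw further synthetic observations via get_simulated_dataset, the same call the PPC code in examples #8 and #9 relies on:

# draw a new synthetic observation from the fitted model
sim = spectrum_generator.get_simulated_dataset("sim")

# the clone behaves like any other plugin and can be refit
jl_sim = JointLikelihood(model, DataList(sim))
_ = jl_sim.fit()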
Code example #4
def test_all():
    response = OGIPResponse(
        get_path_of_data_file("datasets/ogip_powerlaw.rsp"))

    np.random.seed(1234)

    # rescale the functions for the response
    source_function = Blackbody(K=1e-7, kT=500.0)
    background_function = Powerlaw(K=1, index=-1.5, piv=1.0e3)
    spectrum_generator = DispersionSpectrumLike.from_function(
        "fake",
        source_function=source_function,
        background_function=background_function,
        response=response)

    source_function.K.prior = Log_normal(mu=np.log(1e-7), sigma=1)
    source_function.kT.prior = Log_normal(mu=np.log(300), sigma=2)

    ps = PointSource("demo", 0, 0, spectral_shape=source_function)

    model = Model(ps)

    ba = BayesianAnalysis(model, DataList(spectrum_generator))

    ba.set_sampler()

    ba.sample(quiet=True)

    ppc = compute_ppc(ba,
                      ba.results,
                      n_sims=500,
                      file_name="my_ppc.h5",
                      overwrite=True,
                      return_ppc=True)

    ppc.fake.plot(bkg_subtract=True)

    ppc.fake.plot(bkg_subtract=False)
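
A minimal sketch of inspecting the saved PPC file directly with h5py, assuming the group and dataset layout written by compute_ppc (examples #8 and #9); the plugin in this test is named "fake":

import h5py
import numpy as np

# "my_ppc.h5" is the file written by the compute_ppc call above
with h5py.File("my_ppc.h5", "r") as f:
    n_sims = f.attrs["n_sims"]
    grp = f["fake"]  # one group per plugin, keyed by plugin name

    obs_counts = grp["obs_counts"][()]

    # stack the posterior-predictive draws into (n_sims, n_channels)
    ppc_counts = np.array(
        [grp["ppc_counts_%d" % j][()] for j in range(n_sims)])

print(obs_counts.shape, ppc_counts.shape)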
Code example #5
def test_spectrum_like_with_background_model():
    energies = np.logspace(1, 3, 51)

    low_edge = energies[:-1]
    high_edge = energies[1:]

    sim_K = 1e-1
    sim_kT = 20.0

    # get a blackbody source function
    source_function = Blackbody(K=sim_K, kT=sim_kT)

    # power law background function
    background_function = Powerlaw(K=5, index=-1.5, piv=100.0)

    spectrum_generator = SpectrumLike.from_function(
        "fake",
        source_function=source_function,
        background_function=background_function,
        energy_min=low_edge,
        energy_max=high_edge,
    )

    background_plugin = SpectrumLike.from_background("background",
                                                     spectrum_generator)

    bb = Blackbody()

    pl = Powerlaw()
    pl.piv = 100

    bkg_ps = PointSource("bkg", 0, 0, spectral_shape=pl)

    bkg_model = Model(bkg_ps)

    jl_bkg = JointLikelihood(bkg_model, DataList(background_plugin))

    _ = jl_bkg.fit()

    plugin_bkg_model = SpectrumLike("full",
                                    spectrum_generator.observed_spectrum,
                                    background=background_plugin)

    pts = PointSource("mysource", 0, 0, spectral_shape=bb)

    model = Model(pts)

    # MLE fitting

    jl = JointLikelihood(model, DataList(plugin_bkg_model))

    result = jl.fit()

    K_variates = jl.results.get_variates("mysource.spectrum.main.Blackbody.K")

    kT_variates = jl.results.get_variates(
        "mysource.spectrum.main.Blackbody.kT")

    assert np.all(
        np.isclose([K_variates.average, kT_variates.average], [sim_K, sim_kT],
                   rtol=0.5))

    # test with OGIPLike
    with within_directory(__example_dir):
        ogip = OGIPLike("test_ogip",
                        observation="test.pha{1}",
                        background=background_plugin)
Code example #6
def test_assigning_source_name():

    energies = np.logspace(1, 3, 51)

    low_edge = energies[:-1]
    high_edge = energies[1:]

    sim_K = 1e-1
    sim_kT = 20.0

    # get a blackbody source function
    source_function = Blackbody(K=sim_K, kT=sim_kT)

    # power law background function
    background_function = Powerlaw(K=1, index=-1.5, piv=100.0)

    spectrum_generator = SpectrumLike.from_function(
        "fake",
        source_function=source_function,
        background_function=background_function,
        energy_min=low_edge,
        energy_max=high_edge,
    )

    # good name setting

    bb = Blackbody()

    pts = PointSource("good_name", 0, 0, spectral_shape=bb)

    model = Model(pts)

    # before setting model

    spectrum_generator.assign_to_source("good_name")

    jl = JointLikelihood(model, DataList(spectrum_generator))

    _ = jl.fit()

    # after setting model

    pts = PointSource("good_name", 0, 0, spectral_shape=bb)

    model = Model(pts)

    spectrum_generator = SpectrumLike.from_function(
        "fake",
        source_function=source_function,
        background_function=background_function,
        energy_min=low_edge,
        energy_max=high_edge,
    )

    jl = JointLikelihood(model, DataList(spectrum_generator))

    spectrum_generator.assign_to_source("good_name")

    # after with bad name

    spectrum_generator = SpectrumLike.from_function(
        "fake",
        source_function=source_function,
        background_function=background_function,
        energy_min=low_edge,
        energy_max=high_edge,
    )

    jl = JointLikelihood(model, DataList(spectrum_generator))

    with pytest.raises(RuntimeError):

        spectrum_generator.assign_to_source("bad_name")

    # before with bad name

    spectrum_generator = SpectrumLike.from_function(
        "fake",
        source_function=source_function,
        background_function=background_function,
        energy_min=low_edge,
        energy_max=high_edge,
    )

    spectrum_generator.assign_to_source("bad_name")

    with pytest.raises(RuntimeError):

        jl = JointLikelihood(model, DataList(spectrum_generator))

    # multisource model

    spectrum_generator = SpectrumLike.from_function(
        "fake",
        source_function=source_function,
        background_function=background_function,
        energy_min=low_edge,
        energy_max=high_edge,
    )

    ps1 = PointSource("ps1", 0, 0, spectral_shape=Blackbody())
    ps2 = PointSource("ps2", 0, 0, spectral_shape=Powerlaw())

    model = Model(ps1, ps2)

    model.ps2.spectrum.main.Powerlaw.K.fix = True
    model.ps2.spectrum.main.Powerlaw.index.fix = True

    spectrum_generator.assign_to_source("ps1")

    dl = DataList(spectrum_generator)

    jl = JointLikelihood(model, dl)

    _ = jl.fit()
Code example #7
File: fit_point_source.py Project: threeML/hawc_hal
def fit_point_source(roi,
                     maptree,
                     response,
                     point_source_model,
                     bin_list,
                     confidence_intervals=False,
                     liff=False,
                     pixel_size=0.17,
                     verbose=False):

    data_radius = roi.data_radius.to("deg").value

    if not liff:

        # This is a 3ML plugin
        hawc = HAL("HAWC",
                   maptree,
                   response,
                   roi,
                   flat_sky_pixels_size=pixel_size)

        hawc.set_active_measurements(bin_list=bin_list)

    else:

        from threeML import HAWCLike

        hawc = HAWCLike("HAWC", maptree, response, fullsky=True)

        hawc.set_bin_list(bin_list)

        ra_roi, dec_roi = roi.ra_dec_center

        hawc.set_ROI(ra_roi, dec_roi, data_radius)

    if not liff:

        hawc.display()

    data = DataList(hawc)

    jl = JointLikelihood(point_source_model, data, verbose=verbose)

    point_source_model.display(complete=True)

    jl.set_minimizer("minuit")

    param_df, like_df = jl.fit()

    if confidence_intervals:

        ci = jl.get_errors()

    else:

        ci = None

    return param_df, like_df, ci, jl.results
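
A usage sketch for fit_point_source; the ROI, file names, source position, and bin list are hypothetical placeholders:

from hawc_hal import HealpixConeROI

# hypothetical source position and region of interest
ra, dec = 83.63, 22.01
roi = HealpixConeROI(data_radius=3.0, model_radius=8.0, ra=ra, dec=dec)

spectrum = Powerlaw()
pts_model = Model(PointSource("src", ra=ra, dec=dec, spectral_shape=spectrum))

param_df, like_df, ci, results = fit_point_source(
    roi,
    maptree="maptree.hd5",    # hypothetical map tree file
    response="response.hd5",  # hypothetical detector response file
    point_source_model=pts_model,
    bin_list=["4", "5", "6", "7", "8", "9"],
    confidence_intervals=False)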
Code example #8
def compute_ppc(analysis, result, n_sims, file_name):
    """ 
    Compute a posterior predictive check from a 3ML DispersionLike
    Plugin. The resulting posterior data simulations are stored
    in an HDF5 file which can be read by the PPC class

    :param analysis: 3ML bayesian analysis object 
    :param result: 3ML analysis result
    :param n_sims: the number of posterior simulations to create
    :param file_name: the filename to save to
    :returns: None
    :rtype: 

    """

    with h5py.File(file_name, 'w', libver='latest') as database:

        # first we collect the real data and save it so that we will not
        # have to look it up in the future

        data_names = []

        database.attrs['n_sims'] = n_sims

        for data in analysis.data_list.values():

            data_names.append(data.name)
            grp = database.create_group(data.name)
            grp.attrs['exposure'] = data.exposure
            grp.create_dataset('ebounds',
                               data=data.response.ebounds,
                               compression='lzf')
            grp.create_dataset('obs_counts',
                               data=data.observed_counts,
                               compression='lzf')
            grp.create_dataset('bkg_counts',
                               data=data.background_counts,
                               compression='lzf')
            grp.create_dataset('mask', data=data.mask, compression='lzf')

        # select random draws from the posterior

        choices = np.random.choice(len(result.samples.T),
                                   replace=False,
                                   size=n_sims)

        # for each posterior sample

        for j, choice in enumerate(choices):

            # get the parameters of the choice

            params = result.samples.T[choice]

            # set the analysis free parameters to the value of the posterior
            for i, (k, v) in enumerate(
                    analysis.likelihood_model.free_parameters.items()):
                v.value = params[i]

            # create simulated data sets with these free parameters
            sim_dl = DataList(*[
                data.get_simulated_dataset()
                for data in analysis.data_list.values()
            ])

            # set the model of the simulated data to the model of the simulation
            for i, data in enumerate(sim_dl.values()):

                # clone the model for safety's sake
                # and set the model. For now we do nothing with this

                data.set_model(clone_model(analysis.likelihood_model))

                # store the PPC data in the file
                grp = database[data_names[i]]
                grp.create_dataset('ppc_counts_%d' % j,
                                   data=data.observed_counts,
                                   compression='lzf')
                grp.create_dataset('ppc_background_counts_%d' % j,
                                   data=data.background_counts,
                                   compression='lzf')
Code example #9
File: ppc.py Project: grburgess/twopc
def compute_ppc(analysis: BayesianAnalysis,
                result: BayesianResults,
                n_sims: int,
                file_name: str,
                overwrite: bool = False,
                return_ppc: bool = False) -> Union["PPC", None]:
    """
    Compute a posterior predictive check from a 3ML DispersionLike
    Plugin. The resulting posterior data simulations are stored
    in an HDF5 file which can be read by the PPC class

    :param analysis: 3ML bayesian analysis object
    :param result: 3ML analysis result
    :param n_sims: the number of posterior simulations to create
    :param file_name: the filename to save to
    :param overwrite: to overwrite an existsing file
    :param return_ppc: if true, PPC object will be return directy
    :returns: None
    :rtype:

    """

    update_logging_level("WARNING")

    p = Path(file_name)

    if p.exists() and (not overwrite):

        raise RuntimeError(f"{file_name} already exists!")

    with h5py.File(file_name, 'w', libver='latest') as database:

        # first we collect the real data and save it so that we will not
        # have to look it up in the future

        data_names = []

        database.attrs['n_sims'] = n_sims

        for data in analysis.data_list.values():

            data_names.append(data.name)
            grp = database.create_group(data.name)
            grp.attrs['exposure'] = data.exposure
            grp.create_dataset('ebounds',
                               data=data.response.ebounds,
                               compression='lzf')
            grp.create_dataset('obs_counts',
                               data=data.observed_counts,
                               compression='lzf')
            grp.create_dataset('bkg_counts',
                               data=data.background_counts,
                               compression='lzf')
            grp.create_dataset('mask', data=data.mask, compression='lzf')

        # select random draws from the posterior

        n_samples = len(result.samples.T)

        if n_samples < n_sims:

            print(f"n_sims ({n_sims}) exceeds the number of available "
                  f"posterior samples; reducing it to {n_samples}")

            n_sims = n_samples

            # keep the stored attribute consistent with the number of
            # simulations actually written
            database.attrs['n_sims'] = n_sims

        choices = np.random.choice(n_samples,
                                   replace=False,
                                   size=n_sims)

        # for each posterior sample

        with silence_console_log(and_progress_bars=False):

            for j, choice in enumerate(tqdm(choices,
                                            desc="sampling posterior")):

                # get the parameters of the choice

                params = result.samples.T[choice]

                # set the analysis free parameters to the value of the posterior
                for i, (k, v) in enumerate(
                        analysis.likelihood_model.free_parameters.items()):
                    v.value = params[i]

                # create simulated data sets with these free parameters
                sim_dl = DataList(*[
                    data.get_simulated_dataset()
                    for data in analysis.data_list.values()
                ])

                # set the model of the simulated data to the model of the simulation
                for i, data in enumerate(sim_dl.values()):

                    # clone the model for safety's sake
                    # and set the model. For now we do nothing with this

                    data.set_model(clone_model(analysis.likelihood_model))

                    # store the PPC data in the file
                    grp = database[data_names[i]]
                    grp.create_dataset('ppc_counts_%d' % j,
                                       data=data.observed_counts,
                                       compression='lzf')
                    grp.create_dataset('ppc_background_counts_%d' % j,
                                       data=data.background_counts,
                                       compression='lzf')
        if return_ppc:

            return PPC(file_name)
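
A usage note: the saved file can be reloaded later through the PPC class instead of being recomputed. A minimal sketch, assuming PPC is importable from twopc's ppc module and a plugin named "fake" as in example #4:

from twopc.ppc import PPC  # import path assumed from the project layout

ppc = PPC("my_ppc.h5")

# per-plugin accessors are generated from the group names in the file
ppc.fake.plot(bkg_subtract=True)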
Code example #10
def test_model_residual_maps(geminga_maptree, geminga_response, geminga_roi):

    #data_radius = 5.0
    #model_radius = 7.0
    output = dirname(geminga_maptree)

    ra_src, dec_src = 101.7, 16.0
    maptree, response, roi = geminga_maptree, geminga_response, geminga_roi

    hawc = HAL("HAWC", maptree, response, roi)

    # Use from bin 1 to bin 9
    hawc.set_active_measurements(1, 9)

    # Display information about the data loaded and the ROI
    hawc.display()
    # Define model: two sources, one point and one extended.
    # Same declination, but offset in RA.
    # Different spectral indices, but both power laws.
    pt_shift = 3.0
    ext_shift = 2.0

    # First source
    spectrum1 = Powerlaw()
    source1 = PointSource("point",
                          ra=ra_src + pt_shift,
                          dec=dec_src,
                          spectral_shape=spectrum1)

    spectrum1.K = 1e-12 / (u.TeV * u.cm**2 * u.s)
    spectrum1.piv = 1 * u.TeV
    spectrum1.index = -2.3

    spectrum1.piv.fix = True
    spectrum1.K.fix = True
    spectrum1.index.fix = True

    # Second source
    shape = Gaussian_on_sphere(lon0=ra_src - ext_shift,
                               lat0=dec_src,
                               sigma=0.3)
    spectrum2 = Powerlaw()
    source2 = ExtendedSource("extended",
                             spatial_shape=shape,
                             spectral_shape=spectrum2)

    spectrum2.K = 1e-12 / (u.TeV * u.cm**2 * u.s)
    spectrum2.piv = 1 * u.TeV
    spectrum2.index = -2.0

    shape.lon0.fix = True
    shape.lat0.fix = True
    shape.sigma.fix = True

    spectrum2.piv.fix = True
    spectrum2.K.fix = True
    spectrum2.index.fix = True

    # Define model with both sources
    model = Model(source1, source2)

    # Define the data we are using
    data = DataList(hawc)

    # Define the JointLikelihood object (glue the data to the model)
    jl = JointLikelihood(model, data, verbose=False)

    # This has the effect of loading the model cache
    fig = hawc.display_spectrum()

    # the test file names
    model_file_name = "{0}/test_model.hdf5".format(output)
    residual_file_name = "{0}/test_residual.hdf5".format(output)

    # Write the map trees for testing
    model_map_tree = hawc.write_model_map(model_file_name,
                                          poisson_fluctuate=True,
                                          test_return_map=True)
    residual_map_tree = hawc.write_residual_map(residual_file_name,
                                                test_return_map=True)

    # Read the maps back in
    hawc_model = map_tree_factory(model_file_name, roi)
    hawc_residual = map_tree_factory(residual_file_name, roi)

    check_map_trees(hawc_model, model_map_tree)
    check_map_trees(hawc_residual, residual_map_tree)