#  Imports (module paths follow the GaussPy+ tutorial scripts and may differ in other
#  versions of the package).
import os

from gausspyplus.prepare import GaussPyPrepare
from gausspyplus.plotting import plot_spectra


def main():
    #  Initialize the 'GaussPyPrepare' class and read in the parameter settings from 'gausspy+.ini'.
    prepare = GaussPyPrepare(config_file='gausspy+.ini')

    #  The following lines will override the corresponding parameter settings defined in 'gausspy+.ini'.

    #  Path to the FITS cube.
    prepare.path_to_file = os.path.join(
        '..', 'gausspyplus', 'data', 'grs-test_field.fits')
    #  Directory in which all files produced by GaussPy+ are saved.
    prepare.dirpath_gpy = 'decomposition_grs'
    #  Prepare the data cube for the decomposition
    prepare.prepare_cube()
    #  (Optional) Produce a FITS image with the estimated root-mean-square values
    prepare.produce_noise_map()

    #  (Optional) Plot some of the spectra and the estimated signal ranges

    #  Filepath to pickled dictionary of the prepared data.
    path_to_pickled_file = os.path.join(
        'decomposition_grs', 'gpy_prepared', 'grs-test_field.pickle')
    #  Directory in which the plots are saved.
    path_to_plots = os.path.join(
        'decomposition_grs', 'gpy_plots')
    #  Here we select a subregion of the data cube whose spectra we want to plot.
    pixel_range = {'x': [30, 34], 'y': [25, 29]}
    plot_spectra(path_to_pickled_file, path_to_plots=path_to_plots,
                 signal_ranges=True, pixel_range=pixel_range)
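

#  Standard entry point so the preparation example can be run as a standalone script.
if __name__ == '__main__':
    main()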


#  Imports for the decomposition example (same module-path caveat as in the first example).
import os

from gausspyplus.decompose import GaussPyDecompose
from gausspyplus.plotting import plot_spectra


def main():
    #  Initialize the 'GaussPyDecompose' class and read in the parameter settings from 'gausspy+.ini'.
    decompose = GaussPyDecompose(config_file='gausspy+.ini')

    #  The following lines will override the corresponding parameter settings defined in 'gausspy+.ini'.

    #  Filepath to pickled dictionary of the prepared data.
    decompose.path_to_pickle_file = os.path.join('decomposition_grs',
                                                 'gpy_prepared',
                                                 'grs-test_field.pickle')
    #  First smoothing parameter
    decompose.alpha1 = 2.58
    #  Second smoothing parameter
    decompose.alpha2 = 5.14
    #  Suffix for the filename of the pickled dictionary with the decomposition results.
    decompose.suffix = '_g+'
    #  Start the decomposition.
    decompose.decompose()

    #  (Optional) Produce a FITS image showing the number of fitted components
    decompose.produce_component_map()
    #  (Optional) Produce a FITS image showing the reduced chi-square values
    decompose.produce_rchi2_map()

    #  (Optional) Plot some of the spectra and the decomposition results

    #  Filepath to pickled dictionary of the prepared data.
    path_to_pickled_file = decompose.path_to_pickle_file
    #  Filepath to pickled dictionary with the decomposition results
    path_to_decomp_pickle = os.path.join('decomposition_grs', 'gpy_decomposed',
                                         'grs-test_field_g+_fit_fin.pickle')
    #  Directory in which the plots are saved.
    path_to_plots = os.path.join('decomposition_grs', 'gpy_plots')
    #  Here we select a subregion of the data cube whose spectra we want to plot.
    pixel_range = {'x': [30, 34], 'y': [25, 29]}
    plot_spectra(path_to_pickled_file,
                 path_to_plots=path_to_plots,
                 path_to_decomp_pickle=path_to_decomp_pickle,
                 signal_ranges=True,
                 pixel_range=pixel_range)
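

#  Standard entry point so the decomposition example can be run as a standalone script.
if __name__ == '__main__':
    main()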


#  Imports for the training-set example (same module-path caveat as in the first example).
import os

from gausspyplus.training_set import GaussPyTrainingSet
from gausspyplus.plotting import plot_spectra


def main():
    #  Initialize the 'GaussPyTrainingSet' class and read in the parameter settings from 'gausspy+.ini'.
    training = GaussPyTrainingSet(config_file='gausspy+.ini')

    #  The following lines will override the corresponding parameter settings defined in 'gausspy+.ini'.

    #  Path to the FITS cube.
    training.path_to_file = os.path.join('..', 'gausspyplus', 'data',
                                         'grs-test_field.fits')
    #  Directory to which all files produced by GaussPy+ will be saved.
    training.dirpath_gpy = 'decomposition_grs'
    #  Number of spectra included in the training set. We recommend at least 250 spectra for a good training set.
    training.n_spectra = 100
    #  (Optional) The initial seed that is used to create pseudorandom numbers. Change this value in case the spectra chosen for the training set are not ideal.
    training.random_seed = 111
    #  (Optional) We lower the upper limit for the reduced chi-square value so that only good fits are included in the training sample.
    training.rchi2_limit = 1.2
    #  (Optional) This enforces an upper limit for the FWHM values of fitted Gaussian components, in this case 50 channels. We recommend using this FWHM limit only for the creation of the training set.
    training.max_fwhm = 50.
    #  (Optional) Here we specify the filename for the resulting pickled dictionary file. If 'filename_out' is not supplied it will be automatically generated.
    training.filename_out = \
        'grs-test_field-training_set_{}_spectra.pickle'.format(training.n_spectra)

    training.decompose_spectra()  # Create the training set.

    #  (Optional) Plot the fitting results of the training set.

    #  Filepath to pickled dictionary of the training set.
    path_to_training_set = os.path.join(training.dirpath_gpy, 'gpy_training',
                                        training.filename_out)
    #  Directory in which the plots are saved.
    path_to_plots = os.path.join(training.dirpath_gpy, 'gpy_training')
    plot_spectra(
        path_to_training_set,
        path_to_plots=path_to_plots,
        training_set=True,
        n_spectra=20  # Plot 20 random spectra of the training set.
    )
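

#  Standard entry point so the training-set example can be run as a standalone script.
if __name__ == '__main__':
    main()


#  The training set created above is intended for training GaussPy, i.e. for determining the
#  smoothing parameters alpha1 and alpha2 used in the decomposition example. A minimal sketch
#  of that step, assuming the 'GaussPyTraining' class and attribute names from the GaussPy+
#  tutorial (they are not part of this snippet and may differ in other package versions):
from gausspyplus.training import GaussPyTraining

train = GaussPyTraining(config_file='gausspy+.ini')
#  Filepath to the pickled dictionary of the training set created above.
train.path_to_training_set = os.path.join(
    'decomposition_grs', 'gpy_training',
    'grs-test_field-training_set_100_spectra.pickle')
#  Run the training (method and attribute names as assumed above); it reports the optimal
#  values for alpha1 and alpha2.
train.training()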


#  Example #4 (fragment): create a training set for a different (SMC) data cube. This snippet
#  assumes the same imports as the previous example and a 'training' instance created
#  beforehand, e.g. via training = GaussPyTrainingSet(config_file='gausspy+.ini'), with
#  'training.path_to_file' pointing to the SMC FITS cube.
#  Directory to which all files produced by GaussPy+ will be saved.
training.dirpath_gpy = "decomposition"
#  Number of spectra included in the training set. We recommend at least 250 spectra for a good training set.
training.n_spectra = 100
#  (Optional) The initial seed that is used to create pseudorandom numbers. Change this value in case the spectra chosen for the training set are not ideal.
training.random_seed = 111
#  (Optional) We lower the upper limit for the reduced chi-square value so that only good fits are included in the training sample.
training.rchi2_limit = 1.2
#  (Optional) This enforces an upper limit for the FWHM values of fitted Gaussian components, in this case 50 channels. We recommend using this FWHM limit only for the creation of the training set.
training.max_fwhm = 50.0
#  (Optional) Here we specify the filename for the resulting pickled dictionary file. If 'filename_out' is not supplied it will be automatically generated.
training.filename_out = "smc_field-training_set_{}_spectra.pickle".format(
    training.n_spectra)

training.decompose_spectra()  # Create the training set.

#  (Optional) Plot the fitting results of the training set.

#  Filepath to pickled dictionary of the training set.
path_to_training_set = os.path.join(training.dirpath_gpy, "gpy_training",
                                    training.filename_out)
#  Directory in which the plots are saved.
path_to_plots = os.path.join(training.dirpath_gpy, "gpy_training")
plot_spectra(
    path_to_training_set,
    path_to_plots=path_to_plots,
    training_set=True,
    n_spectra=20,  # Plot 20 random spectra of the training set.
)


#  Example #6 (fragment): prepare one of 64 subcubes of an SMC HI data cube and afterwards
#  mosaic the individual noise maps. The indented part assumes it sits inside a loop over the
#  subcube index 'i', in which a GaussPyPrepare instance named 'prepare' was set up for each
#  subcube (as in the first example); 'spectral_cube_functions' is assumed to be imported from
#  gausspyplus.utils.
    #  (Optional) Produce a FITS image with the estimated root-mean-square values
    prepare.produce_noise_map()

    #  (Optional) Plot some of the spectra and the estimated signal ranges

    #  Filepath to pickled dictionary of the prepared data.
    path_to_pickled_file = os.path.join(
        "decomposition", "gpy_prepared",
        "smc_HI_cube_askap_sub_" + str(i) + ".pickle")
    #  Directory in which the plots are saved.
    path_to_plots = os.path.join("decomposition", "gpy_plots")
    #  Here we select a subregion of the data cube whose spectra we want to plot.
    pixel_range = {"x": [30, 34], "y": [25, 29]}
    plot_spectra(
        path_to_pickled_file,
        path_to_plots=path_to_plots,
        signal_ranges=True,
        pixel_range=pixel_range,
    )

components = [
    "decomposition/gpy_maps/smc_HI_cube_askap_sub_" + str(i) +
    "_noise_map.fits" for i in range(64)
]
new_map, new_head = spectral_cube_functions.combine_fields(
    components,
    ncols=8,
    nrows=8,
    path_to_output_file="decomposition/gpy_maps/smc_HI_cube_askap_noise_map.fits",
    save=True,
)
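
#  'combine_fields' also returns the mosaicked data and header (see the assignment above); a
#  minimal sanity check, assuming the returned map is a NumPy array as the variable names suggest:
print("Combined noise map shape:", new_map.shape)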


#  Imports for the spatially coherent refitting example (same module-path caveat as in the
#  first example).
import os

from gausspyplus.spatial_fitting import SpatialFitting
from gausspyplus.decompose import GaussPyDecompose
from gausspyplus.plotting import plot_spectra


def main():
    #  Initialize the 'SpatialFitting' class and read in the parameter settings from 'gausspy+.ini'.
    sp = SpatialFitting(config_file='gausspy+.ini')

    #  The following lines will override the corresponding parameter settings defined in 'gausspy+.ini'.

    #  Filepath to the pickled dictionary of the prepared data.
    sp.path_to_pickle_file = os.path.join('decomposition_grs', 'gpy_prepared',
                                          'grs-test_field.pickle')
    #  Filepath to the pickled dictionary of the decomposition results
    sp.path_to_decomp_file = os.path.join('decomposition_grs',
                                          'gpy_decomposed',
                                          'grs-test_field_g+_fit_fin.pickle')
    #  Try to refit blended fit components
    sp.refit_blended = True
    #  Try to refit spectra with negative residual features
    sp.refit_neg_res_peak = True
    #  Try to refit broad fit components
    sp.refit_broad = True
    #  Flag spectra with non-Gaussian distributed residuals
    sp.flag_residual = True
    #  Do not try to refit spectra with non-Gaussian distributed residuals
    sp.refit_residual = False
    #  Try to refit spectra for which the number of fit components is incompatible with its direct neighbors
    sp.refit_ncomps = True
    #  We set the maximum allowed difference in the number of fitted components compared to the weighted median of all immediate neighbors to 1
    sp.max_diff_comps = 1
    # We set the maximum allowed difference in the number of fitted components between individual neighboring spectra to 2
    sp.max_jump_comps = 2
    # We will flag and try to refit all spectra which show jumps in the number of components of more than 2 to at least two direct neighbors
    sp.n_max_jump_comps = 1
    # Maximum difference in offset positions of fit components for grouping.
    sp.mean_separation = 2.
    # Maximum difference in FWHM values of fit components for grouping.
    sp.fwhm_separation = 4.

    #  Start phase 1 of the spatially coherent refitting
    sp.spatial_fitting()

    #  (Optional) Plot maps of the reduced chi-square values and the number of fitted components

    #  Initialize the 'GaussPyDecompose' class and read in the parameter settings from 'gausspy+.ini'.
    decompose = GaussPyDecompose(config_file='gausspy+.ini')
    #  Filepath to pickled dictionary of the prepared data.
    decompose.path_to_pickle_file = sp.path_to_pickle_file
    #  Filepath to the pickled dictionary with the decomposition results
    path_to_decomp_pickle = os.path.join(
        'decomposition_grs', 'gpy_decomposed',
        'grs-test_field_g+_fit_fin_sf-p1.pickle')
    #  Load the decomposition results
    decompose.load_final_results(path_to_decomp_pickle)
    #  Produce a FITS image showing the number of fitted components
    decompose.produce_component_map()
    #  Produce a FITS image showing the reduced chi-square values
    decompose.produce_rchi2_map()

    #  (Optional) Plot some of the spectra and the decomposition results

    #  Filepath to pickled dictionary of the prepared data.
    path_to_pickled_file = sp.path_to_pickle_file
    #  Directory in which the plots are saved.
    path_to_plots = os.path.join('decomposition_grs', 'gpy_plots')
    #  Here we select a subregion of the data cube whose spectra we want to plot.
    pixel_range = {'x': [30, 34], 'y': [25, 29]}
    plot_spectra(path_to_pickled_file,
                 path_to_plots=path_to_plots,
                 path_to_decomp_pickle=path_to_decomp_pickle,
                 signal_ranges=True,
                 pixel_range=pixel_range)
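

#  Standard entry point so the spatially coherent refitting example can be run as a
#  standalone script.
if __name__ == '__main__':
    main()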