Example #1
    def _project_reference_hdu(self, name_hdr, muse_hdu=None):
        """Project the reference image onto the MUSE field
        """
        if self.use_montage:
            # The original way. Sometimes this introduces an offset
            hdu_repr = montage.reproject_hdu(self.reference_hdu,
                                             header=name_hdr,
                                             exact_size=True)
        else:
            # The mpdaf way
            if muse_hdu is not None:

                wcs_ref = WCS(hdr=self.reference_hdu.header)
                ima_ref = Image(data=self.reference_hdu.data, wcs=wcs_ref)

                wcs_muse = WCS(hdr=muse_hdu.header)
                ima_muse = Image(data=muse_hdu.data, wcs=wcs_muse)

                ima_ref = ima_ref.align_with_image(ima_muse)

                hdu_repr = ima_ref.get_data_hdu()

            else:
                hdu_repr = None
                print(
                    "Warning: provide target HDU when not using montage to reproject"
                )

        return hdu_repr
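A minimal usage sketch of the mpdaf reprojection path taken above, assuming two placeholder FITS files on disk:

from mpdaf.obj import Image

ref = Image('hst_ref.fits')                 # reference image (placeholder name)
muse = Image('muse_white.fits')             # MUSE white-light image (placeholder name)
ref_on_muse = ref.align_with_image(muse)    # resample onto the MUSE pixel grid
ref_on_muse.write('ref_on_muse_grid.fits')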
Example #2
def test_fftconvolve():
    """Image class: testing FFT convolution method."""
    wcs = WCS(cdelt=(0.2, 0.3), crval=(8.5, 12), shape=(40, 30), deg=True)
    data = np.zeros((40, 30))
    data[19, 14] = 1
    ima = Image(wcs=wcs, data=data)
    ima2 = ima.fftconvolve_gauss(center=None,
                                 flux=1.,
                                 fwhm=(20000., 10000.),
                                 peak=False,
                                 rot=60.,
                                 factor=1,
                                 unit_center=u.deg,
                                 unit_fwhm=u.arcsec)

    g = ima2.gauss_fit(verbose=False)
    assert_almost_equal(g.fwhm[0], 20000, 2)
    assert_almost_equal(g.fwhm[1], 10000, 2)
    assert_almost_equal(g.center[0], 8.5)
    assert_almost_equal(g.center[1], 12)
    ima2 = ima.fftconvolve_moffat(center=None,
                                  flux=1.,
                                  a=10000,
                                  q=1,
                                  n=2,
                                  peak=False,
                                  rot=60.,
                                  factor=1,
                                  unit_center=u.deg,
                                  unit_a=u.arcsec)
    m = ima2.moffat_fit(verbose=False)
    assert_almost_equal(m.center[0], 8.5)
    assert_almost_equal(m.center[1], 12)
Example #3
def test_dtype():
    """Image class: testing dtype."""
    wcs = WCS(cdelt=(0.2, 0.3), crval=(8.5, 12), shape=(40, 30), deg=True)
    data = np.zeros((40, 30))
    data[19, 14] = 1
    ima = Image(wcs=wcs, data=data, dtype=int)
    ima2 = ima.fftconvolve_gauss(center=None,
                                 flux=1.,
                                 fwhm=(20000., 10000.),
                                 peak=False,
                                 rot=60.,
                                 factor=1,
                                 unit_center=u.deg,
                                 unit_fwhm=u.arcsec)

    g = ima2.gauss_fit(verbose=False)
    assert_almost_equal(g.fwhm[0], 20000, 2)
    assert_almost_equal(g.fwhm[1], 10000, 2)
    assert_almost_equal(g.center[0], 8.5)
    assert_almost_equal(g.center[1], 12)
    assert_equal(ima2.dtype, np.float64)

    ima3 = ima2.resample(newdim=(32, 24),
                         newstart=None,
                         newstep=ima2.get_step(unit=u.arcsec) * 0.8)
    assert_equal(ima3.dtype, np.float64)
Example #4
def psf_photometry(sources, path, size=15):
    image = Image(path)
    print('start photometry')
    psf = []
    for s in sources:
        x, y = s['ra'], s['dec']
        # Cut a box of +/- size arcsec around the source (truncate expects degrees)
        img = image.truncate(y - size / 3600, y + size / 3600,
                             x - size / 3600, x + size / 3600)
        seg = img.segment()[0]
        fit = seg.moffat_fit(plot=False)
        psf.append(
            (s['id'], fit.center[0], fit.center[1], fit.err_center[0],
             fit.err_center[1], fit.flux, fit.err_flux, fit.peak,
             fit.err_peak, fit.fwhm[0], fit.fwhm[1], fit.err_fwhm[0],
             fit.err_fwhm[1], fit.n, fit.err_n, fit.rot, fit.err_rot,
             fit.cont, fit.err_cont))
        print(seg)
    psf = Table(rows=psf,
                names=[
                    'id', 'center_x', 'center_y', 'center_x_err',
                    'center_y_err', 'flux', 'flux_err', 'peak', 'peak_err',
                    'fwhm_major', 'fwhm_minor', 'fwhm_major_err',
                    'fwhm_minor_err', 'beta', 'beta_err', 'rotation',
                    'rotation_err', 'continuum', 'continuum_err'
                ])

    psf['mag'] = -2.5 * np.log10(psf['flux'])
    psf['mag_err'] = np.abs(2.5 /
                            (psf['flux'] * np.log(10))) * psf['flux_err']
    return psf
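A hypothetical call, assuming a sources table with 'id', 'ra' and 'dec' columns in degrees and a FITS image 'field_image.fits' (placeholder names):

from astropy.table import Table

sources = Table(rows=[(1, 53.162, -27.791)], names=('id', 'ra', 'dec'))
psf_table = psf_photometry(sources, 'field_image.fits', size=15)
print(psf_table['id', 'mag', 'mag_err'])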
Example #5
def slice_cube(r):
    """ Slice field A cube around the center within a given radius in kpc. """
    ###########################################################################
    # Input files
    cubefile = "output_zap.fits"
    imfile = "NGC3311_FieldA_exp1_(white)_IMAGE_FOV_2014-12-27T07:52:48.469.fits"
    ###########################################################################
    # Setting up the parameters of the new cube
    ps = 0.262 # kpc / arcsec
    rarcsec = r / ps
    center = np.array([dec0, ra0])  # dec0, ra0: field center (deg), defined elsewhere in the script
    ymin, xmin = center - rarcsec / 3600.
    ymax, xmax = center + rarcsec / 3600.
    wave = wavelength_array(cubefile, axis=3, extension=1)
    wmin, wmax = wave[0], wave[1800]
    ###########################################################################
    # Loading data and processing
    im = Image(imfile)
    cube = Cube(cubefile)
    ###########################################################################
    # Processing data
    im2 = im.truncate(ymin, ymax, xmin, xmax)
    outim = "NGC3311_FieldA_{0}kpc_image.fits".format(r)
    im2.write(outim)
    outcube = "NGC3311_FieldA_{0}kpc_cube.fits".format(r)
    cube2 = cube.truncate([wmin, ymin, xmin, wmax, ymax, xmax])
    cube2.write(outcube)
    return
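Example call; the center coordinates dec0, ra0 and the wavelength_array helper are assumed to be defined earlier in the original script:

slice_cube(5)   # writes NGC3311_FieldA_5kpc_image.fits and NGC3311_FieldA_5kpc_cube.fits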
Example #6
def test_ee():
    """Image class: testing ensquared energy."""
    wcs = WCS()
    data = np.ones(shape=(6, 5)) * 2
    image1 = Image(data=data, wcs=wcs)
    image1.mask_region((2, 2), (1.5, 1.5),
                       inside=False,
                       unit_center=None,
                       unit_radius=None)

    assert image1.ee() == 9 * 2
    assert image1.ee(frac=True) == 1.0
    ee = image1.ee(center=(2, 2), unit_center=None, radius=1, unit_radius=None)
    assert ee == 4 * 2

    r, eer = image1.eer_curve(center=(2, 2),
                              unit_center=None,
                              unit_radius=None,
                              cont=0)
    assert r[1] == 1.0
    assert eer[1] == 1.0

    size = image1.ee_size(center=(2, 2),
                          unit_center=None,
                          unit_size=None,
                          cont=0)
    assert_almost_equal(size[0], 1.775)
Example #7
def test_background(a370II):
    """Image class: testing background value"""
    wcs = WCS()
    data = np.ones(shape=(6, 5)) * 2
    image1 = Image(data=data, wcs=wcs)
    (background, std) = image1.background()
    assert background == 2
    assert std == 0
    (background, std) = a370II[1647:1732, 618:690].background()
    # compare with IRAF results
    assert (background - std < 1989) & (background + std > 1989)
Example #8
def test_peak(a370II):
    """Image class: testing peak research"""
    wcs = WCS()
    data = np.ones(shape=(6, 5)) * 2
    image1 = Image(data=data, wcs=wcs)
    image1.data[2, 3] = 8
    p = image1.peak()
    assert p['p'] == 2
    assert p['q'] == 3
    p = a370II.peak(center=(790, 875), radius=20, plot=False, unit_center=None,
                    unit_radius=None)
    assert_almost_equal(p['p'], 793.1, 1)
    assert_almost_equal(p['q'], 875.9, 1)
Example #9
    def __init__(self, source=None, **kwargs):
        """Initialise by opening an Image.
        """
        self.verbose = kwargs.pop('verbose', False)
        # Arguments for the plots
        self.title = kwargs.pop('title', "Frame")
        self.scale = kwargs.pop('scale', "log")
        self.vmin = int(kwargs.pop('vmin', 0))  # builtin int (np.int is deprecated)
        self.colorbar = kwargs.pop('colorbar', "v")

        if source is not None:
            self.__dict__.update(source.__dict__)
        else:
            Image.__init__(self, **kwargs)
 
        self.get_fwhm_startend()
Example #10
def test_convolve():

    shape = (3, 12, 25)
    data = np.zeros(shape)
    data[:, 7, 5] = 1.0
    mask = np.zeros(shape, dtype=bool)
    mask[:, 5, 3] = True
    c = generate_cube(data=data, mask=mask, shape=shape,
                      wave=WaveCoord(crval=1, cunit=u.angstrom))

    # Create a symmetric convolution kernel with an even number of elements
    # along one dimension and an odd number along the other dimension.
    # Make the kernel symmetric around (shape-1)//2. This requires that
    # the final column be all zeros.
    kern = np.array([[[0.1, 0.25, 0.1, 0.0],
                      [0.25, 0.50, 0.25, 0.0],
                      [0.1, 0.25, 0.1, 0.0]]])

    # The image should consist of a copy of the convolution kernel, centered
    # such that pixel (kern.shape-1)//2 of the kernel lands at pixel 7,5 of data.
    expected_data = ma.array(data=np.zeros(shape), mask=mask)
    expected_data.data[:, 6:9, 4:8] = kern

    res = c.convolve(kern)
    assert_masked_allclose(res.data, expected_data, atol=1e-15)

    res = c.convolve(Image(data=kern))
    assert_masked_allclose(res.data, expected_data, atol=1e-15)

    res = c.fftconvolve(kern)
    assert_masked_allclose(res.data, expected_data, atol=1e-15)
Example #11
    def __get__(self, obj, owner=None):
        if obj is None:
            return

        try:
            val = obj.__dict__[self.label]
        except KeyError:
            return

        if isinstance(val, str):
            if os.path.isfile(val):
                kind = self.kind
                if kind == 'cube':
                    val = Cube(val)
                elif kind == 'image':
                    val = Image(val)
                elif kind == 'table':
                    val = _format_cat(Table.read(val))
                elif kind == 'array':
                    val = np.loadtxt(val, ndmin=1)
                elif kind == 'spectra':
                    val = load_spectra(val)
                obj.__dict__[self.label] = val
            else:
                val = None

        return val
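A hedged sketch of how this lazy-loading __get__ might be wired into a descriptor class; the LazyFile wrapper, its __init__ and __set__ are assumptions, since only __get__ is shown above:

class LazyFile:
    """Hypothetical descriptor around the __get__ shown above."""

    def __init__(self, label, kind):
        self.label = label   # key used in the owner instance's __dict__
        self.kind = kind     # 'cube', 'image', 'table', 'array' or 'spectra'

    def __set__(self, obj, value):
        # store a file path (or an already-loaded object) on the instance
        obj.__dict__[self.label] = value

    # __get__ as defined above: a path string is replaced by the loaded
    # object the first time the attribute is read


class Results:
    white = LazyFile('white', 'image')

res = Results()
res.white = 'white.fits'   # stores the path
ima = res.white            # loads and caches an mpdaf Image (if the file exists)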
Example #12
def cut_save(filename, position, size):
    #Load image for data and wht, also wcs
    hdu = fits.open(os.getcwd() + '/DATA/' + filename)
    hdr = hdu[0].header
    data = hdu[0].data
    wcs_data = WCS(hdr)

    try:
        #Make the cutout, including the wcs
        cutout_data = Cutout2D(data,
                               position=position,
                               size=size,
                               wcs=wcs_data)
        hdu[0].data = cutout_data.data

        # Plot the cutout image
        plt.imshow(cutout_data.data, origin='lower', cmap='viridis',
                   norm=LogNorm())
        plt.show()

        # Update the header with the cutout WCS
        hdu[0].header.update(cutout_data.wcs.to_header())

        #Write the cutout to a new FITS file
        cutout_filename = 'cut_' + filename
        hdu.writeto(os.getcwd() + '/DATA/' + cutout_filename, overwrite=True)

    except Exception:
        ima = Image(os.getcwd() + '/DATA/' + filename)
        # Pixel box around the header reference pixel
        x_1, x_2 = int(hdr['CRPIX1'] - size[0]), int(hdr['CRPIX1'] + size[0])
        y_1, y_2 = int(hdr['CRPIX2'] - size[1]), int(hdr['CRPIX2'] + size[1])
        ima_cut = Image.copy(ima[x_1:x_2, y_1:y_2])
        hdu[0].data = ima_cut.data

        #Plotting cutout image
        plt.imshow(ima_cut.data,
                   origin='lower',
                   cmap='viridis',
                   norm=LogNorm())
        plt.show()

        #Write the cutout to a new FITS file
        cutout_filename = 'cut_' + filename
        hdu.writeto(os.getcwd() + '/DATA/' + cutout_filename, overwrite=True)

    hdu.close()
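A hypothetical call, cutting a 101x101-pixel region around pixel (250, 300) of ./DATA/field.fits (file name and coordinates are placeholders):

cut_save('field.fits', position=(250, 300), size=(101, 101))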
Example #13
def elliptic_aperture_pix(filename,
                          position,
                          ellipticity,
                          theta,
                          r_end=9,
                          plot=True):
    #Opening FITS, getting data and header
    ima = Image(os.getcwd() + '/DATA/' + filename + '.fits')
    hdr = ima.data_header
    data = ima.data.copy()

    #Galaxy center position and position for apertures
    center = (hdr['CRPIX1'], hdr['CRPIX2'])
    position = position + center

    #Defining radius for circular apertures
    radius = 50 * np.arange(1, r_end)
    aperture = [
        EllipticalAperture(position, i, i * ellipticity, theta) for i in radius
    ]

    if plot:
        #Plot galaxy image with apertures
        fig, ax = plt.subplots(sharex=True, sharey=True)
        ima.data[ima.data < 0] = 0

        ima.plot(ax,
                 scale='log',
                 vmin=0,
                 vmax=0.9 * np.amax(ima.data),
                 colorbar='v')
        for i in range(len(aperture)):
            aperture[i].plot(ax, color='white', lw=2)
        ax.set_title(r'Original image - NGC 3614')
        #plt.savefig('Image_apertures', dpi = 200)

    #Table with apertures data (flux sum)
    phot_table = aperture_photometry(data, aperture)
    for col in phot_table.colnames:
        phot_table[col].info.format = '%.8g'
    print(phot_table)

    return data, position, aperture
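An example call, assuming ./DATA/NGC3614.fits exists (the function appends '.fits') and the galaxy centre sits at the header reference pixel, so the offset is zero:

data, position, apertures = elliptic_aperture_pix('NGC3614', np.array([0.0, 0.0]),
                                                  ellipticity=0.6, theta=0.5,
                                                  r_end=6, plot=False)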
Example #14
    def open_image(self, image_folder="./", image_name=None):
        """Open the image
        """
        self.image_folder = image_folder
        self.image_name = image_name
        if (image_name is None) or (not os.path.isfile(joinpath(image_folder, image_name))):
            self._isImage = False
        else:
            self._isImage = True
            self.image_galaxy = Image(joinpath(image_folder, image_name))
Example #15
def test_arithmetric():
    """Spectrum class: testing arithmetic functions"""
    wave = WaveCoord(crpix=2.0, cdelt=3.0, crval=0.5, cunit=u.nm)
    spectrum1 = Spectrum(data=np.array([0.5, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
                         wave=wave)
    spectrum2 = spectrum1 > 6  # [-,-,-,-,-,-,-,7,8,9]
    # +
    spectrum3 = spectrum1 + spectrum2
    assert spectrum3.data.data[3] == 3
    assert spectrum3.data.data[8] == 16
    spectrum3 = 4.2 + spectrum1
    assert spectrum3.data.data[3] == 3 + 4.2
    # -
    spectrum3 = spectrum1 - spectrum2
    assert spectrum3.data.data[3] == 3
    assert spectrum3.data.data[8] == 0
    spectrum3 = spectrum1 - 4.2
    assert spectrum3.data.data[8] == 8 - 4.2
    # *
    spectrum3 = spectrum1 * spectrum2
    assert spectrum3.data.data[8] == 64
    spectrum3 = 4.2 * spectrum1
    assert spectrum3.data.data[9] == 9 * 4.2
    # /
    spectrum3 = spectrum1 / spectrum2
    # Divide functions that have a validity domain return the masked constant
    # whenever the input is masked or falls outside the validity domain.
    assert spectrum3.data.data[8] == 1
    spectrum3 = 1.0 / (4.2 / spectrum1)
    assert spectrum3.data.data[5] == 5 / 4.2

    # with cube
    wcs = WCS()
    cube1 = Cube(data=np.ones(shape=(10, 6, 5)), wave=wave, wcs=wcs)
    cube2 = spectrum1 + cube1
    sp1data = spectrum1.data[:, np.newaxis, np.newaxis]
    assert_array_almost_equal(cube2.data, sp1data + cube1.data)

    cube2 = spectrum1 - cube1
    assert_array_almost_equal(cube2.data, sp1data - cube1.data)

    cube2 = spectrum1 * cube1
    assert_array_almost_equal(cube2.data, sp1data * cube1.data)

    cube2 = spectrum1 / cube1
    assert_array_almost_equal(cube2.data, sp1data / cube1.data)

    # spectrum * image
    data = np.ones(shape=(6, 5)) * 2
    image1 = Image(data=data, wcs=wcs)
    cube2 = spectrum1 * image1
    assert_array_almost_equal(cube2.data,
                              sp1data * image1.data[np.newaxis, :, :])
Example #16
def a370II():
    """Return a test image from a real observation """

    # The CD matrix of this image includes a small shear term which means
    # that the image can't be displayed accurately with rectangular pixels. All
    # of the functions in MPDAF assume rectangular pixels, so replace the CD
    # matrix with a similar one that doesn't have a shear component.
    ima = Image(get_data_file('obj', 'a370II.fits'))
    ima.wcs.set_cd(
        np.array([[2.30899476e-5, -5.22301199e-5],
                  [-5.22871997e-5, -2.30647413e-5]]))
    return ima
Example #17
def error_ponderation(subima_err):
    """Add a distance-to-center weighting to an error sub-image."""
    (l, k) = np.shape(subima_err)
    xc, yc = l / 2, k / 2
    # Map of the (scaled) distance of each pixel to the image center
    L = []
    for i in range(0, l):
        for j in range(0, k):
            L.append((1 / l) * ((i - xc)**2 + (j - yc)**2)**0.5)
    L = np.array(L)
    L = np.reshape(L, (l, k))

    # Return a new Image with the distance weighting added to the errors
    subima_err = Image(data=(subima_err.data + L), wcs=subima_err.wcs)
    return subima_err
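Illustrative use on a small, constant error sub-image (values are placeholders):

err = Image(data=np.full((11, 11), 0.05))
weighted_err = error_ponderation(err)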
Example #18
def test_create_psf_cube():
    src = Source.from_file(get_data_file('sdetect', 'origin-00026.fits'))
    cube = Cube(get_data_file('sdetect', 'subcub_mosaic.fits'))
    src.add_FSF(cube)

    wcs = src.images['MUSE_WHITE'].wcs
    shape = src.images['MUSE_WHITE'].shape
    # a, b, beta, field = src.get_FSF()
    a = 0.862
    b = -3.46e-05
    beta = 2.8
    psf = b * cube.wave.coord() + a

    # Gaussian
    gauss = create_psf_cube(psf.shape + shape, psf, wcs=wcs)
    im = Image(data=gauss[0], wcs=wcs)
    res = im.gauss_fit()
    assert_almost_equal(wcs.sky2pix(res.center)[0], [12., 12.])
    assert np.allclose(res.fwhm, psf[0])
    assert np.allclose(res.flux, 1.0)

    # Moffat
    moff = create_psf_cube(psf.shape + shape, psf, wcs=wcs, beta=beta)
    im = Image(data=moff[0], wcs=wcs)
    res = im.moffat_fit()
    assert_almost_equal(wcs.sky2pix(res.center)[0], [12., 12.])
    assert np.allclose(res.fwhm, psf[0])
    assert np.allclose(res.flux, 1.0, atol=1e-2)
Example #19
def test_convolve():
    """Image class: testing discrete convolution method."""

    shape = (12, 25)
    wcs = WCS(cdelt=(1.0, 1.0), crval=(0.0, 0.0), shape=shape)
    data = np.zeros(shape)
    data[7, 5] = 1.0
    mask = np.zeros(shape, dtype=bool)
    mask[5, 3] = True
    ima = Image(wcs=wcs, data=data, mask=mask, copy=False)

    # Create a symmetric convolution kernel with an even number of elements
    # along one dimension and an odd number along the other dimension.
    # Make the kernel symmetric around (shape-1)//2. This requires that
    # the final column be all zeros.
    kern = np.array([[0.1, 0.25, 0.1, 0.0],
                     [0.25, 0.50, 0.25, 0.0],
                     [0.1, 0.25, 0.1, 0.0]])

    # The image should consist of a copy of the convolution kernel, centered
    # such that pixel (kern.shape-1)//2 of the kernel lands at pixel 7,5 of data.
    expected_data = np.ma.array(data=np.zeros(shape), mask=mask)
    expected_data.data[6:9, 4:8] = kern

    res = ima.convolve(kern)
    assert_masked_allclose(res.data, expected_data)

    res = ima.convolve(Image(data=kern))
    assert_masked_allclose(res.data, expected_data)

    res = ima.fftconvolve(kern)
    assert_masked_allclose(res.data, expected_data, atol=1e-15)
Example #20
def test_rebin():
    """Image class: testing rebin methods."""
    wcs = WCS(crval=(0, 0))
    data = np.arange(30).reshape(6, 5)
    image1 = Image(data=data, wcs=wcs, var=np.ones(data.shape) * 0.5)
    image1.mask_region((2, 2), (1.5, 1.5),
                       inside=False,
                       unit_center=None,
                       unit_radius=None)

    # The test data array looks as follows:
    #
    # ---- ---- ---- ---- ----
    # ----  6.0  7.0  8.0 ----
    # ---- 11.0 12.0 13.0 ----
    # ---- 16.0 17.0 18.0 ----
    # ---- ---- ---- ---- ----
    # ---- ---- ---- ---- ----
    #
    # Where ---- signifies a masked value.
    #
    # After reducing both dimensions by a factor of 2, we should get a
    # 3x2 data array whose values are the means of the unmasked pixels
    # in each 2x2 block:
    #
    #  ---- ---- => 6/1         ---- ---- => (7+8)/2
    #  ----  6.0                 7.0  8.0
    #
    #  ---- 11.0 => (11+16)/2   12.0 13.0 => (12+13+17+18)/4
    #  ---- 16.0                17.0 18.0
    #
    #  ---- ---- => ----        ---- ---- => ----
    #  ---- ----                ---- ----

    expected = np.ma.array(data=[[6.0, 7.5], [13.5, 15], [0.0, 0.0]],
                           mask=[[False, False], [False, False], [True, True]])
    image2 = image1.rebin(2)
    assert_masked_allclose(image2.data, expected)

    image2 = image1.rebin(factor=(2, 2))
    assert_masked_allclose(image2.data, expected)

    # The variances of the original pixels were all 0.5, so taking the
    # mean of N of these should give the mean a variance of 0.5/N.
    # Given the number of pixels averaged in each of the above means,
    # we thus expect the variance array to look as follows.

    expected = np.ma.array(data=[[0.5, 0.25], [0.25, 0.125], [0.0, 0.0]],
                           mask=[[False, False], [False, False], [True, True]])
    assert_masked_allclose(image2.var, expected)

    # Check the WCS information.

    start = image2.get_start()
    assert start[0] == 0.5
    assert start[1] == 0.5
Example #21
def test_segmap():
    segfile = get_data_file('segmap', 'segmap.fits')
    img = Image(segfile)
    refdata = np.arange(14)

    for arg in (segfile, img, img.data):
        segmap = Segmap(arg)
        assert segmap.img.shape == (90, 90)
        assert str(segmap.img.data.dtype) == '>i8'
        assert np.max(segmap.img._data) == 13
        assert_array_equal(np.unique(segmap.img._data), refdata)

    assert_array_equal(segmap.copy().img.data, segmap.img.data)

    cmap = segmap.cmap()
    assert cmap.N == 14  # nb of values in the segmap
Example #22
def createSemiRealSource(srcData, cube, SNR):
    """
    Create a Source object with a cube of data mixing real noise from a MUSE (sub-)cube and signal
    of a simulated halo object. The object is centered in the real MUSE subcube

    """
    dec, ra = cube.wcs.pix2sky([cube.shape[1] // 2, cube.shape[2] // 2])[0]
    lmbda = cube.wave.coord(cube.shape[0] // 2)
    cube.data = cube.data + SNR * srcData
    src = Source.from_data(4000,
                           ra,
                           dec,
                           origin='Simulated',
                           cubes={'MUSE_CUBE': cube})
    src.add_line(['LBDA_OBS', 'LINE'], [lmbda, "LYALPHA"])
    # `obj` (the simulated halo object providing maskSources) is assumed to be
    # available from the enclosing scope of the original script.
    src.images['TRUTH_DET_BIN_ALL'] = Image(data=obj.maskSources > 0)
    return src
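A hypothetical call, assuming a simulated signal array matching the cube shape and that the simulated object `obj` referenced inside the function is defined:

cube = Cube('subcube.fits')              # placeholder file name
simulated_halo = np.zeros(cube.shape)    # placeholder signal array
src = createSemiRealSource(simulated_halo, cube, SNR=5.0)
src.write('source-04000.fits')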
Example #23
def Image_conv(im, tab, unmask=True):
    """ Defines the convolution between an Image object and an array.
    Designed to be used with the multiprocessing function
    'FSF_convolution_multiprocessing'.

        :param im: Image object
        :type im: class 'mpdaf.obj.Image'
        :param tab: array containing the convolution kernel
        :type tab: array
        :param unmask: if True use .data of masked array (faster computation)
        :type unmask: bool
        :return: array
        :rtype: array

    """
    if unmask is True:
        res = ssl.fftconvolve(im.data.data, tab, 'full')
    else:
        res = ssl.fftconvolve(im.data, tab, 'full')
    a, b = tab.shape
    im_tmp = Image(data=res[int(a-1)//2:im.data.shape[0] + (a-1)//2,
                            (b-1)//2:im.data.shape[1]+(b-1)//2])
    return im_tmp.data
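Illustrative use, assuming `ssl` is scipy.signal (as imported in the original module) and a small normalized kernel:

import numpy as np
from mpdaf.obj import Image

im = Image(data=np.random.rand(40, 40))
kernel = np.ones((5, 5)) / 25.0
convolved = Image_conv(im, kernel)   # array with the same shape as im.data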
Example #24
def interpolate_continuum(ima,
                          JP,
                          IP,
                          I,
                          J,
                          pcont,
                          cont,
                          JP3,
                          IP3,
                          tcont,
                          plot=False,
                          F_min=0):
    (h, w) = np.shape(ima)
    coords = []
    map_data = ima.data
    for i in range(0, len(IP)):
        if cont[i] > 0:
            IP3.append(int(IP[i]))
            JP3.append(int(JP[i]))
            tcont.append(cont[i])
    for i in range(0, len(I)):
        if pcont[i] > 0:
            IP3.append(int(I[i]))
            JP3.append(int(J[i]))
            tcont.append(pcont[i])
    (n, m, l, k) = (np.amax(IP3), np.amax(JP3), np.amin(IP3), np.amin(JP3))

    x = np.linspace(0, h - 1, h)
    y = np.linspace(0, w - 1, w)
    X, Y = np.meshgrid(x, y)
    grid_z0 = interpolate.griddata((JP3, IP3),
                                   np.array(tcont), (X, Y),
                                   method='nearest')
    # plt.imshow(grid_z0.T, origin='lower')
    grid = Image(data=(grid_z0.T), wcs=ima.wcs)
    grid.gaussian_filter(sigma=5, inplace=True)
    if plot:
        fig = plt.figure()
        grid.plot(scale='log', vmin=0, vmax=np.amax(ima.data), colorbar='v')
    return grid
Example #25
s_n_mask = np.load('out_r4_3.npy')
all_pixels = []
for i in range(len(s_n_mask)):
    posi = [s_n_mask[i][0], s_n_mask[i][1]]
    all_pixels.append(posi)

all_pixels = np.asarray(all_pixels)

datacube = Cube('nor_cut_cube.fits')

# Spatial dimensions of the cube (axes are wavelength, y, x)
xlen = datacube.shape[1]
ylen = datacube.shape[2]

wcs1 = WCS(crval=0, cdelt=0.2)
MyData = np.ones((xlen, ylen))
ima = Image(data=MyData, wcs=wcs1)

for i in range(xlen):
    for j in range(ylen):
        par = params.valuesdict()  # `params` is assumed to be an lmfit Parameters object defined earlier in the script
        incli = par['incli']
        col_denst = par['col_dens']
        h = par['height']
        bes = par['dop_param']
        v_max = par['vel_max']
        h_vt = par['h_v']

        csize = par['csize']
        r_0t = par['r_0']

        h_v = 10**h_vt
Example #26
    def write(self, path=None, erase=False):
        """Save the current session in a folder that will have the name of the
        ORIGIN object (self.name).

        The ORIGIN.load(folder, newname=None) method can then be used to reload
        the session. The newname parameter lets the user load a session but
        continue working in a new one.

        Parameters
        ----------
        path : str
            Path where the folder (self.name) will be stored.
        erase : bool
            Remove the folder if it exists.

        """
        self._loginfo("Writing...")

        # adapt session if path changes
        if path is not None and path != self.path:
            if not os.path.exists(path):
                raise ValueError(f"path does not exist: {path}")
            self.path = path
            outpath = os.path.join(path, self.name)
            # copy outpath to the new path
            shutil.copytree(self.outpath, outpath)
            self.outpath = outpath
            self._setup_logfile(self.logger)

        if erase:
            shutil.rmtree(self.outpath)
        os.makedirs(self.outpath, exist_ok=True)

        # PSF
        if isinstance(self.PSF, list):
            for i, psf in enumerate(self.PSF):
                cube = Cube(data=psf, mask=np.ma.nomask, copy=False)
                cube.write(os.path.join(self.outpath,
                                        "cube_psf_%02d.fits" % i))
        else:
            cube = Cube(data=self.PSF, mask=np.ma.nomask, copy=False)
            cube.write(os.path.join(self.outpath, "cube_psf.fits"))

        if self.wfields is not None:
            for i, wfield in enumerate(self.wfields):
                im = Image(data=wfield, mask=np.ma.nomask)
                im.write(os.path.join(self.outpath, "wfield_%02d.fits" % i))

        if self.ima_white is not None:
            self.ima_white.write("%s/ima_white.fits" % self.outpath)

        for step in self.steps.values():
            step.dump(self.outpath)

        # parameters in .yaml
        with open(f"{self.outpath}/{self.name}.yaml", "w") as stream:
            dump_yaml(self.param, stream)

        # step3 - saving this manually for now
        if self.nbAreas is not None:
            if self.testO2 is not None:
                for area in range(1, self.nbAreas + 1):
                    np.savetxt("%s/testO2_%d.txt" % (self.outpath, area),
                               self.testO2[area - 1])
            if self.histO2 is not None:
                for area in range(1, self.nbAreas + 1):
                    np.savetxt("%s/histO2_%d.txt" % (self.outpath, area),
                               self.histO2[area - 1])
            if self.binO2 is not None:
                for area in range(1, self.nbAreas + 1):
                    np.savetxt("%s/binO2_%d.txt" % (self.outpath, area),
                               self.binO2[area - 1])

        self._loginfo("Current session saved in %s", self.outpath)
Example #27
    def load(cls, folder, newname=None, loglevel=None, logcolor=None):
        """Load a previous session of ORIGIN.

        ORIGIN.write() method saves a session in a folder that has the name of
        the ORIGIN object (self.name).

        Parameters
        ----------
        folder : str
            Folder name (with the relative path) where the ORIGIN data
            have been stored.
        newname : str
            New name for this session. This parameter lets the user load a
            previous session but continue in a new one. If None, the user will
            continue the loaded session.
        loglevel : str
            Level for the logger (by default reuse the saved level).
        logcolor : bool
            Use color for the logger levels.

        """
        path = os.path.dirname(os.path.abspath(folder))
        name = os.path.basename(folder)

        with open(f"{folder}/{name}.yaml", "r") as stream:
            param = load_yaml(stream)

        if "FWHM PSF" in param:
            FWHM_PSF = np.asarray(param["FWHM PSF"])
        else:
            FWHM_PSF = None

        if "LBDA_FWHM PSF" in param:
            LBDA_FWHM_PSF = np.asarray(param["LBDA FWHM PSF"])
        else:
            LBDA_FWHM_PSF = None

        if os.path.isfile(param["PSF"]):
            PSF = param["PSF"]
        else:
            if os.path.isfile("%s/cube_psf.fits" % folder):
                PSF = "%s/cube_psf.fits" % folder
            else:
                PSF_files = glob.glob("%s/cube_psf_*.fits" % folder)
                if len(PSF_files) == 0:
                    PSF = None
                elif len(PSF_files) == 1:
                    PSF = PSF_files[0]
                else:
                    PSF = sorted(PSF_files)
        wfield_files = glob.glob("%s/wfield_*.fits" % folder)
        if len(wfield_files) == 0:
            wfields = None
        else:
            wfields = sorted(wfield_files)

        # step0
        if os.path.isfile("%s/ima_white.fits" % folder):
            ima_white = Image("%s/ima_white.fits" % folder)
        else:
            ima_white = None

        if newname is not None:
            # copy outpath to the new path
            shutil.copytree(os.path.join(path, name),
                            os.path.join(path, newname))
            name = newname

        loglevel = loglevel if loglevel is not None else param["loglevel"]
        logcolor = logcolor if logcolor is not None else param["logcolor"]

        obj = cls(
            path=path,
            name=name,
            param=param,
            imawhite=ima_white,
            loglevel=loglevel,
            logcolor=logcolor,
            filename=param["cubename"],
            fieldmap=param["fieldmap"],
            wfields=wfields,
            profiles=param["profiles"],
            PSF=PSF,
            FWHM_PSF=FWHM_PSF,
            LBDA_FWHM_PSF=LBDA_FWHM_PSF,
        )

        for step in obj.steps.values():
            step.load(obj.outpath)

        # special case for step3
        NbAreas = param.get("nbareas")
        if NbAreas is not None:
            if os.path.isfile("%s/testO2_1.txt" % folder):
                obj.testO2 = [
                    np.loadtxt("%s/testO2_%d.txt" % (folder, area), ndmin=1)
                    for area in range(1, NbAreas + 1)
                ]
            if os.path.isfile("%s/histO2_1.txt" % folder):
                obj.histO2 = [
                    np.loadtxt("%s/histO2_%d.txt" % (folder, area), ndmin=1)
                    for area in range(1, NbAreas + 1)
                ]
            if os.path.isfile("%s/binO2_1.txt" % folder):
                obj.binO2 = [
                    np.loadtxt("%s/binO2_%d.txt" % (folder, area), ndmin=1)
                    for area in range(1, NbAreas + 1)
                ]

        return obj
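Typical usage, assuming 'my_run' is a session folder previously written by ORIGIN.write() (names are placeholders):

orig = ORIGIN.load('my_run', newname='my_run_v2')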
Example #28
    def plot_NB(self, src_ind, ax1=None, ax2=None, ax3=None):
        """Plot the narrow band images.

        Parameters
        ----------
        src_ind : int
            Index of the object in self.Cat0.
        ax1 : matplotlib.Axes
            The Axes instance in which the NB image around the source is drawn.
        ax2 : matplotlib.Axes
            The Axes instance in which another NB image, used as a check, is drawn.
        ax3 : matplotlib.Axes
            The Axes instance in which the difference is drawn.

        """
        if self.Cat0 is None:
            raise ValueError("Run the step 05 to initialize self.Cat0")

        if ax1 is None and ax2 is None and ax3 is None:
            fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 4))

        # Coordinates of the source
        x0 = self.Cat0[src_ind]["x0"]
        y0 = self.Cat0[src_ind]["y0"]
        z0 = self.Cat0[src_ind]["z0"]
        # Larger spatial ranges for the plots
        longxy0 = 20
        y01 = max(0, y0 - longxy0)
        y02 = min(self.shape[1], y0 + longxy0 + 1)
        x01 = max(0, x0 - longxy0)
        x02 = min(self.shape[2], x0 + longxy0 + 1)
        # Coordinates in this window
        y00 = y0 - y01
        x00 = x0 - x01
        # spectral profile
        num_prof = self.Cat0[src_ind]["profile"]
        profil0 = self.profiles[num_prof]
        # length of the spectral profile
        profil1 = profil0[profil0 > 1e-13]
        long0 = profil1.shape[0]
        # half-length of the spectral profile
        longz = long0 // 2
        # spectral range
        intz1 = max(0, z0 - longz)
        intz2 = min(self.shape[0], z0 + longz + 1)
        # subcube for the plot
        cube_test_plot = self.cube_raw[intz1:intz2, y01:y02, x01:x02]
        wcs = self.wcs[y01:y02, x01:x02]
        # control cube
        nb_ranges = 3
        if (z0 + longz + nb_ranges * long0) < self.shape[0]:
            intz1c = intz1 + nb_ranges * long0
            intz2c = intz2 + nb_ranges * long0
        else:
            intz1c = intz1 - nb_ranges * long0
            intz2c = intz2 - nb_ranges * long0
        cube_controle_plot = self.cube_raw[intz1c:intz2c, y01:y02, x01:x02]
        # (1/sqrt(2)) * difference of the 2 subcubes
        diff_cube_plot = (1 / np.sqrt(2)) * (cube_test_plot -
                                             cube_controle_plot)

        if ax1 is not None:
            ax1.plot(x00, y00, "m+")
            ima_test_plot = Image(data=cube_test_plot.sum(axis=0), wcs=wcs)
            title = "cube test - (%d,%d)\n" % (x0, y0)
            title += "lambda=%d int=[%d,%d[" % (z0, intz1, intz2)
            ima_test_plot.plot(colorbar="v", title=title, ax=ax1)
            ax1.get_xaxis().set_visible(False)
            ax1.get_yaxis().set_visible(False)

        if ax2 is not None:
            ax2.plot(x00, y00, "m+")
            ima_controle_plot = Image(data=cube_controle_plot.sum(axis=0),
                                      wcs=wcs)
            title = "check - (%d,%d)\n" % (x0, y0) + "int=[%d,%d[" % (intz1c,
                                                                      intz2c)
            ima_controle_plot.plot(colorbar="v", title=title, ax=ax2)
            ax2.get_xaxis().set_visible(False)
            ax2.get_yaxis().set_visible(False)

        if ax3 is not None:
            ax3.plot(x00, y00, "m+")
            ima_diff_plot = Image(data=diff_cube_plot.sum(axis=0), wcs=wcs)
            title = "Difference narrow band - (%d,%d)\n" % (
                x0, y0) + "int=[%d,%d[" % (
                    intz1c,
                    intz2c,
                )
            ima_diff_plot.plot(colorbar="v", title=title, ax=ax3)
            ax3.get_xaxis().set_visible(False)
            ax3.get_yaxis().set_visible(False)
Example #29
def test_mask():
    """Image class: testing mask functionalities"""
    wcs = WCS()
    data = np.ones(shape=(6, 5)) * 2

    # A region of half-width=1 and half-height=1 should have a size of
    # 2x2 pixels. A 2x2 region of pixels has a center at the shared
    # corner of the 4 pixels, and the closest corner to the requested
    # center of 2.1,1.8 is 2.5,1.5, so we expect the square of unmasked pixels
    # to be pixels 2,3 along the Y axis, and pixels 1,2 along the X axis.
    image1 = Image(data=data, wcs=wcs)
    image1.mask_region((2.1, 1.8), (1, 1), inside=False, unit_center=None,
                       unit_radius=None)
    expected_mask = np.array([[1, 1, 1, 1, 1],
                              [1, 1, 1, 1, 1],
                              [1, 0, 0, 1, 1],
                              [1, 0, 0, 1, 1],
                              [1, 1, 1, 1, 1],
                              [1, 1, 1, 1, 1]], dtype=bool)
    assert_array_equal(image1._mask, expected_mask)

    # Test that inside=True gives the opposite result
    image1.unmask()
    image1.mask_region((2.1, 1.8), (1, 1), inside=True, unit_center=None,
                       unit_radius=None)
    assert_array_equal(image1._mask, ~expected_mask)

    # And test with a 90° rotation, which should give the same result
    image1.unmask()
    image1.mask_region((2.1, 1.8), (1, 1), inside=True, unit_center=None,
                       unit_radius=None, posangle=90)
    assert_array_equal(image1._mask, ~expected_mask)

    # Try exactly the same experiment as the above, except that the center
    # and size of the region are specified in world-coordinates instead of
    # pixels.
    wcs = WCS(deg=True)
    image1 = Image(data=data, wcs=wcs)
    image1.mask_region(wcs.pix2sky([2.1, 1.8]), (3600, 3600), inside=False)
    assert_array_equal(image1._mask, expected_mask)

    # And same with a rotation
    image1.unmask()
    image1.mask_region(wcs.pix2sky([2.1, 1.8]), (3600, 3600), inside=True,
                       posangle=90)
    assert_array_equal(image1._mask, ~expected_mask)

    # Mask around a region of half-width and half-height 1.1 pixels,
    # specified in arcseconds, centered close to pixel 2.4,3.8. This
    # ideally corresponds to a region of 2.2x2.2 pixels. The closest
    # possible size is 2x2 pixels. A region of 2x2 pixels has its
    # center at the shared corner of these 4 pixels, and the nearest
    # corner to the desired central index of (2.4,3.8) is (2.5,3.5).
    # So all of the image should be masked, except for a 2x2 area of
    # pixel indexes 2,3 along the Y axis and pixel indexes 3,4 along
    # the X axis.
    image1.unmask()
    image1.mask_region(wcs.pix2sky([2.4, 3.8]), 1.1 * 3600.0, inside=False)
    expected_mask = np.array([[1, 1, 1, 1, 1],
                              [1, 1, 1, 1, 1],
                              [1, 1, 1, 0, 0],
                              [1, 1, 1, 0, 0],
                              [1, 1, 1, 1, 1],
                              [1, 1, 1, 1, 1]], dtype=bool)
    assert_array_equal(image1._mask, expected_mask)

    # Mask outside an elliptical region centered at pixel 3.5,3.5.
    # The boolean expected_mask array given below was a verified
    # output of mask_ellipse() for the specified ellipse parameters.
    data = np.ones(shape=(8, 8))
    image1 = Image(data=data, wcs=wcs)
    image1.mask_ellipse([3.5, 3.5], (2.5, 3.5), 45.0, unit_radius=None,
                        unit_center=None, inside=False)
    expected_mask = np.array([
        [1, 1, 1, 1, 1, 1, 1, 1],
        [1, 1, 1, 0, 0, 0, 1, 1],
        [1, 1, 0, 0, 0, 0, 0, 1],
        [1, 0, 0, 0, 0, 0, 0, 1],
        [1, 0, 0, 0, 0, 0, 0, 1],
        [1, 0, 0, 0, 0, 0, 1, 1],
        [1, 1, 0, 0, 0, 1, 1, 1],
        [1, 1, 1, 1, 1, 1, 1, 1]],
        dtype=bool)
    assert_array_equal(image1._mask, expected_mask)

    # Use np.where to select the masked pixels and check that mask_selection()
    # then reproduces the same mask.
    ksel = np.where(image1.data.mask)
    image1.unmask()
    image1.mask_selection(ksel)
    assert_array_equal(image1._mask, expected_mask)

    # Check inside=True
    image1.unmask()
    image1.mask_ellipse([3.5, 3.5], (2.5, 3.5), 45.0, unit_radius=None,
                        unit_center=None, inside=True)
    assert_array_equal(image1._mask, ~expected_mask)
Example #30
    def convolve(self, basename, main_beam):

        outName = basename.split('.fits')[0]
        outName = outName + '_cv.fits'
        dat, baseheader = fint.openFile(basename)
        basefile = fits.open(basename)
        #baseheader = basefile[0].header
        basedata = Image(basename)

        if 'NAXIS3' in baseheader:
            del baseheader['NAXIS3']
        if 'NAXIS4' in baseheader:
            del baseheader['NAXIS4']

        aaa = basedata.data

        beam = np.array([float(baseheader['BMAJ']), float(baseheader['BMIN'])])
        print(main_beam, beam)
        if main_beam[0] > beam[0] and main_beam[1] > beam[1]:
            bx = np.sqrt(main_beam[0] * main_beam[0] - beam[0] * beam[0])
            by = np.sqrt(main_beam[1] * main_beam[1] - beam[1] * beam[1])
            #bx= main_beam[0]
            #by = main_beam[1]
            if 'CDELT1' in baseheader:
                pix_size = -baseheader['CDELT1']
            elif 'CD1_1' in baseheader:
                pix_size = -baseheader['CD1_1']

            beam_area = 2 * np.pi * beam[0] / 2.35482 * beam[1] / 2.35482
            main_beam_area = 2 * np.pi * main_beam[0] / 2.35482 * main_beam[
                1] / 2.35482

            number_pix_beam = beam_area / (pix_size * pix_size)

            #aaa = np.divide(aaa,number_pix_beam)
            #aaa = np.squeeze(aaa)
            #basedata.data = np.squeeze(basedata.data)
            #basedata.data=aaa
            #print basedata.shape

            newdata = Image.fftconvolve_gauss(basedata,
                                              center=None,
                                              flux=1,
                                              peak=True,
                                              factor=1,
                                              fwhm=(by, bx),
                                              unit_center=u.degree,
                                              unit_fwhm=u.degree,
                                              inplace=False)

            aaa = np.array(newdata.data)

            result = np.divide(aaa, np.power(main_beam_area / beam_area, 2))

            #result = np.multiply(aaa,main_beam_area/(pix_size*pix_size))

            baseheader['BMAJ'] = main_beam[0]
            baseheader['BMIN'] = main_beam[1]

            fits.writeto(outName, result, baseheader, overwrite=True)

        else:
            outName = basename

        return outName
Example #31
def create_source(
    source_id,
    source_table,
    source_lines,
    origin_params,
    cube_cor_filename,
    cube_std_filename,
    mask_filename,
    skymask_filename,
    spectra_fits_filename,
    segmaps,
    version,
    source_ts,
    profile_fwhm,
    *,
    author="",
    nb_fwhm=2,
    expmap_filename=None,
    save_to=None,
):
    """Create a MPDAF source.

    This function creates an MPDAF Source object for the ORIGIN source.

    Parameters
    ----------
    source_id : int
        Identifier for the source in the source and line tables.
    source_table : astropy.table.Table
        Catalogue of sources like the Cat3_sources one.
    source_lines : astropy.table.Table
        Catalogue of lines like the Cat3_lines one.
    origin_params : dict
        Dictionary of the parameters for the ORIGIN run.
    cube_cor_filename : str
        Name of the file containing the correlation cube of the ORIGIN run.
    cube_std_filename : str
        Name of the file containing the std cube of the ORIGIN run.
    mask_filename : str
        Name of the file containing the mask of the source.
    skymask_filename : str
        Name of the file containing the sky mask of the source.
    spectra_fits_filename : str
        Name of the FITS file containing the spectra of the lines.
    segmaps : dict(str: str)
        Dictionary mapping each segmap type to the associated FITS file name.
    version : str
        Version number stored in the source.
    source_ts : str
        Time stamp for when the source was created.
    profile_fwhm : list of int
        List of line profile FWHM in pixel. The index in the list is the
        profile number.
    author : str
        Name of the author.
    nb_fwhm : float
        Factor multiplying the FWHM of the line to compute the width of the
        narrow band image.
    expmap_filename : str
        Name of the file containing the exposure map.  If not None, a cut-out
        of the exposure map will be added to the source file.
    save_to : str
        If not None, the source will be saved to the given file.

    Returns
    -------
    mpdaf.sdetect.Source or None
        If save_to is used, the function returns None.

    """
    logger = logging.getLogger(__name__)

    # [0] is to get a Row not a table.
    source_table = source_table.filled()
    source_info = source_table[source_table["ID"] == source_id][0]

    # The mask size is used for the cut-out size.
    mask = Image(mask_filename)
    mask_size = mask.shape[0]

    data_cube = Cube(origin_params["cubename"], convert_float64=False)

    origin = (
        "ORIGIN",
        origin_version,
        os.path.basename(origin_params["cubename"]),
        data_cube.primary_header.get("CUBE_V", ""),
    )
    source = Source.from_data(
        source_info["ID"], source_info["ra"], source_info["dec"], origin
    )

    # Information about the source in the headers
    source.header["SRC_V"] = version, "Source version"
    source.header["SRC_TS"] = source_ts, "Timestamp of the source creation"
    source.header["CAT3_TS"] = (
        source_table.meta["CAT3_TS"],
        "Timestamp of the catalog creation",
    )
    source.add_history("Source created with ORIGIN", author)

    source.header["OR_X"] = source_info["x"], "x position in pixels"
    source.header["OR_Y"] = source_info["y"], "y position in pixels"
    source.header["OR_SEG"] = (
        source_info["seg_label"],
        "Label in the segmentation map",
    )
    source.header["OR_V"] = origin_version, "ORIGIN version"
    source.header["OR_FLUX"] = source_info["flux"], "flux maximum in all lines"
    source.header["OR_PMAX"] = (source_info["purity"], "maximum purity in all lines")

    if not np.isnan(source_info["STD"]):
        source.header["OR_STD"] = (source_info["STD"], "STD max value in all lines")

    if not np.isnan(source_info["nsigSTD"]):
        source.header["OR_nSTD"] = (
            source_info["nsigSTD"],
            "max of STD/std(STD) in all lines",
        )

    if not np.isnan(source_info["T_GLR"]):
        source.header["OR_TGLR"] = (
            source_info["T_GLR"],
            "T_GLR max value in all lines",
        )
    if not np.isnan(source_info["nsigTGLR"]):
        source.header["OR_nTGLR"] = (
            source_info["nsigTGLR"],
            "max of T_GLR/std(T_GLR) in all lines",
        )

    # source_header_keyword: (key_in_origin_param, description)
    parameters_to_add = {
        "OR_PROF": ("profiles", "OR input, spectral profiles"),
        "OR_FSF": ("PSF", "OR input, FSF cube"),
        "OR_THL%02d": ("threshold_list", "OR input threshold per area"),
        "OR_NA": ("nbareas", "OR number of areas"),
        "preprocessing": {"OR_DCT": ("dct_order", "OR input, DCT order")},
        "areas": {
            "OR_PFAA": ("pfa", "OR input, PFA used to create the area map"),
            "OR_SIZA": ("maxsize", "OR input, maximum area size in pixels"),
            "OR_MSIZA": ("minsize", "OR input, minimum area size in pixels"),
        },
        "compute_PCA_threshold": {"OR_PFAT": ("pfa_test", "OR input, PFA test")},
        "compute_greedy_PCA": {
            "OR_FBG": ("Noise_population", "OR input: fraction of spectra estimated"),
            "OR_ITMAX": ("itermax", "OR input, maximum number of iterations"),
        },
        "compute_TGLR": {"OR_NG": ("size", "OR input, connectivity size")},
        "detection": {
            "OR_DXY": ("tol_spat", "OR input, spatial tolerance for merging (pix)"),
            "OR_DZ": ("tol_spec", "OR input, spectral tolerance for merging (pix)"),
        },
        "compute_spectra": {"OR_NXZ": ("grid_dxy", "OR input, grid Nxy")},
    }

    def add_keyword(keyword, param, description, params):
        if param == "threshold_list" and param in params:
            for idx, threshold in enumerate(params["threshold_list"]):
                source.header[keyword % idx] = (float("%0.2f" % threshold), description)
        elif param in params:
            if params[param] is None:
                source.header[keyword] = "", description
            else:
                source.header[keyword] = params[param], description
        else:
            logger.debug("Parameter %s absent of the parameter list.", param)

    for keyword, val in parameters_to_add.items():
        if isinstance(val, dict) and keyword in origin_params:
            for key, val2 in val.items():
                add_keyword(key, *val2, origin_params[keyword]["params"])
        else:
            add_keyword(keyword, *val, origin_params)

    source.header["COMP_CAT"] = (
        source_info["comp"],
        "1/0 (1=Pre-detected in STD, 0=detected in CORREL)",
    )

    if source.COMP_CAT:
        threshold_keyword, purity_keyword = "threshold_std", "purity_std"
    else:
        threshold_keyword, purity_keyword = "threshold", "purity"
    source.header["OR_TH"] = (
        float("%0.2f" % origin_params[threshold_keyword]),
        "OR input, threshold",
    )
    source.header["OR_PURI"] = (
        float("%0.2f" % origin_params[purity_keyword]),
        "OR input, purity",
    )

    # Mini-cubes
    source.add_cube(
        data_cube, "MUSE_CUBE", size=mask_size, unit_size=None, add_white=True
    )
    # Add FSF with the full cube, to have the same shape as fieldmap, then we
    # can work directly with the subcube
    has_fsf = True
    try:
        source.add_FSF(data_cube, fieldmap=origin_params["fieldmap"])
    except Exception:
        logger.debug('No FSF information found in the cube')
        has_fsf = False
    data_cube = source.cubes["MUSE_CUBE"]

    if source.COMP_CAT:
        cube_ori = Cube(cube_std_filename, convert_float64=False)
        source.add_cube(cube_ori, "ORI_SNCUBE", size=mask_size, unit_size=None)
        cube_ori = source.cubes["ORI_SNCUBE"]
    else:
        cube_ori = Cube(cube_cor_filename, convert_float64=False)
        source.add_cube(cube_ori, "ORI_CORREL", size=mask_size, unit_size=None)
        cube_ori = source.cubes["ORI_CORREL"]

    # Table of sources around the exported sources.
    radius = mask_size / 2
    x_min, x_max = source_info["x"] - radius, source_info["x"] + radius
    y_min, y_max = source_info["y"] - radius, source_info["y"] + radius
    nearby_sources = (
        (source_table["x"] >= x_min)
        & (source_table["x"] <= x_max)
        & (source_table["y"] >= y_min)
        & (source_table["y"] <= y_max)
    )
    source.tables["ORI_CAT"] = source_table["ID", "ra", "dec"][nearby_sources]

    # Maps
    # The white map was added when adding the MUSE cube.
    source.images["ORI_MAXMAP"] = cube_ori.max(axis=0)
    # Using add_image, the image size is taken from the white map.
    source.add_image(mask, "ORI_MASK_OBJ")
    source.add_image(Image(skymask_filename), "ORI_MASK_SKY")
    for segmap_type, segmap_filename in segmaps.items():
        source.add_image(Image(segmap_filename), "ORI_SEGMAP_%s" % segmap_type)
    if expmap_filename is not None:
        source.add_image(Image(expmap_filename), "EXPMAP")

    # Full source spectra
    source.extract_spectra(
        data_cube, obj_mask="ORI_MASK_OBJ", sky_mask="ORI_MASK_SKY", skysub=True
    )
    source.extract_spectra(
        data_cube, obj_mask="ORI_MASK_OBJ", sky_mask="ORI_MASK_SKY", skysub=False
    )
    if source.COMP_CAT:
        source.spectra["ORI_CORR"] = (
            source.cubes["ORI_SNCUBE"] * source.images["ORI_MASK_OBJ"]
        ).mean(axis=(1, 2))
    else:
        source.spectra["ORI_CORR"] = (
            source.cubes["ORI_CORREL"] * source.images["ORI_MASK_OBJ"]
        ).mean(axis=(1, 2))

    # Add the FSF information to the source and use this information to compute
    # the PSF weighted spectra.
    if has_fsf:
        try:    
            fsfmodel = source.get_FSF()
            fwhm_fsf = fsfmodel.get_fwhm(data_cube.wave.coord()) 
            beta_fsf = fsfmodel.get_beta(data_cube.wave.coord()) 
            source.extract_spectra(
                data_cube,
                obj_mask="ORI_MASK_OBJ",
                sky_mask="ORI_MASK_SKY",
                skysub=True,
                psf=fwhm_fsf,
                beta=beta_fsf,
            )
            source.extract_spectra(
                data_cube,
                obj_mask="ORI_MASK_OBJ",
                sky_mask="ORI_MASK_SKY",
                skysub=False,
                psf=fwhm_fsf,
                beta=beta_fsf,
            )
        except Exception:
            # WIP to work with the new FSF model
            has_fsf = False

    # Per line data: the line table, the spectrum of each line, the narrow band
    # map from the data and from the correlation cube.
    # Content of the line table in the source
    line_columns, line_units, line_fmt = zip(
        *[
            ("NUM_LINE", None, None),
            ("RA_LINE", u.deg, ".2f"),
            ("DEC_LINE", u.deg, ".2f"),
            ("LBDA_OBS", u.Angstrom, ".2f"),
            ("FWHM", u.Angstrom, ".2f"),
            ("FLUX", u.erg / (u.s * u.cm ** 2), ".1f"),
            ("GLR", None, ".1f"),
            ("nGLR", None, ".1f"),
            ("PROF", None, None),
            ("PURITY", None, ".2f"),
        ]
    )

    # If the line is a complementary one, the GLR column is replaced by STD
    if source.COMP_CAT:
        line_columns = list(line_columns)
        line_columns[6] = "STD"
        line_columns[7] = "nSTD"

    # We put all the ORIGIN lines in an ORI_LINES table but keep only the
    # unique lines in the LINES table.
    source.add_table(source_lines, "ORI_LINES", select_in=None, col_dist=None)

    # Table containing the information on the narrow band images.
    nb_par_rows = []

    hdulist = fits.open(spectra_fits_filename)

    for line in source_lines[source_lines["merged_in"] == -9999]:
        num_line, lbda_ori, prof = line[["num_line", "lbda", "profile"]]
        fwhm_ori = profile_fwhm[prof] * data_cube.wave.get_step(unit=u.Angstrom)
        if source.COMP_CAT:
            glr_std = line["STD"]
            nglr_std = line["nsigSTD"]
        else:
            glr_std = line["T_GLR"]
            nglr_std = line["nsigTGLR"]

        source.add_line(
            cols=line_columns,
            values=[
                num_line,
                line["ra"],
                line["dec"],
                lbda_ori,
                fwhm_ori,
                line["flux"],
                glr_std,
                nglr_std,
                prof,
                line["purity"],
            ],
            units=line_units,
            fmt=line_fmt,
            desc=None,
        )

        if f"DATA{num_line}" in hdulist:  # RB add test
            source.spectra[f"ORI_SPEC_{num_line}"] = Spectrum(
                hdulist=hdulist,
                ext=(f"DATA{num_line}", f"STAT{num_line}"),
                convert_float64=False,
            )

        source.add_narrow_band_image_lbdaobs(
            data_cube,
            f"NB_LINE_{num_line}",
            lbda=lbda_ori,
            width=nb_fwhm * fwhm_ori,
            method="sum",
            subtract_off=True,
            margin=10.0,
            fband=3.0,
        )

        nb_par_rows.append(
            [f"NB_LINE_{num_line}", lbda_ori, nb_fwhm * fwhm_ori, 10.0, 3.0]
        )

        source.add_narrow_band_image_lbdaobs(
            cube_ori,
            f"ORI_CORR_{num_line}",
            lbda=lbda_ori,
            width=nb_fwhm * fwhm_ori,
            method="max",
            subtract_off=False,
        )

        # Compute the spectra weighted by the correlation map for the
        # current line
        tags = [f"ORI_CORR_{num_line}"]
        source.extract_spectra(
            data_cube,
            obj_mask="ORI_MASK_OBJ",
            sky_mask="ORI_MASK_SKY",
            skysub=True,
            tags_to_try=tags,
        )
        source.extract_spectra(
            data_cube,
            obj_mask="ORI_MASK_OBJ",
            sky_mask="ORI_MASK_SKY",
            skysub=False,
            tags_to_try=tags,
        )

    # set REFSPEC to the spectrum weighted by the correlation map of the
    # brightest line
    num_max = source.lines["NUM_LINE"][np.argmax(source.lines["FLUX"])]
    source.header["REFSPEC"] = f"ORI_CORR_{num_max}_SKYSUB"

    hdulist.close()

    nb_par = Table(
        names=["LINE", "LBDA", "WIDTH", "MARGIN", "FBAND"],
        dtype=["U20", float, float, float, float],
        rows=nb_par_rows,
    )
    source.add_table(nb_par, "NB_PAR", select_in=None, col_dist=None)

    if save_to is not None:
        source.write(save_to)
    else:
        return source