Example #1
def test_stats():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file())

    # Create a fake bin map
    bin_indx = numpy.arange(cube.nspec // 4,
                            dtype=int).reshape(cube.spatial_shape[0] // 2,
                                               cube.spatial_shape[0] // 2)
    bin_indx = numpy.repeat(bin_indx, 2, axis=0)
    bin_indx = numpy.repeat(bin_indx, 2, axis=1)

    # Get the bin area
    bins, area = cube.binned_on_sky_area(bin_indx)

    assert numpy.array_equal(bins,
                             numpy.arange(cube.nspec // 4)), 'Bad bin list'
    assert numpy.allclose(area, 1.), 'Bad area calculation'

    methods = available_reduction_assessments()
    i = numpy.where([m['key'] == 'SNRG' for m in methods])[0]
    assert len(i) == 1, 'Could not find correct reduction assessment definition.'

    cen_wave = cube.central_wavelength(
        response_func=methods[i[0]]['response_func'],
        flag=cube.do_not_use_flags())
    assert numpy.isclose(cen_wave, 4638.0), 'Central wavelength changed.'

    cen_wave = cube.central_wavelength(waverange=[4000, 8000],
                                       flag=cube.do_not_use_flags(),
                                       fluxwgt=True)
    assert numpy.isclose(cen_wave, 5895.7), 'Central wavelength changed.'

    cen_wave = cube.central_wavelength(waverange=[4000, 8000],
                                       flag=cube.do_not_use_flags(),
                                       per_pixel=False)
    assert numpy.isclose(cen_wave, 6044.9), 'Central wavelength changed.'

    sig, var, snr = cube.flux_stats(
        response_func=methods[i[0]]['response_func'])
    assert sig.shape == cube.spatial_shape, 'Should be shaped as a map.'
    assert isinstance(sig, numpy.ma.MaskedArray), 'Expected masked arrays'
    assert numpy.ma.amax(snr) > 60, 'S/N changed'

    # Try it with the linear cube
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file(),
                                       log=False)
    _sig, _var, _snr = cube.flux_stats(
        response_func=methods[i[0]]['response_func'])
    # TODO: Not sure why these are not closer.
    assert numpy.absolute(numpy.ma.median((sig-_sig)/_sig)) < 0.01, \
            'Signal should be the same to better than 1%.'
    assert numpy.absolute(numpy.ma.median((var-_var)/_var)) < 0.03, \
            'Variance should be the same to better than 3%.'
    assert numpy.absolute(numpy.ma.median((snr-_snr)/_snr)) < 0.02, \
            'S/N should be the same to better than 2%.'
Example #2
def main(args):
    t = time.perf_counter()

    if args.drpcomplete is not None:
        # Use the DRPcomplete file
        root_dir = os.path.dirname(args.drpcomplete)
        if len(root_dir) == 0:
            root_dir = '.'
        drpver = args.drpcomplete[args.drpcomplete.find('_v') +
                                  1:args.drpcomplete.find('.fits')]
        drpc = DRPComplete(drpver=drpver,
                           directory_path=root_dir,
                           readonly=True)
        index = drpc.entry_index(args.plate, args.ifudesign)
        MaNGADataCube.write_config(args.ofile,
                                   drpc['PLATE'][index],
                                   drpc['IFUDESIGN'][index],
                                   log=True,
                                   z=drpc['VEL'][index] /
                                   astropy.constants.c.to('km/s').value,
                                   vdisp=drpc['VDISP'][index],
                                   ell=drpc['ELL'][index],
                                   pa=drpc['PA'][index],
                                   reff=drpc['REFF'][index],
                                   sres_ext=args.sres_ext,
                                   sres_fill=args.sres_fill,
                                   covar_ext=args.covar_ext,
                                   drpver=args.drpver,
                                   redux_path=args.redux_path,
                                   overwrite=args.overwrite)
        return

    # Use the DRPall file
    with fits.open(args.drpall) as hdu:
        indx = numpy.where(hdu['MANGA'].data['PLATEIFU']
                           == '{0}-{1}'.format(args.plate, args.ifudesign))[0]
        if len(indx) != 1:
            raise ValueError('{0}-{1} either does not exist or has more than '
                             'one match!'.format(args.plate, args.ifudesign))

        MaNGADataCube.write_config(
            args.ofile,
            args.plate,
            args.ifudesign,
            z=hdu[1].data['z'][indx[0]],
            ell=1 - hdu[1].data['nsa_elpetro_ba'][indx[0]],
            pa=hdu[1].data['nsa_elpetro_phi'][indx[0]],
            reff=hdu[1].data['nsa_elpetro_th50_r'][indx[0]],
            sres_ext=args.sres_ext,
            sres_fill=args.sres_fill,
            covar_ext=args.covar_ext,
            drpver=args.drpver,
            redux_path=args.redux_path,
            directory_path=args.directory_path,
            overwrite=args.overwrite)

    print('Elapsed time: {0} seconds'.format(time.perf_counter() - t))
Example #3
def test_sres_ext():
    file = remote_data_file(
        filename=MaNGADataCube.build_file_name(7815, 3702, log=True))
    hdu = fits.open(file)
    assert MaNGADataCube.spectral_resolution_extension(hdu) == 'LSFPRE', \
                'Bad spectral resolution extension selection'
    assert MaNGADataCube.spectral_resolution_extension(hdu, ext='SPECRES') == 'SPECRES', \
                'Bad spectral resolution extension selection'
    assert MaNGADataCube.spectral_resolution_extension(hdu, ext='junk') is None, \
                'Should return None for a bad extension name.'
Example #4
def get_spectrum(plt, ifu, x, y, directory_path=None):
    """
    Extract a single spectrum from a MaNGA observation.

    Args:
        plt (:obj:`int`):
            Plate number
        ifu (:obj:`int`):
            IFU identifier
        x (:obj:`int`):
            The spaxel coordinate along the RA axis.
        y (:obj:`int`):
            The spaxel coordinate along the DEC axis.
        directory_path (:obj:`str`, optional):
            Directory with the DRP LOGCUBE file. If None, uses the
            default directory path based on the environment
            variables.

    Returns:
        :obj:`tuple`: Returns 4 numpy vectors: The wavelength, flux,
        flux inverse variance, and spectral resolution extracted from
        the datacube.
    """
    cube = MaNGADataCube.from_plateifu(plt, ifu, directory_path=directory_path)
    flat_indx = cube.spatial_shape[1] * x + y
    # This function always returns a masked array
    flux = cube.copy_to_masked_array(attr='flux', flag=cube.do_not_fit_flags())
    ivar = cube.copy_to_masked_array(attr='ivar', flag=cube.do_not_fit_flags())
    sres = cube.copy_to_array(attr='sres')
    return cube.wave, flux[flat_indx, :], ivar[flat_indx, :], sres[flat_indx, :]
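A minimal usage sketch for get_spectrum follows; it is not part of the original example. It assumes matplotlib is available, that the 7815-3702 LOGCUBE is reachable through the default directory path, and the spaxel coordinates (21, 21) are purely illustrative.

import numpy
from matplotlib import pyplot

# Pull a single (illustrative) spaxel and plot the flux with its 1-sigma error
# derived from the inverse variance; masked pixels plot as gaps.
wave, flux, ivar, sres = get_spectrum(7815, 3702, 21, 21)
err = numpy.ma.power(ivar, -0.5)
pyplot.plot(wave, flux, label='flux')
pyplot.plot(wave, err, label='1-sigma error')
pyplot.xlabel('Wavelength')
pyplot.ylabel('Flux')
pyplot.legend()
pyplot.show()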
Example #5
def fit_one_cube(plt,
                 ifu,
                 drpall_file=None,
                 directory_path=None,
                 analysis_path=None):
    # Grab the required input parameters
    config_file = '{0}-{1}.cfg'.format(plt, ifu)
    get_config(plt, ifu, config_file, drpall_file=drpall_file)

    # Read the datacube
    cube = MaNGADataCube.from_config(config_file,
                                     directory_path=directory_path)

    # Define how you want to analyze the data
    plan = AnalysisPlanSet([
        AnalysisPlan(
            drpqa_key='SNRG',
            bin_key='VOR10',  #'HYB10',
            continuum_key='MILESHCMPL10',
            elmom_key='EMOMMPL10',
            elfit_key='EFITMPL10',  #'EFITMPL9DB',
            spindex_key='INDXEN')
    ])

    # Run it!
    return manga_dap(cube,
                     plan,
                     verbose=2,
                     directory_path=directory_path,
                     analysis_path=analysis_path)
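The AnalysisPlan keys above (drpqa_key, bin_key, continuum_key, elmom_key, elfit_key, spindex_key) appear to select the S/N assessment, spatial binning, stellar-continuum, emission-line moment and fit, and spectral-index modules run by manga_dap; swapping a key (e.g., 'HYB10' for 'VOR10', as hinted in the comment) swaps that step of the analysis. A hypothetical driver is sketched below; the plate/IFU pair, drpall file name, and analysis path are placeholders for a local setup, not values taken from the original example.

# Illustrative call only: file names and paths are assumptions about the local
# MaNGA installation.
status = fit_one_cube(7815, 3702, drpall_file='drpall-v3_1_1.fits',
                      analysis_path='./dap_output')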
Example #6
def test_drpbitmask():
    # Read the data
    specfile = data_test_file('MaNGA_test_spectra.fits.gz')
    hdu = fits.open(specfile)
    drpbm = DRPFitsBitMask()
    assert numpy.sum(drpbm.flagged(hdu['MASK'].data, MaNGADataCube.do_not_fit_flags())) == 4601, \
                'Flags changed'
Example #7
def test_read_lin():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file(),
                                       log=False)
    assert not cube.log, 'Wavelength sampling should be linear'
    assert numpy.isclose(numpy.std(numpy.diff(cube.wave)), 0.), \
                'Wavelength sampling should be linear'
Example #8
def get_spectra(plt, ifu, x, y, directory_path=None):
    cube = MaNGADataCube.from_plateifu(plt, ifu, directory_path=directory_path)
    flat_indx = cube.spatial_shape[1]*x+y
    # This function always returns a masked array
    flux = cube.copy_to_masked_array(attr='flux', flag=cube.do_not_fit_flags())
    ivar = cube.copy_to_masked_array(attr='ivar', flag=cube.do_not_fit_flags())
    sres = cube.copy_to_array(attr='sres')
    return cube.wave, flux[flat_indx,:], ivar[flat_indx,:], sres[flat_indx,:]
Example #9
def test_copyto():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file())
    flux = cube.copy_to_array()
    assert not isinstance(flux,
                          numpy.ma.MaskedArray), 'Should output normal array'
    assert flux.shape[0] == cube.nspec, 'Should be flattened into a 2D array.'
    assert flux.shape[1] == cube.nwave, 'Should be flattened into a 2D array.'

    # Apply a wavelength mask
    waverange = [5000, 7000]
    flux = cube.copy_to_array(waverange=waverange)
    indx = (cube.wave > waverange[0]) & (cube.wave < waverange[1])
    assert flux.shape[1] == numpy.sum(indx), 'Wavelength range masking failed'

    # Find the spaxels with non-zero signal
    methods = available_reduction_assessments()
    i = numpy.where([m['key'] == 'SNRG' for m in methods])[0]
    assert len(i) == 1, 'Could not find correct reduction assessment definition.'
    sig, var, snr = cube.flux_stats(
        response_func=methods[i[0]]['response_func'])
    indx = ((sig > 0) & numpy.invert(numpy.ma.getmaskarray(sig))).data.ravel()
    ngood = numpy.sum(indx)

    # Select the spaxels with non-zero signal
    flux = cube.copy_to_array(waverange=waverange, select_bins=indx)
    assert flux.shape[0] == ngood, 'Bin selection failed'

    # Get the masked array
    flux = cube.copy_to_masked_array()
    assert isinstance(flux,
                      numpy.ma.MaskedArray), 'Should output a masked array'
    assert flux.shape[0] == cube.nspec, 'Should be flattened into a 2D array.'
    assert flux.shape[1] == cube.nwave, 'Should be flattened into a 2D array.'

    # Select the spaxels with non-zero signal
    flux = cube.copy_to_masked_array(select_bins=indx)
    assert flux.shape[0] == ngood, 'Bin selection failed'

    # Try to get the inverse variance
    i = cube.nspec // 2 + cube.spatial_shape[1] // 2
    ivar = cube.copy_to_masked_array(attr='ivar')
    assert ivar.shape == (cube.nspec, cube.nwave), 'Bad ivar shape'
    assert numpy.array_equal(
        cube.ivar[numpy.unravel_index(i, cube.spatial_shape)],
        ivar[i].data), 'Did not pull ivar data.'

    # Try to get the spectral resolution
    sres = cube.copy_to_masked_array(attr='sres')
    assert sres.shape == (cube.nspec, cube.nwave), 'Bad sres shape'
    assert numpy.array_equal(
        cube.sres[numpy.unravel_index(i, cube.spatial_shape)],
        sres[i].data), 'Did not pull sres data.'
Example #10
def test_rectification_shape():
    # Load the datacube and the row-stacked spectra
    cube = MaNGADataCube.from_plateifu(7815, 3702, directory_path=remote_data_file())
    cube.load_rss()

    # Get the rectification parameters
    pixelscale, rlim, sigma, recenter, width_buffer \
            = MaNGARSS._parse_rectification_parameters(None, None, None, None, None)
    # Get the cube dimensions
    cube.rss._cube_dimensions(pixelscale=pixelscale, recenter=recenter, width_buffer=width_buffer)
    # Make sure they match what the DRP produced
    assert cube.spatial_shape == (cube.rss.nx, cube.rss.ny), 'Mismatched cube spatial dimensions'
Example #11
def test_read():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file())

    assert cube.file_name == MaNGADataCube.build_file_name(
        cube.plate, cube.ifudesign, log=cube.log), 'Name mismatch'
    assert cube.log, 'Should read the log-binned version by default.'
    assert cube.wcs is not None, 'WCS should be defined.'
    assert cube.shape[:2] == cube.spatial_shape, \
                'Spatial shape should be first two axes.'
    assert cube.nspec == numpy.prod(cube.spatial_shape), \
                'Definition of number of spectra changed.'
    assert cube.sres is not None, 'Spectral resolution data was not constructed.'
    assert cube.sres_ext == 'LSFPRE', 'Should default to LSFPRE extension.'
    assert abs(cube.pixelscale -
               cube._get_pixelscale()) < 1e-6, 'Bad match in pixel scale.'
    # NOTE: This is worse than it should be because of how the WCS in MaNGA is defined.
    assert numpy.all(numpy.absolute(cube.wave - cube._get_wavelength_vector(cube.nwave)) < 2e-4), \
            'Bad calculation of wavelength vector.'
    assert cube.covar is None, 'Covariance should not have been read'
Example #12
def get_config(plt, ifu, config_file, drpall_file=None):
    if drpall_file is None:
        drpall_file = manga.drpall_file()

    # Use the DRPall file
    with fits.open(drpall_file) as hdu:
        indx = numpy.where(hdu['MANGA'].data['PLATEIFU']
                           == '{0}-{1}'.format(plt, ifu))[0]
        if len(indx) != 1:
            raise ValueError('{0}-{1} either does not exist or has more than '
                             'one match!'.format(plt, ifu))

        MaNGADataCube.write_config(
            config_file,
            plt,
            ifu,
            z=hdu[1].data['z'][indx[0]],
            ell=1 - hdu[1].data['nsa_elpetro_ba'][indx[0]],
            pa=hdu[1].data['nsa_elpetro_phi'][indx[0]],
            reff=hdu[1].data['nsa_elpetro_th50_r'][indx[0]],
            overwrite=True)
Example #13
def gmr_data(plt, ifu, drpver, redux_path):
    # Get the g-r map from the data cube

    drp_cube_file = os.path.join(*MaNGADataCube.default_paths(
        plt, ifu, drpver=drpver, redux_path=redux_path))
    if not os.path.isfile(drp_cube_file):
        raise FileNotFoundError('{0} does not exist!'.format(drp_cube_file))

    with fits.open(drp_cube_file) as hdu:
        return -2.5 * numpy.ma.log10(
            numpy.ma.MaskedArray(hdu['GIMG'].data,
                                 mask=numpy.invert(hdu['GIMG'].data > 0)) /
            numpy.ma.MaskedArray(hdu['RIMG'].data,
                                 mask=numpy.invert(hdu['RIMG'].data > 0)))
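As a hypothetical follow-on (not part of the original example), the masked g-r map returned by gmr_data can be displayed directly; the plate/IFU and DRP version are the values used elsewhere in these examples, and passing redux_path=None is an assumption that the default reduction path will be picked up.

from matplotlib import pyplot

# Illustrative only: show the g-r color map; masked (zero-flux) spaxels are blank.
gmr = gmr_data(7815, 3702, 'v3_1_1', None)
pyplot.imshow(gmr, origin='lower', interpolation='nearest')
pyplot.colorbar(label='g-r (mag)')
pyplot.show()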
Example #14
def test_match_resolution():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file())
    tpl = TemplateLibrary('MILESHC',
                          cube=cube,
                          match_resolution=True,
                          velscale_ratio=4,
                          hardcopy=False)

    # Resolution should be virtually identical in unmasked regions
    indx = tpl['MASK'].data == 0
    assert numpy.std(tpl.sres(tpl['WAVE'].data[indx[0]]) - tpl['SPECRES'].data[0,indx[0]]) < 0.1, \
                'Spectral resolution difference is above tolerance.'
Example #15
def test_read_correl():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file(),
                                       covar_ext='GCORREL')
    assert isinstance(cube.covar, Covariance), 'Incorrect type for covariance.'
    assert cube.covar.shape == (cube.nspec,
                                cube.nspec), 'Covariance has incorrect shape.'
    assert cube.covar.is_correlation, 'Covariance object should be in a correlation mode.'

    # Check that the variances are all unity (or close to it when it's defined)
    unique_var = numpy.unique(cube.covar.var)
    assert numpy.allclose(unique_var[unique_var > 0],
                          1.), 'Bad variance values'
Example #16
def test_wcs():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file())
    x, y = cube.mean_sky_coordinates(offset=None)
    assert x[0, 0] > x[-1, 0], 'RA should increase from large to small indices'
    assert y[0, 0] < y[0, -1], 'DEC should increase from small to large indices'
    assert numpy.unravel_index(numpy.argmin( numpy.square(x - cube.prihdr['OBJRA'])
                                            + numpy.square(y - cube.prihdr['OBJDEC'])), x.shape) \
                == (21,21), 'Object should be at cube center.'
    x, y = cube.mean_sky_coordinates(center_coo=(x[0, 0], y[0, 0]))
    assert numpy.isclose(x[0, 0], 0.0) and numpy.isclose(y[0, 0], 0.0), \
                'Offset incorrect'
    x, y = cube.mean_sky_coordinates()
    assert abs(x[21, 21]) < 1e-2 and abs(y[21, 21]) < 1e-2, 'Offset incorrect'
Example #17
def test_rectification_recovery():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file(),
                                       covar_ext='GCORREL')
    cube.load_rss()

    hdu = fits.open(cube.file_path())
    channel = hdu['GCORREL'].header['BBINDEX']

    gcorrel = numpy.zeros(eval(hdu['GCORREL'].header['COVSHAPE']), dtype=float)
    i = numpy.ravel_multi_index(
        (hdu['GCORREL'].data['INDXI_C1'], hdu['GCORREL'].data['INDXI_C2']),
        cube.spatial_shape)

    j = numpy.ravel_multi_index(
        (hdu['GCORREL'].data['INDXJ_C1'], hdu['GCORREL'].data['INDXJ_C2']),
        cube.spatial_shape)
    gcorrel[i, j] = hdu['GCORREL'].data['RHOIJ']
    gcorrel[j, i] = hdu['GCORREL'].data['RHOIJ']

    assert numpy.allclose(cube.covar.toarray(), gcorrel), 'Bad covariance read'

    flux, C = cube.rss.rectify_wavelength_plane(channel, return_covar=True)
    assert numpy.allclose(cube.flux[..., channel],
                          flux), 'Bad flux rectification'

    ivar = numpy.ma.power(C.variance().reshape(cube.spatial_shape),
                          -1).filled(0.0)
    assert numpy.allclose(cube.ivar[..., channel],
                          ivar), 'Bad inverse variance rectification'

    C.to_correlation()
    assert numpy.allclose(C.toarray(), gcorrel), 'Bad covariance calculation'

    sres = numpy.ma.divide(cube.rss.wave[channel],
                           cube.rss.instrumental_dispersion_plane(channel).ravel()) \
                / DAPConstants.sig2fwhm

    # WARNING: The computations done by the DRP and DAP are different
    # in detail, but (at least for this test cube) the results are
    # virtually identical except for notable outliers.
    assert numpy.ma.median(cube.sres[...,channel].ravel() - sres) < 0.1, \
            'Bad spectral resolution rectification'
Example #18
def test_covariance():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file())

    with pytest.raises(ValueError):
        # Have to load the RSS first
        cube.covariance_matrix(1000)

    # Load the RSS
    cube.load_rss()

    # Construct a covariance matrix
    C = cube.covariance_matrix(1000)
    assert C.shape == (1764, 1764), 'Bad covariance shape'

    # Make it a correlation matrix and check it
    C.to_correlation()

    # Check that the variances are all unity (or close to it when it's defined)
    unique_var = numpy.unique(numpy.diag(C.toarray()))
    assert numpy.allclose(unique_var[unique_var > 0],
                          1.), 'Bad correlation diagonal'

    # Try multiple channels
    C = cube.covariance_cube(channels=[1000, 2000])
    assert numpy.array_equal(C.input_indx, [1000, 2000]), 'Bad matrix indices'
    assert C.shape == (1764, 1764, 2), 'Bad covariance shape'

    # Try to convert multiple channels
    C.to_correlation()
    # And reverting it
    C.revert_correlation()

    # Try to generate an approximate correlation matrix, covariance
    # matrix, and covariance cube
    approxC = cube.approximate_correlation_matrix()
    approxC = cube.approximate_covariance_matrix(1000)
    approxC = cube.approximate_covariance_cube(channels=[1000, 2000])

    # Variance should be the same for direct and approximate calculations
    assert numpy.allclose(approxC.variance(),
                          C.variance()), 'Variances should be the same.'
Example #19
def test_match_resolution():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file())
    tpl = TemplateLibrary('MILESHC',
                          cube=cube,
                          match_resolution=True,
                          velscale_ratio=4,
                          hardcopy=False,
                          output_path=remote_data_file())

    # Resolution should be virtually identical in unmasked regions
    indx = tpl['MASK'].data == 0
    assert numpy.std(tpl.sres(tpl['WAVE'].data[indx[0]]) - tpl['SPECRES'].data[0,indx[0]]) < 0.1, \
                'Spectral resolution difference is above tolerance.'

    # Check the file that would have been written has the expected path
    assert cube.directory_path == tpl.directory_path, 'Cube and TPL paths should match.'
    assert tpl.file_name().startswith(cube.output_root), \
                'TPL file should start with the cube root'
Example #20
def test_read_drp():
    drpfile = os.path.join(remote_data_file(),
                           MaNGADataCube.build_file_name(7815, 3702))

    assert os.path.isfile(drpfile), 'Did not find file'

    with fits.open(drpfile) as hdu:
        covar = Covariance.from_fits(hdu,
                                     ivar_ext=None,
                                     covar_ext='GCORREL',
                                     impose_triu=True,
                                     correlation=True)
        var = numpy.ma.power(
            hdu['IVAR'].data[hdu['GCORREL'].header['BBINDEX']].T.ravel(),
            -1).filled(0.0)

    covar = covar.apply_new_variance(var)
    covar.revert_correlation()

    assert numpy.array_equal(var, numpy.diag(covar.toarray())), \
                'New variance not applied'
Example #21
def resample_test():

    # Read the example datacube and get the expected redshift. You can download
    # these data using
    # https://github.com/sdss/mangadap/blob/master/download_test_data.py
    plt = 7815
    ifu = 3702
    drpver = 'v3_1_1'
    directory_path = defaults.dap_source_dir() / 'data' / 'remote'
    cube = MaNGADataCube.from_plateifu(plt, ifu, directory_path=directory_path)
    drpall_file = directory_path / f'drpall-{drpver}.fits'
    z = get_redshift(plt, ifu, drpall_file)

    # Pull out two example spectra from the datacube
    old_wave = cube.wave
    old_flux = numpy.ma.MaskedArray(cube.flux[10, 10:12, :],
                                    mask=cube.mask[10, 10:12, :] > 0)
    old_flux[:, (old_wave > 5570) & (old_wave < 5586)] = numpy.ma.masked
    old_ferr = numpy.ma.power(cube.ivar[10, 10:12, :], -0.5)

    if spectres is not None:
        # Use spectres to resample the spectrum, ignoring last pixel
        indx = ((old_wave > old_wave[0] / (1 + z))
                & (old_wave < old_wave[-2] / (1 + z)))
        t = time.perf_counter()
        new_flux_spectres = numpy.empty((old_flux.shape[0], numpy.sum(indx)),
                                        dtype=float)
        new_ferr_spectres = numpy.empty((old_flux.shape[0], numpy.sum(indx)),
                                        dtype=float)
        for i in range(old_flux.shape[0]):
            new_flux_spectres[i,:], new_ferr_spectres[i,:] \
                    = spectres.spectres(old_wave[indx], old_wave/(1+z), old_flux[i,:].filled(0.0),
                                        spec_errs=old_ferr[i,:].filled(0.0))
        print('SpectRes Time: ', time.perf_counter() - t)

    # Use a brute-force integration of the spectra to resample the spectrum
    t = time.perf_counter()
    borders = grid_borders(numpy.array([old_wave[0], old_wave[-1]]),
                           old_wave.size,
                           log=True)[0]
    _p = numpy.repeat(borders, 2)[1:-1].reshape(-1, 2)
    new_flux_brute = numpy.array([
        passband_integral(old_wave / (1 + z), f, passband=_p, log=True)
        for f in old_flux.filled(0.0)
    ])
    new_flux_brute /= (_p[:, 1] - _p[:, 0])[None, :]
    print('Brute Force Time: ', time.perf_counter() - t)

    # Use the Resample class to resample the spectrum
    t = time.perf_counter()
    r = Resample(old_flux,
                 e=old_ferr,
                 x=old_wave / (1 + z),
                 newRange=[old_wave[0], old_wave[-1]],
                 inLog=True,
                 newLog=True)
    print('Resample Time: ', time.perf_counter() - t)

    # Estimate the differences between the resampling methods (these should all
    # be the same to within numerical accuracy)
    print('Mean diff:')
    if spectres is not None:
        print('    spectres - brute    = {0:.5e}'.format(
            numpy.mean(
                numpy.absolute(new_flux_spectres - new_flux_brute[:, indx]))))
        print('    spectres - resample = {0:.5e}'.format(
            numpy.mean(numpy.absolute(new_flux_spectres - r.outy[:, indx]))))
    print('    brute - resample    = {0:.5e}'.format(
        numpy.mean(numpy.absolute(new_flux_brute - r.outy))))

    # Plot the original and resampled versions for all spectra.  The resampled
    # versions should all be indistinguishable.
    for i in range(old_flux.shape[0]):
        pyplot.plot(old_wave / (1 + z), old_flux[i, :], label='Data')
        if spectres is not None:
            pyplot.plot(old_wave[indx],
                        new_flux_spectres[i, :],
                        label='spectres')
        pyplot.plot(old_wave, new_flux_brute[i, :], label='brute')
        pyplot.plot(r.outx, r.outy[i, :], label='Resample')
        pyplot.plot(r.outx, r.outf[i, :], label='Good-pixel Mask')
        pyplot.legend()
        pyplot.xlabel('Wavelength')
        pyplot.ylabel('Flux')
        pyplot.show()
Example #22
def test_load_rss():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file())
    cube.load_rss()
Example #23
def test_from_config():
    cube = MaNGADataCube.from_config(data_test_file('datacube.ini'))
    assert cube.meta['z'] == 0.0293823, 'Bad config file read'
    assert cube.meta['ell'] == 0.110844, 'Bad config file read'
Example #24
def test_moments():

    # Read the data
    specfile = data_test_file('MaNGA_test_spectra.fits.gz')
    hdu = fits.open(specfile)
    drpbm = DRPFitsBitMask()
    flux = numpy.ma.MaskedArray(hdu['FLUX'].data,
                                mask=drpbm.flagged(
                                    hdu['MASK'].data,
                                    MaNGADataCube.do_not_fit_flags()))
    ferr = numpy.ma.power(hdu['IVAR'].data, -0.5)
    flux[ferr.mask] = numpy.ma.masked
    ferr[flux.mask] = numpy.ma.masked
    nspec = flux.shape[0]

    # Read the database that defines the emission lines and passbands
    momdb = EmissionMomentsDB.from_key('ELBMILES')

    # Measure the moments
    elmombm = EmissionLineMomentsBitMask()
    elmom = EmissionLineMoments.measure_moments(momdb,
                                                hdu['WAVE'].data,
                                                flux,
                                                redshift=hdu['Z'].data,
                                                bitmask=elmombm)

    # Measure the EW based on the moments
    include_band = numpy.array([numpy.invert(momdb.dummy)]*nspec) \
                        & numpy.invert(elmombm.flagged(elmom['MASK'],
                                                       flag=['BLUE_EMPTY', 'RED_EMPTY']))
    line_center = (1.0 + hdu['Z'].data)[:, None] * momdb['restwave'][None, :]
    elmom['BMED'], elmom['RMED'], pos, elmom['EWCONT'], elmom['EW'], elmom['EWERR'] \
            = emission_line_equivalent_width(hdu['WAVE'].data, flux, momdb['blueside'],
                                             momdb['redside'], line_center, elmom['FLUX'],
                                             redshift=hdu['Z'].data,
                                             line_flux_err=elmom['FLUXERR'],
                                             include_band=include_band)

    # Check the flags
    reference = {
        'BLUE_INCOMP': 21,
        'MAIN_JUMP': 0,
        'UNDEFINED_MOM2': 46,
        'JUMP_BTWN_SIDEBANDS': 0,
        'RED_JUMP': 0,
        'DIVBYZERO': 0,
        'NO_ABSORPTION_CORRECTION': 176,
        'RED_EMPTY': 21,
        'UNDEFINED_BANDS': 8,
        'DIDNOTUSE': 0,
        'UNDEFINED_MOM1': 0,
        'FORESTAR': 0,
        'NON_POSITIVE_CONTINUUM': 0,
        'LOW_SNR': 0,
        'MAIN_EMPTY': 21,
        'BLUE_JUMP': 0,
        'RED_INCOMP': 21,
        'MAIN_INCOMP': 21,
        'BLUE_EMPTY': 21
    }
    assert numpy.all([
        reference[k] == numpy.sum(elmombm.flagged(elmom['MASK'], flag=k))
        for k in elmombm.keys()
    ]), 'Number of flagged measurements changed'

    # Check that the values are finite
    assert numpy.all([ numpy.all(numpy.isfinite(elmom[n])) for n in elmom.dtype.names]), \
                        'Found non-finite values in output'

    # Check the band definitions
    assert numpy.all(numpy.equal(elmom['REDSHIFT'],
                                 hdu['Z'].data)), 'Redshift changed'
    assert numpy.all(numpy.isclose(numpy.mean(momdb['blueside'], axis=1)[None,:],
                                   elmom['BCEN']/(1+hdu['Z'].data[:,None]))
                        | elmombm.flagged(elmom['MASK'], flag='UNDEFINED_BANDS')), \
                'Blue passband center incorrect'
    assert numpy.all(numpy.isclose(numpy.mean(momdb['redside'], axis=1)[None,:],
                                   elmom['RCEN']/(1+hdu['Z'].data[:,None]))
                        | elmombm.flagged(elmom['MASK'], flag='UNDEFINED_BANDS')), \
                'Red passband center incorrect'

    # Check the values
    assert numpy.allclose(elmom['FLUX'][0],
                          numpy.array([
                              -0.83366296, 0., -0.7368989, -6.84760392,
                              -5.8392653, -3.84394899, -9.63158548,
                              -10.1459227, -1.86639944, 0.19851703, 0.04831539,
                              -5.58001859, 0.86652478, -1.3277138, 4.48556862,
                              0.12541773, -1.37675776, 1.14456948, -1.41808526,
                              2.48743805, -0.31254732, 0.04046428
                          ]),
                          rtol=0.0,
                          atol=1e-2), 'Fluxes changed'
    assert numpy.allclose(
        elmom['MOM1'][0],
        numpy.array([
            15403.91870501, 0., 13866.58355013, 14816.45834376, 14861.90408263,
            14545.21106265, 14929.76054479, 14774.62443577, 14943.56586856,
            13010.07824437, 15933.25294444, 14918.25984067, 14425.53398781,
            15207.53998774, 14803.71786274, 14160.66542001, 14720.66321017,
            14706.89675211, 14880.91017052, 14901.49219165, 14880.79548007,
            15615.43369812
        ]),
        rtol=0.0,
        atol=1e-1), '1st moments changed'
    assert numpy.allclose(elmom['MOM2'][0],
                          numpy.array([
                              0., 0., 0., 439.76305578, 479.32501708,
                              325.96571646, 348.71402151, 362.29430475,
                              128.76827924, 0., 0., 322.61461489, 268.26542796,
                              27.14271982, 259.24977286, 0., 181.94055378,
                              129.62366078, 147.48288905, 225.76488299,
                              132.57819153, 0.
                          ]),
                          rtol=0.0,
                          atol=1e-1), '2nd moments changed'
    assert numpy.allclose(elmom['EW'][0],
                          numpy.array([
                              -0.83148156, 0., -0.67854382, -6.65583709,
                              -4.99844209, -3.06783667, -6.6506484,
                              -6.86724193, -0.99166185, 0.08843696, 0.01728948,
                              -1.81199184, 0.28592615, -0.46054113, 1.48650809,
                              0.03822714, -0.40850899, 0.33980593, -0.42043643,
                              0.73608197, -0.09406925, 0.01217937
                          ]),
                          rtol=0.0,
                          atol=1e-2), 'EW changed'
Example #25
def test_moments_with_continuum():
    # Read the data
    specfile = data_test_file('MaNGA_test_spectra.fits.gz')
    hdu = fits.open(specfile)
    drpbm = DRPFitsBitMask()
    flux = numpy.ma.MaskedArray(hdu['FLUX'].data,
                                mask=drpbm.flagged(
                                    hdu['MASK'].data,
                                    MaNGADataCube.do_not_fit_flags()))
    ferr = numpy.ma.power(hdu['IVAR'].data, -0.5)
    flux[ferr.mask] = numpy.ma.masked
    ferr[flux.mask] = numpy.ma.masked
    nspec = flux.shape[0]

    # Instantiate the template library
    velscale_ratio = 4
    tpl = TemplateLibrary('MILESHC',
                          match_resolution=False,
                          velscale_ratio=velscale_ratio,
                          spectral_step=1e-4,
                          log=True,
                          hardcopy=False)
    tpl_sres = numpy.mean(tpl['SPECRES'].data, axis=0)

    # Get the pixel mask
    pixelmask = SpectralPixelMask(artdb=ArtifactDB.from_key('BADSKY'),
                                  emldb=EmissionLineDB.from_key('ELPSCMSK'))

    # Instantiate the fitting class
    ppxf = PPXFFit(StellarContinuumModelBitMask())

    # Perform the fit
    fit_wave, fit_flux, fit_mask, fit_par \
        = ppxf.fit(tpl['WAVE'].data.copy(), tpl['FLUX'].data.copy(), hdu['WAVE'].data, flux, ferr,
                   hdu['Z'].data, numpy.full(nspec, 100.), iteration_mode='no_global_wrej',
                   reject_boxcar=100, ensemble=False, velscale_ratio=velscale_ratio,
                   mask=pixelmask, matched_resolution=False, tpl_sres=tpl_sres,
                   obj_sres=hdu['SRES'].data, degree=8, moments=2)

    # Remask the continuum fit
    sc_continuum = StellarContinuumModel.reset_continuum_mask_window(
        numpy.ma.MaskedArray(fit_flux, mask=fit_mask > 0))

    # Read the database that defines the emission lines and passbands
    momdb = EmissionMomentsDB.from_key('ELBMILES')

    # Measure the moments
    elmombm = EmissionLineMomentsBitMask()
    elmom = EmissionLineMoments.measure_moments(momdb,
                                                hdu['WAVE'].data,
                                                flux,
                                                continuum=sc_continuum,
                                                redshift=hdu['Z'].data,
                                                bitmask=elmombm)

    # Measure the EW based on the moments
    include_band = numpy.array([numpy.invert(momdb.dummy)]*nspec) \
                        & numpy.invert(elmombm.flagged(elmom['MASK'],
                                                       flag=['BLUE_EMPTY', 'RED_EMPTY']))
    line_center = (1.0 + hdu['Z'].data)[:, None] * momdb['restwave'][None, :]
    elmom['BMED'], elmom['RMED'], pos, elmom['EWCONT'], elmom['EW'], elmom['EWERR'] \
            = emission_line_equivalent_width(hdu['WAVE'].data, flux, momdb['blueside'],
                                             momdb['redside'], line_center, elmom['FLUX'],
                                             redshift=hdu['Z'].data,
                                             line_flux_err=elmom['FLUXERR'],
                                             include_band=include_band)

    # Check the flags
    reference = {
        'BLUE_INCOMP': 21,
        'MAIN_JUMP': 0,
        'UNDEFINED_MOM2': 42,
        'JUMP_BTWN_SIDEBANDS': 0,
        'RED_JUMP': 0,
        'DIVBYZERO': 0,
        'NO_ABSORPTION_CORRECTION': 0,
        'RED_EMPTY': 21,
        'UNDEFINED_BANDS': 8,
        'DIDNOTUSE': 0,
        'UNDEFINED_MOM1': 0,
        'FORESTAR': 0,
        'NON_POSITIVE_CONTINUUM': 0,
        'LOW_SNR': 0,
        'MAIN_EMPTY': 21,
        'BLUE_JUMP': 0,
        'RED_INCOMP': 21,
        'MAIN_INCOMP': 21,
        'BLUE_EMPTY': 21
    }
    assert numpy.all([
        reference[k] == numpy.sum(elmombm.flagged(elmom['MASK'], flag=k))
        for k in elmombm.keys()
    ]), 'Number of flagged measurements changed'

    # Check that the values are finite
    assert numpy.all([ numpy.all(numpy.isfinite(elmom[n])) for n in elmom.dtype.names]), \
                        'Found non-finite values in output'

    # Check the band definitions
    assert numpy.all(numpy.equal(elmom['REDSHIFT'],
                                 hdu['Z'].data)), 'Redshift changed'
    assert numpy.all(numpy.isclose(numpy.mean(momdb['blueside'], axis=1)[None,:],
                                   elmom['BCEN']/(1+hdu['Z'].data[:,None]))
                        | elmombm.flagged(elmom['MASK'], flag='UNDEFINED_BANDS')), \
                'Blue passband center incorrect'
    assert numpy.all(numpy.isclose(numpy.mean(momdb['redside'], axis=1)[None,:],
                                   elmom['RCEN']/(1+hdu['Z'].data[:,None]))
                        | elmombm.flagged(elmom['MASK'], flag='UNDEFINED_BANDS')), \
                'Red passband center incorrect'

    # Check the values
    assert numpy.all(
        numpy.absolute(elmom['FLUX'][0] - numpy.array([
            0.63, 0.00, 0.22, -1.32, -0.88, -0.68, -0.44, -0.13, -1.14, -0.07,
            -0.11, 0.01, 0.38, 0.73, 0.71, 0.44, 0.08, 0.74, 1.30, 2.34, 0.55,
            0.44
        ])) < 0.01), 'Fluxes too different'

    assert numpy.all(numpy.absolute(elmom['MOM1'][0] -
                        numpy.array([ 14682.6,      0.0, 14843.2, 14865.8, 14890.4, 14404.7,
                                      14208.6,  12376.0, 14662.5, 14148.5, 15804.1, 17948.4,
                                      14874.5,  14774.9, 14840.5, 14746.0, 15093.1, 14857.8,
                                      14839.0,  14840.2, 14876.0, 14859.5])) < 0.1), \
                    '1st moments too different'

    assert numpy.all(numpy.absolute(elmom['MOM2'][0] -
                        numpy.array([322.2,   0.0, 591.4, 436.4, 474.6,   0.0,   0.0,   0.0,
                                     364.6,   0.0,   0.0,   0.0, 289.1, 226.9, 282.6, 283.8,
                                     227.0, 207.7, 207.7, 253.6, 197.0, 212.4])) < 0.1), \
                    '2nd moments too different'

    assert numpy.all(numpy.absolute(elmom['EW'][0] -
                        numpy.array([ 0.63,  0.00,  0.20, -1.28, -0.76, -0.54, -0.30, -0.09,
                                     -0.61, -0.03, -0.04,  0.00,  0.13,  0.25,  0.24,  0.13,
                                      0.02,  0.22,  0.38,  0.69,  0.17,  0.13])) < 0.01), \
                    'EW too different'
Example #26
def test_ppxffit():
    # Read the data
    specfile = data_test_file('MaNGA_test_spectra.fits.gz')
    hdu = fits.open(specfile)
    drpbm = DRPFitsBitMask()
    flux = numpy.ma.MaskedArray(hdu['FLUX'].data,
                                mask=drpbm.flagged(
                                    hdu['MASK'].data,
                                    MaNGADataCube.do_not_fit_flags()))
    ferr = numpy.ma.power(hdu['IVAR'].data, -0.5)
    flux[ferr.mask] = numpy.ma.masked
    ferr[flux.mask] = numpy.ma.masked
    nspec = flux.shape[0]

    # Instantiate the template library
    velscale_ratio = 4
    tpl = TemplateLibrary('MILESHC',
                          match_resolution=False,
                          velscale_ratio=velscale_ratio,
                          spectral_step=1e-4,
                          log=True,
                          hardcopy=False)
    tpl_sres = numpy.mean(tpl['SPECRES'].data, axis=0)

    # Get the pixel mask
    pixelmask = SpectralPixelMask(artdb=ArtifactDB.from_key('BADSKY'),
                                  emldb=EmissionLineDB.from_key('ELPSCMSK'))

    # Instantiate the fitting class
    ppxf = PPXFFit(StellarContinuumModelBitMask())

    # Perform the fit
    fit_wave, fit_flux, fit_mask, fit_par \
        = ppxf.fit(tpl['WAVE'].data.copy(), tpl['FLUX'].data.copy(), hdu['WAVE'].data, flux, ferr,
                   hdu['Z'].data, numpy.full(nspec, 100.), iteration_mode='no_global_wrej',
                   reject_boxcar=100, ensemble=False, velscale_ratio=velscale_ratio,
                   mask=pixelmask, matched_resolution=False, tpl_sres=tpl_sres,
                   obj_sres=hdu['SRES'].data, degree=8, moments=2)

    # Test the results

    # Rejected pixels
    assert numpy.sum(ppxf.bitmask.flagged(fit_mask, flag='PPXF_REJECT')) == 119, \
                'Different number of rejected pixels'

    # Unable to fit
    assert numpy.array_equal(ppxf.bitmask.flagged_bits(fit_par['MASK'][5]), ['NO_FIT']), \
                'Expected NO_FIT in 6th spectrum'

    # Number of used templates
    assert numpy.array_equal(numpy.sum(numpy.absolute(fit_par['TPLWGT']) > 1e-10, axis=1),
                             [12, 13, 17, 15, 15,  0,  8, 12]), \
                'Different number of templates with non-zero weights'

    # Number of additive coefficients
    assert fit_par['ADDCOEF'].shape[1] == 9, \
                'Incorrect number of additive coefficients'

    # No multiplicative coefficients
    assert numpy.all(fit_par['MULTCOEF'] == 0), \
                'No multiplicative coefficients should exist'

    # Kinematics and errors
    assert numpy.all(numpy.absolute(fit_par['KIN'] -
                        numpy.array([[ 14880.7, 292.9], [ 15053.4, 123.2],
                                     [ 14787.5, 236.4], [  8291.8, 169.7],
                                     [  9261.4, 202.7], [     0.0,   0.0],
                                     [  5123.5,  63.8], [  5455.6,  51.8]])) < 0.1), \
                'Kinematics are too different'

    assert numpy.all(numpy.absolute(fit_par['KINERR'] -
                        numpy.array([[2.0,1.9], [1.5,1.7], [ 2.4, 2.4], [2.2,2.3],
                                     [1.1,1.1], [0.0,0.0], [26.1,30.8], [4.7,7.5]])) < 0.1), \
                'Kinematic errors are too different'

    # Velocity dispersion corrections
    assert numpy.all(numpy.absolute(fit_par['SIGMACORR_SRES'] -
                        numpy.array([23.5, 10.1, 27.3, 38.7, 22.3,  0.0, 63.8, 23.8])) < 0.1), \
                'SRES corrections are too different'

    assert numpy.all(numpy.absolute(fit_par['SIGMACORR_EMP'] -
                        numpy.array([22.6,  0.0, 26.0, 38.2, 18.0,  0.0, 70.1,  0.0])) < 0.1), \
                'EMP corrections are too different'

    # Figures of merit
    assert numpy.all(numpy.absolute(fit_par['RCHI2'] -
                        numpy.array([ 1.94, 1.18, 1.40, 1.53, 2.50, 0.00, 1.06, 0.86])) < 0.01), \
                'Reduced chi-square too different'

    assert numpy.all(
        numpy.absolute(fit_par['RMS'] - numpy.array(
            [0.033, 0.019, 0.034, 0.023, 0.046, 0.000, 0.015, 0.015])) < 0.001
    ), 'RMS too different'

    assert numpy.all(
        numpy.absolute(fit_par['FRMS'] - numpy.array(
            [0.018, 0.023, 0.023, 0.032, 0.018, 0.000, 33.577, 0.148])) < 0.001
    ), 'Fractional RMS too different'

    assert numpy.all(
        numpy.absolute(fit_par['RMSGRW'][:, 2] - numpy.array(
            [0.067, 0.037, 0.068, 0.046, 0.093, 0.000, 0.029, 0.027])) < 0.001
    ), 'Median absolute residual too different'
Example #27
def test_sasuke():
    # Read the data
    specfile = data_test_file('MaNGA_test_spectra.fits.gz')
    hdu = fits.open(specfile)
    drpbm = DRPFitsBitMask()
    flux = numpy.ma.MaskedArray(hdu['FLUX'].data,
                                mask=drpbm.flagged(
                                    hdu['MASK'].data,
                                    MaNGADataCube.do_not_fit_flags()))
    ferr = numpy.ma.power(hdu['IVAR'].data, -0.5)
    flux[ferr.mask] = numpy.ma.masked
    ferr[flux.mask] = numpy.ma.masked
    nspec = flux.shape[0]

    # Instantiate the template library
    velscale_ratio = 4
    tpl = TemplateLibrary('MILESHC',
                          match_resolution=False,
                          velscale_ratio=velscale_ratio,
                          spectral_step=1e-4,
                          log=True,
                          hardcopy=False)
    tpl_sres = numpy.mean(tpl['SPECRES'].data, axis=0)

    # Get the pixel mask
    pixelmask = SpectralPixelMask(artdb=ArtifactDB.from_key('BADSKY'),
                                  emldb=EmissionLineDB.from_key('ELPSCMSK'))

    # Instantiate the fitting class
    ppxf = PPXFFit(StellarContinuumModelBitMask())

    # Perform the fit
    sc_wave, sc_flux, sc_mask, sc_par \
        = ppxf.fit(tpl['WAVE'].data.copy(), tpl['FLUX'].data.copy(), hdu['WAVE'].data, flux, ferr,
                   hdu['Z'].data, numpy.full(nspec, 100.), iteration_mode='no_global_wrej',
                   reject_boxcar=100, ensemble=False, velscale_ratio=velscale_ratio,
                   mask=pixelmask, matched_resolution=False, tpl_sres=tpl_sres,
                   obj_sres=hdu['SRES'].data, degree=8, moments=2)

    # Mask the 5577 sky line
    pixelmask = SpectralPixelMask(artdb=ArtifactDB.from_key('BADSKY'))

    # Read the emission line fitting database
    emldb = EmissionLineDB.from_key('ELPMILES')
    assert emldb['name'][18] == 'Ha', \
                'Emission-line database names or ordering changed'

    # Instantiate the fitting class
    emlfit = Sasuke(EmissionLineModelBitMask())

    # Perform the fit
    el_wave, model, el_flux, el_mask, el_fit, el_par \
            = emlfit.fit(emldb, hdu['WAVE'].data, flux, obj_ferr=ferr, obj_mask=pixelmask,
                         obj_sres=hdu['SRES'].data, guess_redshift=hdu['Z'].data,
                         guess_dispersion=numpy.full(nspec, 100.), reject_boxcar=101,
                         stpl_wave=tpl['WAVE'].data, stpl_flux=tpl['FLUX'].data,
                         stpl_sres=tpl_sres, stellar_kinematics=sc_par['KIN'],
                         etpl_sinst_mode='offset', etpl_sinst_min=10.,
                         velscale_ratio=velscale_ratio, matched_resolution=False)

    # Rejected pixels
    assert numpy.sum(emlfit.bitmask.flagged(el_mask, flag='PPXF_REJECT')) == 266, \
                'Different number of rejected pixels'

    # Unable to fit
    assert numpy.array_equal(emlfit.bitmask.flagged_bits(el_fit['MASK'][5]), ['NO_FIT']), \
                'Expected NO_FIT in 6th spectrum'

    # No *attempted* fits should fail
    assert numpy.sum(emlfit.bitmask.flagged(el_fit['MASK'], flag='FIT_FAILED')) == 0, \
                'Fits should not fail'

    # Number of used templates
    assert numpy.array_equal(numpy.sum(numpy.absolute(el_fit['TPLWGT']) > 1e-10, axis=1),
                             [25, 22, 34, 32, 27,  0, 16, 22]), \
                'Different number of templates with non-zero weights'

    # No additive coefficients
    assert numpy.all(el_fit['ADDCOEF'] == 0), \
                'No additive coefficients should exist'

    # No multiplicative coefficients
    assert numpy.all(el_fit['MULTCOEF'] == 0), \
                'No multiplicative coefficients should exist'

    # Fit statistics
    assert numpy.all(
        numpy.absolute(
            el_fit['RCHI2'] -
            numpy.array([2.34, 1.22, 1.58, 1.88, 3.20, 0., 1.05, 0.88])) < 0.02
    ), 'Reduced chi-square are too different'

    assert numpy.all(
        numpy.absolute(el_fit['RMS'] - numpy.array(
            [0.036, 0.019, 0.036, 0.024, 0.051, 0.000, 0.012, 0.012])) < 0.001
    ), 'RMS too different'

    assert numpy.all(numpy.absolute(el_fit['FRMS'] -
                                    numpy.array([0.021, 0.025, 0.025, 0.033, 0.018, 0.000,
                                                 1.052, 0.101])) < 0.001), \
            'Fractional RMS too different'

    assert numpy.all(numpy.absolute(el_fit['RMSGRW'][:,2] -
                                    numpy.array([0.070, 0.038, 0.071, 0.047, 0.101, 0.000, 0.026,
                                                 0.024])) < 0.001), \
            'Median absolute residual too different'

    # All lines should have the same velocity
    assert numpy.all(numpy.all(el_par['KIN'][:,:,0] == el_par['KIN'][:,None,0,0], axis=1)), \
                'All velocities should be the same'

    # Test velocity values
    # TODO: Need some better examples!
    assert numpy.all(numpy.absolute(el_par['KIN'][:,0,0] -
                                    numpy.array([14704.9, 14869.3, 14767.1, 8161.9, 9258.7, 0.0,
                                                  5130.9,  5430.3])) < 0.1), \
                'Velocities are too different'

    # H-alpha dispersions
    assert numpy.all(numpy.absolute(el_par['KIN'][:,18,1] -
                                    numpy.array([1000.5, 1000.5, 224.7, 124.9, 171.2, 0.0, 81.2,
                                                   50.0])) < 1e-1), \
            'H-alpha dispersions are too different'