Example #1
def test_stats():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file())

    # Create a fake bin map
    bin_indx = numpy.arange(cube.nspec // 4, dtype=int).reshape(
                    cube.spatial_shape[0] // 2, cube.spatial_shape[1] // 2)
    bin_indx = numpy.repeat(bin_indx, 2, axis=0)
    bin_indx = numpy.repeat(bin_indx, 2, axis=1)

    # Get the bin area
    bins, area = cube.binned_on_sky_area(bin_indx)

    assert numpy.array_equal(bins, numpy.arange(cube.nspec // 4)), 'Bad bin list'
    assert numpy.allclose(area, 1.), 'Bad area calculation'

    methods = available_reduction_assessments()
    i = numpy.where([m['key'] == 'SNRG' for m in methods])[0]
    assert len(i) == 1, 'Could not find correct reduction assessment definition.'

    cen_wave = cube.central_wavelength(
        response_func=methods[i[0]]['response_func'],
        flag=cube.do_not_use_flags())
    assert numpy.isclose(cen_wave, 4638.0), 'Central wavelength changed.'

    cen_wave = cube.central_wavelength(waverange=[4000, 8000],
                                       flag=cube.do_not_use_flags(),
                                       fluxwgt=True)
    assert numpy.isclose(cen_wave, 5895.7), 'Central wavelength changed.'

    cen_wave = cube.central_wavelength(waverange=[4000, 8000],
                                       flag=cube.do_not_use_flags(),
                                       per_pixel=False)
    assert numpy.isclose(cen_wave, 6044.9), 'Central wavelength changed.'

    sig, var, snr = cube.flux_stats(
        response_func=methods[i[0]]['response_func'])
    assert sig.shape == cube.spatial_shape, 'Should be shaped as a map.'
    assert isinstance(sig, numpy.ma.MaskedArray), 'Expected masked arrays'
    assert numpy.ma.amax(snr) > 60, 'S/N changed'

    # Try it with the linear cube
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file(),
                                       log=False)
    _sig, _var, _snr = cube.flux_stats(
        response_func=methods[i[0]]['response_func'])
    # TODO: Not sure why these are not closer.
    assert numpy.absolute(numpy.ma.median((sig-_sig)/_sig)) < 0.01, \
            'Signal should be the same to better than 1%.'
    assert numpy.absolute(numpy.ma.median((var-_var)/_var)) < 0.03, \
            'Variance should be the same to better than 3%.'
    assert numpy.absolute(numpy.ma.median((snr-_snr)/_snr)) < 0.02, \
            'S/N should be the same to better than 2%.'
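
The fake bin map above assigns each 2x2 block of spaxels to a single bin, which is why
binned_on_sky_area returns an area of 1 for every bin (a 2x2 block of MaNGA's 0.5 arcsec
spaxels covers one square arcsecond). A minimal standalone sketch of the same numpy.repeat
pattern on a toy 4x4 grid (plain numpy, no MaNGA data required):

import numpy

# Toy 4x4 "cube" footprint with 16 spaxels; give each 2x2 block its own bin index.
nspec = 16
spatial_shape = (4, 4)

# One unique index per 2x2 block ...
bin_indx = numpy.arange(nspec // 4, dtype=int).reshape(spatial_shape[0] // 2,
                                                       spatial_shape[1] // 2)
# ... then expand back to the full spaxel grid by repeating along both axes.
bin_indx = numpy.repeat(bin_indx, 2, axis=0)
bin_indx = numpy.repeat(bin_indx, 2, axis=1)

print(bin_indx)
# [[0 0 1 1]
#  [0 0 1 1]
#  [2 2 3 3]
#  [2 2 3 3]]
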
Example #2
def get_spectrum(plt, ifu, x, y, directory_path=None):
    """
    Extract a single spectrum from a MaNGA observation.

    Args:
        plt (:obj:`int`):
            Plate number
        ifu (:obj:`int`):
            IFU identifier
        x (:obj:`int`):
            The spaxel coordinate along the RA axis.
        y (:obj:`int`):
            The spaxel coordinate along the DEC axis.
        directory_path (:obj:`str`, optional):
            Directory with the DRP LOGCUBE file. If None, uses the
            default directory path based on the environmental
            variables.

    Returns:
        :obj:`tuple`: Returns 4 numpy vectors: The wavelength, flux,
        flux inverse variance, and spectral resolution extracted from
        the datacube.
    """
    cube = MaNGADataCube.from_plateifu(plt, ifu, directory_path=directory_path)
    flat_indx = cube.spatial_shape[1] * x + y
    # This function always returns a masked array
    flux = cube.copy_to_masked_array(attr='flux', flag=cube.do_not_fit_flags())
    ivar = cube.copy_to_masked_array(attr='ivar', flag=cube.do_not_fit_flags())
    sres = cube.copy_to_array(attr='sres')
    return cube.wave, flux[flat_indx, :], ivar[flat_indx, :], sres[flat_indx, :]
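
A minimal usage sketch for get_spectrum, assuming the DRP LOGCUBE for plate-IFU 7815-3702
has already been downloaded; the directory path below is a placeholder, not a real default:

from matplotlib import pyplot

# Placeholder path to the directory holding the LOGCUBE file.
directory_path = './remote'

# Spaxel (21, 21) is at the cube center for this test observation (see test_wcs below).
wave, flux, ivar, sres = get_spectrum(7815, 3702, 21, 21,
                                      directory_path=directory_path)

pyplot.plot(wave, flux)
pyplot.xlabel('Wavelength')
pyplot.ylabel('Flux')
pyplot.show()
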
Example #3
def test_read_lin():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file(),
                                       log=False)
    assert not cube.log, 'Wavelength sampling should be linear'
    assert numpy.isclose(numpy.std(numpy.diff(cube.wave)), 0.), \
                'Wavelength sampling should be linear'
Example #4
def get_spectra(plt, ifu, x, y, directory_path=None):
    cube = MaNGADataCube.from_plateifu(plt, ifu, directory_path=directory_path)
    flat_indx = cube.spatial_shape[1]*x+y
    # This function always returns a masked array
    flux = cube.copy_to_masked_array(attr='flux', flag=cube.do_not_fit_flags())
    ivar = cube.copy_to_masked_array(attr='ivar', flag=cube.do_not_fit_flags())
    sres = cube.copy_to_array(attr='sres')
    return cube.wave, flux[flat_indx,:], ivar[flat_indx,:], sres[flat_indx,:]
Example #5
def test_copyto():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file())
    flux = cube.copy_to_array()
    assert not isinstance(flux, numpy.ma.MaskedArray), 'Should output normal array'
    assert flux.shape[0] == cube.nspec, 'Should be flattened into a 2D array.'
    assert flux.shape[1] == cube.nwave, 'Should be flattened into a 2D array.'

    # Apply a wavelength mask
    waverange = [5000, 7000]
    flux = cube.copy_to_array(waverange=waverange)
    indx = (cube.wave > waverange[0]) & (cube.wave < waverange[1])
    assert flux.shape[1] == numpy.sum(indx), 'Wavelength range masking failed'

    # Find the spaxels with non-zero signal
    methods = available_reduction_assessments()
    i = numpy.where([m['key'] == 'SNRG' for m in methods])[0]
    assert len(i) == 1, 'Could not find correct reduction assessment definition.'
    sig, var, snr = cube.flux_stats(
        response_func=methods[i[0]]['response_func'])
    indx = ((sig > 0) & numpy.invert(numpy.ma.getmaskarray(sig))).data.ravel()
    ngood = numpy.sum(indx)

    # Select the spaxels with non-zero signal
    flux = cube.copy_to_array(waverange=waverange, select_bins=indx)
    assert flux.shape[0] == ngood, 'Bin selection failed'

    # Get the masked array
    flux = cube.copy_to_masked_array()
    assert isinstance(flux, numpy.ma.MaskedArray), 'Should output a masked array'
    assert flux.shape[0] == cube.nspec, 'Should be flattened into a 2D array.'
    assert flux.shape[1] == cube.nwave, 'Should be flattened into a 2D array.'

    # Select the spaxels with non-zero signal
    flux = cube.copy_to_masked_array(select_bins=indx)
    assert flux.shape[0] == ngood, 'Bin selection failed'

    # Try to get the inverse variance
    i = cube.nspec // 2 + cube.spatial_shape[1] // 2
    ivar = cube.copy_to_masked_array(attr='ivar')
    assert ivar.shape == (cube.nspec, cube.nwave), 'Bad ivar shape'
    assert numpy.array_equal(
        cube.ivar[numpy.unravel_index(i, cube.spatial_shape)],
        ivar[i].data), 'Did not pull ivar data.'

    # Try to get the spectral resolution
    sres = cube.copy_to_masked_array(attr='sres')
    assert sres.shape == (cube.nspec, cube.nwave), 'Bad sres shape'
    assert numpy.array_equal(
        cube.sres[numpy.unravel_index(i, cube.spatial_shape)],
        sres[i].data), 'Did not pull sres data.'
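
The tests above index the flattened (nspec, nwave) arrays returned by copy_to_array and
copy_to_masked_array, moving between the flat spectrum index and the 2D spaxel coordinates
with numpy's row-major convention. A small standalone sketch of that mapping, matching
flat_indx = cube.spatial_shape[1]*x + y in the get_spectrum example above (the 42x42
footprint is inferred from the 1764-spectrum covariance matrix in test_covariance below):

import numpy

spatial_shape = (42, 42)     # 42 * 42 = 1764 spectra, as in test_covariance
x, y = 21, 10                # example spaxel coordinates

# Forward mapping used in get_spectrum: row-major flattening of (x, y).
flat_indx = spatial_shape[1] * x + y

# Inverse mapping used in test_copyto to index the original 3D arrays.
assert numpy.unravel_index(flat_indx, spatial_shape) == (x, y)
assert numpy.ravel_multi_index((x, y), spatial_shape) == flat_indx
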
Example #6
def test_rectification_shape():
    # Load the datacube and the row-stacked spectra
    cube = MaNGADataCube.from_plateifu(7815, 3702, directory_path=remote_data_file())
    cube.load_rss()

    # Get the rectification parameters
    pixelscale, rlim, sigma, recenter, width_buffer \
            = MaNGARSS._parse_rectification_parameters(None, None, None, None, None)
    # Get the cube dimensions
    cube.rss._cube_dimensions(pixelscale=pixelscale, recenter=recenter, width_buffer=width_buffer)
    # Make sure they match what the DRP produced
    assert cube.spatial_shape == (cube.rss.nx, cube.rss.ny), 'Mismatched cube spatial dimensions'
Example #7
def test_read_correl():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file(),
                                       covar_ext='GCORREL')
    assert isinstance(cube.covar, Covariance), 'Incorrect type for covariance.'
    assert cube.covar.shape == (cube.nspec, cube.nspec), 'Covariance has incorrect shape.'
    assert cube.covar.is_correlation, 'Covariance object should be in a correlation mode.'

    # Check that the variances are all unity (or close to it when it's defined)
    unique_var = numpy.unique(cube.covar.var)
    assert numpy.allclose(unique_var[unique_var > 0], 1.), 'Bad variance values'
Example #8
def test_match_resolution():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file())
    tpl = TemplateLibrary('MILESHC',
                          cube=cube,
                          match_resolution=True,
                          velscale_ratio=4,
                          hardcopy=False)

    # Resolution should be virtually identical in unmasked regions
    indx = tpl['MASK'].data == 0
    assert numpy.std(tpl.sres(tpl['WAVE'].data[indx[0]]) - tpl['SPECRES'].data[0,indx[0]]) < 0.1, \
                'Spectral resolution difference is above tolerance.'
Example #9
def test_wcs():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file())
    x, y = cube.mean_sky_coordinates(offset=None)
    assert x[0, 0] > x[-1, 0], 'RA should increase from large to small indices'
    assert y[0, 0] < y[0, -1], 'DEC should increase from small to large indices'
    assert numpy.unravel_index(numpy.argmin(numpy.square(x - cube.prihdr['OBJRA'])
                                            + numpy.square(y - cube.prihdr['OBJDEC'])),
                               x.shape) == (21, 21), 'Object should be at cube center.'
    x, y = cube.mean_sky_coordinates(center_coo=(x[0, 0], y[0, 0]))
    assert numpy.isclose(x[0, 0], 0.0) and numpy.isclose(y[0, 0], 0.0), 'Offset incorrect'
    x, y = cube.mean_sky_coordinates()
    assert abs(x[21, 21]) < 1e-2 and abs(y[21, 21]) < 1e-2, 'Offset incorrect'
Example #10
def test_rectification_recovery():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file(),
                                       covar_ext='GCORREL')
    cube.load_rss()

    hdu = fits.open(cube.file_path())
    channel = hdu['GCORREL'].header['BBINDEX']

    gcorrel = numpy.zeros(eval(hdu['GCORREL'].header['COVSHAPE']), dtype=float)
    i = numpy.ravel_multi_index(
        (hdu['GCORREL'].data['INDXI_C1'], hdu['GCORREL'].data['INDXI_C2']),
        cube.spatial_shape)

    j = numpy.ravel_multi_index(
        (hdu['GCORREL'].data['INDXJ_C1'], hdu['GCORREL'].data['INDXJ_C2']),
        cube.spatial_shape)
    gcorrel[i, j] = hdu['GCORREL'].data['RHOIJ']
    gcorrel[j, i] = hdu['GCORREL'].data['RHOIJ']

    assert numpy.allclose(cube.covar.toarray(), gcorrel), 'Bad covariance read'

    flux, C = cube.rss.rectify_wavelength_plane(channel, return_covar=True)
    assert numpy.allclose(cube.flux[..., channel],
                          flux), 'Bad flux rectification'

    ivar = numpy.ma.power(C.variance().reshape(cube.spatial_shape),
                          -1).filled(0.0)
    assert numpy.allclose(cube.ivar[..., channel],
                          ivar), 'Bad inverse variance rectification'

    C.to_correlation()
    assert numpy.allclose(C.toarray(), gcorrel), 'Bad covariance calculation'

    sres = numpy.ma.divide(cube.rss.wave[channel],
                           cube.rss.instrumental_dispersion_plane(channel).ravel()) \
                / DAPConstants.sig2fwhm

    # WARNING: The computations done by the DRP and DAP are different
    # in detail, but (at least for this test cube) the results are
    # virtually identical except for notable outliers.
    assert numpy.ma.median(cube.sres[...,channel].ravel() - sres) < 0.1, \
            'Bad spectral resolution rectification'
Example #11
def test_read():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file())

    assert cube.log, 'Should read the log-binned version by default.'
    assert cube.wcs is not None, 'WCS should be defined.'
    assert cube.shape[:2] == cube.spatial_shape, 'Spatial shape should be first two axes.'
    assert cube.nspec == numpy.prod(cube.spatial_shape), \
            'Definition of number of spectra changed.'
    assert cube.sres is not None, 'Spectral resolution data was not constructed.'
    assert cube.sres_ext == 'LSFPRE', 'Should default to LSFPRE extension.'
    assert abs(cube.pixelscale - cube._get_pixelscale()) < 1e-6, 'Bad match in pixel scale.'
    # NOTE: This is worse than it should be because of how the WCS in MaNGA is defined.
    assert numpy.all(numpy.absolute(cube.wave - cube._get_wavelength_vector()) < 2e-4), \
            'Bad calculation of wavelength vector.'
    assert cube.covar is None, 'Covariance should not have been read'
Example #12
def test_covariance():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file())

    with pytest.raises(ValueError):
        # Have to load the RSS first
        cube.covariance_matrix(1000)

    # Load the RSS
    cube.load_rss()

    # Construct a covariance matrix
    C = cube.covariance_matrix(1000)
    assert C.shape == (1764, 1764), 'Bad covariance shape'

    # Make it a correlation matrix and check it
    C.to_correlation()

    # Check that the variances are all unity (or close to it when it's defined)
    unique_var = numpy.unique(numpy.diag(C.toarray()))
    assert numpy.allclose(unique_var[unique_var > 0], 1.), 'Bad correlation diagonal'

    # Try multiple channels
    C = cube.covariance_cube(channels=[1000, 2000])
    assert numpy.array_equal(C.input_indx, [1000, 2000]), 'Bad matrix indices'
    assert C.shape == (1764, 1764, 2), 'Bad covariance shape'

    # Try to convert multiple channels
    C.to_correlation()
    # And reverting it
    C.revert_correlation()

    # Try to generate an approximate correlation matrix, covariance
    # matrix, and covariance cube
    approxC = cube.approximate_correlation_matrix()
    approxC = cube.approximate_covariance_matrix(1000)
    approxC = cube.approximate_covariance_cube(channels=[1000, 2000])

    # Variance should be the same for direct and approximate calculations
    assert numpy.allclose(approxC.variance(), C.variance()), 'Variances should be the same.'
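
For reference, a small plain-numpy illustration of the covariance-to-correlation
normalization that to_correlation performs conceptually, rho_ij = C_ij / sqrt(C_ii * C_jj);
this is only a sketch of the relation checked above (unit diagonal), not the Covariance API:

import numpy

# A tiny symmetric covariance matrix.
cov = numpy.array([[4.0, 1.2, 0.0],
                   [1.2, 9.0, 0.6],
                   [0.0, 0.6, 1.0]])

# Normalize by the outer product of the standard deviations.
sigma = numpy.sqrt(numpy.diag(cov))
rho = cov / numpy.outer(sigma, sigma)

# The correlation matrix has a unit diagonal, as the tests above verify.
assert numpy.allclose(numpy.diag(rho), 1.0)

# Reverting the correlation recovers the original covariance.
assert numpy.allclose(rho * numpy.outer(sigma, sigma), cov)
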
Example #13
def test_match_resolution():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file())
    tpl = TemplateLibrary('MILESHC',
                          cube=cube,
                          match_resolution=True,
                          velscale_ratio=4,
                          hardcopy=False,
                          output_path=remote_data_file())

    # Resolution should be virtually identical in unmasked regions
    indx = tpl['MASK'].data == 0
    assert numpy.std(tpl.sres(tpl['WAVE'].data[indx[0]]) - tpl['SPECRES'].data[0,indx[0]]) < 0.1, \
                'Spectral resolution difference is above tolerance.'

    # Check the file that would have been written has the expected path
    assert cube.directory_path == tpl.directory_path, 'Cube and TPL paths should match.'
    assert tpl.file_name().startswith(cube.output_root), \
            'TPL file should start with the cube root'
Example #14
def test_load_rss():
    cube = MaNGADataCube.from_plateifu(7815,
                                       3702,
                                       directory_path=remote_data_file())
    cube.load_rss()
Example #15
def resample_test():

    # Read the example datacube and get the expected redshift.  You can
    # download these data using
    # https://github.com/sdss/mangadap/blob/master/download_test_data.py
    plt = 7815
    ifu = 3702
    drpver = 'v3_1_1'
    directory_path = defaults.dap_source_dir() / 'data' / 'remote'
    cube = MaNGADataCube.from_plateifu(plt, ifu, directory_path=directory_path)
    drpall_file = directory_path / f'drpall-{drpver}.fits'
    z = get_redshift(plt, ifu, drpall_file)

    # Pull out two example spectra from the datacube
    old_wave = cube.wave
    old_flux = numpy.ma.MaskedArray(cube.flux[10, 10:12, :],
                                    mask=cube.mask[10, 10:12, :] > 0)
    old_flux[:, (old_wave > 5570) & (old_wave < 5586)] = numpy.ma.masked
    old_ferr = numpy.ma.power(cube.ivar[10, 10:12, :], -0.5)

    if spectres is not None:
        # Use spectres to resample the spectrum, ignoring last pixel
        indx = (old_wave > old_wave[0] / (1 + z)) & (old_wave < old_wave[-2] / (1 + z))
        t = time.perf_counter()
        new_flux_spectres = numpy.empty((old_flux.shape[0], numpy.sum(indx)),
                                        dtype=float)
        new_ferr_spectres = numpy.empty((old_flux.shape[0], numpy.sum(indx)),
                                        dtype=float)
        for i in range(old_flux.shape[0]):
            new_flux_spectres[i,:], new_ferr_spectres[i,:] \
                    = spectres.spectres(old_wave[indx], old_wave/(1+z), old_flux[i,:].filled(0.0),
                                        spec_errs=old_ferr[i,:].filled(0.0))
        print('SpectRes Time: ', time.perf_counter() - t)

    # Use a brute-force integration of the spectra to resample the spectrum
    t = time.perf_counter()
    borders = grid_borders(numpy.array([old_wave[0], old_wave[-1]]),
                           old_wave.size,
                           log=True)[0]
    _p = numpy.repeat(borders, 2)[1:-1].reshape(-1, 2)
    new_flux_brute = numpy.array([
        passband_integral(old_wave / (1 + z), f, passband=_p, log=True)
        for f in old_flux.filled(0.0)
    ])
    new_flux_brute /= (_p[:, 1] - _p[:, 0])[None, :]
    print('Brute Force Time: ', time.perf_counter() - t)

    # Use the Resample class to resample the spectrum
    t = time.perf_counter()
    r = Resample(old_flux,
                 e=old_ferr,
                 x=old_wave / (1 + z),
                 newRange=[old_wave[0], old_wave[-1]],
                 inLog=True,
                 newLog=True)
    print('Resample Time: ', time.perf_counter() - t)

    # Estimate the differences between the resampling methods (these should all
    # be the same to nearly numerical accuracy)
    print('Mean diff:')
    if spectres is not None:
        print('    spectres - brute    = {0:.5e}'.format(
            numpy.mean(
                numpy.absolute(new_flux_spectres - new_flux_brute[:, indx]))))
        print('    spectres - resample = {0:.5e}'.format(
            numpy.mean(numpy.absolute(new_flux_spectres - r.outy[:, indx]))))
    print('    brute - resample    = {0:.5e}'.format(
        numpy.mean(numpy.absolute(new_flux_brute - r.outy))))

    # Plot the original and resampled versions for all spectra.  The resampled
    # versions should all be indistinguishable.
    for i in range(old_flux.shape[0]):
        pyplot.plot(old_wave / (1 + z), old_flux[i, :], label='Data')
        if spectres is not None:
            pyplot.plot(old_wave[indx],
                        new_flux_spectres[i, :],
                        label='spectres')
        pyplot.plot(old_wave, new_flux_brute[i, :], label='brute')
        pyplot.plot(r.outx, r.outy[i, :], label='Resample')
        pyplot.plot(r.outx, r.outf[i, :], label='Good-pixel Mask')
        pyplot.legend()
        pyplot.xlabel('Wavelength')
        pyplot.ylabel('Flux')
        pyplot.show()