def test_view_dict(imviz_helper):
    mv = MetadataViewer(app=imviz_helper.app)
    ndd_1 = NDData(np.zeros((2, 2)), meta={
        'EXTNAME': 'SCI', 'EXTVER': 1, 'BAR': 10.0, 'FOO': '',
        'COMMENT': 'a test', 'BOZO': None})
    ndd_2 = NDData(np.ones((2, 2)), meta={
        'EXTNAME': 'ASDF',
        'REF': {'bar': 10.0, 'foo': {'1': '', '2': [1, 2]}}})
    arr = np.zeros((2, 2))
    imviz_helper.load_data(ndd_1, data_label='has_simple_meta')
    imviz_helper.load_data(ndd_2, data_label='has_nested_meta')
    imviz_helper.load_data(arr, data_label='no_meta')
    assert mv.dc_items == ['has_simple_meta[DATA]', 'has_nested_meta[DATA]',
                           'no_meta']

    mv.selected_data = 'has_simple_meta[DATA]'
    assert mv.has_metadata
    assert mv.metadata == [
        ('BAR', '10.0'), ('BOZO', 'None'), ('EXTNAME', 'SCI'),
        ('EXTVER', '1'), ('FOO', '')], mv.metadata

    mv.selected_data = 'has_nested_meta[DATA]'
    assert mv.has_metadata
    assert mv.metadata == [
        ('EXTNAME', 'ASDF'), ('REF.bar', '10.0'), ('REF.foo.1', ''),
        ('REF.foo.2.0', '1'), ('REF.foo.2.1', '2')], mv.metadata

    mv.selected_data = 'no_meta'
    assert not mv.has_metadata
    assert mv.metadata == []
def test_append_table_to_extensions(tmp_path):
    testfile = tmp_path / 'test.fits'
    ad = astrodata.create({})
    ad.append(NDData(np.zeros((4, 5))))
    ad.append(NDData(np.zeros((4, 5))))
    ad.append(NDData(np.zeros((4, 5)), meta={'header': {'FOO': 'BAR'}}))
    ad[0].TABLE1 = Table([[1]])
    ad[0].TABLE2 = Table([[22]])
    ad[1].TABLE2 = Table([[2]])  # extensions can have the same table name
    ad[2].TABLE3 = Table([[3]])
    ad.write(testfile)

    ad = astrodata.open(testfile)
    # Check that slices do not report extension tables
    assert ad.exposed == set()
    assert ad[0].exposed == {'TABLE1', 'TABLE2'}
    assert ad[1].exposed == {'TABLE2'}
    assert ad[2].exposed == {'TABLE3'}
    assert ad[1:].exposed == set()
    assert ad[2].hdr['FOO'] == 'BAR'

    match = ("Cannot append table 'TABLE1' because it would hide an "
             "extension table")
    with pytest.raises(ValueError, match=match):
        ad.TABLE1 = Table([[1]])
def __init__(
    self,
    flux,
    dispersion=None,
    dispersion_unit=None,
    uncertainty=None,
    mask=None,
    wcs=None,
    meta=None,
    unit=None,
    flags=None,
):
    # needed to change order from (dispersion, flux) -> (flux, dispersion)
    # as dispersion=None for wcs.
    # added some WCS handling as it was not clear how to deal with both
    # wcs and dispersion.
    NDData.__init__(self, data=flux, uncertainty=uncertainty, mask=mask,
                    wcs=wcs, meta=meta, unit=unit, flags=flags)

    if wcs is None:
        self.dispersion = dispersion
        self.dispersion_unit = dispersion_unit
    else:
        self.wcs = wcs
        self.dispersion = wcs.get_lookup_table()
        self.dispersion_unit = wcs.units[0]
def alinear(x1, y1, x2, y2, dim_1_x, dim_1_y, dim_2_x, dim_2_y, actual):
    # 1: reference image  2: position of the current image
    """
    Shift the object of study to a common reference point.

    Args:
        - x1, y1, x2, y2: coordinates before and after
        - dim_1_x, dim_1_y, dim_2_x, dim_2_y: matrix dimensions, before and after
        - actual: matrix of the image to align

    Returns:
        - shifted matrix
    """
    actual = NDData(actual)
    diferencia_x = x1 - x2
    diferencia_y = y1 - y2
    matriz_final = np.zeros((dim_1_x, dim_1_y))
    matriz_final = NDData(matriz_final)
    for i in range(dim_2_x):
        for j in range(dim_2_y):
            x = i + diferencia_x
            y = j + diferencia_y
            if 0 < x < dim_1_x and 0 < y < dim_1_y:
                matriz_final.data[x][y] = actual.data[i][j]
    return matriz_final
def to_model(data, meta):
    """
    Create a photutils GriddedPSFModel object from input data and meta
    information.

    Parameters
    ----------
    data : ndarray
        3D numpy array of PSFs at different points across the detector

    meta : dict
        Dictionary containing meta data

    Returns
    -------
    model : GriddedPSFModel
        Photutils object with 3D data array and metadata with specified
        grid_xypos and oversampling keys
    """
    try:
        from photutils import GriddedPSFModel
    except ImportError:
        raise ImportError("This method requires photutils >= 0.6")

    ndd = NDData(data, meta=meta, copy=True)

    ndd.meta['grid_xypos'] = [
        ((float(ndd.meta[key][0].split(',')[1].split(')')[0])),
         (float(ndd.meta[key][0].split(',')[0].split('(')[1])))
        for key in ndd.meta.keys() if "DET_YX" in key]

    ndd.meta['oversampling'] = meta["OVERSAMP"][0]  # just pull the value
    ndd.meta = {key.lower(): ndd.meta[key] for key in ndd.meta}

    model = GriddedPSFModel(ndd)

    return model
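# A minimal usage sketch for to_model() above, with entirely made-up values.
# The meta dict is assumed to mimic FITS header cards, i.e. {KEY: (value,
# comment)}, with one 'DET_YX*' entry per PSF giving its '(y, x)' detector
# position and an 'OVERSAMP' entry giving the oversampling factor.
import numpy as np

psfs = np.zeros((4, 25, 25))  # four 25x25 PSFs laid out on a 2x2 grid
meta = {
    'DET_YX0': ('(0, 0)', 'PSF 0 detector position'),
    'DET_YX1': ('(0, 1024)', 'PSF 1 detector position'),
    'DET_YX2': ('(1024, 0)', 'PSF 2 detector position'),
    'DET_YX3': ('(1024, 1024)', 'PSF 3 detector position'),
    'OVERSAMP': (4, 'Oversampling factor'),
}
model = to_model(psfs, meta)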
def test_register_nddata(self):
    from astropy.nddata import NDData
    from skimage.transform import SimilarityTransform

    transf = SimilarityTransform(rotation=np.pi / 2.0, translation=(1, 0))

    nd = NDData([[0.0, 1.0], [2.0, 3.0]],
                mask=[[True, False], [False, False]])
    registered_img, footp = aa.apply_transform(transf, nd, nd,
                                               propagate_mask=True)
    err = np.linalg.norm(registered_img - np.array([[2.0, 0.0], [3.0, 1.0]]))
    self.assertLess(err, 1e-6)
    err_mask = footp == np.array([[False, True], [False, False]])
    self.assertTrue(all(err_mask.flatten()))

    # Test now if there is no assigned mask during creation
    nd = NDData([[0.0, 1.0], [2.0, 3.0]])
    registered_img, footp = aa.apply_transform(transf, nd, nd,
                                               propagate_mask=True)
    err = np.linalg.norm(registered_img - np.array([[2.0, 0.0], [3.0, 1.0]]))
    self.assertLess(err, 1e-6)
    err_mask = footp == np.array([[False, False], [False, False]])
    self.assertTrue(all(err_mask.flatten()))
def test_masked_nddata(convfunc):
    arr = np.zeros((11, 11))
    arr[4, 5] = arr[6, 5] = arr[5, 4] = arr[5, 6] = 0.2
    arr[5, 5] = 1.5
    ndd_base = NDData(arr)

    mask = arr < 0  # this is all False
    mask[5, 5] = True
    ndd_mask = NDData(arr, mask=mask)

    arrnan = arr.copy()
    arrnan[5, 5] = np.nan
    ndd_nan = NDData(arrnan)

    test_kernel = Gaussian2DKernel(1)

    result_base = convfunc(ndd_base, test_kernel)
    result_nan = convfunc(ndd_nan, test_kernel)
    result_mask = convfunc(ndd_mask, test_kernel)

    assert np.allclose(result_nan, result_mask)
    assert not np.allclose(result_base, result_mask)
    assert not np.allclose(result_base, result_nan)

    # check to make sure the mask run doesn't talk back to the initial array
    assert np.sum(np.isnan(ndd_base.data)) != np.sum(np.isnan(ndd_nan.data))
def test_register_nddata(self):
    nd_image = NDData(self.image, mask=self.image_mask)
    nd_image_ref = NDData(self.image_ref, mask=self.image_ref_mask)
    registered_img, footp = aa.register(source=nd_image,
                                        target=nd_image_ref)
    self.assertIsInstance(registered_img, np.ndarray)
    self.assertIsInstance(footp, np.ndarray)
    self.assertIs(footp.dtype, np.dtype("bool"))
    fraction = self.compare_image(registered_img)
    self.assertGreater(fraction, 0.85)
def test_append_nddata_to_root_no_name(testfile2):
    ad = astrodata.open(testfile2)

    lbefore = len(ad)
    ones = np.ones((10, 10))
    hdu = fits.ImageHDU(ones)
    nd = NDData(hdu.data)
    nd.meta['header'] = hdu.header
    ad.append(nd)
    assert len(ad) == (lbefore + 1)
def test_append_nddata_to_root_with_arbitrary_name(testfile2):
    ad = astrodata.open(testfile2)
    assert len(ad) == 6

    ones = np.ones((10, 10))
    hdu = fits.ImageHDU(ones)
    nd = NDData(hdu.data)
    nd.meta['header'] = hdu.header
    hdu.header['EXTNAME'] = 'ARBITRARY'
    with pytest.raises(ValueError):
        ad.append(nd)
def test_append_nddata_to_root_no_name(test_path):
    test_filename = 'GMOS/N20160524S0119.fits'
    ad = astrodata.open(os.path.join(test_path, test_filename))

    lbefore = len(ad)
    ones = np.ones((100, 100))
    hdu = ImageHDU(ones)
    nd = NDData(hdu.data)
    nd.meta['header'] = hdu.header
    ad.append(nd)
    assert len(ad) == (lbefore + 1)
def test_append_tables2():
    """Check that slices do not report extension tables."""
    ad = astrodata.create({})
    ad.append(NDData(np.zeros((4, 5)), meta={'header': {}}))
    ad.append(NDData(np.zeros((4, 5)), meta={'header': {}}))
    ad.append(NDData(np.zeros((4, 5)), meta={'header': {}}))
    ad.append(Table([[1]]), name='TABLE1', add_to=ad[0].nddata)
    ad.append(Table([[1]]), name='TABLE2', add_to=ad[1].nddata)
    ad.append(Table([[1]]), name='TABLE3', add_to=ad[2].nddata)
    assert ad.exposed == set()
    assert ad[1].exposed == {'TABLE2'}
    assert ad[1:].exposed == set()
def test_append_nddata_to_root_with_arbitrary_name(test_path):
    test_filename = 'GMOS/N20160524S0119.fits'
    ad = astrodata.open(os.path.join(test_path, test_filename))

    lbefore = len(ad)
    ones = np.ones((100, 100))
    hdu = ImageHDU(ones)
    nd = NDData(hdu.data)
    nd.meta['header'] = hdu.header
    hdu.header['EXTNAME'] = 'ARBITRARY'
    with pytest.raises(ValueError) as excinfo:
        ad.append(nd)
def test_write_and_read(tmpdir, capsys):
    ad = astrodata.create({})
    nd = NDData(data=[[1, 2], [3, 4]],
                uncertainty=VarianceUncertainty(np.ones((2, 2))),
                mask=np.identity(2),
                meta={'header': fits.Header()})
    ad.append(nd)

    tbl = Table([np.zeros(10), np.ones(10)], names=('a', 'b'))
    with pytest.raises(ValueError,
                       match='Tables should be set directly as attribute'):
        ad.append(tbl, name='BOB')

    ad.BOB = tbl

    tbl = Table([np.zeros(5) + 2, np.zeros(5) + 3], names=('c', 'd'))
    match = "Cannot append table 'BOB' because it would hide a top-level table"
    with pytest.raises(ValueError, match=match):
        ad[0].BOB = tbl

    ad[0].BOB2 = tbl
    ad[0].MYVAL_WITH_A_VERY_LONG_NAME = np.arange(10)

    match = "You can only append NDData derived instances at the top level"
    with pytest.raises(TypeError, match=match):
        ad[0].MYNDD = NDData(data=np.ones(10), meta={'header': fits.Header()})

    testfile = str(tmpdir.join('testfile.fits'))
    ad.write(testfile)

    ad = astrodata.open(testfile)
    ad.info()
    captured = capsys.readouterr()
    assert captured.out.splitlines()[3:] == [
        'Pixels Extensions',
        'Index Content Type Dimensions Format',
        '[ 0] science NDAstroData (2, 2) int64',
        ' .variance ADVarianceUncerta (2, 2) float64',
        ' .mask ndarray (2, 2) uint16',
        ' .BOB2 Table (5, 2) n/a',
        ' .MYVAL_WITH_A_VERY_LO ndarray (10,) int64',
        '',
        'Other Extensions',
        ' Type Dimensions',
        '.BOB Table (10, 2)'
    ]

    assert_array_equal(ad[0].nddata.data[0], nd.data[0])
    assert_array_equal(ad[0].nddata.variance[0], nd.uncertainty.array[0])
    assert_array_equal(ad[0].nddata.mask[0], nd.mask[0])
def basic_fits_to_nddata(filename, exten=0):
    """
    Read a single FITS extension into a `~astropy.nddata.NDData` object.

    This is an *extremely* simple reader that reads data from only a
    single FITS extension.  Note that the primary FITS header will always
    be included in the `~astropy.nddata.NDData` meta `dict`, regardless
    of the value of ``exten``.

    Parameters
    ----------
    filename : str
        The path to a FITS file.

    exten : int, optional
        The FITS extension number for the array to place in the NDData
        object.  The default is 0.

    Returns
    -------
    nddata : `~astropy.nddata.NDData`
        An `~astropy.nddata.NDData` object with a ``data`` attribute
        containing the FITS data array and a ``meta`` attribute,
        containing the FITS header as a python `dict`.
    """
    with fits.open(filename) as hdulist:
        header = hdulist[0].header
        header += hdulist[exten].header
        data = hdulist[exten].data
    return NDData(data, meta=header)
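# A minimal usage sketch for the reader above; 'image.fits' and the extension
# number are hypothetical stand-ins for any FITS file with an image extension.
ndd = basic_fits_to_nddata('image.fits', exten=1)
print(ndd.data.shape)
print(ndd.meta['NAXIS'])  # cards from the primary plus the chosen extension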
def run(self, cube):
    """
    Run the indexing algorithm on a given data cube.

    Parameters
    ----------
    cube : (M,N,Z) numpy.ndarray or astropy.nddata.NDData or astropy.nddata.NDDataRef
        Astronomical data cube.

    Returns
    -------
    List of ROI with the cube slice, segmented images for each resolution
    and ROI table.
    """
    if type(cube) is NDData or type(cube) is NDDataRef:
        if cube.wcs:
            wcs = cube.wcs
        else:
            wcs = None
        data = cube.data
    else:
        data = cube
        wcs = None

    c = []
    ROI = namedtuple('RegionsOfInterest',
                     ['cube_slice', 'segmented_images', 'table'])

    params = {"P": self.config["P"], "PRECISION": self.config["PRECISION"]}
    gms = GMS(params)

    spectra, slices = acalib.core.spectra_sketch(
        data, self.config["SAMPLES"], self.config["RANDOM_STATE"])

    pp_slices = []
    for slice in slices:
        pp_slice = acalib.core.vel_stacking(cube, slice)
        labeled_images = gms.run(pp_slice)

        if wcs is not None:
            freq_min = float(wcs.all_pix2world(0, 0, slice.start, 1)[2])
            freq_max = float(wcs.all_pix2world(0, 0, slice.stop, 1)[2])
        else:
            freq_min = None
            freq_max = None

        table = acalib.core.measure_shape(pp_slice, labeled_images,
                                          freq_min, freq_max)
        if len(table) > 0:
            c.append(ROI(cube_slice=pp_slice,
                         segmented_images=labeled_images,
                         table=table))

    if wcs:
        wcs = wcs.dropaxis(2)

        for i, roi in enumerate(c):
            for j, im in enumerate(roi.segmented_images):
                c[i].segmented_images[j] = NDData(data=im, wcs=wcs)
    return c
def test_nddata_stats_class():
    nddata = NDData(np.arange(10))
    stats = NDDataStats(nddata)
    assert_allclose(stats.mean, 4.5)
    assert_allclose(stats.median, 4.5)
    assert_allclose(stats.std, 2.8722813232690143)
    assert_allclose(stats.mad_std, 3.7065055462640051)
def make_stars_guess(
        image: np.ndarray,
        star_finder: photutils.StarFinderBase,
        cutout_size: int = config.cutout_size) -> photutils.psf.EPSFStars:
    """
    Given an image, extract stars as EPSFStars for psf fitting

    :param image: image to extract the star cutouts from
    :param cutout_size: how big should the regions around each star used for fitting be?
    :param star_finder: which starfinder to use?
    :return: instance of extracted EPSFStars
    """

    # The idea here is to run a "greedy" starfinder that finds a lot more
    # candidates than we need and then to filter out the bright and
    # isolated stars
    peaks_tbl = star_finder(image)
    peaks_tbl.rename_columns(['xcentroid', 'ycentroid'], ['x', 'y'])
    peaks_tbl = cut_edges(peaks_tbl, cutout_size, image.shape[0])
    # TODO this gets medianed away with the image combine approach,
    #  so more star more good?
    # stars_tbl = cut_close_stars(peaks_tbl, cutoff_dist=3)
    stars_tbl = peaks_tbl

    image_no_background = image - np.median(image)
    stars = extract_stars(NDData(image_no_background), stars_tbl,
                          size=cutout_size)
    return stars
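# Hypothetical usage sketch for make_stars_guess(): any photutils StarFinder
# instance can be passed. DAOStarFinder and the threshold/fwhm values below
# are only illustrative, and `image` is assumed to be a 2D ndarray already
# loaded elsewhere.
import numpy as np
from photutils.detection import DAOStarFinder

finder = DAOStarFinder(threshold=5 * np.std(image), fwhm=4.0)
stars = make_stars_guess(image, star_finder=finder, cutout_size=25)
print(len(stars))  # number of extracted star cutouts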
def test_nddata_input():
    data = np.arange(400).reshape((20, 20))
    error = np.sqrt(data)
    mask = np.zeros((20, 20), dtype=bool)
    mask[8:13, 8:13] = True
    unit = 'adu'
    wcs = make_wcs(data.shape)
    try:
        skycoord = wcs.pixel_to_world(10, 10)
    except AttributeError:
        # for Astropy < 3.1
        skycoord = pixel_to_skycoord(10, 10, wcs)
    aper = SkyCircularAperture(skycoord, r=0.7*u.arcsec)

    tbl1 = aperture_photometry(data*u.adu, aper, error=error*u.adu,
                               mask=mask, wcs=wcs)

    uncertainty = StdDevUncertainty(error)
    nddata = NDData(data, uncertainty=uncertainty, mask=mask, wcs=wcs,
                    unit=unit)
    tbl2 = aperture_photometry(nddata, aper)

    for column in tbl1.columns:
        if column == 'sky_center':  # cannot test SkyCoord equality
            continue
        assert_allclose(tbl1[column], tbl2[column])
def interp_focus(self, focus):
    if not 0 <= focus <= self.nfoc - 1:
        raise ValueError('Focus level {} not in range '
                         '[0, {}]'.format(focus, self.nfoc))

    if focus != int(focus):
        left = np.floor(focus)
        right = np.ceil(focus)
        delta = right - focus
        weights = np.array([delta, 1 - delta])
        # Right bound exclusive
        interp_data = np.sum(self.data[int(left):int(right) + 1]
                             * weights[:, None, None, None], axis=0)
        foc_model = GriddedPSFModel(
            NDData(data=interp_data, meta=self.meta))
        # self.interp_model._data = interp_data
    else:
        # self.interp_model._data = self.data[int(focus)]
        foc_model = self.models[int(focus)]

    self.interp_model = foc_model
    self._focus_level = focus
    return self.interp_model
def basic_fits_to_nddata(filename, exten=0):
    """
    Read a single FITS extension into a `~astropy.nddata.NDData` object.

    This is an *extremely* simple reader that reads data from only a
    single FITS extension.

    Parameters
    ----------
    filename : str
        The path to a FITS file.

    exten : int, optional
        The FITS extension number for the ``data`` array.  Default is 0.

    Returns
    -------
    nddata : `~astropy.nddata.NDData`
        An `~astropy.nddata.NDData` object with a ``data`` attribute
        containing the FITS data array and a ``meta`` attribute,
        containing the FITS header as a python `dict`.
    """
    with fits.open(filename) as hdulist:
        header = hdulist[exten].header
        data = hdulist[exten].data
    return NDData(data, meta=header)
def setup_class(self):
    data = np.ones((100, 40, 40), dtype=float)
    self.data = NDData(data, unit=u.adu)
    self.radius = 3
    self.position = [(20, 20), (30, 30)]
    self.true_flux = np.pi * self.radius * self.radius
    self.fluxunit = u.adu
def cutouts(image, stars, size=15):
    """Custom version to extract star cutouts

    Parameters
    ----------
    image: np.ndarray or path
        image (or path to a FITS file) to extract the cutouts from
    stars: np.ndarray
        stars positions with shape (n,2)
    size: int
        size of the cuts around stars (in pixels), by default 15

    Returns
    -------
    idxs, stars
        indices of the stars kept (far enough from the edges) and the
        extracted cutouts, each of shape (size, size)
    """
    if isinstance(image, str):
        image = fits.getdata(image)

    stars_in = np.logical_and(
        np.all(stars < np.array(image.shape) - size - 2, axis=1),
        np.all(stars > np.ones(2) * size + 2, axis=1))
    stars = stars[stars_in]

    # with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    stars_tbl = Table([stars[:, 0], stars[:, 1]], names=["x", "y"])
    stars = extract_stars(NDData(data=image), stars_tbl, size=size)
    return np.argwhere(stars_in).flatten(), stars
def test_build_ad_multiple_extensions(tmp_path):
    """Build an AD object with multiple extensions and check that we
    retrieve everything in the correct order after writing.
    """
    shape = (4, 5)
    testfile = tmp_path / 'test.fits'

    ad = astrodata.create({})
    for i in range(1, 4):
        nd = NDData(np.zeros(shape) + i,
                    uncertainty=VarianceUncertainty(np.ones(shape)),
                    mask=np.zeros(shape, dtype='uint16'))
        ad.append(nd)
        ad[-1].OBJCAT = Table([[i]])
        ad[-1].MYARR = np.zeros(10) + i

    ad.REFCAT = Table([['ref']])
    ad.write(testfile)

    ad2 = astrodata.open(testfile)
    for ext, ext2 in zip(ad, ad2):
        assert_array_equal(ext.data, ext2.data)
        assert_array_equal(ext.MYARR, ext2.MYARR)
        assert_array_equal(ext.OBJCAT['col0'], ext2.OBJCAT['col0'])
def test_epsf_build_with_noise():
    oversampling = 4
    size = 25
    sigma = 0.5

    # should be "truth" ePSF
    m = IntegratedGaussianPRF(sigma=sigma, x_0=12.5, y_0=12.5, flux=1)
    yy, xx = np.mgrid[0:size * oversampling + 1, 0:size * oversampling + 1]
    xx = xx / oversampling
    yy = yy / oversampling
    truth_epsf = m(xx, yy)

    Nstars = 16  # one star per oversampling=4 point, roughly
    xdim = np.ceil(np.sqrt(Nstars)).astype(int)
    ydim = np.ceil(Nstars / xdim).astype(int)
    xarray = np.arange((size - 1) / 2 + 2, (size - 1) / 2 + 2 + xdim * size,
                       size)
    yarray = np.arange((size - 1) / 2 + 2, (size - 1) / 2 + 2 + ydim * size,
                       size)
    xarray, yarray = np.meshgrid(xarray, yarray)
    xarray, yarray = xarray.ravel(), yarray.ravel()

    np.random.seed(seed=76312)
    xpos = np.random.uniform(-0.5, 0.5, Nstars)
    ypos = np.random.uniform(-0.5, 0.5, Nstars)
    amps = np.random.uniform(50, 1000, Nstars)

    sources = Table()
    sources['amplitude'] = amps
    sources['x_0'] = xarray[:Nstars] + xpos
    sources['y_0'] = yarray[:Nstars] + ypos
    sources['sigma'] = [sigma] * Nstars

    stars_tbl = Table()
    stars_tbl['x'] = sources['x_0']
    stars_tbl['y'] = sources['y_0']

    data = make_gaussian_prf_sources_image((size * ydim + 4, size * xdim + 4),
                                           sources)
    data += 20  # counts/s
    data *= 100  # seconds
    data = apply_poisson_noise(data).astype(float)
    data /= 100
    data -= 20

    nddata = NDData(data=data)
    stars = extract_stars(nddata, stars_tbl, size=size)
    for star in stars:
        star.cutout_center = centroid_com(star.data)

    epsf_builder = EPSFBuilder(oversampling=oversampling, maxiters=5,
                               progress_bar=False, norm_radius=7.5,
                               recentering_func=centroid_com,
                               shift_val=0.5)
    epsf, fitted_stars = epsf_builder(stars)

    assert_allclose(epsf.data, truth_epsf, rtol=1e-1, atol=5e-2)
def epsf_from_model(input_model, n_images, stars_per_image, fitshape,
                    oversampling=1, σ=0, λ=None, smoothing='quartic',
                    epsf_iters=5, seed=0):
    size = 128 * int(np.ceil(np.sqrt(stars_per_image)))
    border = 32

    rng = np.random.default_rng(seed)

    stars = []
    for i in range(n_images):
        img, xy_list = gen_image(input_model, stars_per_image, size, border,
                                 'random', σ, λ, rng)
        stars += list(
            extract_stars(NDData(img), Table(xy_list, names=['x', 'y']),
                          size=np.array(fitshape)))

    stars = EPSFStars(stars)

    builder = EPSFBuilder(oversampling=oversampling,
                          smoothing_kernel=smoothing,
                          maxiters=epsf_iters)
    epsf, fitted_stars = builder(stars)

    return epsf, reference_image(input_model, fitshape, oversampling)
def __init__(self, nddata, position, shape):
    if isinstance(position, SkyCoord):
        if nddata.wcs is None:
            raise ValueError('nddata must contain WCS if the input '
                             'position is a SkyCoord')
        x, y = skycoord_to_pixel(position, nddata.wcs, mode='all')
        position = (y, x)

    data = np.asanyarray(nddata.data)
    print(data.shape, shape, position)
    slices_large, slices_small = overlap_slices(data.shape, shape, position)
    self.slices_large = slices_large
    self.slices_small = slices_small

    data = nddata.data[slices_large]
    mask = None
    uncertainty = None
    if nddata.mask is not None:
        mask = nddata.mask[slices_large]
    if nddata.uncertainty is not None:
        uncertainty = nddata.uncertainty[slices_large]

    self.nddata = NDData(data, mask=mask, uncertainty=uncertainty)
def extract(self):
    epsf_builder = EPSFBuilder(oversampling=4, maxiters=3, progress_bar=True)

    for file in self.params.inFiles:
        logging.info("Extracting PSFs from file {}".format(file))
        psf_file = PSFfile(file, self.params.tmpDir,
                           frame_shape=(self.box_size, self.box_size))

        frame_number = fits.getheader(file)['NAXIS3']
        for frame_index in range(frame_number):
            print("\rExtracting PSF from frame {}/{}".format(
                frame_index + 1, frame_number), end='')
            with fits.open(file) as hdulist:
                frame = hdulist[0].data[frame_index]
                stars = extract_stars(NDData(data=frame), self.star_table,
                                      size=self.box_size)

            # Compute instantaneous PSF
            # epsf, fitted_stars = epsf_builder(stars)
            epsf = np.zeros(stars[0].data.shape)
            for star in stars:
                epsf += star.data

            psf_file.update_frame(frame_index, epsf)
        print('\r')
def eje_mayor(borde):
    """
    Find the two points on the ellipse border that are farthest apart
    (the major axis).

    Args:
        - borde: points belonging to the border of the ellipse

    Returns:
        - info_elipse = [Dimension, x1, y1, x2, y2] == array with the
          numeric value of the major axis and the end points of that axis

    Changes:
        - astropy.units
        - data type used to represent "borde"
    """
    borde = NDData(borde)
    info_elipse = np.zeros((4,))
    info_elipse = NDData(info_elipse)
    info_elipse.meta['distance'] = 0

    for i in range(0, len(borde.data) - 2):
        if i % 2 != 0:
            continue
        for j in range(i + 2, len(borde.data) - 1):
            if j % 2 != 0:
                continue
            dis = distancia(borde.data[i], borde.data[i + 1],
                            borde.data[j], borde.data[j + 1])
            if dis > info_elipse.meta['distance']:
                info_elipse.meta['distance'] = dis       # major-axis length
                info_elipse.data[0] = borde.data[i]      # (X1,
                info_elipse.data[1] = borde.data[i + 1]  #  Y1) major axis
                info_elipse.data[2] = borde.data[j]      # (X2,
                info_elipse.data[3] = borde.data[j + 1]  #  Y2) major axis
    return info_elipse
def test_combine_nddata(dtype):
    data = [NDData(data=np.array([val], dtype=dtype)) for val in TEST_VALUES]
    out = combine_arrays(data, method='mean', clipping_method='sigclip')
    assert out.data.dtype == np.float64
    assert out.mask is None
    assert out.uncertainty is None
    assert np.isclose(out.data[0], 2.2)
    assert out.meta['REJMAP'][0] == 1
def test_parse_nddata_simple(self, imviz_helper):
    with pytest.raises(ValueError,
                       match='Imviz cannot load this NDData with ndim=1'):
        parse_data(imviz_helper.app, NDData([1, 2, 3, 4]),
                   show_in_viewer=False)

    ndd = NDData([[1, 2], [3, 4]])
    parse_data(imviz_helper.app, ndd, data_label='some_data',
               show_in_viewer=False)
    data = imviz_helper.app.data_collection[0]
    comp = data.get_component('DATA')
    assert data.label == 'some_data[DATA]'
    assert data.shape == (2, 2)
    assert comp.data.shape == (2, 2)
    assert len(imviz_helper.app.data_collection) == 1
def test_append_table_and_write(tmp_path):
    testfile = tmp_path / 'test.fits'
    ad = astrodata.create({})
    ad.append(NDData(np.zeros((4, 5))))
    ad[0].TABLE1 = Table([[1]])
    ad.write(testfile)
    ad.write(testfile, overwrite=True)

    ad = astrodata.open(testfile)
    assert ad[0].exposed == {'TABLE1'}
def test_append_tables():
    """If both ad and ad[0] have a table, check that ad[0] returns the
    extension table.
    """
    nd = NDData(np.zeros((4, 5)), meta={'header': {}})
    ad = astrodata.create({})
    ad.append(nd)
    ad.append(Table([[1]]))
    ad.append(Table([[2]]), add_to=ad[0].nddata)
    assert ad[0].TABLE2['col0'][0] == 2
def __init__(self, flux, dispersion=None, dispersion_unit=None,
             error=None, mask=None, wcs=None, meta=None,
             units=None, copy=True, validate=True):
    # needed to change order from (dispersion, flux) -> (flux, dispersion)
    # as dispersion=None for wcs.
    # added some WCS handling as it was not clear how to deal with both
    # wcs and dispersion.
    NDData.__init__(self, data=flux, error=error, mask=mask,
                    wcs=wcs, meta=meta, units=units, copy=copy,
                    validate=validate)

    if wcs is None:
        self.dispersion = dispersion
        self.dispersion_unit = dispersion_unit
    else:
        self.wcs = wcs
        self.dispersion = wcs.get_lookup_table()
        self.dispersion_unit = wcs.units[0]
def nddata_cutout2d(nddata, position, size, mode='trim', fill_value=np.nan):
    """
    Create a 2D cutout of a `~astropy.nddata.NDData` object.

    Specifically, cutouts will be made for the ``nddata.data`` and
    ``nddata.mask`` (if present) arrays.  If ``nddata.wcs`` exists, then
    it will also be updated.

    Note that cutouts will not be made for ``nddata.uncertainty`` (if
    present) because they are general objects and not arrays.

    Parameters
    ----------
    nddata : `~astropy.nddata.NDData`
        The 2D `~astropy.nddata.NDData` from which the cutout is taken.

    position : tuple or `~astropy.coordinates.SkyCoord`
        The position of the cutout array's center with respect to the
        ``nddata.data`` array.  The position can be specified either as
        a ``(x, y)`` tuple of pixel coordinates or a
        `~astropy.coordinates.SkyCoord`, in which case ``nddata.wcs``
        must exist.

    size : int, array-like, `~astropy.units.Quantity`
        The size of the cutout array along each axis.  If ``size`` is a
        scalar number or a scalar `~astropy.units.Quantity`, then a
        square cutout of ``size`` will be created.  If ``size`` has two
        elements, they should be in ``(ny, nx)`` order.  Scalar numbers
        in ``size`` are assumed to be in units of pixels.  ``size`` can
        also be a `~astropy.units.Quantity` object or contain
        `~astropy.units.Quantity` objects.  Such `~astropy.units.Quantity`
        objects must be in pixel or angular units.  For all cases,
        ``size`` will be converted to an integer number of pixels,
        rounding to the nearest integer.  See the ``mode`` keyword for
        additional details on the final cutout size.

    mode : {'trim', 'partial', 'strict'}, optional
        The mode used for creating the cutout data array.  For the
        ``'partial'`` and ``'trim'`` modes, a partial overlap of the
        cutout array and the input ``nddata.data`` array is sufficient.
        For the ``'strict'`` mode, the cutout array has to be fully
        contained within the ``nddata.data`` array, otherwise an
        `~astropy.nddata.utils.PartialOverlapError` is raised.  In all
        modes, non-overlapping arrays will raise a
        `~astropy.nddata.utils.NoOverlapError`.  In ``'partial'`` mode,
        positions in the cutout array that do not overlap with the
        ``nddata.data`` array will be filled with ``fill_value``.  In
        ``'trim'`` mode only the overlapping elements are returned, thus
        the resulting cutout array may be smaller than the requested
        ``size``.

    fill_value : number, optional
        If ``mode='partial'``, the value to fill pixels in the cutout
        array that do not overlap with the input ``nddata.data``.
        ``fill_value`` must have the same ``dtype`` as the input
        ``nddata.data`` array.

    Returns
    -------
    result : `~astropy.nddata.NDData`
        A `~astropy.nddata.NDData` object with cutouts for the data and
        mask, if input.

    Examples
    --------
    >>> import numpy as np
    >>> from astropy.nddata import NDData
    >>> import astropy.units as u
    >>> from astroimtools import nddata_cutout2d
    >>> data = np.random.random((500, 500))
    >>> unit = u.electron / u.s
    >>> mask = (data > 0.7)
    >>> meta = {'exptime': 1234 * u.s}
    >>> nddata = NDData(data, mask=mask, unit=unit, meta=meta)
    >>> cutout = nddata_cutout2d(nddata, (100, 100), (10, 10))
    >>> cutout.data.shape
    (10, 10)
    >>> cutout.mask.shape
    (10, 10)
    >>> cutout.unit
    Unit("electron / s")
    """
    from astropy.nddata.utils import Cutout2D

    if not isinstance(nddata, NDData):
        raise ValueError('nddata input must be an NDData object')

    if isinstance(position, SkyCoord):
        if nddata.wcs is None:
            raise ValueError('nddata must contain WCS if the input '
                             'position is a SkyCoord')
        position = skycoord_to_pixel(position, nddata.wcs, mode='all')

    data_cutout = Cutout2D(np.asanyarray(nddata.data), position, size,
                           wcs=nddata.wcs, mode=mode, fill_value=fill_value)

    # need to create a new NDData instead of copying/replacing
    nddata_out = NDData(data_cutout.data, unit=nddata.unit,
                        uncertainty=nddata.uncertainty, meta=nddata.meta)

    if nddata.wcs is not None:
        nddata_out.wcs = data_cutout.wcs

    if nddata.mask is not None:
        mask_cutout = Cutout2D(np.asanyarray(nddata.mask), position, size,
                               mode=mode, fill_value=fill_value)
        nddata_out.mask = mask_cutout.data

    return nddata_out
def to_griddedpsfmodel(HDUlist_or_filename=None, ext=0):
    """
    Create a photutils GriddedPSFModel object from either a FITS file or
    an HDUlist object. The input must have header keywords "DET_YX{}" and
    "OVERSAMP" (will be present if psf_grid() is used to create the file).

    Parameters
    ----------
    HDUlist_or_filename : string
        Either a fits.HDUList object or a filename of a FITS file on disk

    ext : int
        Extension in that FITS file

    Returns
    -------
    model : GriddedPSFModel
        Photutils object with 3D data array and metadata with specified
        grid_xypos and oversampling keys
    """
    try:
        from photutils import GriddedPSFModel
    except ImportError:
        raise ImportError("This method requires photutils >= 0.6")

    if isinstance(HDUlist_or_filename, str):
        HDUlist = fits.open(HDUlist_or_filename)
    elif isinstance(HDUlist_or_filename, fits.HDUList):
        HDUlist = HDUlist_or_filename
    else:
        raise ValueError('Input must be a filename or HDUlist')

    data = HDUlist[ext].data
    header = HDUlist[ext].header

    # Check necessary keys are there
    if not any("DET_YX" in key for key in header.keys()):
        raise KeyError("You are missing 'DET_YX{}' keys: which are the "
                       "detector locations of the PSFs")
    if 'OVERSAMP' not in header.keys():
        raise KeyError("You are missing 'OVERSAMP' key: which is the "
                       "oversampling factor of the PSFs")

    # Convert header to meta dict
    header = header.copy(strip=True)
    header.pop('COMMENT', None)
    header.pop('', None)
    header.pop('HISTORY', None)
    meta = OrderedDict((a, (b, c)) for (a, b, c) in header.cards)

    ndd = NDData(data, meta=meta, copy=True)

    # Edit meta dictionary for GriddedPSFLibrary specifics
    ndd.meta['grid_xypos'] = [
        ((float(ndd.meta[key][0].split(',')[1].split(')')[0])),
         (float(ndd.meta[key][0].split(',')[0].split('(')[1])))
        for key in ndd.meta.keys() if "DET_YX" in key]  # from (y,x) to (x,y)

    if 'oversampling' not in ndd.meta:
        ndd.meta['oversampling'] = ndd.meta['OVERSAMP'][0]  # pull the value

    # Turn all metadata keys into lowercase
    ndd.meta = {key.lower(): ndd.meta[key] for key in ndd.meta}

    # Create model
    model = GriddedPSFModel(ndd)

    return model
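# A minimal, hypothetical usage sketch for to_griddedpsfmodel():
# 'my_psf_grid.fits' stands in for a file written by psf_grid(), which
# stores the DET_YX* and OVERSAMP keywords the function expects.
model = to_griddedpsfmodel('my_psf_grid.fits', ext=0)
print(model.grid_xypos)    # detector (x, y) positions of the PSF grid
print(model.oversampling)  # oversampling factor read from OVERSAMP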
def rotar(matriz, NAXIS1, NAXIS2, angulo):
    """
    Rotate the object of study (matrix) by a given angle using a rotation
    matrix.

    Args:
        - matriz: image of the object
        - NAXIS1: first dimension of the matrix
        - NAXIS2: second dimension of the matrix
        - angulo: rotation angle

    Returns:
        - rotated matrix

    Changes:
        - 270-degree rotation: transpose()
    """
    matriz = NDData(matriz)

    if angulo > 360 or angulo < 1:
        print("<Error: image not rotated, angle not allowed>")
        return matriz

    # ------ FOR 0 THERE IS NO NEED TO ROTATE ------ #
    if angulo == 0 or angulo == 360:
        return matriz

    # ------ FOR 90, 180 AND 270 IT IS A SIMPLE RELOCATION OF POINTS ------ #
    if angulo == 90:
        matriz_final = np.zeros((NAXIS2, NAXIS1))
        matriz_final = NDData(matriz_final)
        for i in range(NAXIS1):
            for j in range(NAXIS2):
                matriz_final.data[NAXIS2 - j - 1][i] = matriz.data[i][j]
        return matriz_final

    if angulo == 180:
        matriz_final = np.zeros((NAXIS1, NAXIS2))
        matriz_final = NDData(matriz_final)
        for i in range(NAXIS1):
            for j in range(NAXIS2):
                matriz_final.data[NAXIS1 - i - 1][NAXIS2 - j - 1] = matriz.data[i][j]
        return matriz_final

    if angulo == 270:
        matriz_final = np.zeros((NAXIS2, NAXIS1))
        matriz_final = NDData(matriz_final)
        for i in range(NAXIS1):
            for j in range(NAXIS2):
                matriz_final.data[j][i] = matriz.data[i][j]
        return matriz_final

    else:
        coseno = math.cos((angulo * math.pi) / 180)
        seno = math.sin((angulo * math.pi) / 180)
        punto_central_x = int(round(NAXIS1 / 2))
        punto_central_y = int(round(NAXIS2 / 2))

        # --- To rotate about the center of the image, a small translation is needed --- #
        # --- Knowing the distance from the origin to the image center is enough --- #
        distancia_centro = int(round(info_imagen.distancia(
            0, 0, punto_central_x, punto_central_y))) - 1

        # --- MOST NEGATIVE POINT IN X AND IN Y ---------------------- #
        # --- THIS IS TO KEEP ALL POSITIONS POSITIVE --- #
        vec = [0, 0, NAXIS1, NAXIS2, NAXIS1, 0, 0, NAXIS2]
        fila_mas_negativa = columna_mas_negativa = 0
        fila_mas_positiva = columna_mas_positiva = 0
        for i in range(7):
            alfa = (vec[i] - distancia_centro) * coseno - (vec[i + 1] - distancia_centro) * seno
            beta = (vec[i] - distancia_centro) * seno + (vec[i + 1] - distancia_centro) * coseno
            if alfa < fila_mas_negativa:
                fila_mas_negativa = int(math.ceil(alfa))
            if alfa > fila_mas_positiva:
                fila_mas_positiva = int(math.ceil(alfa))
            if beta < columna_mas_negativa:
                columna_mas_negativa = int(math.ceil(beta))
            if beta > columna_mas_positiva:
                columna_mas_positiva = int(math.ceil(beta))

        distancia_1 = fila_mas_positiva + abs(fila_mas_negativa)
        distancia_2 = columna_mas_positiva + abs(columna_mas_negativa)
        matriz_final = np.zeros((distancia_1 + 1, distancia_2 + 1))
        matriz_final = NDData(matriz_final)

        for x in range(NAXIS1):
            for y in range(NAXIS2):
                # ---- the translation must be subtracted from X and Y and then added back -- #
                a = ((x - distancia_centro) * coseno - (y - distancia_centro) * seno) + abs(fila_mas_negativa)
                b = ((x - distancia_centro) * seno + (y - distancia_centro) * coseno) + abs(columna_mas_negativa)
                bandera_decimal_a = 100
                bandera_decimal_b = 100
                if a - int(a) != 0:
                    bandera_decimal_a = 101
                if b - int(b) != 0:
                    bandera_decimal_b = 110
                # Since Python has no switch statement, it is done by hand
                suma_banderas = bandera_decimal_a + bandera_decimal_b
                while 1:
                    porcentaje_columna_derecha = porcentaje_columna_izquierda = 0
                    porcentaje_fila_abajo = porcentaje_fila_arriba = 0
                    porcentaje_fila_arriba = abs(abs(a) - int(abs(a)))
                    porcentaje_fila_abajo = 1 - porcentaje_fila_arriba
                    porcentaje_columna_derecha = abs(abs(b) - int(abs(b)))
                    porcentaje_columna_izquierda = 1 - porcentaje_columna_derecha
                    # Only A is decimal
                    if suma_banderas == 201:
                        matriz_final.data[int(a)][b] += porcentaje_fila_abajo * matriz.data[x][y]
                        matriz_final.data[math.ceil(a)][b] += porcentaje_fila_arriba * matriz.data[x][y]
                        break
                    # Only B is decimal
                    if suma_banderas == 210:
                        matriz_final.data[a][int(b)] += porcentaje_columna_izquierda * matriz.data[x][y]
                        matriz_final.data[a][math.ceil(b)] += porcentaje_columna_derecha * matriz.data[x][y]
                        break
                    # Both are decimals
                    if suma_banderas == 211:
                        matriz_final.data[int(a)][int(b)] += porcentaje_fila_abajo * porcentaje_columna_izquierda * matriz.data[x][y]
                        matriz_final.data[math.ceil(a)][math.ceil(b)] += porcentaje_fila_arriba * porcentaje_columna_derecha * matriz.data[x][y]
                        matriz_final.data[int(a)][math.ceil(b)] += porcentaje_fila_abajo * porcentaje_columna_derecha * matriz.data[x][y]
                        matriz_final.data[math.ceil(a)][int(b)] += porcentaje_fila_arriba * porcentaje_columna_izquierda * matriz.data[x][y]
                        break
                    # Both are integers
                    if suma_banderas == 200:
                        matriz_final.data[a][b] = matriz.data[x][y]
                        break
        return matriz_final
def deproyectar(x, y, NAXIS1, NAXIS2, borde, matriz, radio):
    """
    Compute and transform the ellipse to its corresponding projection onto
    a circle.

    Args:
        - x, y = coordinates (?)
        - NAXIS1, NAXIS2 = dimensions of the matrix
        - borde: vector with the border of the ellipse
        - matriz: image under study
        - radio: radius from the center of the ellipse to x, y

    Returns:
        - deprojected image.
    """
    matriz = NDData(matriz)
    borde = NDData(borde)
    matriz_final = np.zeros(((y + (math.cos(90 * math.pi / 180)) * radio) * 2 + 2,
                             radio * 2 + 2))
    matriz_final = NDData(matriz_final)

    for i in range(NAXIS1):
        for j in range(NAXIS2):
            a = b = c = d = 0
            for r in range(len(borde.data) - 1):
                if r % 2 != 0:
                    continue
                if i < borde.data[r]:
                    a = 1
                if i > borde.data[r]:
                    b = 10
                if j > borde.data[r + 1]:
                    c = 100
                if j < borde.data[r + 1]:
                    d = 1000
                if i == borde.data[r] and j == borde.data[r + 1]:
                    a, b, c, d = 1, 10, 100, 1000
                    break
            if a + b + c + d != 1111 or (x == i and y == j):
                continue

            # For easier reading, think of the indices as row, column
            angulo = math.asin(abs(j - abs(y)) / distancia(x, y, i, j)) * 180 / math.pi
            while 1:
                if i > x and j > y:
                    angulo = angulo
                    break
                if i < x and j > y:
                    angulo = 360 - angulo
                    break
                if i < x and j < y:
                    angulo = 180 + angulo
                    break
                if i > x and j < y:
                    angulo = 180 - angulo
                    break
                if i == x and j > y:
                    angulo = 0
                    break
                if i == x and j < y:
                    angulo = 180
                    break
                if i > x and j == y:
                    angulo = 90
                    break
                if i < x and j == y:
                    angulo = 270
                    break

            fil = x + (math.sin(angulo * math.pi / 180)) * radio
            col = y + (math.cos(angulo * math.pi / 180)) * radio
            matriz_final.data[int(fil), int(col)] = matriz.data[i][j]
            matriz_final.data[int(fil), math.ceil(col)] = matriz.data[i][j]
            matriz_final.data[math.ceil(fil), int(col)] = matriz.data[i][j]
            matriz_final.data[math.ceil(fil), math.ceil(col)] = matriz.data[i][j]
    return matriz_final