def test_nddata_init_data_maskedarray():
    with NumpyRNGContext(456):
        NDData(np.random.random((10, 10)),
               mask=np.random.random((10, 10)) > 0.5)

    # Another test (just copied here)
    with NumpyRNGContext(12345):
        a = np.random.randn(100)
        marr = np.ma.masked_where(a > 0, a)
    nd = NDData(marr)

    # check that masks and data match
    assert_array_equal(nd.mask, marr.mask)
    assert_array_equal(nd.data, marr.data)

    # check that they are both by reference
    marr.mask[10] = ~marr.mask[10]
    marr.data[11] = 123456789
    assert_array_equal(nd.mask, marr.mask)
    assert_array_equal(nd.data, marr.data)

    # or not if we choose copy=True
    nd = NDData(marr, copy=True)
    marr.mask[10] = ~marr.mask[10]
    marr.data[11] = 0
    assert nd.mask[10] != marr.mask[10]
    assert nd.data[11] != marr.data[11]

def gen_position_flux(size, number, low, high, rng_seed=123):
    """Generate x, y, and flux lists for stars."""
    for i in range(number):
        with NumpyRNGContext(rng_seed):
            x = np.random.randint(0, size[0], number)
        with NumpyRNGContext(rng_seed + i):
            y = np.random.randint(0, size[1], number)
            flux = np.random.randint(low, high, number)
    return x, y, flux

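# Hedged usage sketch for gen_position_flux: the frame size, star count and
# flux range below are made-up values, not taken from the original test suite.
x, y, flux = gen_position_flux(size=(1024, 1024), number=50, low=100, high=1000)
# Each result is an integer array of length `number`: pixel coordinates for the
# stars plus fluxes drawn uniformly from [low, high).
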
def generate_gaussian_cube(shape=(100, 25, 25), sigma=8., amp=1., noise=None,
                           spec_scale=1 * u.km / u.s,
                           pixel_scale=1 * u.arcsec,
                           beamfwhm=3 * u.arcsec,
                           v0=None, vel_surface=None,
                           seed=247825498):
    '''
    Generate a SpectralCube with Gaussian profiles.

    The peak velocity positions can be given with `vel_surface`. Otherwise,
    the peaks of the profiles are randomly assigned positions in the cubes.
    This is primarily to test shuffling and stacking of spectra, rather than
    trying to be physically meaningful.

    Returns
    -------
    spec_cube : SpectralCube
        The generated cube.
    mean_positions : array
        The peak positions in the cube.
    '''
    test_cube = np.empty(shape)
    mean_positions = np.empty(shape[1:])

    spec_middle = int(shape[0] / 2)
    spec_quarter = int(shape[0] / 4)

    if v0 is None:
        v0 = 0

    with NumpyRNGContext(seed):
        spec_inds = np.mgrid[-spec_middle:spec_middle] * spec_scale.value
        spat_inds = np.indices(shape[1:])
        for y, x in zip(spat_inds[0].flatten(), spat_inds[1].flatten()):
            # Lock the mean to within 25% from the centre
            if vel_surface is not None:
                mean_pos = vel_surface[y, x]
            else:
                mean_pos = \
                    np.random.uniform(low=spec_inds[spec_quarter],
                                      high=spec_inds[spec_quarter + spec_middle])

            test_cube[:, y, x] = gaussian(spec_inds, amp, mean_pos, sigma)
            mean_positions[y, x] = mean_pos + v0

            if noise is not None:
                test_cube[:, y, x] += np.random.normal(0, noise, shape[0])

    test_hdu = generate_hdu(test_cube, pixel_scale, spec_scale, beamfwhm,
                            spec_inds[0] + v0)

    spec_cube = SpectralCube.read(test_hdu)

    mean_positions = mean_positions * spec_scale.unit

    return spec_cube, mean_positions

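# Illustrative call only: the shape and noise level are arbitrary, and the
# sketch assumes the `gaussian` and `generate_hdu` helpers referenced inside
# generate_gaussian_cube are available from the same test module.
small_cube, true_means = generate_gaussian_cube(shape=(50, 8, 8), noise=0.05)
# `small_cube` is a SpectralCube and `true_means` carries the spectral unit
# (km/s by default), so shuffled/stacked spectra can be checked against it.
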
def setup_class(self):
    # Co-ordinate grid in wavelength & pixels along the slit:
    wav = np.arange(3000, 10000, 50) * u.AA
    slit = np.arange(30)
    wav, slit = np.meshgrid(wav, slit)

    obj = (cont_model(wav).value *
           Gaussian1D(amplitude=1., mean=15.8, stddev=2.)(slit))

    # A continuum level makes for a more stable comparison of fit vs data:
    self.bglev = 30.
    sky = sky_model(wav.value) + self.bglev

    data = obj + sky

    # Add some noise:
    std = np.sqrt(36. + data)
    with NumpyRNGContext(_RANDOM_SEED):
        data += np.random.normal(0., 1., size=data.shape) * std

    # Make a copy that also has some bad pixels masked; the first masked
    # region is too long to get rejected automatically and test comparisons
    # will fail unless it is masked correctly:
    masked_data = np.ma.masked_array(data, mask=False, copy=True)
    badpix = np.ma.masked_array(1000., mask=True)
    masked_data[4:6, 80:93] = badpix
    masked_data[24:27, 24:27] = badpix

    self.obj, self.sky, self.data, self.std = obj, sky, data, std
    self.masked_data = masked_data
    self.weights = 1. / std

def test_fitters_with_weights(fitter):
    """Issue #5737"""
    fitter = fitter()

    if isinstance(fitter, _NLLSQFitter):
        pytest.xfail("This test is poorly designed and causes issues for "
                     "scipy.optimize.least_squares based fitters")

    Xin, Yin = np.mgrid[0:21, 0:21]

    with NumpyRNGContext(_RANDOM_SEED):
        zsig = np.random.normal(0, 0.01, size=Xin.shape)

    # Non-linear model
    g2 = models.Gaussian2D(10, 10, 9, 2, 3)
    z = g2(Xin, Yin)
    gmod = fitter(models.Gaussian2D(15, 7, 8, 1.3, 1.2), Xin, Yin, z + zsig)
    assert_allclose(gmod.parameters, g2.parameters, atol=10 ** (-2))

    # Linear model
    p2 = models.Polynomial2D(3)
    p2.parameters = np.arange(10) / 1.2
    z = p2(Xin, Yin)
    with pytest.warns(AstropyUserWarning,
                      match=r'Model is linear in parameters'):
        pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig)
    assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))

def test_apply_minmax_clip(self):
    _min, _max = (0, 20)
    outliers = ((1, 2), (6, 2), (2, 9), (5, 2))
    outvalues = [1e6, -2e3, 5e3, -7e2]
    expect = np.zeros((10, 10))

    with NumpyRNGContext(123):
        data = np.ma.MaskedArray(np.random.normal(loc=10, scale=1,
                                                  size=[10, 10]),
                                 mask=np.zeros((10, 10)))
    for p, v in zip(outliers, outvalues):
        data[p] = v
        expect[p] = 1
    data[0:2, 0:3].mask = 1
    expect[0:2, 0:3] = 1

    # force assign the buffer
    comb = ImCombiner()
    comb._buffer = data
    # with these limits, only the outliers must be masked
    comb.set_minmax_clip(_min, _max)
    comb._apply_rejection()

    # original mask must be kept
    assert_equal(comb._buffer.mask, expect)

def calc_velstd_withnan(cum, dt_cum):
    """
    Calculate std of velocity by bootstrap for each point which may include nan.

    Inputs:
      cum    : Cumulative phase block for each point (n_pt, n_im)
               Can include nan.
      dt_cum : Cumulative days for each image (n_im)

    Returns:
      vstd   : Std of velocity for each point (n_pt)
    """
    global bootcount, bootnum
    n_pt, n_im = cum.shape
    bootnum = 100
    bootcount = 0

    vstd = np.zeros((n_pt), dtype=np.float32)
    G = np.stack((np.ones_like(dt_cum), dt_cum), axis=1)

    data = cum.transpose().copy()
    ixs_day = np.arange(n_im)
    mask = (~np.isnan(data))
    data[np.isnan(data)] = 0

    velinv = lambda x: censored_lstsq2(G[x, :], data[x, :], mask[x, :])[1]

    with NumpyRNGContext(1):
        bootresult = bootstrap(ixs_day, bootnum, bootfunc=velinv)

    vstd = np.nanstd(bootresult, axis=0)

    print('')

    return vstd

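# A minimal, self-contained sketch of the same pattern used in
# calc_velstd_withnan: a seeded astropy bootstrap over an index array with a
# user-supplied bootfunc. The straight-line data and the slope-estimating
# bootfunc below are invented purely for illustration.
import numpy as np
from astropy.stats import bootstrap
from astropy.utils import NumpyRNGContext

t = np.arange(20, dtype=float)  # "acquisition days"
with NumpyRNGContext(42):
    series = 0.3 * t + np.random.normal(0, 0.5, t.size)  # noisy linear trend

def slope_of_epochs(ix):
    """Fit a straight line to the resampled epochs and return its slope."""
    return np.polyfit(t[ix], series[ix], 1)[0]

with NumpyRNGContext(1):
    boot_slopes = bootstrap(np.arange(t.size), 100, bootfunc=slope_of_epochs)
slope_std = np.std(boot_slopes)  # bootstrap estimate of the slope uncertainty
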
def test_conditional_abunmatch1():
    with NumpyRNGContext(43):
        x = np.random.normal(loc=0, scale=0.1, size=100)
    y = np.linspace(10, 20, 100)
    model_y = conditional_abunmatch(x, y, seed=43)
    msg = "monotonic cam does not preserve mean"
    assert np.allclose(model_y.mean(), y.mean(), rtol=0.1), msg

def test_compound_fitting_with_units():
    x = np.linspace(-5, 5, 15) * u.Angstrom
    y = np.linspace(-5, 5, 15) * u.Angstrom

    fitter = fitting.LevMarLSQFitter()
    m = models.Gaussian2D(10*u.Hz, 3*u.Angstrom, 4*u.Angstrom,
                          1*u.Angstrom, 2*u.Angstrom)
    p = models.Planar2D(3*u.Hz/u.Angstrom, 4*u.Hz/u.Angstrom, 1*u.Hz)
    model = m + p

    z = model(x, y)
    res = fitter(model, x, y, z)
    assert isinstance(res(x, y), np.ndarray)
    assert all([res[i]._has_units for i in range(2)])

    model = models.Gaussian2D() + models.Planar2D()
    res = fitter(model, x, y, z)
    assert isinstance(res(x, y), np.ndarray)
    assert all([res[i]._has_units for i in range(2)])

    # A case of a mixture of models with and without units
    model = models.BlackBody(temperature=3000 * u.K) * models.Const1D(amplitude=1.0)
    x = np.linspace(1, 3, 10000) * u.micron

    with NumpyRNGContext(12345):
        n = np.random.normal(3)

    y = model(x)
    res = fitter(model, x, y * (1 + n))
    # The large rtol here is due to different results on linux and macosx,
    # likely because the model is ill-conditioned.
    np.testing.assert_allclose(res.parameters,
                               [3000, 2.1433621e+00, 2.647347e+00],
                               rtol=0.4)

def test_param_cov(self):
    """
    Tests that the 'param_cov' fit_info entry gets the right answer for
    *linear* least squares, where the answer is exact
    """
    a = 2
    b = 100

    with NumpyRNGContext(_RANDOM_SEED):
        x = np.linspace(0, 1, 100)
        # y scatter is amplitude ~1 to make sure covariance is
        # non-negligible
        y = x * a + b + np.random.randn(len(x))

    # first compute the ordinary least squares covariance matrix
    X = np.vstack([x, np.ones(len(x))]).T
    beta = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), y.T)
    s2 = (np.sum((y - np.matmul(X, beta).ravel())**2) /
          (len(y) - len(beta)))
    olscov = np.linalg.inv(np.matmul(X.T, X)) * s2

    # now do the non-linear least squares fit
    mod = models.Linear1D(a, b)
    fitter = LevMarLSQFitter()

    with pytest.warns(AstropyUserWarning,
                      match=r'Model is linear in parameters'):
        fmod = fitter(mod, x, y)

    assert_allclose(fmod.parameters, beta.ravel())
    assert_allclose(olscov, fitter.fit_info['param_cov'])

def test_linear_fit_model_set_common_weight():
    """Tests fitting multiple models simultaneously."""
    init_model = Polynomial1D(degree=2, c0=[1, 1], n_models=2)
    x = np.arange(10)
    y_expected = init_model(x, model_set_axis=False)
    assert y_expected.shape == (2, 10)

    # Add a bit of random noise
    with NumpyRNGContext(_RANDOM_SEED):
        y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)

    fitter = LinearLSQFitter()
    weights = np.ones(10)
    weights[[0, -1]] = 0
    fitted_model = fitter(init_model, x, y, weights=weights)
    assert_allclose(fitted_model(x, model_set_axis=False), y_expected,
                    rtol=1e-1)

    # Check that using null weights raises an error
    # ValueError: On entry to DLASCL parameter number 4 had an illegal value
    with pytest.raises(ValueError,
                       match='Found NaNs in the coefficient matrix'):
        with pytest.warns(RuntimeWarning,
                          match=r'invalid value encountered in.*divide'):
            fitted_model = fitter(init_model, x, y, weights=np.zeros(10))

def run_lenstool_parallel(folder, ini, ncores):
    backgx = np.loadtxt(folder + '/background_galaxies_main.lenstool')

    infile = open(folder + '/background_galaxies_main.lenstool', 'r')
    header = infile.readline()[2:-2]
    infile.close()

    index = np.arange(len(backgx))

    with NumpyRNGContext(1):
        bootresult = (bootstrap(index, ncores)).astype(int)

    total_folders = []
    for j in np.arange(ini, ini + ncores):
        os.system('rm -r ' + folder + '_' + str(j))
        os.system('mkdir ' + folder + '_' + str(j))
        os.system('cp -r ' + folder + '/* ' + folder + '_' + str(j) + '/')
        total_folders = np.append(total_folders, folder + '_' + str(j))

        lenstool_catalogue = backgx[bootresult[j - ini, :]]
        lenstool_catalogue[:, 0] = np.arange(1, len(backgx) + 1)

        np.savetxt(folder + '_' + str(j) + '/background_galaxies_main.lenstool',
                   lenstool_catalogue,
                   fmt='%i %f %f %f %f %f %f %f', header=header)

    pool = Pool(processes=ncores)
    salida = np.array(pool.map(run_lenstool, total_folders))
    return salida

def ccd_data(request):
    """
    Return a CCDData object with units of ADU.

    The size of the data array is 100x100 but can be changed using the marker
    @pytest.mark.data_size(N) on the test function, where N should be the
    desired dimension.

    Data values are initialized to random numbers drawn from a normal
    distribution with mean of 0 and scale 1.

    The scale can be changed with the marker @pytest.mark.data_scale(s) on the
    test function, where s is the desired scale.

    The mean can be changed with the marker @pytest.mark.data_mean(m) on the
    test function, where m is the desired mean.
    """
    size = value_from_markers('data_size', request)
    scale = value_from_markers('data_scale', request)
    mean = value_from_markers('data_mean', request)

    with NumpyRNGContext(DEFAULTS['seed']):
        data = np.random.normal(loc=mean, size=[size, size], scale=scale)

    fake_meta = {'my_key': 42, 'your_key': 'not 42'}
    ccd = CCDData(data, unit=u.adu)
    ccd.header = fake_meta
    return ccd

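# Hedged example of how the markers documented above are meant to be applied,
# assuming ccd_data is registered as a pytest fixture (as its request-based
# markers suggest); the test name and the size/scale/mean values are
# illustrative only.
@pytest.mark.data_size(10)
@pytest.mark.data_scale(2.0)
@pytest.mark.data_mean(100.0)
def test_ccd_data_markers(ccd_data):
    assert ccd_data.data.shape == (10, 10)
    assert ccd_data.unit == u.adu
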
def setup_class(self):
    with NumpyRNGContext(12345):
        self.data = np.random.normal(np.array([1, 2, 3, 4])[:, np.newaxis],
                                     np.array([3, 2, 4, 5])[:, np.newaxis],
                                     (4, 10000))
    self.distr = Distribution(self.data * u.kpc)

def _error_map(self, boot_n, data, nbins, box_size_hMpc, cosmo):
    if box_size_hMpc is None:
        raise ValueError('You need to specify a box_size_hMpc value '
                         'for the bootstrap analysis.')

    # add extra dimension for each map
    cube_shape = self.kappa.shape + (boot_n, )
    kE_err_cube = np.zeros(cube_shape)
    kB_err_cube = np.zeros(cube_shape)

    index = np.arange(len(data))
    with NumpyRNGContext(seed=1):
        index_boot = bootstrap(index, boot_n).astype(int)

    for i in range(boot_n):
        # resample the catalogue with the i-th set of bootstrap indices
        if isinstance(data, pd.DataFrame):
            b_data = data.iloc[index_boot[i]]
        else:
            b_data = data[index_boot[i]]  # assuming numpy array

        b_kappa = self._kappa_map(b_data, nbins, box_size_hMpc, cosmo,
                                  save_ref=False)
        kE_err_cube[:, :, i] = b_kappa.real
        kB_err_cube[:, :, i] = b_kappa.imag

    kE_err = np.std(kE_err_cube, axis=2)
    kB_err = np.std(kB_err_cube, axis=2)
    error_map = kE_err + 1j * kB_err
    return error_map

def test_fitting_with_outlier_removal_niter():
    """
    Test that FittingWithOutlierRemoval stops prior to reaching niter if the
    set of masked points has converged and correctly reports the actual number
    of iterations performed.
    """
    # 2 rows with some noise around a constant level and 1 deviant point:
    x = np.arange(25)
    with NumpyRNGContext(_RANDOM_SEED):
        y = np.random.normal(loc=10., scale=1., size=(2, 25))
    y[0, 14] = 100.

    # Fit 2 models with up to 5 iterations (should only take 2):
    fitter = FittingWithOutlierRemoval(
        fitter=LinearLSQFitter(), outlier_func=sigma_clip, niter=5,
        sigma_lower=3., sigma_upper=3., maxiters=1
    )
    model, mask = fitter(models.Chebyshev1D(2, n_models=2), x, y)

    # Confirm that only the deviant point was rejected, in 2 iterations:
    assert_equal(np.where(mask), [[0], [14]])
    assert fitter.fit_info['niter'] == 2

    # Refit just the first row without any rejection iterations, to ensure
    # there are no regressions for that special case:
    fitter = FittingWithOutlierRemoval(
        fitter=LinearLSQFitter(), outlier_func=sigma_clip, niter=0,
        sigma_lower=3., sigma_upper=3., maxiters=1
    )
    model, mask = fitter(models.Chebyshev1D(2), x, y[0])

    # Confirm that there were no iterations or rejected points:
    assert mask.sum() == 0
    assert fitter.fit_info['niter'] == 0

def test_separate_mask(self):
    with NumpyRNGContext(12345):
        mask1 = BooleanArrayMask(np.random.random((5, 20, 30)) > 0.2,
                                 wcs=self.wcs)
        mask2 = [BooleanArrayMask(np.random.random((5, 20, 30)) > 0.4,
                                  wcs=self.wcs)
                 for i in range(4)]
        mask3 = BooleanArrayMask(np.random.random((5, 20, 30)) > 0.2,
                                 wcs=self.wcs)

    stokes_data = dict(I=SpectralCube(self.data[0], wcs=self.wcs,
                                      mask=mask2[0]),
                       Q=SpectralCube(self.data[1], wcs=self.wcs,
                                      mask=mask2[1]),
                       U=SpectralCube(self.data[2], wcs=self.wcs,
                                      mask=mask2[2]),
                       V=SpectralCube(self.data[3], wcs=self.wcs,
                                      mask=mask2[3]))

    cube1 = StokesSpectralCube(stokes_data, mask=mask1)

    assert_equal(cube1.I.mask.include(), (mask1 & mask2[0]).include())
    assert_equal(cube1.Q.mask.include(), (mask1 & mask2[1]).include())
    assert_equal(cube1.U.mask.include(), (mask1 & mask2[2]).include())
    assert_equal(cube1.V.mask.include(), (mask1 & mask2[3]).include())

    cube2 = cube1.I.with_mask(mask3)
    assert_equal(cube2.mask.include(),
                 (mask1 & mask2[0] & mask3).include())

def test_with_bounding_box():
    """
    Test the option to evaluate a model respecting its bounding_box.
    """
    p = models.Polynomial2D(2) & models.Polynomial2D(2)
    m = models.Mapping((0, 1, 0, 1)) | p
    with NumpyRNGContext(1234567):
        m.parameters = np.random.rand(12)

    m.bounding_box = ((3, 9), (1, 8))
    x, y = np.mgrid[:10, :10]
    a, b = m(x, y)
    aw, bw = m(x, y, with_bounding_box=True)
    ind = (~np.isnan(aw)).nonzero()
    assert_allclose(a[ind], aw[ind])
    assert_allclose(b[ind], bw[ind])

    aw, bw = m(x, y, with_bounding_box=True, fill_value=1000)
    ind = (aw != 1000).nonzero()
    assert_allclose(a[ind], aw[ind])
    assert_allclose(b[ind], bw[ind])

    # test the order of bbox is not reversed for 1D models
    p = models.Polynomial1D(1, c0=12, c1=2.3)
    p.bounding_box = (0, 5)
    assert (p(1) == p(1, with_bounding_box=True))

def test_matching_method():
    from astropy.coordinates import ICRS, SkyCoord
    from astropy.utils import NumpyRNGContext
    from astropy.coordinates.matching import (match_coordinates_3d,
                                              match_coordinates_sky)

    with NumpyRNGContext(987654321):
        cmatch = ICRS(np.random.rand(20) * 360. * u.degree,
                      (np.random.rand(20) * 180. - 90.) * u.degree)
        ccatalog = ICRS(np.random.rand(100) * 360. * u.degree,
                        (np.random.rand(100) * 180. - 90.) * u.degree)

    idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_3d(ccatalog)
    idx2, d2d2, d3d2 = match_coordinates_3d(cmatch, ccatalog)
    npt.assert_array_equal(idx1, idx2)
    assert_allclose(d2d1, d2d2)
    assert_allclose(d3d1, d3d2)

    # should be the same as above because there's no distance, but just make
    # sure this method works
    idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_sky(ccatalog)
    idx2, d2d2, d3d2 = match_coordinates_sky(cmatch, ccatalog)
    npt.assert_array_equal(idx1, idx2)
    assert_allclose(d2d1, d2d2)
    assert_allclose(d3d1, d3d2)

    assert len(idx1) == len(d2d1) == len(d3d1) == 20

def create_ccd(size=50, scale=1.0, mean=0.0, seed=123):
    """Create a fake CCD image for testing data processing."""
    with NumpyRNGContext(seed):
        data = np.random.normal(loc=mean, size=[size, size], scale=scale)
    ccd = CCDData(data, unit=u.adu)
    return ccd

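# Quick illustrative call of create_ccd; the size, scale, mean and seed values
# below are arbitrary.
flat = create_ccd(size=10, scale=2.0, mean=100.0, seed=321)
assert flat.data.shape == (10, 10)
assert flat.unit == u.adu
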
def setup_method(self, method):
    with NumpyRNGContext(12345):
        self.data = Data(cube=np.random.random((30, 50, 20)))
    self.data_collection = DataCollection([self.data])
    ga = GlueApplication(self.data_collection)
    self.viewer = ga.new_data_viewer(ImageViewer)
    self.viewer.add_data(self.data)

def bootstrapping(bootarr, bootfunc):
    # returns multiple velocity dispersions, one per bootstrap realisation
    with NumpyRNGContext(1):
        bootresult = bootstrap(bootarr, bootnum=100,
                               samples=len(bootarr) - 1,
                               bootfunc=bootfunc)
    return bootresult

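# Hedged usage sketch for bootstrapping: the velocity values below are made up,
# and np.std simply stands in for whatever dispersion estimator is passed in.
velocities = np.array([150., 162., 148., 171., 155., 160., 145., 168.])
dispersions = bootstrapping(velocities, np.std)
# `dispersions` has shape (100,): one dispersion per bootstrap resample, each
# drawn with len(bootarr) - 1 samples as coded above.
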
def test_bad_compound_without_units(model):
    with pytest.raises(ValueError):
        x = np.linspace(-5, 5, 10) * u.Angstrom
        with NumpyRNGContext(12345):
            y = np.random.sample(10)

        fitter = fitting.LevMarLSQFitter()
        res_fit = fitter(model, x, y * u.Hz)

def setup_method(self, method):
    with NumpyRNGContext(12345):
        self.data = Data(**dict((name, random_with_nan(100, nan_index=idx + 1))
                                for idx, name in enumerate('abcdefgh')))
    self.data_collection = DataCollection([self.data])
    ga = GlueApplication(self.data_collection)
    self.viewer = ga.new_data_viewer(HistogramViewer)
    self.viewer.add_data(self.data)
    self.viewer.state.x_att = self.data.id['a']

def test_logical():
    with NumpyRNGContext(0x1337):
        compareto = np.random.randn(10)
    with NumpyRNGContext(0x1338):
        val = np.random.randn(10)
    with NumpyRNGContext(0x1339):
        x = np.random.randn(10)

    # scalar comparison value and scalar replacement value
    l = models.Logical('GT', .5, 10)
    res = l(x)
    y = x.copy()
    y[np.greater(x, .5)] = 10
    assert_allclose(res, y)

    # array comparison values and array replacement values
    l = models.Logical('lt', compareto, val)
    res = l(x)
    cond = np.less(x, compareto)
    y = x.copy()
    y[cond] = val[cond]
    assert_allclose(res, y)

def test_uniform_spherical_random_volume_input():
    with NumpyRNGContext(42):
        sph = uniform_spherical_random_volume(size=100, max_radius=1)
        assert len(sph) == 100
        assert sph.distance.unit == u.dimensionless_unscaled
        assert sph.distance.max() <= 1.

        sph = uniform_spherical_random_volume(size=100, max_radius=4 * u.pc)
        assert len(sph) == 100
        assert sph.distance.max() <= 4 * u.pc

def add_cosmicrays(data, scale, threshold, ncrays=NCRAYS):
    size = data.shape[0]
    with NumpyRNGContext(125):
        crrays = np.random.randint(0, size, size=(ncrays, 2))
        # use (threshold + 5) below to make sure cosmic ray is well above the
        # threshold no matter what the random number generator returns
        crflux = (10 * scale * np.random.random(NCRAYS) +
                  (threshold + 5) * scale)
        for i in range(ncrays):
            y, x = crrays[i]
            data.data[y, x] = crflux[i]

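# Illustrative call of add_cosmicrays. It assumes NCRAYS is a module-level
# constant defined earlier (as the default argument above implies); the frame
# size and parameter values here are invented.
import numpy as np
import astropy.units as u
from astropy.nddata import CCDData

base = CCDData(np.zeros((200, 200)), unit=u.adu)
add_cosmicrays(base, scale=1.0, threshold=5.0)
# every injected flux is at least (threshold + 5) * scale = 10 ADU, so the
# cosmic-ray pixels stand out clearly against the zero background
assert base.data.max() >= 10.0
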
def setup_method(self, method):
    self.data = Data(label='d1')
    self.data.coords = SimpleCoordinates()
    with NumpyRNGContext(12345):
        self.data['x'] = random_with_nan(48, 5).reshape((6, 4, 2))
        self.data['y'] = random_with_nan(48, 12).reshape((6, 4, 2))
    self.data_collection = DataCollection([self.data])
    self.app = GlueApplication(self.data_collection)
    self.viewer = self.app.new_data_viewer(ProfileViewer)
    self.viewer.add_data(self.data)

def _boot_error(self, shear, cero, weight, nboot):
    index = np.arange(len(shear))
    with NumpyRNGContext(seed=1):
        bootresult = bootstrap(index, nboot)
    index_boot = bootresult.astype(int)
    shear_boot = shear[index_boot]
    cero_boot = cero[index_boot]
    weight_boot = weight[index_boot]
    shear_means = np.average(shear_boot, weights=weight_boot, axis=1)
    cero_means = np.average(cero_boot, weights=weight_boot, axis=1)
    return np.std(shear_means), np.std(cero_means)

def test_fittable_compound():
    m = Identity(1) | Mapping((0, )) | Gaussian1D(1, 5, 4)
    x = np.arange(10)
    y_real = m(x)
    dy = 0.005
    with NumpyRNGContext(1234567):
        n = np.random.normal(0., dy, x.shape)
    y_noisy = y_real + n
    pfit = LevMarLSQFitter()
    new_model = pfit(m, x, y_noisy)
    y_fit = new_model(x)
    assert_allclose(y_fit, y_real, atol=dy)
