def test_4D(self):
    """rolling_sum on a 4-D array agrees with summing explicit windows.

    ``rolling_window`` appends one window axis per input dimension, so for
    a 4-D input the window content lives on axes (4, 5, 6, 7).
    """
    np.random.seed(0)
    data = np.random.rand(12, 12, 12, 12)
    window_axes = (4, 5, 6, 7)
    for size in (3, 9):
        expected = rolling_window(data, size).sum(axis=window_axes)
        self.assertTrue(np.allclose(rolling_sum(data, size), expected))
def test_2D(self):
    """rolling_sum on a 2-D array agrees with summing explicit windows.

    For a 2-D input the window content added by ``rolling_window`` lives
    on axes (2, 3).
    """
    np.random.seed(0)
    data = np.random.rand(20, 20)
    window_axes = (2, 3)
    for size in (3, 9):
        expected = rolling_window(data, size).sum(axis=window_axes)
        self.assertTrue(np.allclose(rolling_sum(data, size), expected))
def test_3D(self):
    """rolling_sum on a 3-D array agrees with summing explicit windows.

    For a 3-D input the window content added by ``rolling_window`` lives
    on axes (3, 4, 5).
    """
    np.random.seed(0)
    data = np.random.rand(15, 15, 15)
    window_axes = (3, 4, 5)
    for size in (3, 9):
        expected = rolling_window(data, size).sum(axis=window_axes)
        self.assertTrue(np.allclose(rolling_sum(data, size), expected))
def test_assumptions(self):
    """A window larger than the array must be rejected with ValueError."""
    too_small = np.array([1, 2, 3])
    # window_size 5 exceeds the array's length of 3
    with self.assertRaises(ValueError):
        rolling_sum(too_small, 5)
def test_reduce(self):
    """With reduce=True and a full-size window the result equals the total sum.

    Bug fix: the original called ``self.assertTrue(result, a.sum())``, which
    treats the second argument as the assertion *message* — the values were
    never compared, so the test passed for any truthy result.  The values are
    now compared with ``np.allclose``.
    """
    np.random.seed(0)
    a = np.random.rand(5, 5)
    result = rolling_sum(a, window_size=5, reduce=True)
    self.assertTrue(np.allclose(result, a.sum()))
def test_5D(self):
    """rolling_sum on a 5-D array agrees with summing explicit windows.

    For a 5-D input the window content added by ``rolling_window`` lives
    on axes (5, 6, 7, 8, 9).  Only window size 3 is checked here.
    """
    np.random.seed(0)
    data = np.random.rand(10, 10, 10, 10, 10)
    expected = rolling_window(data, 3).sum(axis=(5, 6, 7, 8, 9))
    self.assertTrue(np.allclose(rolling_sum(data, 3), expected))
# NOTE(review): `df`, `n`, `window_size`, `sign_threshold` and the M_* raster
# objects are defined before this chunk; the first three statements look like
# the tail of a loop that fills `sign_threshold` per sample count — confirm
# against the surrounding file.
# t critical value at the 95% level for `df` degrees of freedom.
x = t.ppf(0.95, df=df)
# Corresponding correlation threshold: r = t / sqrt(df + t^2).
r = x / np.sqrt(df + x**2)
# NOTE(review): recomputes the same expression as `r` above; `r` itself is
# never used in this chunk.
sign_threshold[n] = x / np.sqrt(df + x**2)

mp.Raster.set_window_size(window_size)
M_corr.window_size = 1
mp.Raster.set_tiles((10, 10))

# Classify each tile's correlation map into -1 / 0 / +1 significance,
# writing the result into M_sig tile by tile.
for i in M_th:
    progress_bar((i + 1) / M_th.c_tiles)
    th = M_th[i]
    wtd = M_wtd[i]
    fapar = M_fapar[i]
    # Cells where th < 3 are excluded from the analysis — presumably a
    # minimum-count/threshold criterion; verify the semantics of `th`.
    fapar[th < 3] = np.nan
    corr = M_corr[i]
    # Number of cells valid (non-NaN) in BOTH layers inside each 15-window.
    count_values = rolling_sum(np.logical_and(~np.isnan(fapar), ~np.isnan(wtd)), window_size=15)
    # Per-cell significance threshold looked up by valid-sample count.
    threshold = sign_threshold[count_values]
    significance = np.full_like(corr, 0, dtype=np.float64)
    significance[np.isnan(corr)] = np.nan
    significance[corr < -threshold] = -1
    significance[corr > threshold] = 1
    M_sig[i] = significance

mp.Raster.close()
# Per-tile correlation-threshold inference on sampled 15x15 windows.
# NOTE(review): this chunk is truncated — the WTD branch started at the
# bottom continues past the visible portion of the file.
mp.Raster.set_window_size(15)
mp.Raster.set_tiles((20, 20))
d_p_pet = {}  # tile index -> inferred threshold for the P/PET layer
d_wtd = {}    # tile index -> inferred threshold for WTD (filled below this view)
for i in M_th:
    progress_bar((i+1) / M_th.c_tiles)
    th = M_th[i]
    fapar = M_fapar[i]
    # Exclude cells where th < 3, mirroring the masking used elsewhere in
    # this file — presumably a minimum-threshold criterion; verify.
    fapar[th < 3] = np.nan
    p_pet = M_p_pet[i]
    wtd = M_wtd[i]
    # P_PET
    nans = np.logical_or(np.isnan(fapar), np.isnan(p_pet))
    # Keep only windows containing no NaN in either layer.
    mask = (rolling_sum(nans, window_size=15) == 0)
    a = rolling_window(fapar, window_size=15)[mask]
    b = rolling_window(p_pet, window_size=15)[mask]
    if a.size == 0:
        continue
    sample_size = min(100, a.shape[0])
    # NOTE(review): `a` and `b` are subsampled with two INDEPENDENT
    # np.random.choice draws, so the fapar / p_pet windows are no longer
    # paired with each other — confirm this decorrelation is intentional.
    x = correlation_threshold_inference(a[np.random.choice(np.arange(a.shape[0]), sample_size, replace=False)], b[np.random.choice(np.arange(b.shape[0]), sample_size, replace=False)])
    d_p_pet[i] = x
    # WTD
    nans = np.logical_or(np.isnan(fapar), np.isnan(wtd))