def test_make_basis_ortho_normalize():
    """Check how the `ortho` and `normalize` flags shape the basis Gram matrix.

    With `normalize=True` every basis vector has squared norm `n_samples`,
    so the Gram diagonal is constant; with `ortho=True` the off-diagonal
    entries of the Gram matrix vanish.
    """
    params = {'ordar': 5, 'ordriv': 2, 'criterion': False}
    for normalize in (True, False):
        for ortho in (True, False):
            dar = dar_no_fit(ortho, normalize, **params)
            basis = dar.basis_
            # In-place reshape to 2D (n_basis, n_samples), as in the model.
            basis.shape = (basis.shape[0], -1)
            gram = np.dot(basis, basis.T)
            n_basis = gram.shape[0]
            n_samples = _sigin.size

            # Diagonal of the Gram matrix: constant iff normalized.
            diagonal = gram.flat[::n_basis + 1]
            expected_diag = np.full(n_basis, n_samples, dtype=float)
            if normalize:
                assert_array_almost_equal(diagonal, expected_diag)
            else:
                assert_array_not_almost_equal(diagonal, expected_diag)

            # Off-diagonal part: zero iff orthogonalized.
            gram.flat[::n_basis + 1] = 0
            zeros = np.zeros((n_basis, n_basis))
            if ortho:
                assert_array_almost_equal(gram, zeros)
            else:
                assert_array_not_almost_equal(gram, zeros)
def test_random_state():
    """The `random_state` seed controls reproducibility of the simulation."""
    sig_a = simulate_pac_default(random_state=0)
    sig_b = simulate_pac_default(random_state=1)
    sig_c = simulate_pac_default(random_state=0)
    # Different seeds must yield different signals ...
    assert_raises(AssertionError, assert_array_almost_equal, sig_a, sig_b)
    # ... while the same seed reproduces the signal.
    assert_array_almost_equal(sig_a, sig_c)
def test_empty_mask():
    """An all-False mask is equivalent to passing the full, unmasked signal."""
    mask = np.zeros(n_points, dtype=bool)
    with_mask = fast_peak_locking(mask=mask)
    # `signal[~mask]` selects every sample, so both runs see the same data.
    without_mask = fast_peak_locking(low_sig=signal[~mask])
    assert_array_almost_equal(with_mask.time_frequency_,
                              without_mask.time_frequency_)
    assert_array_almost_equal(with_mask.time_average_,
                              without_mask.time_average_)
def test_empty_mask():
    """An all-False mask must not alter the comodulogram, for any PAC metric."""
    mask = np.zeros(n_points, dtype=bool)
    for method in ALL_PAC_METRICS:
        masked = fast_comod(mask=mask, method=method)
        # `signal[~mask]` keeps every sample, so results must coincide.
        unmasked = fast_comod(low_sig=signal[~mask], method=method)
        assert_array_almost_equal(masked, unmasked, decimal=7)
def test_empty_mask():
    """An all-False mask must not change the fitted negative log-likelihood."""
    mask = np.zeros(n_points, dtype=bool)
    masked = fast_delay(mask=mask)
    unmasked = fast_delay()
    assert_array_almost_equal(masked.neg_log_likelihood_,
                              unmasked.neg_log_likelihood_)
def test_make_basis_new_sigdriv():
    """`make_basis` on a driver reuses the stored orthonormalization transform.

    Rebuilding the basis on the same driver must reproduce `basis_` exactly,
    while flipping either the `ortho` or the `normalize` flag must change it.
    """
    params = {'ordar': 5, 'ordriv': 2, 'criterion': False}
    for normalize in (True, False):
        for ortho in (True, False):
            for driver, driver_imag in ([_sigdriv, _sigdriv_imag],
                                        [_noise, _noise[::-1]]):
                dar = dar_no_fit(ortho=ortho, normalize=normalize,
                                 sigdriv=driver, sigdriv_imag=driver_imag,
                                 **params)
                # Same driver -> same basis, via the stored transform.
                rebuilt = dar.make_basis(sigdriv=driver,
                                         sigdriv_imag=driver_imag)
                assert_array_almost_equal(rebuilt, dar.basis_)

                # Flipping either flag must produce a different basis.
                flipped_ortho = dar_no_fit(not ortho, normalize, **params)
                flipped_norm = dar_no_fit(ortho, not normalize, **params)
                for other in (flipped_ortho, flipped_norm):
                    assert_raises(AssertionError, assert_array_almost_equal,
                                  dar.basis_, other.basis_)
def test_weighting_with_zeros():
    """Zero train weights on the second half equal fitting on the first half.

    A model trained with weights that zero out the second half of the signals
    must match a model trained on the truncated (first-half) signals.
    """
    half = int(n_points // 2)
    weights = np.ones_like(_sigin)
    weights[half:] = 0
    sigin_first = _sigin[:half]
    sigdriv_first = _sigdriv[:half]
    sigdriv_imag_first = _sigdriv_imag[:half]
    for klass in ALL_MODELS:
        print(klass.__name__)
        weighted = fast_fitted_model(klass=klass, train_weights=weights)
        truncated = fast_fitted_model(klass=klass, sigin=sigin_first,
                                      sigdriv=sigdriv_first,
                                      sigdriv_imag=sigdriv_imag_first,
                                      train_weights=None)
        assert_array_almost_equal(weighted.AR_, truncated.AR_)
        assert_array_almost_equal(weighted.G_, truncated.G_)
        for train in (False, True):
            assert_array_almost_equal(truncated.log_likelihood(train=train),
                                      weighted.log_likelihood(train=train),
                                      decimal=5)
def test_weighting_with_ones():
    """Constant train weights leave the fit unchanged and scale the likelihood.

    Weighting every sample by the same constant `factor` must not change the
    fitted coefficients (`AR_`, `G_`), and must scale the log-likelihood by
    `factor`; adding matching test weights must not change it further.
    """
    factor = 1.5
    weights = np.ones_like(_sigin) * factor
    for klass in ALL_MODELS:
        both_weighted = fast_fitted_model(klass=klass, train_weights=weights,
                                          test_weights=weights)
        train_weighted = fast_fitted_model(klass=klass, train_weights=weights,
                                           test_weights=None)
        unweighted = fast_fitted_model(klass=klass, train_weights=None,
                                       test_weights=None)
        # Constant weights must not affect the fitted coefficients.
        assert_array_almost_equal(train_weighted.AR_, unweighted.AR_,
                                  decimal=5)
        assert_array_almost_equal(both_weighted.AR_, unweighted.AR_,
                                  decimal=5)
        assert_array_almost_equal(train_weighted.G_, unweighted.G_, decimal=5)
        assert_array_almost_equal(both_weighted.G_, unweighted.G_, decimal=5)
        for train in (False, True):
            # The log-likelihood scales linearly with the constant weight.
            assert_array_almost_equal(
                unweighted.log_likelihood(train=train)[0] * factor,
                train_weighted.log_likelihood(train=train)[0], decimal=4)
            assert_array_almost_equal(
                train_weighted.log_likelihood(train=train)[0],
                both_weighted.log_likelihood(train=train)[0], decimal=4)
def test_norm():
    """`norm(x) ** 2` must agree with `squared_norm(x)` for varied inputs."""
    rng = np.random.RandomState(0)
    # 1D float array, 2D float array, complex list, and a plain scalar.
    inputs = (rng.randn(10), rng.randn(4, 3), [1 + 1j, 3, 6], -9)
    for sig in inputs:
        assert_array_almost_equal(norm(sig) ** 2, squared_norm(sig))