def test_fit_2hp(self):
    """HyperStudy fit with two gridded hyper-parameters (random walk + regime switch)."""
    study = bl.HyperStudy()
    study.loadData(np.array([1, 2, 3, 4, 5]))
    study.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20),
                               'sigma', bl.oint(0, 2, 20)))
    transition = bl.tm.CombinedTransitionModel(
        bl.tm.GaussianRandomWalk('sigma', bl.cint(0, 0.2, 2), target='mean'),
        bl.tm.RegimeSwitch('log10pMin', [-3, -1]))
    study.setTM(transition)
    study.fit()

    # posterior distribution values at one grid column
    np.testing.assert_allclose(
        study.getParameterDistributions('mean', density=False)[1][:, 5],
        [5.80970506e-03, 1.12927905e-01, 4.44501254e-02,
         1.00250119e-02, 1.72751309e-05],
        rtol=1e-05, err_msg='Erroneous posterior distribution values.')

    # posterior mean values
    np.testing.assert_allclose(
        study.getParameterMeanValues('mean'),
        [0.96492471, 2.09944204, 2.82451616, 3.72702495, 5.0219119],
        rtol=1e-05, err_msg='Erroneous posterior mean values.')

    # model evidence
    np.testing.assert_almost_equal(study.logEvidence, -10.7601875492,
                                   decimal=5,
                                   err_msg='Erroneous log-evidence value.')

    # marginal hyper-parameter distribution of 'sigma'
    values, probs = study.getHyperParameterDistribution('sigma')
    np.testing.assert_allclose(
        np.array([values, probs]),
        [[0., 0.2], [0.48943645, 0.51056355]],
        rtol=1e-05,
        err_msg='Erroneous values in hyper-parameter distribution.')

    # joint hyper-parameter distribution of both hyper-parameters
    xvals, yvals, joint = study.getJointHyperParameterDistribution(
        ['log10pMin', 'sigma'])
    np.testing.assert_allclose(
        np.array([xvals, yvals]), [[-3., -1.], [0., 0.2]],
        rtol=1e-05,
        err_msg='Erroneous parameter values in joint hyper-parameter '
                'distribution.')
    np.testing.assert_allclose(
        joint,
        [[0.00701834, 0.0075608], [0.48241812, 0.50300274]],
        rtol=1e-05,
        err_msg='Erroneous probability values in joint hyper-parameter '
                'distribution.')
def test_fit_1cp_1bp_2hp(self):
    """ChangepointStudy fit: one change-point, one break-point, two hyper-parameters."""
    study = bl.ChangepointStudy()
    study.loadData(np.array([1, 2, 3, 4, 5]))
    study.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20),
                               'sigma', bl.oint(0, 2, 20),
                               prior=lambda m, s: 1 / s**3))
    serial = bl.tm.SerialTransitionModel(
        bl.tm.Static(),
        bl.tm.ChangePoint('ChangePoint', [0, 1]),
        bl.tm.CombinedTransitionModel(
            bl.tm.GaussianRandomWalk('sigma', bl.cint(0, 0.2, 2),
                                     target='mean'),
            bl.tm.RegimeSwitch('log10pMin', [-3, -1])),
        bl.tm.BreakPoint('BreakPoint', 'all'),
        bl.tm.Static())
    study.setTM(serial)
    study.fit()

    # posterior distribution values at one grid column
    np.testing.assert_allclose(
        study.getParameterDistributions('mean', density=False)[1][:, 5],
        [0.01243717, 0.03016095, 0.016939, 0.00024909, 0.00024909],
        rtol=1e-04, err_msg='Erroneous posterior distribution values.')

    # posterior mean values
    np.testing.assert_allclose(
        study.getParameterMeanValues('mean'),
        [0.96802204, 1.95705078, 3.47078681, 4.22225665, 4.22225665],
        rtol=1e-05, err_msg='Erroneous posterior mean values.')

    # model evidence
    np.testing.assert_almost_equal(study.logEvidence, -15.072007461556161,
                                   decimal=5,
                                   err_msg='Erroneous log-evidence value.')

    # marginal hyper-parameter distribution of 'sigma'
    values, probs = study.getHyperParameterDistribution('sigma')
    np.testing.assert_allclose(
        np.array([values, probs]), [[0., 0.2], [0.4963324, 0.5036676]],
        rtol=1e-05,
        err_msg='Erroneous values in hyper-parameter distribution.')

    # duration distribution between change-point and break-point
    durations, dprobs = study.getDurationDistribution(
        ['ChangePoint', 'BreakPoint'])
    np.testing.assert_allclose(
        np.array([durations, dprobs]),
        [[1., 2., 3.], [0.01039273, 0.49395867, 0.49564861]],
        rtol=1e-05, err_msg='Erroneous values in duration distribution.')
def test_step_add2TM_2hp_prior_hyperpriors_TMprior(self):
    """OnlineStudy with two transition models, hyper-priors and a TM prior."""
    study = bl.OnlineStudy(storeHistory=True)
    study.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20),
                               'sigma', bl.oint(0, 2, 20),
                               prior=lambda m, s: 1./s))
    combined = bl.tm.CombinedTransitionModel(
        bl.tm.GaussianRandomWalk('s1', [0.25, 0.5], target='mean',
                                 prior=stats.Exponential('e', 0.5)),
        bl.tm.GaussianRandomWalk('s2', bl.cint(0, 0.2, 2), target='sigma',
                                 prior=np.array([0.2, 0.8])))
    study.addTransitionModel('T1', combined)
    study.addTransitionModel('T2', bl.tm.Independent())
    study.setTransitionModelPrior([0.9, 0.1])

    # feed data points one at a time
    for datum in np.array([1, 2, 3, 4, 5]):
        study.step(datum)

    # global and local transition-model probabilities
    np.testing.assert_allclose(
        study.getCurrentTransitionModelDistribution(local=False)[1],
        [0.49402616, 0.50597384], rtol=1e-05,
        err_msg='Erroneous transition model probabilities.')
    np.testing.assert_allclose(
        study.getCurrentTransitionModelDistribution(local=True)[1],
        [0.81739495, 0.18260505], rtol=1e-05,
        err_msg='Erroneous local transition model probabilities.')

    # current hyper-parameter distribution of 's2'
    np.testing.assert_allclose(
        study.getCurrentHyperParameterDistribution('s2')[1],
        [0.19047162, 0.80952838], rtol=1e-05,
        err_msg='Erroneous hyper-parameter distribution.')

    # posterior distribution values at one grid column
    np.testing.assert_allclose(
        study.getParameterDistributions('mean', density=False)[1][:, 5],
        [0.05825921, 0.20129444, 0.07273516, 0.02125759, 0.0039255],
        rtol=1e-05, err_msg='Erroneous posterior distribution values.')

    # posterior mean values
    np.testing.assert_allclose(
        study.getParameterMeanValues('mean'),
        [1.0771838, 1.71494272, 2.45992376, 3.34160617, 4.39337253],
        rtol=1e-05, err_msg='Erroneous posterior mean values.')

    # model evidence
    np.testing.assert_almost_equal(study.logEvidence, -9.46900822686,
                                   decimal=5,
                                   err_msg='Erroneous log-evidence value.')
def test_fit_prior_sympy(self):
    """Study fit with sympy random variables supplied as parameter priors."""
    study = bl.Study()
    study.loadData(np.array([1, 2, 3, 4, 5]))
    study.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20),
                               'sigma', bl.oint(0, 2, 20),
                               prior=[stats.Uniform('u', 0, 6),
                                      stats.Exponential('e', 2.)]))
    study.setTM(bl.tm.GaussianRandomWalk('sigma', 0.1, target='mean'))
    study.fit()

    # posterior distribution values at one grid column
    np.testing.assert_allclose(
        study.getParameterDistributions('mean')[1][:, 5],
        [0.00909976, 0.0089861, 0.00887967, 0.00881235, 0.00880499],
        rtol=1e-05, err_msg='Erroneous posterior distribution values.')

    # posterior mean values
    np.testing.assert_allclose(
        study.getParameterMeanValues('mean'),
        [2.9942575, 2.99646768, 3., 3.00353232, 3.0057425],
        rtol=1e-05, err_msg='Erroneous posterior mean values.')

    # model evidence
    np.testing.assert_almost_equal(study.logEvidence, -12.4324853153,
                                   decimal=5,
                                   err_msg='Erroneous log-evidence value.')
def test_fit_2hp(self):
    """Study fit with a combined transition model (two fixed hyper-parameters)."""
    study = bl.Study()
    study.loadData(np.array([1, 2, 3, 4, 5]))
    study.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20),
                               'sigma', bl.oint(0, 2, 20)))
    study.setTM(bl.tm.CombinedTransitionModel(
        bl.tm.GaussianRandomWalk('sigma', 0.1, target='mean'),
        bl.tm.RegimeSwitch('log10pMin', -3)))
    study.fit()

    # posterior distribution values at one grid column
    np.testing.assert_allclose(
        study.getParameterDistributions('mean')[1][:, 5],
        [0.02976422, 0.15404218, 0.10859567, 0.02553673, 0.00054109],
        rtol=1e-05, err_msg='Erroneous posterior distribution values.')

    # posterior mean values
    np.testing.assert_allclose(
        study.getParameterMeanValues('mean'),
        [1.08288559, 2.24388932, 2.38033179, 2.98934128, 4.64547841],
        rtol=1e-05, err_msg='Erroneous posterior mean values.')

    # model evidence
    np.testing.assert_almost_equal(study.logEvidence, -14.3305753098,
                                   decimal=5,
                                   err_msg='Erroneous log-evidence value.')
def test_fit_0hp(self):
    """Study fit with a static transition model (no hyper-parameters)."""
    study = bl.Study()
    study.loadData(np.array([1, 2, 3, 4, 5]))
    study.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20),
                               'sigma', bl.oint(0, 2, 20)))
    study.setTM(bl.tm.Static())
    study.fit()

    # posterior distribution values at one grid column
    np.testing.assert_allclose(
        study.getParameterDistributions('mean')[1][:, 5],
        [0.00707902, 0.00707902, 0.00707902, 0.00707902, 0.00707902],
        rtol=1e-05, err_msg='Erroneous posterior distribution values.')

    # posterior mean values (static model: identical at all time steps)
    np.testing.assert_allclose(
        study.getParameterMeanValues('mean'),
        [3., 3., 3., 3., 3.],
        rtol=1e-05, err_msg='Erroneous posterior mean values.')

    # model evidence
    np.testing.assert_almost_equal(study.logEvidence, -16.1946904707,
                                   decimal=5,
                                   err_msg='Erroneous log-evidence value.')
def test_fit_1hp(self):
    """Study fit with a Gaussian random walk (one fixed hyper-parameter)."""
    study = bl.Study()
    study.loadData(np.array([1, 2, 3, 4, 5]))
    study.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20),
                               'sigma', bl.oint(0, 2, 20)))
    study.setTM(bl.tm.GaussianRandomWalk('sigma', 0.1, target='mean'))
    study.fit()

    # posterior distribution values at one grid column
    np.testing.assert_allclose(
        study.getParameterDistributions('mean')[1][:, 5],
        [0.00722368, 0.00712209, 0.00702789, 0.00696926, 0.00696322],
        rtol=1e-05, err_msg='Erroneous posterior distribution values.')

    # posterior mean values
    np.testing.assert_allclose(
        study.getParameterMeanValues('mean'),
        [2.99313985, 2.99573566, 3., 3.00426434, 3.00686015],
        rtol=1e-05, err_msg='Erroneous posterior mean values.')

    # model evidence
    np.testing.assert_almost_equal(study.logEvidence, -16.1865343702,
                                   decimal=5,
                                   err_msg='Erroneous log-evidence value.')
def test_fit_prior_array(self):
    """Study fit with a parameter prior supplied as a flat numpy array."""
    study = bl.Study()
    study.loadData(np.array([1, 2, 3, 4, 5]))
    study.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20),
                               'sigma', bl.oint(0, 2, 20),
                               prior=np.ones((20, 20))))
    study.setTM(bl.tm.GaussianRandomWalk('sigma', 0.1, target='mean'))
    study.fit()

    # posterior distribution values at one grid column
    np.testing.assert_allclose(
        study.getParameterDistributions('mean')[1][:, 5],
        [0.04317995, 0.04296549, 0.04275526, 0.04262151, 0.04262491],
        rtol=1e-05, err_msg='Erroneous posterior distribution values.')

    # posterior mean values
    np.testing.assert_allclose(
        study.getParameterMeanValues('mean'),
        [2.66415455, 2.66519273, 2.66664847, 2.66788051, 2.66828383],
        rtol=1e-05, err_msg='Erroneous posterior mean values.')

    # model evidence
    np.testing.assert_almost_equal(study.logEvidence, -10.9827282104,
                                   decimal=5,
                                   err_msg='Erroneous log-evidence value.')
def test_fit_prior_function(self):
    """Study fit with a parameter prior supplied as a Python callable."""
    study = bl.Study()
    study.loadData(np.array([1, 2, 3, 4, 5]))
    study.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20),
                               'sigma', bl.oint(0, 2, 20),
                               prior=lambda m, s: 1. / s))
    study.setTM(bl.tm.GaussianRandomWalk('sigma', 0.1, target='mean'))
    study.fit()

    # posterior distribution values at one grid column
    np.testing.assert_allclose(
        study.getParameterDistributions('mean')[1][:, 5],
        [0.01591204, 0.01579036, 0.01567361, 0.01559665, 0.01558591],
        rtol=1e-05, err_msg='Erroneous posterior distribution values.')

    # posterior mean values
    np.testing.assert_allclose(
        study.getParameterMeanValues('mean'),
        [2.99576496, 2.99741879, 3., 3.00258121, 3.00423504],
        rtol=1e-05, err_msg='Erroneous posterior mean values.')

    # model evidence
    np.testing.assert_almost_equal(study.logEvidence, -11.9842221343,
                                   decimal=5,
                                   err_msg='Erroneous log-evidence value.')
def test_optimize(self):
    """Optimize the hyper-parameter values of a combined transition model.

    Fix: the two ``getHyperParameterValue`` assertions previously carried
    the copy-pasted error message 'Erroneous log-evidence value.'; they
    now state what is actually being checked.
    """
    # carry out fit
    S = bl.Study()
    S.loadData(np.array([1, 2, 3, 4, 5]))
    S.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20),
                           'sigma', bl.oint(0, 2, 20)))
    T = bl.tm.CombinedTransitionModel(
        bl.tm.GaussianRandomWalk('sigma', 1.07, target='mean'),
        bl.tm.RegimeSwitch('log10pMin', -3.90))
    S.setTM(T)
    S.optimize()

    # test parameter distributions
    np.testing.assert_allclose(
        S.getParameterDistributions('mean', density=False)[1][:, 5],
        [4.52572851e-04, 1.67790320e-03, 2.94525791e-07,
         1.49841548e-08, 1.10238422e-09],
        rtol=1e-05, err_msg='Erroneous posterior distribution values.')

    # test parameter mean values
    np.testing.assert_allclose(
        S.getParameterMeanValues('mean'),
        [0.95899404, 1.93816557, 2.99999968, 4.06183394, 5.04100612],
        rtol=1e-05, err_msg='Erroneous posterior mean values.')

    # test model evidence value
    np.testing.assert_almost_equal(S.logEvidence, -8.01030368139,
                                   decimal=5,
                                   err_msg='Erroneous log-evidence value.')

    # test optimized hyper-parameter values
    np.testing.assert_almost_equal(S.getHyperParameterValue('sigma'),
                                   1.06576569677, decimal=5,
                                   err_msg='Erroneous hyper-parameter value.')
    np.testing.assert_almost_equal(S.getHyperParameterValue('log10pMin'),
                                   -4.04001476542, decimal=5,
                                   err_msg='Erroneous hyper-parameter value.')
def test_fit_0hp(self):
    """HyperStudy without hyper-parameters; exercises the fall-back to Study.fit."""
    study = bl.HyperStudy()
    study.loadData(np.array([1, 2, 3, 4, 5]))
    study.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20),
                               'sigma', bl.oint(0, 2, 20),
                               prior=lambda m, s: 1 / s**3))
    study.setTM(bl.tm.Static())
    study.fit()

    # posterior distribution values at one grid column
    np.testing.assert_allclose(
        study.getParameterDistributions('mean', density=False)[1][:, 5],
        [0.00707902, 0.00707902, 0.00707902, 0.00707902, 0.00707902],
        rtol=1e-05, err_msg='Erroneous posterior distribution values.')

    # posterior mean values (static model: identical at all time steps)
    np.testing.assert_allclose(
        study.getParameterMeanValues('mean'),
        [3., 3., 3., 3., 3.],
        rtol=1e-05, err_msg='Erroneous posterior mean values.')

    # model evidence
    np.testing.assert_almost_equal(study.logEvidence, -16.1946904707,
                                   decimal=5,
                                   err_msg='Erroneous log-evidence value.')
def test_step_set1TM_0hp(self):
    """OnlineStudy with one static transition model, stepped datum by datum."""
    study = bl.OnlineStudy(storeHistory=True)
    study.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20),
                               'sigma', bl.oint(0, 2, 20)))
    study.setTM(bl.tm.Static())

    # feed data points one at a time
    for datum in np.array([1, 2, 3, 4, 5]):
        study.step(datum)

    # posterior distribution values at one grid column
    np.testing.assert_allclose(
        study.getParameterDistributions('mean')[1][:, 5],
        [0.0053811, 0.38690331, 0.16329865, 0.04887604, 0.01334921],
        rtol=1e-05, err_msg='Erroneous posterior distribution values.')

    # posterior mean values (online filtering: means evolve per step)
    np.testing.assert_allclose(
        study.getParameterMeanValues('mean'),
        [0.96310103, 1.5065597, 2.00218465, 2.500366, 3.],
        rtol=1e-05, err_msg='Erroneous posterior mean values.')

    # model evidence
    np.testing.assert_almost_equal(study.logEvidence, -16.1946904707,
                                   decimal=5,
                                   err_msg='Erroneous log-evidence value.')
def test_fit_1hp(self):
    """HyperStudy fit with a single gridded hyper-parameter.

    Fix: removed a stray debug ``print(np.array([x, p]))`` that was left
    in front of the hyper-parameter distribution assertion.
    """
    # carry out fit
    S = bl.HyperStudy()
    S.loadData(np.array([1, 2, 3, 4, 5]))
    S.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20),
                           'sigma', bl.oint(0, 2, 20),
                           prior=lambda m, s: 1 / s**3))
    S.setTM(bl.tm.GaussianRandomWalk('sigma', bl.cint(0, 0.2, 2),
                                     target='mean'))
    S.fit()

    # test parameter distributions
    np.testing.assert_allclose(
        S.getParameterDistributions('mean', density=False)[1][:, 5],
        [0.01042107, 0.00766233, 0.00618352, 0.00554651, 0.00548637],
        rtol=1e-05, err_msg='Erroneous posterior distribution values.')

    # test parameter mean values
    np.testing.assert_allclose(
        S.getParameterMeanValues('mean'),
        [2.88534505, 2.93135361, 3., 3.06864639, 3.11465495],
        rtol=1e-05, err_msg='Erroneous posterior mean values.')

    # test model evidence value
    np.testing.assert_almost_equal(S.logEvidence, -16.0629517262,
                                   decimal=5,
                                   err_msg='Erroneous log-evidence value.')

    # test hyper-parameter distribution
    x, p = S.getHyperParameterDistribution('sigma')
    np.testing.assert_allclose(
        np.array([x, p]), [[0., 0.2], [0.43828499, 0.56171501]],
        rtol=1e-05,
        err_msg='Erroneous values in hyper-parameter distribution.')
def test_fit_hyperprior_function(self):
    """HyperStudy fit with a hyper-prior supplied as a Python callable."""
    study = bl.HyperStudy()
    study.loadData(np.array([1, 2, 3, 4, 5]))
    study.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20),
                               'sigma', bl.oint(0, 2, 20),
                               prior=lambda m, s: 1 / s**3))
    study.setTM(bl.tm.GaussianRandomWalk('sigma', bl.cint(0.1, 0.3, 2),
                                         target='mean',
                                         prior=lambda s: 1. / s))
    study.fit()

    # posterior distribution values at one grid column
    np.testing.assert_allclose(
        study.getParameterDistributions('mean', density=False)[1][:, 5],
        [0.04071021, 0.00783661, 0.00527211, 0.00484169, 0.00480379],
        rtol=1e-05, err_msg='Erroneous posterior distribution values.')

    # posterior mean values
    np.testing.assert_allclose(
        study.getParameterMeanValues('mean'),
        [2.68460027, 2.81872578, 3., 3.18127422, 3.31539973],
        rtol=1e-05, err_msg='Erroneous posterior mean values.')

    # model evidence
    np.testing.assert_almost_equal(study.logEvidence, -15.9898700147,
                                   decimal=5,
                                   err_msg='Erroneous log-evidence value.')

    # marginal hyper-parameter distribution of 'sigma'
    values, probs = study.getHyperParameterDistribution('sigma')
    np.testing.assert_allclose(
        np.array([values, probs]),
        [[0.1, 0.3], [0.61609973, 0.38390027]],
        rtol=1e-05,
        err_msg='Erroneous values in hyper-parameter distribution.')
def test_fit_hyperprior_array(self):
    """HyperStudy fit with a hyper-prior supplied as a numpy array."""
    study = bl.HyperStudy()
    study.loadData(np.array([1, 2, 3, 4, 5]))
    study.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20),
                               'sigma', bl.oint(0, 2, 20),
                               prior=lambda m, s: 1 / s**3))
    study.setTM(bl.tm.GaussianRandomWalk('sigma', bl.cint(0, 0.2, 2),
                                         target='mean',
                                         prior=np.array([0.2, 0.8])))
    study.fit()

    # posterior distribution values at one grid column
    np.testing.assert_allclose(
        study.getParameterDistributions('mean', density=False)[1][:, 5],
        [0.01205759, 0.00794796, 0.00574501, 0.00479608, 0.00470649],
        rtol=1e-05, err_msg='Erroneous posterior distribution values.')

    # posterior mean values
    np.testing.assert_allclose(
        study.getParameterMeanValues('mean'),
        [2.82920111, 2.89773902, 3., 3.10226098, 3.17079889],
        rtol=1e-05, err_msg='Erroneous posterior mean values.')

    # model evidence
    np.testing.assert_almost_equal(study.logEvidence, -15.9915077133,
                                   decimal=5,
                                   err_msg='Erroneous log-evidence value.')

    # marginal hyper-parameter distribution of 'sigma'
    values, probs = study.getHyperParameterDistribution('sigma')
    np.testing.assert_allclose(
        np.array([values, probs]),
        [[0., 0.2], [0.16322581, 0.83677419]],
        rtol=1e-05,
        err_msg='Erroneous values in hyper-parameter distribution.')
def test_fit_hyperprior_sympy(self):
    """HyperStudy fit with a hyper-prior supplied as a sympy random variable."""
    study = bl.HyperStudy()
    study.loadData(np.array([1, 2, 3, 4, 5]))
    study.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20),
                               'sigma', bl.oint(0, 2, 20),
                               prior=lambda m, s: 1 / s**3))
    study.setTM(bl.tm.GaussianRandomWalk('sigma', bl.cint(0, 0.2, 2),
                                         target='mean',
                                         prior=stats.Exponential('e', 1.)))
    study.fit()

    # posterior distribution values at one grid column
    np.testing.assert_allclose(
        study.getParameterDistributions('mean', density=False)[1][:, 5],
        [0.01012545, 0.00761074, 0.00626273, 0.00568207, 0.00562725],
        rtol=1e-05, err_msg='Erroneous posterior distribution values.')

    # posterior mean values
    np.testing.assert_allclose(
        study.getParameterMeanValues('mean'),
        [2.89548676, 2.93742566, 3., 3.06257434, 3.10451324],
        rtol=1e-05, err_msg='Erroneous posterior mean values.')

    # model evidence
    np.testing.assert_almost_equal(study.logEvidence, -17.0866290887,
                                   decimal=5,
                                   err_msg='Erroneous log-evidence value.')

    # marginal hyper-parameter distribution of 'sigma'
    values, probs = study.getHyperParameterDistribution('sigma')
    np.testing.assert_allclose(
        np.array([values, probs]),
        [[0., 0.2], [0.487971, 0.512029]],
        rtol=1e-05,
        err_msg='Erroneous values in hyper-parameter distribution.')
def test_save_load(self):
    """Round-trip a fitted HyperStudy through bl.save / bl.load.

    Fixes two defects of the original test: the loaded study was never
    compared against the saved one (the test could only fail via an
    exception), and the file 'study.bl' was left behind in the working
    directory.
    """
    import os

    S = bl.HyperStudy()
    S.loadData(np.array([1, 2, 3, 4, 5]))
    S.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20),
                           'sigma', bl.oint(0, 2, 20)))
    S.setTM(bl.tm.Static())
    S.fit()
    logEvidence = S.logEvidence

    try:
        bl.save('study.bl', S)
        S = bl.load('study.bl')
        # the restored study must reproduce the fitted evidence
        np.testing.assert_almost_equal(
            S.logEvidence, logEvidence, decimal=10,
            err_msg='Loaded study differs from saved study.')
    finally:
        # remove the file created by bl.save so the test leaves no residue
        if os.path.exists('study.bl'):
            os.remove('study.bl')
def test_dynamichyperparameter(self):
    """Evaluate an inequality on a dynamic hyper-parameter, with and without history."""
    # with stored history: query the hyper-parameter at time step 2 via 'sigma@2'
    study = bl.OnlineStudy(storeHistory=True)
    study.setOM(bl.om.Poisson('rate', bl.oint(0, 6, 50)))
    study.add('gradual',
              bl.tm.GaussianRandomWalk('sigma', bl.cint(0, 0.2, 5),
                                       target='rate'))
    study.add('static', bl.tm.Static())
    for datum in np.arange(5):
        study.step(datum)
    prob = study.eval('exp(0.99*log(sigma@2))+1 > 1.1')
    np.testing.assert_almost_equal(
        prob, 0.61228433813735061, decimal=5,
        err_msg='Erroneous parsing result for inequality.')

    # without history: stop after time step 2 and query the current value;
    # the result must match the '@2' query above
    study = bl.OnlineStudy(storeHistory=False)
    study.setOM(bl.om.Poisson('rate', bl.oint(0, 6, 50)))
    study.add('gradual',
              bl.tm.GaussianRandomWalk('sigma', bl.cint(0, 0.2, 5),
                                       target='rate'))
    study.add('static', bl.tm.Static())
    for datum in np.arange(3):
        study.step(datum)
    prob = study.eval('exp(0.99*log(sigma))+1 > 1.1')
    np.testing.assert_almost_equal(
        prob, 0.61228433813735061, decimal=5,
        err_msg='Erroneous parsing result for inequality.')
def test_scipy_2p(self):
    """Two-parameter observation model wrapped from scipy.stats.norm."""
    study = bl.Study()
    study.loadData(np.array([1, 2, 3, 4, 5]))
    study.setOM(bl.om.SciPy(scipy.stats.norm,
                            'loc', bl.cint(0, 7, 200),
                            'scale', bl.oint(0, 1, 200)))
    study.setTM(bl.tm.Static())
    study.fit()

    # only the model evidence is pinned here
    np.testing.assert_almost_equal(study.logEvidence, -13.663836264357225,
                                   decimal=5,
                                   err_msg='Erroneous log-evidence value.')
def test_optimize(self):
    """Optimize hyper-parameter values with a non-flat parameter prior.

    Fix: the two ``getHyperParameterValue`` assertions previously carried
    the copy-pasted error message 'Erroneous log-evidence value.'; they
    now state what is actually being checked.
    """
    # carry out fit
    S = bl.Study()
    S.loadData(np.array([1, 2, 3, 4, 5]))
    S.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20),
                           'sigma', bl.oint(0, 2, 20),
                           prior=lambda m, s: 1 / s**3))
    T = bl.tm.CombinedTransitionModel(
        bl.tm.GaussianRandomWalk('sigma', 1.07, target='mean'),
        bl.tm.RegimeSwitch('log10pMin', -3.90))
    S.setTM(T)
    S.optimize()

    # test parameter distributions
    np.testing.assert_allclose(
        S.getParameterDistributions('mean', density=False)[1][:, 5],
        [4.525547e-04, 1.677968e-03, 2.946498e-07,
         1.499508e-08, 1.102637e-09],
        rtol=1e-05, err_msg='Erroneous posterior distribution values.')

    # test parameter mean values
    np.testing.assert_allclose(
        S.getParameterMeanValues('mean'),
        [0.95899404, 1.93816557, 2.99999968, 4.06183394, 5.04100612],
        rtol=1e-05, err_msg='Erroneous posterior mean values.')

    # test model evidence value
    np.testing.assert_almost_equal(S.logEvidence, -8.010466752050611,
                                   decimal=5,
                                   err_msg='Erroneous log-evidence value.')

    # test optimized hyper-parameter values
    np.testing.assert_almost_equal(S.getHyperParameterValue('sigma'),
                                   1.065854087589326, decimal=5,
                                   err_msg='Erroneous hyper-parameter value.')
    np.testing.assert_almost_equal(S.getHyperParameterValue('log10pMin'),
                                   -4.039735868499399, decimal=5,
                                   err_msg='Erroneous hyper-parameter value.')
def test_statichyperparameter(self):
    """Evaluate an inequality involving a static (gridded) hyper-parameter."""
    study = bl.HyperStudy()
    study.loadData(np.array([1, 2, 3, 4, 5]))
    study.setOM(bl.om.Poisson('rate', bl.oint(0, 6, 50)))
    study.setTM(bl.tm.GaussianRandomWalk('sigma', bl.cint(0, 0.2, 5),
                                         target='rate'))
    study.fit()

    prob = study.eval('exp(0.99*log(sigma))+1 > 1.1')
    np.testing.assert_almost_equal(
        prob, 0.60696006616644793, decimal=5,
        err_msg='Erroneous parsing result for inequality.')
def test_sympy_2p(self):
    """Two-parameter observation model defined via a sympy random variable."""
    study = bl.Study()
    study.loadData(np.array([1, 2, 3, 4, 5]))

    # build a symbolic normal distribution with positive scale
    mu = Symbol('mu')
    std = Symbol('std', positive=True)
    normal = sympy.stats.Normal('norm', mu, std)
    study.setOM(bl.om.SymPy(normal,
                            'mu', bl.cint(0, 7, 200),
                            'std', bl.oint(0, 1, 200),
                            prior=lambda x, y: 1.))
    study.setTM(bl.tm.Static())
    study.fit()

    # only the model evidence is pinned here
    np.testing.assert_almost_equal(study.logEvidence, -13.663836264357226,
                                   decimal=5,
                                   err_msg='Erroneous log-evidence value.')
def test_fit_hyperpriors(self):
    """ChangepointStudy fit with hyper-priors of several kinds (array, callable, sympy)."""
    study = bl.ChangepointStudy()
    study.loadData(np.array([1, 2, 3, 4, 5]))
    study.setOM(bl.om.Gaussian('mean', bl.cint(0, 6, 20),
                               'sigma', bl.oint(0, 2, 20),
                               prior=lambda m, s: 1 / s**3))
    serial = bl.tm.SerialTransitionModel(
        bl.tm.Static(),
        bl.tm.ChangePoint('ChangePoint', [0, 1],
                          prior=np.array([0.3, 0.7])),
        bl.tm.CombinedTransitionModel(
            bl.tm.GaussianRandomWalk('sigma', bl.oint(0, 0.2, 2),
                                     target='mean',
                                     prior=lambda s: 1. / s),
            bl.tm.RegimeSwitch('log10pMin', [-3, -1])),
        bl.tm.BreakPoint('BreakPoint', 'all',
                         prior=stats.Normal('Normal', 3., 1.)),
        bl.tm.Static())
    study.setTM(serial)
    study.fit()

    # posterior distribution values at one grid column
    np.testing.assert_allclose(
        study.getParameterDistributions('mean', density=False)[1][:, 5],
        [0.03372851, 0.05087598, 0.02024129, 0.00020918, 0.00020918],
        rtol=1e-04, err_msg='Erroneous posterior distribution values.')

    # posterior mean values
    np.testing.assert_allclose(
        study.getParameterMeanValues('mean'),
        [0.9894398, 1.92805399, 3.33966456, 4.28759449, 4.28759449],
        rtol=1e-05, err_msg='Erroneous posterior mean values.')

    # model evidence
    np.testing.assert_almost_equal(study.logEvidence, -15.709534690217343,
                                   decimal=5,
                                   err_msg='Erroneous log-evidence value.')

    # marginal hyper-parameter distribution of 'sigma'
    values, probs = study.getHyperParameterDistribution('sigma')
    np.testing.assert_allclose(
        np.array([values, probs]),
        [[0.06666667, 0.13333333], [0.66515107, 0.33484893]],
        rtol=1e-05,
        err_msg='Erroneous values in hyper-parameter distribution.')

    # duration distribution between change-point and break-point
    durations, dprobs = study.getDurationDistribution(
        ['ChangePoint', 'BreakPoint'])
    np.testing.assert_allclose(
        np.array([durations, dprobs]),
        [[1., 2., 3.], [0.00373717, 0.40402616, 0.59223667]],
        rtol=1e-05, err_msg='Erroneous values in duration distribution.')