Example 1
 def test_model(self, model, expected_score):
     # assemblies: target behavioral data and stored model response probabilities
     objectome = load_assembly()
     probabilities = Path(__file__).parent / f'{model}-probabilities.nc'
     probabilities = BehavioralAssembly(xr.load_dataarray(probabilities))
     # metric
     i2n = I2n()
     score = i2n(probabilities, objectome)
     score = score.sel(aggregation='center')  # central estimate of the score
     assert score == approx(expected_score, abs=0.005), f"expected {expected_score}, but got {score}"
Example 2
 def test_model(self, model, expected_score):
     # assemblies: target behavioral data and model response probabilities stored as a pickle
     objectome = load_assembly()
     probabilities = pd.read_pickle(
         os.path.join(os.path.dirname(__file__), f'{model}-probabilities.pkl'))['data']
     probabilities = BehavioralAssembly(probabilities)
     # metric
     i2n = I2n()
     score = i2n(probabilities, objectome)
     score = score.sel(aggregation='center')
     assert score == approx(expected_score, abs=0.005), \
         f"expected {expected_score}, but got {score}"
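Examples 1 and 2 differ only in how the stored model probabilities are loaded (a NetCDF DataArray read with xarray versus a pickled pandas object); once wrapped in a BehavioralAssembly, the I2n metric is applied identically in both.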
Example 3
 def test_ceiling(self):
     objectome = load_assembly()
     i2n = I2n()
     # ceiling of the metric on the behavioral data, i.e. the upper bound on model scores
     ceiling = i2n.ceiling(objectome)
     assert ceiling.sel(aggregation='center') == approx(.4786, abs=.0064)
     assert ceiling.sel(aggregation='error') == approx(.00537, abs=.0015)
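The test_model methods above take model and expected_score as arguments, which implies they are driven by a parametrized test. Below is a minimal sketch of such a harness, assuming pytest; the model names and expected scores are hypothetical placeholders, and I2n, BehavioralAssembly, and load_assembly are taken from the examples above (their import paths are omitted, since they depend on the package under test).

import pytest
from pathlib import Path

import xarray as xr
from pytest import approx

# I2n, BehavioralAssembly and load_assembly are assumed to be importable from
# the package under test; their exact module paths are not shown in the examples.


class TestI2n:
    @pytest.mark.parametrize('model, expected_score', [
        ('model-a', 0.5),  # hypothetical model identifier and score
        ('model-b', 0.6),  # hypothetical model identifier and score
    ])
    def test_model(self, model, expected_score):
        objectome = load_assembly()  # target behavioral data
        # stored model response probabilities, saved next to this test file
        path = Path(__file__).parent / f'{model}-probabilities.nc'
        probabilities = BehavioralAssembly(xr.load_dataarray(path))
        score = I2n()(probabilities, objectome).sel(aggregation='center')
        assert score == approx(expected_score, abs=0.005)

With this kind of parametrization, each (model, expected_score) pair runs as its own test case, which matches the fixed-tolerance assertion used in the examples.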