def compute_score(self, observation: float, prediction: float, verbose: bool = False) -> FloatScore:
    """Score the simulation runtime as ``prediction - observation``.

    Args:
        observation (float): Observed time, in seconds.
        prediction (float): Predicted time, in seconds.
        verbose (bool): Unused; kept for interface compatibility with
            other ``compute_score`` implementations.

    Returns:
        FloatScore: Score whose value is ``prediction - observation``.
    """
    # This test produces no figures.
    self.figures = []
    runtime = FloatScore(prediction - observation)
    runtime.description = "Time (in seconds) required to complete simulation"
    return runtime
def add_mean(self):
    """Return a copy of this matrix with a "Mean" column prepended.

    The new column holds, for each model, the mean of that model's
    scores. If the matrix is stored transposed (tests along the index),
    it is transposed first and the result transposed back, so the
    returned ScoreMatrix keeps the same orientation as ``self``.
    """
    transposed = isinstance(self.index[0], Test)
    matrix = self.T if transposed else self

    # One FloatScore mean per model, shaped as a (n_models, 1) column.
    means = np.array(
        [FloatScore(matrix[m].mean()) for m in matrix.models]
    ).reshape(-1, 1)

    augmented = ScoreMatrix(
        tests=[Test({}, name="Mean")] + matrix.tests,
        models=matrix.models,
        scores=np.hstack([means, matrix.values]),
    )
    return augmented.T if transposed else augmented
def test_Test(self):
    """Exercise Test construction, validation, and scoring paths."""
    # Save PREVALIDATE so it can be restored after the construction checks.
    pv = config["PREVALIDATE"]
    config["PREVALIDATE"] = 1
    # With prevalidation on, a None observation is rejected at construction.
    with self.assertRaises(ObservationError):
        t = Test(None)
    # A subclass with no observation schema also fails prevalidation
    # when constructed with a real observation.
    with self.assertRaises(ObservationError):
        class Test2(Test):
            observation_schema = None
            score_type = ZScore
            units = pq.pA

            def generate_prediction(self):
                return 1
        t = Test2({"mean": 5 * pq.pA})
    t = Test({})
    # Non-dict observations are rejected by explicit validation.
    self.assertRaises(ObservationError, t.validate_observation, "I am not an observation")
    t.observation_schema = {}
    t.validate_observation({0: 0, 1: 1})
    # A list of schemas yields auto-generated names.
    Test.observation_schema = [{}, {}]
    self.assertListEqual(t.observation_schema_names(), ["Schema 1", "Schema 2"])
    # Restore the original PREVALIDATE setting.
    config["PREVALIDATE"] = pv
    # Parameter validation mirrors observation validation.
    self.assertRaises(ParametersError, t.validate_params, None)
    self.assertRaises(ParametersError, t.validate_params, "I am not an observation")
    t.params_schema = {}
    t.validate_params({0: 1, 1: 2})
    # Only Model instances pass the capability check.
    self.assertRaises(Error, t.check_capabilities, "I am not a model")
    t.condition_model(Model())
    # The base Test leaves prediction generation and optimization abstract.
    self.assertRaises(NotImplementedError, t.generate_prediction, Model())
    self.assertRaises(NotImplementedError, t.optimize, Model())
    # Default score computation compares observation and prediction for
    # equality (true for matching dicts, false otherwise).
    self.assertTrue(t.compute_score({0: 2, 1: 2}, {0: 2, 1: 2}).score)
    self.assertFalse(t.compute_score({0: -2, 1: 2}, {0: 2, 1: -2}).score)
    t.score_type = None
    self.assertRaises(NotImplementedError, t.compute_score, {}, {})
    t.score_type = BooleanScore
    # A FloatScore is not an acceptable BooleanScore.
    self.assertRaises(InvalidScoreError, t.check_score_type, FloatScore(0.5))
def test_Test(self):
    """Exercise Test validation paths with PREVALIDATE toggled via config_set."""
    config_set("PREVALIDATE", True)
    # With prevalidation on, a None observation fails at construction.
    with self.assertRaises(ObservationError):
        t = Test(None)
    config_set("PREVALIDATE", False)
    # With prevalidation off, construction succeeds but explicit
    # validation of bad observations still fails.
    t = Test(None)
    self.assertRaises(ObservationError, t.validate_observation, None)
    self.assertRaises(ObservationError, t.validate_observation, "I am not an observation")
    self.assertRaises(ObservationError, t.validate_observation, {"mean": None})
    t = Test([0, 1])
    t.observation_schema = {}
    t.validate_observation({0: 0, 1: 1})
    # A list of schemas yields auto-generated names.
    Test.observation_schema = [{}, {}]
    self.assertListEqual(t.observation_schema_names(), ["Schema 1", "Schema 2"])
    # Parameter validation mirrors observation validation.
    self.assertRaises(ParametersError, t.validate_params, None)
    self.assertRaises(ParametersError, t.validate_params, "I am not an observation")
    t.params_schema = {}
    t.validate_params({0: 1, 1: 2})
    # Only Model instances pass the capability check.
    self.assertRaises(Error, t.check_capabilities, "I am not a model")
    t.condition_model(Model())
    # The base Test leaves prediction generation and optimization abstract.
    self.assertRaises(NotImplementedError, t.generate_prediction, Model())
    self.assertRaises(NotImplementedError, t.optimize, Model())
    # Default score computation compares observation and prediction for
    # equality (true for matching dicts, false otherwise).
    self.assertTrue(t.compute_score({0: 2, 1: 2}, {0: 2, 1: 2}).score)
    self.assertFalse(t.compute_score({0: -2, 1: 2}, {0: 2, 1: -2}).score)
    t.score_type = None
    self.assertRaises(NotImplementedError, t.compute_score, {}, {})
    t.score_type = BooleanScore
    # A FloatScore is not an acceptable BooleanScore.
    self.assertRaises(InvalidScoreError, t.check_score_type, FloatScore(0.5))
    # NOTE(review): judging a list of models raises ObservationError here —
    # confirm that error type is the intended one for a non-model argument.
    self.assertRaises(ObservationError, t.judge, [Model(), Model()])
def test_regular_score_types_2(self):
    """Exercise BooleanScore, FloatScore, and RatioScore behavior."""
    # Construction alone should not raise.
    BooleanScore(True)
    BooleanScore(False)
    # Equal values give a perfect norm score; unequal give zero.
    score = BooleanScore.compute(5, 5)
    self.assertEqual(score.norm_score, 1)
    score = BooleanScore.compute(4, 5)
    self.assertEqual(score.norm_score, 0)
    self.assertEqual(1, BooleanScore(True).norm_score)
    self.assertEqual(0, BooleanScore(False).norm_score)
    # describe() works both before and after setting a description.
    t = RangeTest([2, 3])
    score.test = t
    score.describe()
    score.description = "Lorem Ipsum"
    score.describe()
    score = FloatScore(3.14)
    # A vector Quantity is rejected as a FloatScore value.
    self.assertRaises(InvalidScoreError, score.check_score, Quantity([1, 2, 3], "J"))
    obs = np.array([1.0, 2.0, 3.0])
    pred = np.array([1.0, 2.0, 4.0])
    # Sum of squared differences: (4 - 3) ** 2 == 1.
    score = FloatScore.compute_ssd(obs, pred)
    self.assertEqual(str(score), "1")
    self.assertEqual(score.score, 1.0)
    score = RatioScore(1.2)
    # A ratio of 1 is best; extreme ratios normalize toward 0.
    self.assertEqual(1, RatioScore(1.0).norm_score)
    self.assertEqual(0, RatioScore(1e12).norm_score)
    self.assertEqual(0, RatioScore(1e-12).norm_score)
    self.assertEqual(str(score), "Ratio = 1.20")
    # Negative ratios are invalid.
    self.assertRaises(InvalidScoreError, RatioScore, -1.0)
    # ratio = value / mean = 2 / 4 = 0.5.
    score = RatioScore.compute({"mean": 4.0, "std": 1.0}, {"value": 2.0})
    self.assertEqual(score.score, 0.5)
def test_regular_score_types_2(self):
    """Exercise BooleanScore, FloatScore, and RatioScore construction and computation."""
    # Construction alone should not raise.
    BooleanScore(True)
    BooleanScore(False)

    # Equal values give a perfect norm score; unequal give zero.
    self.assertEqual(BooleanScore.compute(5, 5).norm_score, 1)
    s = BooleanScore.compute(4, 5)
    self.assertEqual(s.norm_score, 0)

    # describe() works both before and after setting a description.
    s.test = RangeTest([2, 3])
    s.describe()
    s.description = "Lorem Ipsum"
    s.describe()

    FloatScore(3.14)
    # Sum of squared differences: (4 - 3) ** 2 == 1.
    ssd = FloatScore.compute_ssd(
        np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 4.0])
    )
    self.assertEqual(ssd.score, 1.0)

    RatioScore(1.2)
    # ratio = value / mean = 2 / 4 = 0.5.
    ratio = RatioScore.compute({"mean": 4.0, "std": 1.0}, {"value": 2.0})
    self.assertEqual(ratio.score, 0.5)
def compute_score(self, prediction1, prediction2):
    """Implementation of sciunit.Test.score_prediction.

    Returns a FloatScore holding ``prediction1 - prediction2``.
    """
    delta = prediction1 - prediction2
    result = FloatScore(delta)
    result.description = "Difference between model predictions"
    return result