def compute_score(self,
                  observation: float,
                  prediction: float,
                  verbose: bool = False) -> FloatScore:
    if verbose:
        print("observation = {}".format(observation))
        print("prediction = {}".format(prediction))
    self.figures = []
    runtime = FloatScore(prediction - observation)
    runtime.description = "Time (in seconds) required to complete simulation"
    return runtime
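For reference, FloatScore simply wraps a float and carries an optional
description; a minimal sketch with a hypothetical value:

    score = FloatScore(2.5)
    score.description = "Elapsed wall-clock time in seconds"
    print(score.score)  # 2.5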
Example #2
    def test_regular_score_types_2(self):
        BooleanScore(True)
        BooleanScore(False)
        score = BooleanScore.compute(5, 5)
        self.assertEqual(score.norm_score, 1)
        score = BooleanScore.compute(4, 5)
        self.assertEqual(score.norm_score, 0)

        self.assertEqual(1, BooleanScore(True).norm_score)
        self.assertEqual(0, BooleanScore(False).norm_score)

        t = RangeTest([2, 3])
        score.test = t
        score.describe()
        score.description = "Lorem Ipsum"
        score.describe()

        score = FloatScore(3.14)
        self.assertRaises(InvalidScoreError, score.check_score,
                          Quantity([1, 2, 3], "J"))

        obs = np.array([1.0, 2.0, 3.0])
        pred = np.array([1.0, 2.0, 4.0])
        score = FloatScore.compute_ssd(obs, pred)
        self.assertEqual(str(score), "1")
        self.assertEqual(score.score, 1.0)

        score = RatioScore(1.2)
        self.assertEqual(1, RatioScore(1.0).norm_score)
        self.assertEqual(0, RatioScore(1e12).norm_score)
        self.assertEqual(0, RatioScore(1e-12).norm_score)

        self.assertEqual(str(score), "Ratio = 1.20")

        self.assertRaises(InvalidScoreError, RatioScore, -1.0)
        score = RatioScore.compute({"mean": 4.0, "std": 1.0}, {"value": 2.0})

        self.assertEqual(score.score, 0.5)
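As the final assertions imply, RatioScore.compute divides the predicted
value by the observed mean; a minimal sketch of that arithmetic:

    obs = {"mean": 4.0, "std": 1.0}
    pred = {"value": 2.0}
    ratio = pred["value"] / obs["mean"]  # 0.5, matching score.score above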
Example #3
    def test_regular_score_types_2(self):
        BooleanScore(True)
        BooleanScore(False)
        score = BooleanScore.compute(5, 5)
        self.assertEqual(score.norm_score, 1)
        score = BooleanScore.compute(4, 5)
        self.assertEqual(score.norm_score, 0)

        t = RangeTest([2, 3])
        score.test = t
        score.describe()
        score.description = "Lorem Ipsum"
        score.describe()

        score = FloatScore(3.14)
        obs = np.array([1.0, 2.0, 3.0])
        pred = np.array([1.0, 2.0, 4.0])
        score = FloatScore.compute_ssd(obs, pred)
        self.assertEqual(score.score, 1.0)

        RatioScore(1.2)
        score = RatioScore.compute({'mean': 4., 'std': 1.}, {'value': 2.})
        self.assertEqual(score.score, 0.5)
Example #4
 def add_mean(self):
     # If the matrix is tests-by-models (transposed), work on its transpose
     # so that rows are models and columns are tests.
     is_transposed = isinstance(self.index[0], Test)
     if is_transposed:
         sm = self.T
     else:
         sm = self
     # Prepend a synthetic "Mean" test whose column holds each model's
     # average score across all tests.
     tests = [Test({}, name="Mean")] + sm.tests
     mean_scores = [FloatScore(sm[model].mean()) for model in sm.models]
     mean_scores = np.array(mean_scores).reshape(-1, 1)
     scores = np.hstack([mean_scores, sm.values])
     sm_mean = ScoreMatrix(tests=tests, models=sm.models, scores=scores)
     # Restore the original orientation before returning.
     if is_transposed:
         sm_mean = sm_mean.T
     return sm_mean
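A minimal sketch of calling add_mean, assuming two hypothetical models and
tests (all names and scores invented for illustration):

    tests = [Test({}, name="t1"), Test({}, name="t2")]
    models = [Model(name="m1"), Model(name="m2")]
    scores = np.array([[FloatScore(1.0), FloatScore(2.0)],
                       [FloatScore(3.0), FloatScore(4.0)]])
    sm = ScoreMatrix(tests=tests, models=models, scores=scores)
    sm_mean = sm.add_mean()  # gains a leading "Mean" column: 1.5 and 3.5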
Example #5
    def test_Test(self):
        pv = config["PREVALIDATE"]
        config["PREVALIDATE"] = 1
        with self.assertRaises(ObservationError):
            t = Test(None)

        with self.assertRaises(ObservationError):

            class Test2(Test):
                observation_schema = None
                score_type = ZScore
                units = pq.pA

                def generate_prediction(self):
                    return 1

            t = Test2({"mean": 5 * pq.pA})

        t = Test({})
        self.assertRaises(ObservationError, t.validate_observation,
                          "I am not an observation")
        t.observation_schema = {}
        t.validate_observation({0: 0, 1: 1})
        Test.observation_schema = [{}, {}]
        self.assertListEqual(t.observation_schema_names(),
                             ["Schema 1", "Schema 2"])
        config["PREVALIDATE"] = pv

        self.assertRaises(ParametersError, t.validate_params, None)
        self.assertRaises(ParametersError, t.validate_params,
                          "I am not an observation")
        t.params_schema = {}
        t.validate_params({0: 1, 1: 2})

        self.assertRaises(Error, t.check_capabilities, "I am not a model")
        t.condition_model(Model())
        self.assertRaises(NotImplementedError, t.generate_prediction, Model())
        self.assertRaises(NotImplementedError, t.optimize, Model())

        self.assertTrue(t.compute_score({0: 2, 1: 2}, {0: 2, 1: 2}).score)
        self.assertFalse(t.compute_score({0: -2, 1: 2}, {0: 2, 1: -2}).score)
        t.score_type = None
        self.assertRaises(NotImplementedError, t.compute_score, {}, {})

        t.score_type = BooleanScore
        self.assertRaises(InvalidScoreError, t.check_score_type,
                          FloatScore(0.5))
Example #6
    def bind_score(self, score: FloatScore, model: sciunit.Model,
                   observation: float, prediction: float):
        # create output directory
        self.target_dir = os.path.join(os.path.abspath(self.output_dir),
                                       "validation_davison2000unit", self.name,
                                       model.name)
        if not os.path.exists(self.target_dir):
            os.makedirs(self.target_dir)

        # create relevant output files
        # 1. JSON data: observation, prediction, score
        validation_data = {
            "pred_label": score.model.name,
            "observation": observation,
            "prediction": prediction,
            "score": score.score,
        }
        with open(os.path.join(self.target_dir, 'run_time.json'), 'w') as f:
            json.dump(validation_data, f, indent=4)
        self.figures.append(os.path.join(self.target_dir, 'run_time.json'))

        # 2. JSON data: save Vm vs t trace
        with open(os.path.join(self.target_dir, 'run_time_trace.json'),
                  'w') as f:
            json.dump(self.traces, f, indent=4)
        self.figures.append(
            os.path.join(self.target_dir, 'run_time_trace.json'))

        # 3. Vm trace as pdf: somatic Vm trace during simulation
        params = {
            "title": "Somatic Vm: Stimulus at Soma",
            "xlabel": "Time (ms)",
            "ylabel": "Membrane potential (mV)"
        }
        traces_plot = plots.Traces(name="run_time_trace",
                                   score=score,
                                   params=params)
        file_traces_plot = traces_plot.save_file()
        self.figures.append(file_traces_plot)

        score.related_data["figures"] = self.figures
        return score
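Downstream code can then pick up the generated artifacts from the score's
related_data; a minimal sketch (test and model are hypothetical instances):

    score = test.judge(model)
    for path in score.related_data["figures"]:
        print(path)  # run_time.json, run_time_trace.json, trace PDF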
Example #7
 def compute_score(self, observation, prediction):
     # Sum of squared differences between the interpolated I-V curves.
     o = observation
     p = prediction
     interped = self.interp_IV_curves(o['v'], o['i'], p['v'], p['i'])

     if self.scale:
         def f(sf):
             score = FloatScore.compute_ssd(interped['i_obs'],
                                            (10**sf) * interped['i_pred'])
             return score.score.magnitude
         result = minimize(f, 0.0)
         scale_factor = 10**result.x
         interped['i_pred'] *= scale_factor
     else:
         scale_factor = 1

     score = FloatScore.compute_ssd(interped['i_obs'], interped['i_pred'])
     score.related_data['scale_factor'] = scale_factor
     self.interped = interped
     return score
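The scale-factor search above can be isolated into a self-contained sketch
(the arrays are hypothetical; assumes numpy and scipy):

    import numpy as np
    from scipy.optimize import minimize

    i_obs = np.array([1.0, 2.0, 3.0])
    i_pred = np.array([10.0, 20.0, 30.0])

    def ssd(sf):
        # Sum of squared differences after scaling the prediction by 10**sf.
        return float(np.sum((i_obs - (10 ** sf) * i_pred) ** 2))

    result = minimize(ssd, 0.0)
    print(10 ** result.x)  # ~[0.1]: the prediction is ten times too large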
Example #8
    def test_Test(self):
        config_set("PREVALIDATE", True)
        with self.assertRaises(ObservationError):
            t = Test(None)
        config_set("PREVALIDATE", False)

        t = Test(None)
        self.assertRaises(ObservationError, t.validate_observation, None)
        self.assertRaises(ObservationError, t.validate_observation,
                          "I am not an observation")
        self.assertRaises(ObservationError, t.validate_observation,
                          {"mean": None})
        t = Test([0, 1])
        t.observation_schema = {}
        t.validate_observation({0: 0, 1: 1})
        Test.observation_schema = [{}, {}]
        self.assertListEqual(t.observation_schema_names(),
                             ["Schema 1", "Schema 2"])

        self.assertRaises(ParametersError, t.validate_params, None)
        self.assertRaises(ParametersError, t.validate_params,
                          "I am not an observation")
        t.params_schema = {}
        t.validate_params({0: 1, 1: 2})

        self.assertRaises(Error, t.check_capabilities, "I am not a model")
        t.condition_model(Model())
        self.assertRaises(NotImplementedError, t.generate_prediction, Model())
        self.assertRaises(NotImplementedError, t.optimize, Model())

        self.assertTrue(t.compute_score({0: 2, 1: 2}, {0: 2, 1: 2}).score)
        self.assertFalse(t.compute_score({0: -2, 1: 2}, {0: 2, 1: -2}).score)
        t.score_type = None
        self.assertRaises(NotImplementedError, t.compute_score, {}, {})

        t.score_type = BooleanScore
        self.assertRaises(InvalidScoreError, t.check_score_type,
                          FloatScore(0.5))
        self.assertRaises(ObservationError, t.judge, [Model(), Model()])
Example #9
    def test_regular_score_types_2(self):
        BooleanScore(True)
        BooleanScore(False)
        score = BooleanScore.compute(5, 5)
        self.assertEqual(score.sort_key, 1)
        score = BooleanScore.compute(4, 5)
        self.assertEqual(score.sort_key, 0)

        t = RangeTest([2, 3])
        score.test = t
        score.describe()
        score.description = "Lorem Ipsum"
        score.describe()

        score = FloatScore(3.14)
        obs = np.array([1.0, 2.0, 3.0])
        pred = np.array([1.0, 2.0, 4.0])
        score = FloatScore.compute_ssd(obs, pred)
        self.assertEqual(score.score, 1.0)

        RatioScore(1.2)
        score = RatioScore.compute({'mean': 4., 'std': 1.}, {'value': 2.})
        self.assertEqual(score.score, 0.5)
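Note: this older variant reads score.sort_key, which newer sciunit releases
expose as norm_score (compare the otherwise identical assertions in
Example #2).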
Example #10
 def f(sf):
     score = FloatScore.compute_ssd(interped['i_obs'],
                                    (10**sf)*interped['i_pred'])
     return score.score.magnitude
Example #11
 def compute_score(self, prediction1, prediction2):
     """Implementation of sciunit.Test.score_prediction."""
     score = FloatScore(prediction1 - prediction2)
     score.description = "Difference between model predictions"
     return score
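A minimal sketch of what this returns, with hypothetical prediction values:

    score = FloatScore(2.0 - 1.5)
    score.description = "Difference between model predictions"
    print(score.score)  # 0.5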