def _init_metrics(self):
    """ Starts up the metrics and statistics watchers. One watcher is
    created for each of the learners to be evaluated.
    """
    self.mean_eval_measurements = []
    self.current_eval_measurements = []

    if self._task_type == constants.CLASSIFICATION:
        for i in range(self.n_models):
            self.mean_eval_measurements.append(
                ClassificationMeasurements())
            self.current_eval_measurements.append(
                WindowClassificationMeasurements(
                    window_size=self.n_sliding))
    elif self._task_type == constants.MULTI_TARGET_CLASSIFICATION:
        for i in range(self.n_models):
            self.mean_eval_measurements.append(
                MultiTargetClassificationMeasurements())
            self.current_eval_measurements.append(
                WindowMultiTargetClassificationMeasurements(
                    window_size=self.n_sliding))
    elif self._task_type == constants.REGRESSION:
        for i in range(self.n_models):
            self.mean_eval_measurements.append(RegressionMeasurements())
            self.current_eval_measurements.append(
                WindowRegressionMeasurements(window_size=self.n_sliding))
    elif self._task_type == constants.MULTI_TARGET_REGRESSION:
        for i in range(self.n_models):
            self.mean_eval_measurements.append(
                MultiTargetRegressionMeasurements())
            self.current_eval_measurements.append(
                WindowMultiTargetRegressionMeasurements(
                    window_size=self.n_sliding))

    # Running time
    self.running_time_measurements = []
    for i in range(self.n_models):
        self.running_time_measurements.append(RunningTimeMeasurements())

    # Evaluation data buffer
    self._data_dict = {}
    for metric in self.metrics:
        data_ids = [constants.MEAN, constants.CURRENT]
        if metric == constants.TRUE_VS_PREDICTED:
            data_ids = [constants.Y_TRUE, constants.Y_PRED]
        elif metric == constants.DATA_POINTS:
            data_ids = ['X', 'target_values', 'prediction']
        elif metric == constants.RUNNING_TIME:
            data_ids = ['training_time', 'testing_time',
                        'total_running_time']
        elif metric == constants.MODEL_SIZE:
            data_ids = ['model_size']
        self._data_dict[metric] = data_ids

    self._data_buffer = EvaluationDataBuffer(data_dict=self._data_dict)
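# Illustrative sketch (assumption, not part of the original module): a minimal
# stand-in showing how the metric -> data_ids mapping built by _init_metrics
# could back a per-metric result buffer. The class and method names below
# (SimpleDataBuffer, update_data, get_data) are hypothetical and do not
# describe the actual EvaluationDataBuffer API.
class SimpleDataBuffer:
    def __init__(self, data_dict):
        # Nested dict: metric -> data_id -> latest value, mirroring the
        # data_ids chosen above (MEAN/CURRENT, Y_TRUE/Y_PRED,
        # running-time fields, ...).
        self._buffer = {metric: {data_id: None for data_id in data_ids}
                        for metric, data_ids in data_dict.items()}

    def update_data(self, metric_id, data_id, value):
        self._buffer[metric_id][data_id] = value

    def get_data(self, metric_id, data_id):
        return self._buffer[metric_id][data_id]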
import time

from skmultiflow.metrics import RunningTimeMeasurements


def test_running_time_measurements():
    rtm = RunningTimeMeasurements()

    for i in range(1000):
        # Test training time
        rtm.compute_training_time_begin()
        time.sleep(0.0005)
        rtm.compute_training_time_end()

        # Test testing time
        rtm.compute_testing_time_begin()
        time.sleep(0.0002)
        rtm.compute_testing_time_end()

        # Update statistics
        rtm.update_time_measurements()

    expected_info = 'RunningTimeMeasurements: sample_count: 1000 - ' \
                    'Total running time: {} - ' \
                    'training_time: {} - ' \
                    'testing_time: {}'.format(
                        rtm.get_current_total_running_time(),
                        rtm.get_current_training_time(),
                        rtm.get_current_testing_time(),
                    )
    assert expected_info == rtm.get_info()
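# Illustrative sketch (assumption, not the RunningTimeMeasurements
# implementation): the begin/end/update accumulation pattern exercised by the
# test above, re-implemented as a tiny standalone tracker. All names here
# (SimpleTimeTracker and its methods) are hypothetical.
class SimpleTimeTracker:
    def __init__(self):
        self.sample_count = 0
        self.training_time = 0.0
        self.testing_time = 0.0
        self._start = 0.0

    def training_begin(self):
        self._start = time.perf_counter()

    def training_end(self):
        # Accumulate the elapsed wall-clock time of this training step.
        self.training_time += time.perf_counter() - self._start

    def testing_begin(self):
        self._start = time.perf_counter()

    def testing_end(self):
        self.testing_time += time.perf_counter() - self._start

    def update(self):
        # One train/test cycle has been processed.
        self.sample_count += 1

    def total_running_time(self):
        return self.training_time + self.testing_time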