Example #1
    def test_metric_called_n_signals_times(self):
        metric = Mock(return_value=1.0)
        # noinspection PyTypeChecker
        hyper_score_ar(MockRegressor,
                       self.dataset,
                       metric,
                       n_models=2,
                       n_features=2)
        self.assertEqual(metric.call_count, self.n_signals)
Example #2
    def test_monitor_sequence_passed_to_transform_when_step_is_default(self):
        monitor = ["a", "b"]
        hyper_score_ar(self.regressor_class,
                       self.dataset,
                       self.metric,
                       monitor=monitor)

        self.regressors[0].transform.assert_called()
        call = self.regressors[0].transform.call_args_list[0]
        self.assertIn("monitor", call[1])
        self.assertEqual(call[1]["monitor"], monitor)
Example #3
    def test_progress_called(self):
        mock_progress = mock.MagicMock()

        hyper_score_ar(
            MockRegressor,
            [SimpleNamespace(y=[1, 2], usage_seq=[0, 0])],
            lambda x, y: 1.0,
            n_models=2,
            n_features=3,
            progress=mock_progress,
        )
        mock_progress.assert_called()
Example #4
    def test_returns_tuple(self):
        res = hyper_score_ar(MockRegressor,
                             self.dataset,
                             lambda x, y: 1.0,
                             n_models=2,
                             n_features=2)
        self.assertEqual(len(res), 2)
Example #5
    def setUp(self):
        self.rng = np.random.default_rng(0)
        self.n_signals = 4
        self.n_samples = 35
        self.n_models = 3
        self.dataset = [
            SimpleNamespace(
                y=self.rng.normal(size=self.n_samples),
                usage_seq=self.rng.integers(0,
                                            self.n_models,
                                            size=self.n_samples),
            ) for _ in range(self.n_signals)
        ]
        self.metric = lambda x, y: 1.0

        self.regressors = [Mock(n_features=4) for _ in range(self.n_signals)]
        for regressor in self.regressors:
            regressor.transform.return_value = (
                self.rng.uniform(size=(self.n_samples, self.n_models)),
                SimpleNamespace(),
            )

        self.regressor_class = Mock(side_effect=self.regressors)
        self.res = hyper_score_ar(self.regressor_class, self.dataset,
                                  self.metric)
Example #6
    def setUp(self):
        self.rng = np.random.default_rng(0)
        self.n_signals = 5
        self.n_samples = 34
        self.n_models = 4

        self.ar_order = 3
        self.models = self.rng.normal(size=(self.n_signals, self.n_models,
                                            self.ar_order))

        self.dataset = [
            SimpleNamespace(
                y=self.rng.normal(size=self.n_samples),
                usage_seq=self.rng.integers(0,
                                            self.n_models,
                                            size=self.n_samples),
                armas=[SimpleNamespace(a=model) for model in sig_models],
            ) for sig_models in self.models
        ]

        self.regressor_class = Mock(side_effect=lambda *args, **kwargs: Mock(
            n_features=self.ar_order,
            transform=Mock(return_value=(
                self.rng.uniform(size=(self.n_samples, self.n_models)),
                SimpleNamespace(),
            ), ),
            **kwargs,
        ))

        self.metric = lambda x, y: 1.0
        self.res = hyper_score_ar(self.regressor_class,
                                  self.dataset,
                                  self.metric,
                                  initial_weights="oracle_ar")
Example #7
    def setUp(self):
        self.rng = np.random.default_rng(3)
        self.n_signals = 4
        self.n_samples = 35
        self.n_models = 3
        self.dataset = [
            SimpleNamespace(
                y=self.rng.normal(size=self.n_samples),
                usage_seq=self.rng.integers(0,
                                            self.n_models,
                                            size=self.n_samples),
            ) for _ in range(self.n_signals)
        ]

        self.regressors = []
        for i in range(self.n_signals):
            regressor = Mock(n_features=4)
            regressor.transform.return_value = (
                self.rng.uniform(size=(self.n_samples, self.n_models)),
                SimpleNamespace(),
            )
            self.regressors.append(regressor)

        self.regressor_class = Mock(side_effect=self.regressors)
        self.metric_outputs = np.linspace(0, 1, self.n_signals)
        self.metric = Mock(side_effect=self.metric_outputs)

        # noinspection PyTypeChecker
        self.summary, self.details = hyper_score_ar(self.regressor_class,
                                                    self.dataset,
                                                    self.metric,
                                                    test_fraction=1.0)
Example #8
    def test_transform_second_output_is_returned_in_details_history(self):
        _, details = hyper_score_ar(self.regressor_class, self.dataset,
                                    self.metric)
        for i, history in enumerate(details.history):
            for key, value in self.history_out[i].items():
                self.assertTrue(hasattr(history, key))
                self.assertEqual(getattr(history, key), value)
Example #9
    def setUp(self):
        self.rng = np.random.default_rng(2)
        self.n_signals = 4
        self.n_samples = 41
        self.n_models = 3
        self.dataset = [
            SimpleNamespace(
                y=self.rng.normal(size=self.n_samples),
                usage_seq=self.rng.integers(0,
                                            self.n_models,
                                            size=self.n_samples),
            ) for _ in range(self.n_signals)
        ]

        self.regressors = []
        for i in range(self.n_signals):
            regressor = Mock(n_features=4)
            regressor.transform.return_value = (
                np.zeros((self.n_samples, self.n_models)),
                SimpleNamespace(),
            )
            self.regressors.append(regressor)

        self.regressor_class = Mock(side_effect=self.regressors)
        self.metric = lambda x, y: 1.0
        self.kwargs = {"foo": "bar"}
        self.fit_kws = {"bar": 3}

        with mock.patch("bioslds.batch.transform_ar") as mock_transform_ar:
            mock_transform_ar.side_effect = [
                _.transform.return_value for _ in self.regressors
            ]
            hyper_score_ar(
                self.regressor_class,
                self.dataset,
                self.metric,
                fit_kws=self.fit_kws,
                **self.kwargs,
            )

            self.mock_transform_ar = mock_transform_ar

        for call in self.regressor_class.call_args_list:
            for key, value in self.kwargs.items():
                self.assertIn(key, call[1])
                self.assertEqual(call[1][key], value)
Example #10
    def test_appropriate_test_samples_are_passed_to_metric(self):
        samples = 23
        # noinspection PyTypeChecker
        hyper_score_ar(self.regressor_class,
                       self.dataset,
                       self.metric,
                       test_samples=samples)

        call = self.metric.call_args_list[1]
        usage1 = self.dataset[1].usage_seq
        r1 = self.regressors[1].transform.return_value[0]
        inferred1 = r1.argmax(axis=1)

        labels_true_exp = usage1[-samples:]
        labels_pred_exp = inferred1[-samples:]

        np.testing.assert_equal(call[0][0], labels_true_exp)
        np.testing.assert_equal(call[0][1], labels_pred_exp)
Example #11
    def test_r_is_special_value_in_monitor(self):
        _, details = hyper_score_ar(
            self.regressor_class,
            self.dataset,
            self.metric,
            monitor="r",
        )
        for i, history in enumerate(details.history):
            self.assertTrue(hasattr(history, "r"))
            np.testing.assert_allclose(history.r, self.r[i])
Example #12
    def test_appropriate_test_fraction_is_passed_to_metric(self):
        fraction = 0.6
        # noinspection PyTypeChecker
        hyper_score_ar(self.regressor_class,
                       self.dataset,
                       self.metric,
                       test_fraction=fraction)

        call = self.metric.call_args_list[0]
        usage0 = self.dataset[0].usage_seq
        r0 = self.regressors[0].transform.return_value[0]
        inferred0 = r0.argmax(axis=1)

        n = int(fraction * len(r0))
        labels_true_exp = usage0[-n:]
        labels_pred_exp = inferred0[-n:]

        np.testing.assert_equal(call[0][0], labels_true_exp)
        np.testing.assert_equal(call[0][1], labels_pred_exp)
Example #13
    def test_progress_trial_passed_to_transform_ra(self):
        regressor = Mock(n_features=4)
        regressor.transform.return_value = (np.zeros(
            (10, 2)), SimpleNamespace())
        regressor_class = Mock(return_value=regressor)

        mock_progress = Mock()
        hyper_score_ar(
            regressor_class,
            [SimpleNamespace(y=[1, 2], usage_seq=[0, 0])],
            lambda x, y: 1.0,
            n_models=2,
            n_features=3,
            progress_trial=mock_progress,
        )

        regressor.transform.assert_called()
        call = regressor.transform.call_args_list[0]
        self.assertIn("progress", call[1])
        self.assertIs(call[1]["progress"], mock_progress)
Example #14
    def test_default_test_fraction_is_twenty_percent(self):
        # noinspection PyTypeChecker
        res_def = hyper_score_ar(self.regressor_class, self.dataset,
                                 self.metric)[0]

        regressor_class = Mock(side_effect=self.regressors)
        # noinspection PyTypeChecker
        res_20 = hyper_score_ar(regressor_class,
                                self.dataset,
                                self.metric,
                                test_fraction=0.2)[0]

        regressor_class = Mock(side_effect=self.regressors)
        # noinspection PyTypeChecker
        res_50 = hyper_score_ar(regressor_class,
                                self.dataset,
                                self.metric,
                                test_fraction=0.5)[0]

        self.assertNotEqual(res_20, res_50)
        self.assertEqual(res_def, res_20)
Example #15
    def test_r_monitor_with_nontrivial_step(self):
        step = 3
        _, details = hyper_score_ar(
            self.regressor_class,
            self.dataset,
            self.metric,
            monitor="r",
            monitor_step=step,
        )
        for i, history in enumerate(details.history):
            self.assertTrue(hasattr(history, "r"))
            np.testing.assert_allclose(history.r, self.r[i][::step])
Example #16
def fct(**kwargs):
    crt_res = hyper_score_ar(
        CrosscorrelationRegressor,
        *common_hyper_args,
        nsm_rate=kwargs["rate"],
        xcorr_rate=1 / kwargs["exp_streak"],
        **common_hyper_kws,
    )
    if economy:
        del crt_res[1].regressors
        if len(monitor) == 0:
            del crt_res[1].history
    return crt_res
Example #17
def fct(**kwargs):
    crt_res = hyper_score_ar(
        CepstralRegressor,
        *common_hyper_args,
        initial_weights="oracle_ar",
        cepstral_order=kwargs["cepstral_order"],
        cepstral_kws={"rate": kwargs["rate"]},
        **common_hyper_kws,
    )
    if economy:
        del crt_res[1].regressors
        if len(monitor) == 0:
            del crt_res[1].history
    return crt_res
Example #18
    def test_when_monitor_step_is_not_one_an_attribute_monitor_is_created(
            self):
        monitor = ["a", "b"]
        step = 3

        with mock.patch(
                "bioslds.batch.AttributeMonitor") as MockAttributeMonitor:
            MockAttributeMonitor.history = SimpleNamespace()
            hyper_score_ar(
                self.regressor_class,
                self.dataset,
                self.metric,
                monitor=monitor,
                monitor_step=step,
            )
            MockAttributeMonitor.assert_called()
            call = MockAttributeMonitor.call_args_list[0]

            self.assertEqual(len(call[0]), 1)
            self.assertEqual(call[0][0], monitor)

            self.assertIn("step", call[1])
            self.assertEqual(call[1]["step"], step)
Example #19
def fct(**kwargs):
    crt_res = hyper_score_ar(
        make_bio_wta_with_stable_initial,
        *common_hyper_args,
        rate=kwargs["rate"],
        trans_mat=1 - 1 / kwargs["exp_streak"],
        temperature=kwargs["temperature"],
        error_timescale=kwargs["timescale"],
        **common_hyper_kws,
    )
    if economy:
        del crt_res[1].regressors
        if len(monitor) == 0:
            del crt_res[1].history
    return crt_res
Example #20
    def check_regressor_calls(
        self, rng: Union[None, int, np.random.RandomState, np.random.Generator]
    ) -> Tuple[Mock, Tuple]:
        regressor = Mock(n_features=4)
        regressor.transform.return_value = (
            self.rng.uniform(size=(self.n_samples, self.n_models)),
            SimpleNamespace(),
        )

        regressor_class = Mock(return_value=regressor)

        kwargs = {}
        if rng is not None:
            kwargs["rng"] = rng
        res = hyper_score_ar(regressor_class, self.dataset, self.metric,
                             **kwargs)
        return regressor_class, res
Example #21
    two_ar3.orders,
    dwell_times=two_ar3.dwell_times,
    min_dwell=two_ar3.min_dwell,
    fix_scale=two_ar3.fix_scale,
    rng=two_ar3.seed,
    arma_kws={"max_pole_radius": two_ar3.max_pole_radius},
)

# %%
t0 = time.time()
two_ar3.result_biowta = hyper_score_ar(
    BioWTARegressor,
    two_ar3.dataset,
    two_ar3.metric,
    n_models=two_ar3.n_models,
    n_features=two_ar3.n_features,
    rate=two_ar3.rate_biowta,
    # trans_mat=1 - 1 / two_ar3.streak_biowta,
    progress=tqdm,
    monitor=["r", "weights_", "prediction_"],
)
t1 = time.time()
print(f"Median accuracy score BioWTA: {two_ar3.result_biowta[0]:.2}. "
      f"(Took {t1 - t0:.2f} seconds.)")

# %%
t0 = time.time()
two_ar3.result_xcorr = hyper_score_ar(
    CrosscorrelationRegressor,
    two_ar3.dataset,
    two_ar3.metric,
Example #22
    def test_transform_details_has_history_for_each_signal(self):
        _, details = hyper_score_ar(self.regressor_class, self.dataset,
                                    self.metric)
        self.assertTrue(hasattr(details, "history"))
        self.assertEqual(len(details.history), self.n_signals)
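
Taken together, the examples above all use the same call pattern: hyper_score_ar(regressor_class, dataset, metric, **kwargs) returns a (summary, details) pair, where each dataset entry carries a signal y and a ground-truth usage_seq, and metric(labels_true, labels_pred) scores the inferred labels. The minimal sketch below condenses that pattern; it assumes hyper_score_ar is importable from bioslds.batch (the module patched in Examples #9 and #18) and mocks the regressor the same way the test fixtures do, so the numbers are purely illustrative.

from types import SimpleNamespace
from unittest.mock import Mock

import numpy as np

# Import path assumed from the mock.patch targets in the examples above.
from bioslds.batch import hyper_score_ar

rng = np.random.default_rng(0)
n_signals, n_samples, n_models = 2, 40, 3

# Each dataset entry provides the signal `y` and the ground-truth label
# sequence `usage_seq`, as in the setUp fixtures above.
dataset = [
    SimpleNamespace(
        y=rng.normal(size=n_samples),
        usage_seq=rng.integers(0, n_models, size=n_samples),
    )
    for _ in range(n_signals)
]

# Mocked regressor: `transform` returns (per-sample model scores, history),
# mirroring Examples #5 and #20.
regressor = Mock(n_features=3)
regressor.transform.return_value = (
    rng.uniform(size=(n_samples, n_models)),
    SimpleNamespace(),
)
regressor_class = Mock(return_value=regressor)

# metric(labels_true, labels_pred) -> float; a trivial stand-in here.
summary, details = hyper_score_ar(regressor_class, dataset, lambda x, y: 1.0)
print(summary, len(details.history))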