def test_permutation_test_not_fitted(self):
    """Permutation test must be rejected when the model was never fitted."""
    unfitted_model = models.InferenceElasticNet(random_state=18)
    dataset = _prepare_data_and_target()

    # Call-form assertRaises: the error must fire before any results exist.
    self.assertRaises(RuntimeError, unfitted_model.permutation_test, dataset)
  def test_permutation_test(self):
    """Verifies the permutation test reproduces the known reference frame."""
    dataset = _prepare_data_and_target()
    elastic_net = models.InferenceElasticNet(random_state=18)
    elastic_net.fit(dataset)
    # Only 'effect' and 'significant_permutation' carry values here; the
    # bootstrap columns stay NaN because fit_bootstrap() was never run.
    reference = pd.DataFrame(
        {
            'effect': [-0.20383230, -0.13463647, 0.010821799, 0.010061159,
                       0.000000000, 0.000000000],
            'bootstrap_std': [np.nan] * 6,
            'confidence_interval': [np.nan] * 6,
            'significant_bootstrap': [np.nan] * 6,
            'significant_permutation': [True, True, True, True, False, False],
        },
        index=[1, 'Intercept', 0, 4, 3, 2])

    elastic_net.permutation_test(n_permutations=5, verbose=False, n_jobs=1)
    outcome = elastic_net.get_results()

    pd.testing.assert_frame_equal(
        outcome,
        reference,
        check_dtype=False,
        check_index_type=False)
  def test_predict(self):
    """Predictions come back as a Series aligned with the input data index."""
    dataset = _prepare_data_and_target()
    estimator = models.InferenceElasticNet(random_state=18)
    estimator.fit(dataset)

    output = estimator.predict(dataset)

    self.assertIsInstance(output, pd.Series)
    self.assertEqual(len(output), len(dataset.data))
    pd.testing.assert_index_equal(output.index, dataset.data.index)
  def test_metrics_calculates_r2_and_mape(self):
    """Checks the requested fit metrics are computed and in plausible ranges."""
    # NOTE(review): the method name mentions MAPE, but only 'rmse' and 'r2'
    # are requested below — consider renaming or adding 'mape'. Name kept
    # unchanged here to avoid altering the test suite's reported interface.
    dataset = _prepare_data_and_target()
    estimator = models.InferenceElasticNet(random_state=18)
    estimator.fit(dataset)

    requested_metrics = ('rmse', 'r2')
    metrics = estimator.calculate_fit_metrics(
        dataset, fit_metrics=requested_metrics)

    self.assertGreater(metrics['r2'], 0)
    self.assertLess(metrics['r2'], 1)
    self.assertGreater(metrics['rmse'], 0)
    self.assertCountEqual(metrics.keys(), requested_metrics)
  def test_fit(self):
    """Checks fitting produces the expected coefficient ('effect') values."""
    data = _prepare_data_and_target()
    model = models.InferenceElasticNet(random_state=18)
    expected_result = pd.DataFrame(
        data=[[-0.203832],
              [-0.134636],
              [0.0108217],
              [0.0100611],
              [0.0000000],
              [0.0000000]],
        columns=['effect'],
        index=[1, 'Intercept', 0, 4, 3, 2])

    model.fit(data)
    result = model.get_results()

    # `check_less_precise` was deprecated in pandas 1.1 and removed in 2.0;
    # an integer value k maps to rtol = atol = 0.5 * 10**-k, so
    # check_less_precise=2 becomes rtol=atol=5e-3.
    pd.testing.assert_frame_equal(
        result[['effect']],
        expected_result,
        rtol=5e-3,
        atol=5e-3,
        check_index_type=False)
  def test_fit_bootstrap(self):
    """Checks bootstrap fitting reproduces the expected statistics columns."""
    data = _prepare_data_and_target()
    model = models.InferenceElasticNet(random_state=18)
    expected_result = pd.DataFrame(
        data=[[-0.173, 0.2715, 0.4477, False],
              [-0.135, 0.0784, 0.1287, True],
              [0.0445, 0.0893, 0.1473, False],
              [0.0357, 0.0548, 0.0883, False],
              [0.0123, 0.0384, 0.0643, False],
              [-0.010, 0.0332, 0.0541, False]],
        columns=['effect', 'bootstrap_std',
                 'confidence_interval', 'significant_bootstrap'],
        index=['Intercept', 1, 0, 4, 2, 3])
    model.fit(data)

    model.fit_bootstrap(bootstraps=10, n_jobs=1, verbose=False)
    result = model.get_results()

    # `check_less_precise` was deprecated in pandas 1.1 and removed in 2.0;
    # an integer value k maps to rtol = atol = 0.5 * 10**-k, so
    # check_less_precise=1 becomes rtol=atol=5e-2.
    pd.testing.assert_frame_equal(
        result[expected_result.columns],
        expected_result,
        rtol=5e-2,
        atol=5e-2,
        check_index_type=False)
  def test_fit_with_data_not_ready_throw_error(self):
    """Fitting on data not ready for modelling raises InferenceDataError."""
    not_ready_data = _prepare_data_and_target(ready_for_modelling=False)
    estimator = models.InferenceElasticNet(random_state=18)

    # Call-form assertRaises: fit() must refuse unprepared data outright.
    self.assertRaises(
        data_preparation.InferenceDataError, estimator.fit, not_ready_data)