def test_stationarity_tests(self):
    """Check input validation and basic behavior of the stationarity tests."""
    # Multivariate series: the stationarity tests only accept univariate input.
    series_1 = constant_timeseries(start=0, end=9999).stack(
        constant_timeseries(start=0, end=9999))
    # Stochastic (sampled) series: the tests require deterministic input.
    series_2 = TimeSeries.from_values(
        np.random.uniform(0, 1, (1000, 2, 1000)))
    # Gaussian noise is stationary by construction.
    series_3 = gaussian_timeseries(start=0, end=9999)

    # Test univariate
    with self.assertRaises(AssertionError):
        stationarity_tests(series_1)
    with self.assertRaises(AssertionError):
        stationarity_test_adf(series_1)
    with self.assertRaises(AssertionError):
        stationarity_test_kpss(series_1)

    # Test deterministic
    with self.assertRaises(AssertionError):
        stationarity_tests(series_2)
    with self.assertRaises(AssertionError):
        stationarity_test_adf(series_2)
    with self.assertRaises(AssertionError):
        stationarity_test_kpss(series_2)

    # Test basics: on stationary noise, KPSS should not reject stationarity
    # (p > 0.05) and ADF should reject the unit-root null (p < 0.05).
    self.assertTrue(stationarity_test_kpss(series_3)[1] > 0.05)
    self.assertTrue(stationarity_test_adf(series_3)[1] < 0.05)
    # BUG FIX: the original asserted truthiness of the *function object*
    # (`assertTrue(stationarity_tests)`), which is always True and tests
    # nothing. Actually call the combined test on the stationary series.
    self.assertTrue(stationarity_tests(series_3))
def test_input_transformed(self):
    """The mock transformer shifts every value of the input series by +10."""
    # given: a unit-valued series and the transformer under test
    series_in = constant_timeseries(value=1)
    transformer = self.DataTransformerMock()

    # when: the series is run through the transformer
    result = transformer.transform(series_in)

    # then: all values have been incremented by ten
    self.assertEqual(result, constant_timeseries(value=11))
def test_granger_causality(self):
    """Input validation and sanity checks for granger_causality_tests."""
    # Multivariate series: the tests only accept univariate inputs.
    series_cause_1 = constant_timeseries(start=0, end=9999).stack(
        constant_timeseries(start=0, end=9999))
    series_cause_2 = gaussian_timeseries(start=0, end=9999)
    series_effect_1 = constant_timeseries(start=0, end=999)
    series_effect_2 = TimeSeries.from_values(np.random.uniform(
        0, 1, 10000))
    # Stochastic (sampled) series: the tests require deterministic inputs.
    series_effect_3 = TimeSeries.from_values(
        np.random.uniform(0, 1, (1000, 2, 1000)))
    # DatetimeIndex series, incompatible with the integer-indexed causes.
    series_effect_4 = constant_timeseries(start=pd.Timestamp("2000-01-01"),
                                          length=10000)

    # Test univariate
    with self.assertRaises(AssertionError):
        granger_causality_tests(series_cause_1, series_effect_1, 10,
                                verbose=False)
    with self.assertRaises(AssertionError):
        granger_causality_tests(series_effect_1, series_cause_1, 10,
                                verbose=False)

    # Test deterministic
    with self.assertRaises(AssertionError):
        granger_causality_tests(series_cause_1, series_effect_3, 10,
                                verbose=False)
    with self.assertRaises(AssertionError):
        granger_causality_tests(series_effect_3, series_cause_1, 10,
                                verbose=False)

    # Test Frequency: mismatched index types must raise.
    with self.assertRaises(ValueError):
        granger_causality_tests(series_cause_2, series_effect_4, 10,
                                verbose=False)

    # Test granger basics
    # Result indexing `tests[lag][0]["ssr_ftest"][1]` follows statsmodels'
    # grangercausalitytests return format (p-value of the ssr F-test at lag 1).
    # A series regressed on itself: no added predictive power, p-value ~1.
    tests = granger_causality_tests(series_effect_2, series_effect_2, 10,
                                    verbose=False)
    self.assertTrue(tests[1][0]["ssr_ftest"][1] > 0.99)
    # Independent noise series: no significant causality expected.
    tests = granger_causality_tests(series_cause_2, series_effect_2, 10,
                                    verbose=False)
    self.assertTrue(tests[1][0]["ssr_ftest"][1] > 0.01)
def test_single_timeseries_no_horizon_no_n(self):
    """Model-aware split without horizon/input_size must raise."""
    series = constant_timeseries(value=123, length=10)
    # even if the default axis is 0, but since it is a single timeseries, default axis is 1
    with self.assertRaises(AttributeError):
        train_test_split(series, test_size=2,
                         vertical_split_type=MODEL_AWARE)
def test_multi_timeseries_variable_ts_length_one_ts_too_small(self):
    """A series too short for the requested horizon must trigger an error."""
    # the first series (length 21) is too small for horizon=18 + input_size=1
    data = [constant_timeseries(123, n) for n in (21, 100, 1000)]

    with self.assertRaisesRegex(
            AttributeError,
            "Not enough data to create training and test sets"):
        train_set, test_set = train_test_split(
            data,
            axis=1,
            test_size=2,
            input_size=1,
            horizon=18,
            vertical_split_type=MODEL_AWARE,
        )
class MappersTestCase(unittest.TestCase):
    """Tests for the Mapper / InvertibleMapper data transformers."""

    @staticmethod
    def func(x):
        # plain value mapping: add 10
        return x + 10

    @staticmethod
    def inverse_func(x):
        return x - 10

    @staticmethod
    def ts_func(ts, x):
        # timestamp-aware mapping: subtract the month number of the index entry
        return x - ts.month

    @staticmethod
    def inverse_ts_func(ts, x):
        return x + ts.month

    # `.__func__` unwraps the staticmethod descriptor so the raw function
    # can be passed to the mappers while the class body is being executed.
    plus_ten = Mapper(func.__func__)
    plus_ten_invertible = InvertibleMapper(func.__func__,
                                           inverse_func.__func__)

    subtract_month = Mapper(ts_func.__func__)
    subtract_month_invertible = InvertibleMapper(ts_func.__func__,
                                                 inverse_ts_func.__func__)

    # 1..12 monthly ramp over the year 2000; month number equals the value,
    # so subtracting the month yields the all-zero series.
    lin_series = linear_timeseries(start_value=1, length=12, freq='MS',
                                   start_ts=pd.Timestamp('2000-01-01'),
                                   end_value=12)  # noqa: E501
    zeroes = constant_timeseries(value=0.0, length=12, freq='MS',
                                 start_ts=pd.Timestamp('2000-01-01'))
    tens = constant_timeseries(value=10.0, length=12, freq='MS',
                               start_ts=pd.Timestamp('2000-01-01'))

    def test_mapper(self):
        transformed = self.plus_ten.transform(self.zeroes)
        self.assertEqual(transformed, self.tens)

    def test_invertible_mapper(self):
        transformed = self.plus_ten_invertible.transform(self.lin_series)
        back = self.plus_ten_invertible.inverse_transform(transformed)
        self.assertEqual(back, self.lin_series)

    def test_mapper_with_timestamp(self):
        transformed = self.subtract_month.transform(self.lin_series)
        self.assertEqual(transformed, self.zeroes)

    def test_invertible_mapper_with_timestamp(self):
        transformed = self.subtract_month_invertible.transform(self.lin_series)
        back = self.subtract_month_invertible.inverse_transform(transformed)
        self.assertEqual(back, self.lin_series)
def test_routine(start, end=None, length=None):
    # NOTE(review): this is a closure defined inside a test method — `self`,
    # `value` and `length_assert` come from the enclosing scope, which is not
    # visible here; confirm against the surrounding test.
    # testing for constant value
    constant_ts = constant_timeseries(start=start, end=end, value=value,
                                      length=length)
    # a constant series must contain exactly one distinct value
    value_set = set(constant_ts.values().flatten())
    self.assertTrue(len(value_set) == 1)
    self.assertEqual(len(constant_ts), length_assert)
def test_multi_timeseries_variable_ts_length_sunny_day(self):
    """Model-aware split across series of different lengths."""
    data = [constant_timeseries(123, n) for n in (10, 100, 1000)]

    train_set, test_set = train_test_split(
        data,
        axis=1,
        test_size=2,
        input_size=1,
        horizon=2,
        vertical_split_type=MODEL_AWARE,
    )

    train_lengths = [len(ts) for ts in train_set]
    test_lengths = [len(ts) for ts in test_set]
    # each test slice is input_size + horizon + 1 = 4 points long
    self.assertTrue(
        train_lengths == [7, 97, 997] and test_lengths == [4, 4, 4],
        "Wrong shapes: training set shape: {}; test set shape {}".format(
            train_lengths, test_lengths))
def test_transformers_not_modified(self):
    """With copy=True the pipeline must not touch the original transformer."""
    # given
    transformer = self.DataTransformerMock1()
    pipeline = Pipeline([transformer], copy=True)

    # when: the pipeline runs (on its internal copy, not our instance)
    pipeline.transform(constant_timeseries(1, 10))

    # then: our instance was never invoked
    self.assertFalse(transformer.transform_called)
def test_single_timeseries_sunny_day(self):
    """Model-aware split of a single series yields the expected lengths."""
    series = constant_timeseries(123, 10)
    train_set, test_set = train_test_split(
        series,
        test_size=2,
        input_size=1,
        horizon=2,
        vertical_split_type=MODEL_AWARE,
    )
    # train keeps 7 points; test needs input_size + horizon + 1 = 4
    self.assertTrue(
        len(train_set) == 7 and len(test_set) == 4,
        "Wrong shapes: training set shape: {}; test set shape {}".format(
            len(train_set), len(test_set)))
def test_multi_ts(self):
    """Round-trip a list of series through an invertible pipeline."""
    data = [constant_timeseries(0., 3), constant_timeseries(1., 3)]

    add_ten = InvertibleMapper(fn=lambda x: x + 10,
                               inverse_fn=lambda x: x - 10)
    scale_ten = InvertibleMapper(fn=lambda x: x * 10,
                                 inverse_fn=lambda x: x / 10)
    pipeline = Pipeline([add_ten, scale_ten])

    # when
    forward = pipeline.transform(data)
    recovered = pipeline.inverse_transform(forward)

    # then: inverse_transform undoes transform exactly
    self.assertEqual(data, recovered)
def test_fit(self):
    """A TCN fit on a series should predict near that series' level."""
    series_large = tg.constant_timeseries(length=100, value=1000)
    series_small = tg.constant_timeseries(length=100, value=10)

    # identical hyper-parameters for both models
    common_kwargs = dict(input_chunk_length=12, output_chunk_length=1,
                         n_epochs=10, num_layers=1)

    # Test basic fit and predict
    model_large = TCNModel(**common_kwargs)
    model_large.fit(series_large[:98])
    pred_large = model_large.predict(n=2).values()[0]

    # Test whether model trained on one series is better than one trained on another
    model_small = TCNModel(**common_kwargs)
    model_small.fit(series_small[:98])
    pred_small = model_small.predict(n=2).values()[0]
    self.assertTrue(abs(pred_small - 10) < abs(pred_large - 10))

    # test short predict
    short_pred = model_small.predict(n=1)
    self.assertEqual(len(short_pred), 1)
def test_inverse_transform(self):
    """Pipeline.inverse_transform undoes transform for invertible stages."""
    # given
    series = constant_timeseries(0., 3)
    pipeline = Pipeline(
        [self.PlusTenTransformer(), self.TimesTwoTransformer()])

    # when: transform then immediately invert
    restored = pipeline.inverse_transform(pipeline.transform(series))

    # then
    self.assertEqual(series, restored)
def test_fit(self):
    """Pipeline.fit must fit only the fittable transformers."""
    # given: ten non-fittable mocks followed by ten fittable ones
    not_fittable = [self.DataTransformerMock1() for _ in range(10)]
    fittable = [self.DataTransformerMock2() for _ in range(10)]
    pipeline = Pipeline(not_fittable + fittable)

    # when
    pipeline.fit(constant_timeseries(0, 3))

    # then: only the fittable half saw a fit() call
    for transformer in not_fittable:
        self.assertFalse(transformer.fit_called)
    for transformer in fittable:
        self.assertTrue(transformer.fit_called)
def test_map_with_timestamp(self):
    """map() with a (timestamp, value) function receives the series' index."""
    # a 1..12 monthly ramp: the month number equals the value at each point
    series = linear_timeseries(start_value=1, length=12, freq='MS',
                               start_ts=pd.Timestamp('2000-01-01'),
                               end_value=12)  # noqa: E501
    expected = constant_timeseries(value=0.0, length=12, freq='MS',
                                   start_ts=pd.Timestamp('2000-01-01'))

    def subtract_month(ts, x):
        return x - ts.month

    # subtracting the month from the ramp must yield the zero series
    mapped = series.map(subtract_month)
    self.assertEqual(mapped, expected)
def test_transform_fit(self):
    """fit_transform runs every stage and fits only the fittable ones."""
    # given: ten non-fittable mocks followed by ten fittable ones
    not_fittable = [self.DataTransformerMock1() for _ in range(10)]
    fittable = [self.DataTransformerMock2() for _ in range(10)]
    pipeline = Pipeline(not_fittable + fittable)

    # when
    _ = pipeline.fit_transform(constant_timeseries(value=0, length=3))

    # then: every stage transformed ...
    for transformer in not_fittable + fittable:
        self.assertTrue(transformer.transform_called)
    # ... but only the fittable stages were fitted
    for transformer in not_fittable:
        self.assertFalse(transformer.fit_called)
    for transformer in fittable:
        self.assertTrue(transformer.fit_called)
def test_transform(self):
    """Each mock stage appends values; check the accumulated output."""
    # given: the SAME two mock instances each repeated ten times
    mock1 = self.DataTransformerMock1()
    mock2 = self.DataTransformerMock2()
    stages = [mock1] * 10 + [mock2] * 10
    data = constant_timeseries(0, 3)
    pipeline = Pipeline(stages)

    # when
    pipeline.fit(data)
    transformed = pipeline.transform(data)

    # then: 3 original zeros, then 30 ones, then 30 twos appended
    self.assertEqual(63, len(transformed))
    self.assertEqual([0] * 3 + [1] * 30 + [2] * 30,
                     list(transformed.values()))
    for stage in stages:
        self.assertTrue(stage.transform_called)
        self.assertFalse(stage.inverse_transform_called)
def test_fit_skips_superfluous_transforms(self):
    """fit transforms only up to (not past) the last fittable stage."""
    # given: 10 plain stages, one fittable stage, then 10 more plain stages
    head = [self.DataTransformerMock1() for _ in range(10)]
    fittable = self.DataTransformerMock2()
    tail = [self.DataTransformerMock1() for _ in range(10)]
    pipeline = Pipeline(head + [fittable] + tail)

    # when
    pipeline.fit(constant_timeseries(0, 100))

    # then: everything before the fittable stage was transformed ...
    for stage in head:
        self.assertTrue(stage.transform_called)
    # ... the fittable stage was fitted but not transformed ...
    self.assertTrue(fittable.fit_called)
    self.assertFalse(fittable.transform_called)
    # ... and nothing after it was run at all
    for stage in tail:
        self.assertFalse(stage.transform_called)
def test_pipeline_partial_inverse(self):
    """partial=True inverts only the invertible stages of a pipeline."""
    series = constant_timeseries(0., 3)

    def plus_ten(x):
        return x + 10

    # one non-invertible and one invertible stage
    mapper = Mapper(fn=plus_ten)
    mapper_inv = InvertibleMapper(fn=lambda x: x + 2,
                                  inverse_fn=lambda x: x - 2)

    series_plus_ten = mapper.transform(series)
    pipeline = Pipeline([mapper, mapper_inv])
    forward = pipeline.transform(series)

    # should fail, since partial is False by default
    with self.assertRaises(ValueError):
        pipeline.inverse_transform(forward)

    recovered = pipeline.inverse_transform(forward, partial=True)
    # while the +/- 2 is inverted, the +10 operation is not
    self.assertEqual(series_plus_ten, recovered)
class ProbabilisticRegressionModelsTestCase(DartsBaseTestClass):
    """Determinism and accuracy tests for probabilistic regression models."""

    # Each entry: (model class, constructor kwargs,
    #              max allowed MAE of the median forecast).
    models_cls_kwargs_errs = [
        (
            LightGBMModel,
            {
                "lags": 2,
                "likelihood": "quantile",
                "random_state": 42
            },
            0.4,
        ),
        (
            LightGBMModel,
            {
                "lags": 2,
                "likelihood": "quantile",
                "quantiles": [0.1, 0.3, 0.5, 0.7, 0.9],
                "random_state": 42,
            },
            0.4,
        ),
        (
            LightGBMModel,
            {
                "lags": 2,
                "likelihood": "poisson",
                "random_state": 42
            },
            0.6,
        ),
        (
            LinearRegressionModel,
            {
                "lags": 2,
                "likelihood": "quantile",
                "random_state": 42
            },
            0.6,
        ),
        (
            LinearRegressionModel,
            {
                "lags": 2,
                "likelihood": "poisson",
                "random_state": 42
            },
            0.6,
        ),
    ]

    # shared fixtures: a flat series, a noisy version, and stacked variants
    constant_ts = tg.constant_timeseries(length=200, value=0.5)
    constant_noisy_ts = constant_ts + tg.gaussian_timeseries(length=200,
                                                             std=0.1)
    constant_multivar_ts = constant_ts.stack(constant_ts)
    constant_noisy_multivar_ts = constant_noisy_ts.stack(constant_noisy_ts)
    num_samples = 5

    def test_fit_predict_determinism(self):
        for model_cls, model_kwargs, _ in self.models_cls_kwargs_errs:
            # whether the first predictions of two models initiated with the same random state are the same
            model = model_cls(**model_kwargs)
            model.fit(self.constant_noisy_multivar_ts)
            pred1 = model.predict(n=10, num_samples=2).values()

            model = model_cls(**model_kwargs)
            model.fit(self.constant_noisy_multivar_ts)
            pred2 = model.predict(n=10, num_samples=2).values()

            self.assertTrue((pred1 == pred2).all())

            # test whether the next prediction of the same model is different
            pred3 = model.predict(n=10, num_samples=2).values()
            self.assertTrue((pred2 != pred3).any())

    def test_probabilistic_forecast_accuracy(self):
        for model_cls, model_kwargs, err in self.models_cls_kwargs_errs:
            self.helper_test_probabilistic_forecast_accuracy(
                model_cls,
                model_kwargs,
                err,
                self.constant_ts,
                self.constant_noisy_ts,
            )
            # multivariate input only makes sense for global models
            if issubclass(model_cls, GlobalForecastingModel):
                self.helper_test_probabilistic_forecast_accuracy(
                    model_cls,
                    model_kwargs,
                    err,
                    self.constant_multivar_ts,
                    self.constant_noisy_multivar_ts,
                )

    def helper_test_probabilistic_forecast_accuracy(
            self, model_cls, model_kwargs, err, ts, noisy_ts):
        # Fit on the noisy series; evaluate the forecast against the
        # noiseless counterpart of the held-out tail.
        model = model_cls(**model_kwargs)
        model.fit(noisy_ts[:100])
        pred = model.predict(n=100, num_samples=100)

        # test accuracy of the median prediction compared to the noiseless ts
        mae_err_median = mae(ts[100:], pred)
        self.assertLess(mae_err_median, err)

        # test accuracy for increasing quantiles between 0.7 and 1 (it should ~decrease, mae should ~increase)
        # the +0.1 slack tolerates small non-monotonic wobbles
        tested_quantiles = [0.7, 0.8, 0.9, 0.99]
        mae_err = mae_err_median
        for quantile in tested_quantiles:
            new_mae = mae(ts[100:],
                          pred.quantile_timeseries(quantile=quantile))
            self.assertLess(mae_err, new_mae + 0.1)
            mae_err = new_mae

        # test accuracy for decreasing quantiles between 0.3 and 0 (it should ~decrease, mae should ~increase)
        tested_quantiles = [0.3, 0.2, 0.1, 0.01]
        mae_err = mae_err_median
        for quantile in tested_quantiles:
            new_mae = mae(ts[100:],
                          pred.quantile_timeseries(quantile=quantile))
            self.assertLess(mae_err, new_mae + 0.1)
            mae_err = new_mae
def ts_transform(series: TimeSeries) -> TimeSeries:
    """Append three constant values of 2 to the end of the series."""
    suffix = constant_timeseries(2, 3).values()
    return series.append_values(suffix)
def ts_transform(data: TimeSeries) -> TimeSeries:
    """Append three constant values of 1 to the end of the series."""
    suffix = constant_timeseries(1, 3).values()
    return data.append_values(suffix)
def make_dataset(rows, cols):
    """Build `rows` constant series of length `cols`, valued 0..rows-1."""
    dataset = []
    for value in range(rows):
        dataset.append(constant_timeseries(value, cols))
    return dataset
class ProbabilisticTorchModelsTestCase(DartsBaseTestClass):
    """Probabilistic forecasting tests for torch-based models."""

    # seed at class-definition time so the noisy fixture is reproducible
    np.random.seed(0)

    constant_ts = tg.constant_timeseries(length=200, value=0.5)
    constant_noisy_ts = constant_ts + tg.gaussian_timeseries(length=200,
                                                             std=0.1)
    constant_multivar_ts = constant_ts.stack(constant_ts)
    constant_noisy_multivar_ts = constant_noisy_ts.stack(constant_noisy_ts)
    num_samples = 5

    def test_fit_predict_determinism(self):
        # `models_cls_kwargs_errs` is a module-level list of
        # (model class, kwargs, max error) triples — defined outside this view.
        for model_cls, model_kwargs, _ in models_cls_kwargs_errs:
            # whether the first predictions of two models initiated with the same random state are the same
            model = model_cls(**model_kwargs)
            model.fit(self.constant_noisy_ts)
            pred1 = model.predict(n=10, num_samples=2).values()

            model = model_cls(**model_kwargs)
            model.fit(self.constant_noisy_ts)
            pred2 = model.predict(n=10, num_samples=2).values()

            self.assertTrue((pred1 == pred2).all())

            # test whether the next prediction of the same model is different
            pred3 = model.predict(n=10, num_samples=2).values()
            self.assertTrue((pred2 != pred3).any())

    def test_probabilistic_forecast_accuracy(self):
        for model_cls, model_kwargs, err in models_cls_kwargs_errs:
            self.helper_test_probabilistic_forecast_accuracy(
                model_cls, model_kwargs, err,
                self.constant_ts, self.constant_noisy_ts)
            # multivariate input only makes sense for global models
            if issubclass(model_cls, GlobalForecastingModel):
                self.helper_test_probabilistic_forecast_accuracy(
                    model_cls, model_kwargs, err,
                    self.constant_multivar_ts,
                    self.constant_noisy_multivar_ts,
                )

    def helper_test_probabilistic_forecast_accuracy(self, model_cls,
                                                    model_kwargs, err, ts,
                                                    noisy_ts):
        # Fit on the noisy series; evaluate against the noiseless tail.
        model = model_cls(**model_kwargs)
        model.fit(noisy_ts[:100])
        pred = model.predict(n=100, num_samples=100)

        # test accuracy of the median prediction compared to the noiseless ts
        mae_err_median = mae(ts[100:], pred)
        self.assertLess(mae_err_median, err)

        # test accuracy for increasing quantiles between 0.7 and 1 (it should ~decrease, mae should ~increase)
        tested_quantiles = [0.7, 0.8, 0.9, 0.99]
        mae_err = mae_err_median
        for quantile in tested_quantiles:
            new_mae = mae(ts[100:],
                          pred.quantile_timeseries(quantile=quantile))
            self.assertLess(mae_err, new_mae + 0.1)
            mae_err = new_mae

        # test accuracy for decreasing quantiles between 0.3 and 0 (it should ~decrease, mae should ~increase)
        tested_quantiles = [0.3, 0.2, 0.1, 0.01]
        mae_err = mae_err_median
        for quantile in tested_quantiles:
            new_mae = mae(ts[100:],
                          pred.quantile_timeseries(quantile=quantile))
            self.assertLess(mae_err, new_mae + 0.1)
            mae_err = new_mae

    """ More likelihood tests """
    if TORCH_AVAILABLE:
        # deterministic fixtures for the likelihood tests below; only built
        # when torch is importable
        np.random.seed(42)
        torch.manual_seed(42)

        real_series = TimeSeries.from_values(np.random.randn(100, 2) + [0, 5])
        vals = real_series.all_values()

        real_pos_series = TimeSeries.from_values(
            np.where(vals > 0, vals, -vals))
        discrete_pos_series = TimeSeries.from_values(
            np.random.randint(low=0, high=11, size=(100, 2)))
        binary_series = TimeSeries.from_values(
            np.random.randint(low=0, high=2, size=(100, 2)))
        bounded_series = TimeSeries.from_values(
            np.random.beta(2, 5, size=(100, 2)))
        simplex_series = bounded_series["0"].stack(1.0 - bounded_series["0"])

        # (likelihood, matching fixture series,
        #  tolerance on mean diff for component 0, ... for component 1)
        lkl_series = (
            (GaussianLikelihood(), real_series, 0.1, 3),
            (PoissonLikelihood(), discrete_pos_series, 2, 2),
            (NegativeBinomialLikelihood(), discrete_pos_series, 0.5, 0.5),
            (BernoulliLikelihood(), binary_series, 0.15, 0.15),
            (GammaLikelihood(), real_pos_series, 0.3, 0.3),
            (GumbelLikelihood(), real_series, 0.2, 3),
            (LaplaceLikelihood(), real_series, 0.3, 4),
            (BetaLikelihood(), bounded_series, 0.1, 0.1),
            (ExponentialLikelihood(), real_pos_series, 0.3, 2),
            (DirichletLikelihood(), simplex_series, 0.3, 0.3),
            (GeometricLikelihood(), discrete_pos_series, 1, 1),
            (CauchyLikelihood(), real_series, 3, 11),
            (ContinuousBernoulliLikelihood(), bounded_series, 0.1, 0.1),
            (HalfNormalLikelihood(), real_pos_series, 0.3, 8),
            (LogNormalLikelihood(), real_pos_series, 0.3, 1),
            (WeibullLikelihood(), real_pos_series, 0.2, 2.5),
            (QuantileRegression(), real_series, 0.2, 1),
        )

    def test_likelihoods_and_resulting_mean_forecasts(self):
        def _get_avgs(series):
            # per-component mean over all timestamps and samples
            return np.mean(series.all_values()[:, 0, :]), np.mean(
                series.all_values()[:, 1, :])

        for lkl, series, diff1, diff2 in self.lkl_series:
            model = RNNModel(input_chunk_length=5, likelihood=lkl)
            model.fit(series, epochs=50)
            pred = model.predict(n=50, num_samples=50)

            avgs_orig, avgs_pred = _get_avgs(series), _get_avgs(pred)
            self.assertLess(
                abs(avgs_orig[0] - avgs_pred[0]),
                diff1,
                "The difference between the mean forecast and the mean series is larger "
                "than expected on component 0 for distribution {}".format(
                    lkl),
            )
            self.assertLess(
                abs(avgs_orig[1] - avgs_pred[1]),
                diff2,
                "The difference between the mean forecast and the mean series is larger "
                "than expected on component 1 for distribution {}".format(
                    lkl),
            )

    def test_stochastic_inputs(self):
        model = RNNModel(input_chunk_length=5)
        model.fit(self.constant_ts, epochs=2)

        # build a stochastic series
        target_vals = self.constant_ts.values()
        stochastic_vals = np.random.normal(
            loc=target_vals, scale=1.0, size=(len(self.constant_ts), 100))
        stochastic_vals = np.expand_dims(stochastic_vals, axis=1)
        stochastic_series = TimeSeries.from_times_and_values(
            self.constant_ts.time_index, stochastic_vals)

        # A deterministic model forecasting a stochastic series
        # should return stochastic samples
        preds = [
            model.predict(series=stochastic_series, n=10) for _ in range(2)
        ]

        # random samples should differ
        self.assertFalse(
            np.alltrue(preds[0].values() == preds[1].values()))
if __name__ == "__main__":
    # Benchmark script: evaluate baseline forecasting models on the M4
    # dataset, one data frequency at a time.
    # NOTE(review): this chunk appears truncated — the per-series loop body
    # continues beyond what is visible here; the nesting of the
    # deseasonalization block below is inferred, confirm against the original.
    data_frequencies = ['Yearly', 'Quarterly', 'Monthly', 'Weekly', 'Daily',
                        'Hourly']

    for freq in data_frequencies[::-1]:
        # Load TimeSeries from M4
        ts_train = pkl.load(open("dataset/train_" + freq + ".pkl", "rb"))
        ts_test = pkl.load(open("dataset/test_" + freq + ".pkl", "rb"))

        mase_all = []
        smape_all = []
        # seasonality period for this frequency, looked up in the M4 info table
        m = int(info_dataset.Frequency[freq[0] + '1'])
        for train, test in _build_tqdm_iterator(zip(ts_train, ts_test),
                                                verbose=True):
            # remove seasonality
            train_des = train
            seasonOut = 1
            season = constant_timeseries(length=len(train),
                                         freq=train.freq_str,
                                         start_ts=train.start_time())
            if m > 1:
                # NOTE(review): the seasonality-check result is discarded
                # (`pass`) and the decomposition runs regardless.
                if check_seasonality(train, m=m, max_lag=2 * m):
                    pass
                _, season = extract_trend_and_seasonality(
                    train, m, model=ModelMode.MULTIPLICATIVE)
                train_des = remove_from_series(
                    train, season, model=ModelMode.MULTIPLICATIVE)
                # out-of-sample seasonal component: repeat the last period
                seasonOut = season[-m:].shift(m)
                seasonOut = seasonOut.append_values(
                    seasonOut.values())[:len(test)]
            # model selection
            naiveSeason = NaiveSeasonal(K=m)
            naive2 = NaiveSeasonal(K=1)
            ses = ExponentialSmoothing(trend=None, seasonal=None,
                                       seasonal_periods=m)
            holt = ExponentialSmoothing(seasonal=None, damped=False,
                                        trend='additive',
                                        seasonal_periods=m)
            damp = ExponentialSmoothing(seasonal=None, damped=True,
                                        trend='additive',
                                        seasonal_periods=m)
            fourtheta = FourTheta.select_best_model(train, [1, 2, 3], m)
def make_dataset(rows, cols):
    """Build `rows` constant series of length `cols`, valued 0..rows-1."""
    dataset = []
    for value in range(rows):
        dataset.append(constant_timeseries(value=value, length=cols))
    return dataset
class MappersTestCase(unittest.TestCase):
    """Tests for Mapper / InvertibleMapper on single series and lists."""

    @staticmethod
    def func(x):
        # plain value mapping: add 10
        return x + 10

    @staticmethod
    def inverse_func(x):
        return x - 10

    @staticmethod
    def ts_func(ts, x):
        # timestamp-aware mapping: subtract the month number of the index entry
        return x - ts.month

    @staticmethod
    def inverse_ts_func(ts, x):
        return x + ts.month

    # `.__func__` unwraps the staticmethod descriptor so the raw function
    # can be handed to the mappers while the class body is being executed.
    plus_ten = Mapper(func.__func__)
    plus_ten_invertible = InvertibleMapper(func.__func__,
                                           inverse_func.__func__)

    subtract_month = Mapper(ts_func.__func__)
    subtract_month_invertible = InvertibleMapper(ts_func.__func__,
                                                 inverse_ts_func.__func__)

    # 1..12 monthly ramp over the year 2000, plus constant companion series
    lin_series = linear_timeseries(
        start_value=1,
        length=12,
        freq="MS",
        start=pd.Timestamp("2000-01-01"),
        end_value=12,
    )  # noqa: E501
    zeroes = constant_timeseries(value=0.0, length=12, freq="MS",
                                 start=pd.Timestamp("2000-01-01"))
    tens = constant_timeseries(value=10.0, length=12, freq="MS",
                               start=pd.Timestamp("2000-01-01"))
    twenties = constant_timeseries(value=20.0, length=12, freq="MS",
                                   start=pd.Timestamp("2000-01-01"))

    def test_mapper(self):
        # single series and list-of-series inputs
        test_cases = [
            (self.zeroes, self.tens),
            ([self.zeroes, self.tens], [self.tens, self.twenties]),
        ]
        for to_transform, expected_output in test_cases:
            transformed = self.plus_ten.transform(to_transform)
            self.assertEqual(transformed, expected_output)

    def test_invertible_mapper(self):
        test_cases = [(self.zeroes), ([self.zeroes, self.tens])]
        for data in test_cases:
            transformed = self.plus_ten_invertible.transform(data)
            back = self.plus_ten_invertible.inverse_transform(transformed)
            self.assertEqual(back, data)

    def test_mapper_with_timestamp(self):
        test_cases = [
            (self.lin_series, self.zeroes),
            ([self.lin_series, self.lin_series], [self.zeroes, self.zeroes]),
        ]
        for to_transform, expected_output in test_cases:
            transformed = self.subtract_month.transform(to_transform)

            # align component names before comparing, since the transform
            # keeps the input's column names
            if isinstance(to_transform, list):
                expected_output = [
                    o.with_columns_renamed(o.components[0], t.components[0])
                    for t, o in zip(transformed, expected_output)
                ]
            else:
                expected_output = expected_output.with_columns_renamed(
                    expected_output.components[0], transformed.components[0])
            self.assertEqual(transformed, expected_output)

    def test_invertible_mapper_with_timestamp(self):
        test_cases = [(self.lin_series), ([self.lin_series, self.lin_series])]
        for data in test_cases:
            transformed = self.subtract_month_invertible.transform(data)
            back = self.subtract_month_invertible.inverse_transform(
                transformed)
            self.assertEqual(back, data)

    def test_invertible_mappers_on_stochastic_series(self):
        # log/exp must round-trip sample-wise on a stochastic series
        vals = np.random.rand(10, 2, 100) + 2
        series = TimeSeries.from_values(vals)

        imapper = InvertibleMapper(np.log, np.exp)
        tr = imapper.transform(series)
        inv_tr = imapper.inverse_transform(tr)

        np.testing.assert_almost_equal(series.all_values(copy=False),
                                       inv_tr.all_values(copy=False))