def optimize_value_creation_test(self):
    """Testing the first part of optimize_forecasting_method."""
    def expect_not_implemented(precision):
        # Drives optimize_forecasting_method until it reaches
        # BaseMethod.execute, which is abstract and therefore raises
        # NotImplementedError. Anything else is a test failure.
        try:
            GridSearch(SMAPE, precision).optimize_forecasting_method(self.timeSeries, self.bfm)
        except NotImplementedError:
            return
        assert False    # pragma: no cover

    # First run: required parameters exist, but no intervals are defined.
    self.bfm._requiredParameters = ["param1", "param2", "param3", "param4", "param5"]
    expect_not_implemented(-1)

    # Second run: some parameters now carry closed [0.0, 1.0] intervals.
    self.bfm._parameterIntervals = {
        "param3": [0.0, 1.0, True, True],
        "param4": [0.0, 1.0, True, True],
        "param5": [0.0, 1.0, True, True],
    }
    expect_not_implemented(-5)
def inner_optimization_result_accuracy_test(self):
    """Test for the correct result of a GridSearch optimization.

    Manually scans alpha in [0.01, 0.99] for ExponentialSmoothing,
    then lets GridSearch do the same and compares the resulting errors.
    """
    fm = ExponentialSmoothing()
    startingPercentage = 0.0
    endPercentage = 100.0
    # manually select the best alpha
    self.timeSeries.normalize("second")
    results = []
    # Evaluate every alpha in {0.01, 0.02, ..., 0.99} by hand.
    for smoothingFactor in [alpha / 100.0 for alpha in xrange(1, 100)]:    # pragma: no cover
        fm.set_parameter("smoothingFactor", smoothingFactor)
        resultTS = self.timeSeries.apply(fm)
        error = SMAPE()
        error.initialize(self.timeSeries, resultTS)
        results.append([error, smoothingFactor])
    # Best manual candidate = smallest SMAPE over the full range.
    bestManualResult = min(results, key=lambda item: item[0].get_error(startingPercentage, endPercentage))
    # automatically determine the best alpha using GridSearch
    gridSearch = GridSearch(SMAPE, precision=-4)
    # used, because we test a submethod here
    gridSearch._startingPercentage = startingPercentage
    gridSearch._endPercentage = endPercentage
    result = gridSearch.optimize_forecasting_method(self.timeSeries, fm)
    # the grid search should have determined the same alpha
    # NOTE(review): the two alphas below are extracted but never compared
    # or asserted — presumably leftovers from an earlier version of this
    # test; confirm whether an alpha comparison was intended.
    bestManualAlpha = bestManualResult[1]
    errorManualResult = bestManualResult[0].get_error()
    bestGridSearchAlpha = result[1]["smoothingFactor"]
    errorGridSearchResult = result[0].get_error()
    # NOTE(review): the comment above says "the same alpha", yet this
    # asserts the grid-search error is STRICTLY smaller than the manual
    # one (grid precision -4 is finer than the manual 0.01 steps, which
    # may be the justification) — confirm the strict `>` is intended.
    assert errorManualResult > errorGridSearchResult
def create_generator_test(self):
    """Test the parameter generation function."""
    # Expected grids for a GridSearch precision of -2 (step 0.01):
    # parameter_one yields 99 values, parameter_two yields 201.
    step = 10**-2
    expected_one = [idx * step for idx in range(1, 100)]
    expected_two = [idx * step for idx in range(201)]

    produced_one = list(GridSearch(SMAPE, precision=-2)._generate_next_parameter_value("parameter_one", self.bfm))
    produced_two = list(GridSearch(SMAPE, precision=-2)._generate_next_parameter_value("parameter_two", self.bfm))

    assert len(expected_one) == len(produced_one)
    assert len(expected_two) == len(produced_two)

    # Compare as truncated strings to sidestep float representation noise.
    for expected, produced in zip(expected_one, produced_one):
        prefix = str(expected)[:12]
        assert prefix == str(produced)[:len(prefix)]
    for expected, produced in zip(expected_two, produced_two):
        prefix = str(expected)[:12]
        assert prefix == str(produced)[:len(prefix)]
def outer_optimization_result_test(self):
    """Test the multiple method optimization."""
    # Two candidate forecasting methods for the optimizer to choose from.
    candidates = [ExponentialSmoothing(), HoltMethod()]
    self.timeSeries.normalize("second")
    # Run GridSearch over both methods; the test only requires that the
    # optimization completes without raising.
    optimizer = GridSearch(SMAPE, precision=-2)
    result = optimizer.optimize(self.timeSeries, candidates)
def optimize_exception_test(self):
    """Test for exception while calling GridSearch.optimize."""
    # Calling optimize without a list of forecasting methods must fail.
    sawValueError = False
    try:
        GridSearch(SMAPE, -2).optimize(self.timeSeries)
    except ValueError:
        sawValueError = True
    assert sawValueError

    # With a bare BaseMethod the optimization loops down to the abstract
    # BaseMethod.execute, which raises NotImplementedError.
    sawNotImplemented = False
    try:
        GridSearch(SMAPE, -1).optimize(self.timeSeries, [self.bfm])
    except NotImplementedError:
        sawNotImplemented = True
    assert sawNotImplemented
def optimization_loop_test(self):
    """Testing the optimization loop."""
    searcher = GridSearch(SMAPE, precision=-2)

    def shifted_execute(_ignored):
        # Hand back a deep copy of the fixture series with each entry's
        # first element (presumably the timestamp) shifted by 0.1, so the
        # forecast never lines up with the original series.
        copy = TimeSeries.from_twodim_list(self.timeSeries.to_twodim_list())
        for entry in copy:
            entry[0] += 0.1
        return copy

    # Replace the method's execute with the stub and expect an empty result.
    self.bfm.execute = shifted_execute
    outcome = searcher.optimization_loop(self.timeSeries, self.bfm, [], {})
    assert outcome == []