def run(
    self,
    run_mode,
    initial_guess=None,
    bounds=None,
    signal_window=None,
    optimization_method=None,
):
    """Set up the control discretization and dispatch on *run_mode*.

    Parameters
    ----------
    run_mode : str
        One of ``"optimization"``, ``"single_run"`` or ``"direct_adjoint"``.
    initial_guess : array-like, optional
        Initial control coefficients; defaults to zeros over the basis.
    bounds : tuple, optional
        ``(low, high)`` bounds applied uniformly, then projected onto the basis.
    signal_window : float, optional
        Width parameter of the piecewise linear basis.
    optimization_method : optional
        Forwarded to ``self.minimize`` in optimization mode.

    Returns
    -------
    The optimizer result, the objective state, or ``None`` for an
    unrecognized mode.
    """
    self.logger.run_mode = run_mode

    # 1. define control
    # Nodal grid [0, T] with spacing dt for the direct simulation.
    self.time_grid = TimeSeries.from_parameters(
        Decimal("0"), self.timer["T"], self.timer["dt"]
    )
    # Staggered (cell-midpoint) grid for the adjoint simulation.
    half_step = Decimal("0.5") * Decimal(self.timer["dt"])
    self.midpoint_grid = TimeSeries.from_parameters(
        half_step, self.timer["T"] - half_step, self.timer["dt"]
    )

    # Control space: a reduced piecewise linear basis on the nodal grid.
    self.basis = PiecewiseLinearBasis(
        np.array([float(key) for key in self.time_grid.keys()]),
        width=signal_window,
        reduced_basis=True,
    )

    # Project constant bounds onto the reduced basis, if provided.
    if bounds is not None:
        n_nodes = len(self.time_grid)
        upper = self.basis.discretize([bounds[1]] * n_nodes)
        lower = self.basis.discretize([bounds[0]] * n_nodes)
        bounds = list(zip(lower, upper))

    if initial_guess is None:
        initial_guess = np.zeros(len(self.basis.basis))

    # 2. dispatch on the requested mode
    if run_mode == "optimization":
        return self.minimize(
            initial_guess, bounds, optimization_method=optimization_method
        )
    if run_mode == "single_run":
        return self._objective_state(initial_guess)
    if run_mode == "direct_adjoint":
        res = self._objective_state(initial_guess)
        # The jacobian is evaluated for its side effects (e.g. logging);
        # only the objective state is returned.
        self._jacobian(res)
        return res
    log.warning(f"The run mode {run_mode} is not implemented")
    return None
def test_interpolate_to_keys():
    """Interpolation onto another grid's keys preserves the expected
    values and endpoint bookkeeping for matching nodal/midpoint grids."""
    grid, mid_point = numeric_series()

    onto_midpoints = TimeSeries.interpolate_to_keys(grid, mid_point)
    assert onto_midpoints == mid_point
    assert onto_midpoints._first == mid_point._first
    assert onto_midpoints._last == mid_point._last
    assert onto_midpoints.first == mid_point.first
    assert onto_midpoints.last == mid_point.last

    onto_nodes = TimeSeries.interpolate_to_keys(mid_point, grid)
    assert onto_nodes._first == grid.first
    assert onto_nodes._last == grid.last
    assert onto_nodes.first == 0.0
    assert onto_nodes.last == 0.0
def test_create_timeseries():
    """Direct construction: an empty series and a single-entry series."""
    empty = TimeSeries()
    assert len(empty) == 0
    assert empty._first is None
    assert empty.first is None
    assert empty._last is None
    assert empty.last is None

    singleton = TimeSeries(1, 0)
    assert singleton
    assert singleton[decimal.Decimal(0)] == 1
    assert len(singleton) == 1
    # Single entry: first and last coincide for both keys and values.
    assert singleton.first == 1 and singleton.last == 1
    assert singleton._first == 0 and singleton._last == 0
def wrapped(self, state):
    """Discretize the continuous gradient onto the control basis.

    Evaluates the wrapped gradient function, builds a time series on the
    midpoint grid, interpolates it to the nodal time grid, and projects
    it onto the reduced piecewise linear basis.  Both the continuous and
    the discretized gradient norms are logged for monitoring.

    Returns the discrete gradient coefficients.
    """
    continuous_gradient = function(self, state)
    gradient_time_series = TimeSeries.from_list(
        continuous_gradient, self.midpoint_grid
    )
    # Interpolate from the staggered (midpoint) grid onto the nodal grid.
    du = TimeSeries.interpolate_to_keys(gradient_time_series, self.time_grid)
    discrete_grad = self.basis.discretize(du.values())

    gradient_norm = (gradient_time_series * du).integrate()
    log.info(f"gradient norm: {gradient_norm}")

    discrete_grad_norm = (
        gradient_time_series
        * TimeSeries.from_list(
            self.basis.extrapolate(discrete_grad), self.time_grid
        )
    ).integrate()
    # f-string for consistency with the gradient-norm log line above
    # (was str.format).
    log.info(f"discrete gradient norm: {discrete_grad_norm}")
    return discrete_grad
def test_from_parameters():
    """Series built from (start, stop, step) covers every grid node,
    and the staggered variant contains exactly the interior midpoints."""
    start = decimal.Decimal("1")
    stop = decimal.Decimal("2")
    step = decimal.Decimal("0.1")

    series = TimeSeries.from_parameters(start, stop, step)
    assert series.first == 0
    assert series.last == 0
    # All eleven nodes start, start+step, ..., stop are present with value 0.
    for k in range(11):
        assert series[start + k * step] == 0
    assert series[stop] == 0

    half = decimal.Decimal("0.5") * step
    staggered = TimeSeries.from_parameters(start + half, stop - half, step)
    assert len(staggered) == 10
    assert start + half in staggered
    assert stop - half in staggered
def test_create_from_dict():
    """from_dict preserves length, per-key values, and endpoints."""
    for source in dicts():
        series = TimeSeries.from_dict(source)
        assert len(series) == len(source)
        # Lookup works with both the raw key and its Decimal form.
        for key in source:
            assert series[decimal.Decimal(key)] == source[key]
            assert series[key] == source[key]
        lo, hi = min(source), max(source)
        assert series.first == source[lo]
        assert series.last == source[hi]
        assert series._first == lo
        assert series._last == hi
from firecrest.misc.time_storage import PiecewiseLinearBasis, TimeSeries
import matplotlib.pyplot as plt
import numpy as np
from decimal import Decimal

# Demo: project a cubic signal onto a piecewise linear basis and plot
# the original, its projection, and the individual basis functions.
timer = {"dt": Decimal("0.01"), "T": Decimal("1.2")}


def _signal(k):
    """Cubic test signal 10 + 40*t*(t - 0.5)*(t - 1) at node t = k*dt."""
    t = k * float(timer["dt"])
    return 10 + 40.0 * t * (t - 0.5) * (t - 1)


n_steps = int(timer["T"] / Decimal(timer["dt"]))
func = TimeSeries.from_dict(
    {Decimal(k) * Decimal(timer["dt"]): _signal(k) for k in range(n_steps + 1)}
)

x = np.array([float(key) for key in func.keys()])
y = np.array(list(func.values()))

basis = PiecewiseLinearBasis(x, 0.4)

plt.plot(x, y, "k")
plt.plot(x, basis.project(y), "--", color="k")
for basis_function in basis.basis:
    plt.plot(x, basis_function, "-.", color="gray")
plt.grid(True)
plt.show()
def test_multiply():
    """Pointwise product keeps the key set and squares the values."""
    series, mid_point = numeric_series()

    squared = series * series
    assert len(squared) == len(series)
    assert squared == TimeSeries.from_dict({key: key ** 2 for key in series})

    # Multiplying a midpoint series by the nodal series matches squaring
    # the midpoint series itself.
    assert mid_point * series == mid_point * mid_point
def test_create_from_list():
    """Round-trip: values() fed back through from_list reproduces the series."""
    for template in numeric_series() + dicts():
        rebuilt = TimeSeries.from_list(template.values(), template)
        assert rebuilt == template
def numeric_series():
    """Return two fixtures: an integer-keyed nodal series (keys 0..9)
    and the corresponding midpoint series (keys 0.5..8.5)."""
    nodal = TimeSeries.from_dict({k: k for k in range(10)})
    staggered = TimeSeries.from_dict({k + 0.5: k + 0.5 for k in range(9)})
    return [nodal, staggered]