def test_simulation(self):
    output_names = self.ideal.columns.tolist()

    model = Model(self.fmu_path)
    model.inputs_from_df(self.inp)
    model.specify_outputs(output_names)
    model.parameters_from_df(self.known_df)

    # Simulate twice, with and without resetting the FMU
    res1 = model.simulate(reset=True)
    res2 = model.simulate(reset=False)

    # Both runs should yield identical results
    self.assertTrue(res1.equals(res2), "Dataframes not equal")

    # The result should have as many rows as the input
    input_size = self.inp.index.size
    result_size = res1.index.size
    self.assertTrue(input_size == result_size,
                    "Result size different than input size")
class Model(object):
    """
    Model for static parameter estimation.
    """

    def __init__(self, fmu_path, opts=None):
        self.logger = logging.getLogger(type(self).__name__)

        self.model = FmiModel(fmu_path, opts=opts)

        # Log level
        try:
            self.model.model.set_log_level(FMI_WARNING)
        except AttributeError as e:
            self.logger.error(str(e))
            self.logger.error('Proceeding with standard log level...')

        # Simulation count
        self.sim_count = 0

    def set_input(self, df, exclude=list()):
        """
        Sets inputs.

        :param df: DataFrame, time given in seconds
        :param exclude: list of strings, names of columns to be excluded
        :return: None
        """
        self.model.inputs_from_df(df, exclude)

    def set_param(self, df):
        """
        Sets parameters. It is possible to set only a subset
        of model parameters.

        :param df: DataFrame with a header and a single row of data
        :return: None
        """
        self.model.parameters_from_df(df)

    def set_outputs(self, outputs):
        """
        Sets output variables.

        :param outputs: list of strings
        :return: None
        """
        self.model.specify_outputs(outputs)

    def simulate(self, com_points=None):
        # TODO: com_points should be adjusted to the number of samples
        self.sim_count += 1
        self.info('Simulation count = ' + str(self.sim_count))
        return self.model.simulate(com_points=com_points)

    def info(self, txt):
        class_name = self.__class__.__name__
        if VERBOSE:
            if isinstance(txt, str):
                print('[' + class_name + '] ' + txt)
            else:
                print('[' + class_name + '] ' + repr(txt))
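
# --- Usage sketch (illustrative, not part of the library) -------------------
# A minimal example of driving the wrapper above, assuming an FMU and an input
# CSV are available. 'model.fmu', 'input.csv' and the output name 'y' are
# hypothetical placeholders; the input index is time in seconds.
if __name__ == '__main__':
    import pandas as pd

    inp = pd.read_csv('input.csv').set_index('time')          # hypothetical input file
    par = pd.DataFrame({'a': [3.0], 'b': [1.5]}, index=[0])   # one-row parameter frame

    m = Model('model.fmu')     # hypothetical FMU path
    m.set_input(inp)
    m.set_param(par)
    m.set_outputs(['y'])
    res = m.simulate()         # DataFrame with simulated outputs
    print(res.head())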
def _get_model_instance(self, fmu_path, inputs, known_pars, est,
                        output_names):
    self.logger.debug("Getting model instance...")
    self.logger.debug(f"inputs = {inputs}")
    self.logger.debug(f"known_pars = {known_pars}")
    self.logger.debug(f"est = {est}")
    self.logger.debug(f"estpars_2_df(est) = {estpars_2_df(est)}")
    self.logger.debug(f"output_names = {output_names}")

    # Initialize the model with inputs, known parameters
    # and the current estimates
    model = Model(fmu_path)
    model.inputs_from_df(inputs)
    model.parameters_from_df(known_pars)
    model.parameters_from_df(estpars_2_df(est))
    model.specify_outputs(output_names)

    self.logger.debug(f"Model instance initialized: {model}")
    self.logger.debug(f"Model instance initialized: {model.model}")

    # Test simulation to verify the model instance
    res = model.simulate()
    self.logger.debug(f"Test result: {res}")

    return model
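# Note: estpars_2_df() is assumed above to flatten the current estimates into a
# one-row DataFrame accepted by parameters_from_df(). A rough sketch of such a
# helper is given below for orientation only; the attribute names 'name' and
# 'value' are assumptions, not the verified API of the estimation classes.
#
#     def estpars_2_df(estpars):
#         df = pd.DataFrame(index=[0])
#         for p in estpars:
#             df[p.name] = p.value
#         return df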
def validate(self, vp=None):
    """
    Performs a simulation with the estimated parameters over the
    previously selected validation period. A different period can be
    chosen with the `vp` argument. A `vp` passed to this method does
    not override the validation period chosen when this class was
    instantiated.

    Parameters
    ----------
    vp: tuple or None
        Validation period given as a tuple of start and stop time
        in seconds.

    Returns
    -------
    dict
        Validation error, keys: 'tot', '<var1>', '<var2>', ...
    pandas.DataFrame
        Simulation result
    """
    # Get estimates
    est = self.final
    est.index = [0]  # Reset index (needed by model.set_param())

    self.logger.info("Validation of parameters: {}".format(
        str(est.iloc[0].to_dict())))

    # Slice data
    if vp is None:
        start, stop = self.vp[0], self.vp[1]
    else:
        start, stop = vp[0], vp[1]
    inp_slice = self.inp.loc[start:stop]
    ideal_slice = self.ideal.loc[start:stop]

    # Initialize IC parameters and add to known
    if self.ic_param:
        for par in self.ic_param:
            ic = ideal_slice[self.ic_param[par]].iloc[0]
            self.known[par] = ic

    # Initialize model
    model = Model(self.fmu_path)
    model.set_input(inp_slice)
    model.set_param(est)
    model.set_param(self.known)
    model.set_outputs(list(self.ideal.columns))

    # Simulate and get error
    try:
        result = model.simulate()
    except Exception as e:
        msg = "Problem found inside FMU. Did you set all parameters?"
        self.logger.error(str(e))
        self.logger.error(msg)
        raise e

    err = modestpy.estim.error.calc_err(result, ideal_slice)

    # Create validation plot
    ax = plot_comparison(result, ideal_slice, f=None)
    fig = figures.get_figure(ax)
    fig.set_size_inches(Estimation.FIG_SIZE)
    fig.savefig(os.path.join(self.workdir, "validation.png"),
                dpi=Estimation.FIG_DPI)

    # Remove temp dirs
    self._clean()

    # Return
    return err, result
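# --- Usage sketch (illustrative) ---------------------------------------------
# How validate() is typically called once estimation has finished. 'session' is
# a hypothetical, already estimated instance of this class; times are seconds.
#
#     err, res = session.validate()                # default validation period
#     err, res = session.validate(vp=(0, 86400))   # explicit period: first day
#     print(err['tot'])                            # total validation error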
inp = inp.set_index('time')
# inp.to_csv(os.path.join('examples', 'sin', 'resources', 'input.csv'))

# True parameters
a = 3.
b = 1.5
par = pd.DataFrame(index=[0])
par['a'] = a
par['b'] = b
# par.to_csv(os.path.join('examples', 'sin', 'resources',
#                         'true_parameters.csv'), index=False)

model.inputs_from_df(inp)
model.parameters_from_df(par)
model.specify_outputs(['y'])
ideal = model.simulate(com_points=inp.index.size - 1)
# ideal.to_csv(os.path.join('examples', 'sin', 'resources', 'ideal.csv'))

# Estimation ==============================================

# Working directory
workdir = os.path.join('examples', 'sin', 'workdir')
if not os.path.exists(workdir):
    os.mkdir(workdir)
assert os.path.exists(workdir), "Work directory does not exist"

# Estimated and known parameters
known = {}
est = {'a': (7., 0., 8.), 'b': (2.0, 1., 4.)}

# Session
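# A sketch of how the session section could continue. The Estimation
# constructor arguments mirror the objects prepared above, but the exact
# signature, the 'fmu_path' variable (assumed to be defined earlier in the
# script) and the Estimation import are assumptions rather than verified API.
session = Estimation(workdir, fmu_path, inp, known, est, ideal)
estimates = session.estimate()
err, res = session.validate()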