def __call__(self, x, *args):
    """Evaluate the model error for a scaled parameter vector.

    :param x: iterable of parameter values scaled to [0, 1]; rescaled
        to each estimated parameter's [lo, hi] range before simulation
    :param args: ignored (present for solver-callback compatibility)
    :return: total error (``'tot'`` entry of ``calc_err`` result)
    """
    self.logger.debug(f"x = {x}")

    # Instantiate the model lazily, on the first call only
    if self.model is None:
        self.model = self._get_model_instance(
            self.fmu_path, self.inp, self.known, self.est,
            self.output_names)
        # Use the instance logger consistently (was: root `logging`)
        self.logger.debug(f"Model instance returned: {self.model}")

    # Updated parameters are stored in x. Need to update the model.
    # NOTE: the original wrapped this loop in
    # `except TypeError as e: raise e`, which is a no-op re-raise that
    # only shortened the traceback — removed.
    parameters = pd.DataFrame(index=[0])
    for v, ep in zip(x, self.est):
        parameters[ep.name] = self.rescale(v, ep.lo, ep.hi)

    self.logger.debug(f"parameters = {parameters}")
    self.model.parameters_from_df(parameters)
    self.logger.debug(f"est: {self.est}")
    self.logger.debug(f"parameters: {parameters}")
    self.logger.debug(f"model: {self.model}")

    self.logger.debug("Calling simulation...")
    result = self.model.simulate()
    self.logger.debug(f"result: {result}")
    err = calc_err(result, self.ideal, ftype=self.ftype)["tot"]

    # Update best error and result
    if err < self.best_err:
        self.best_err = err
        self.res = result

    return err
def calculate(self):
    """Simulate this individual and store its result and error.

    Resets any previous result, pushes this individual's estimated
    parameters into the shared model instance, simulates, and computes
    the error against ``self.ideal``.
    """
    # Just in case, individual result and error
    # are cleared before simulation
    self.reset()
    # Important to set estimated parameters just before simulation,
    # because all individuals share the same model instance
    self.model.set_param(self.est_par_df)
    # Simulation
    self.result = self.model.simulate(Individual.COM_POINTS)
    # Make sure the returned result is not empty
    # (idiomatic truthiness check instead of `... is False`)
    assert not self.result.empty, \
        'Empty result returned from simulation... (?)'
    # Calculate error (lazy %-style args avoid formatting when
    # debug logging is disabled)
    self.logger.debug("Calculating error (%s) in individual %s",
                      self.ftype, self.genes)
    self.error = calc_err(self.result, self.ideal, ftype=self.ftype)
def objective(x):
    """Returns model error.

    NOTE(review): relies on enclosing-scope names (``self``, ``SCIPY``,
    ``pd``, ``calc_err``) — this is a closure defined inside an
    estimation method.
    """
    # Updated parameters are stored in x. Need to update the model.
    self.logger.debug('objective(x={})'.format(x))
    parameters = pd.DataFrame(index=[0])
    try:
        for v, ep in zip(x, self.est):
            parameters[ep.name] = SCIPY.rescale(v, ep.lo, ep.hi)
    except TypeError:
        # Log through the logger instead of a stray print(), and
        # re-raise bare to keep the original traceback intact.
        self.logger.error('Cannot rescale x = {}'.format(x))
        raise
    self.model.set_param(parameters)
    result = self.model.simulate()
    err = calc_err(result, self.ideal, ftype=self.ftype)['tot']
    # Update best error and result
    if err < self.best_err:
        self.best_err = err
        self.res = result
    return err
def estimate(self):
    """Run a SciPy minimization over the estimated parameters.

    Scales the initial guess to [0, 1], minimizes the model error with
    ``scipy.optimize.minimize``, records per-iteration progress in
    ``SCIPY.TMP_SUMMARY``, and returns the rescaled estimates.

    :return: one-row DataFrame with the estimated parameter values
    """
    # Initial error
    initial_result = self.model.simulate()
    self.res = initial_result
    initial_error = calc_err(initial_result, self.ideal,
                             ftype=self.ftype)['tot']
    self.best_err = initial_error

    def objective(x):
        """Returns model error"""
        # Updated parameters are stored in x. Need to update the model.
        self.logger.debug('objective(x={})'.format(x))
        parameters = pd.DataFrame(index=[0])
        try:
            for v, ep in zip(x, self.est):
                parameters[ep.name] = SCIPY.rescale(v, ep.lo, ep.hi)
        except TypeError:
            # Log via the logger (was a stray print) and re-raise bare
            # to preserve the original traceback.
            self.logger.error('Cannot rescale x = {}'.format(x))
            raise
        self.model.set_param(parameters)
        result = self.model.simulate()
        err = calc_err(result, self.ideal, ftype=self.ftype)['tot']
        # Update best error and result
        if err < self.best_err:
            self.best_err = err
            self.res = result
        return err

    # Initial guess (scaled to [0, 1])
    x0 = [SCIPY.scale(x.value, x.lo, x.hi) for x in self.est]
    self.logger.debug('SciPy x0 = {}'.format(x0))

    # Save initial guess in summary
    row = pd.DataFrame(index=[0])
    for x, c in zip(x0, SCIPY.TMP_SUMMARY.columns):
        row[c] = x
    row[SCIPY.ERR] = np.nan
    row[SCIPY.METHOD] = SCIPY.NAME
    # DataFrame.append() was removed in pandas 2.0 -> use pd.concat
    SCIPY.TMP_SUMMARY = pd.concat([SCIPY.TMP_SUMMARY, row],
                                  ignore_index=True)

    # Parameter bounds (solver works in the scaled [0, 1] space)
    b = [(0., 1.) for x in self.est]

    out = minimize(objective, x0, bounds=b, constraints=[],
                   method=self.solver, callback=SCIPY._callback,
                   options=self.options)

    outx = [SCIPY.rescale(x, ep.lo, ep.hi)
            for x, ep in zip(out.x.tolist(), self.est)]
    self.logger.debug('SciPy x = {}'.format(outx))

    # Update summary
    self.summary = SCIPY.TMP_SUMMARY.copy()
    self.summary.index += 1  # Adjust iteration counter
    self.summary.index.name = SCIPY.ITER  # Rename index
    # Update error: re-evaluate the objective at each recorded point
    self.summary[SCIPY.ERR] = list(map(
        objective,
        self.summary[[x.name for x in self.est]].values))
    for ep in self.est:
        name = ep.name
        # list(map(...)) - for Python 2/3 compatibility
        self.summary[name] = list(map(
            lambda x: SCIPY.rescale(x, ep.lo, ep.hi),
            self.summary[name]))  # Rescale
    # Add solver name to column `method`
    self.summary[SCIPY.METHOD] += '[' + self.solver + ']'

    # Reset temp placeholder
    SCIPY.TMP_SUMMARY = pd.DataFrame(columns=self.summary_cols)

    # Return DataFrame with estimates
    par_vec = outx
    par_df = pd.DataFrame(columns=[x.name for x in self.est], index=[0])
    for col, x in zip(par_df.columns, par_vec):
        par_df[col] = x

    return par_df
def _search(self):
    """
    Pattern _search loop.

    Repeatedly perturbs each estimated parameter in both directions,
    keeps any improving point (orthogonal search), and shrinks/grows
    the relative step until one of the stop criteria fires.

    :return: DataFrame with estimates
    """
    initial_estimates = copy.deepcopy(self.est)
    best_estimates = copy.deepcopy(initial_estimates)
    current_estimates = copy.deepcopy(initial_estimates)

    initial_result = self.model.simulate()
    self.res = initial_result
    initial_error = calc_err(initial_result, self.ideal,
                             ftype=self.ftype)["tot"]
    best_err = initial_error

    # First line of the summary
    summary = estpars_2_df(current_estimates)
    summary[PS.ERR] = [initial_error]

    # Counters
    n_try = 0
    iteration = 0

    # Search loop
    while ((n_try < self.try_lim)
           and (iteration < self.max_iter)
           and (self.rel_step > self.tol)):
        iteration += 1
        self.logger.info("Iteration no. {} "
                         "=========================".format(iteration))
        improved = False

        # Iterate over all parameters
        for par in current_estimates:
            for sign in ["+", "-"]:
                # Calculate new parameter
                new_par = self._get_new_estpar(par, self.rel_step, sign)
                # Simulate and calculate error
                self.model.set_param(estpars_2_df([new_par]))
                result = self.model.simulate()
                err = calc_err(result, self.ideal,
                               ftype=self.ftype)["tot"]
                # Save point if solution improved
                if err < best_err:
                    self.res = result
                    best_err = err
                    # Shortest path search
                    # best_estimates = PS._replace_par(best_estimates,
                    #                                  new_par)
                    # Orthogonal search
                    best_estimates = PS._replace_par(
                        current_estimates, new_par)
                    improved = True

        # Reset model parameters
        self.model.set_param(estpars_2_df(current_estimates))

        # Go to the new point
        current_estimates = copy.deepcopy(best_estimates)

        # Update summary
        current_estimates_df = estpars_2_df(current_estimates)
        current_estimates_df.index = [iteration]
        summary = pd.concat([summary, current_estimates_df])
        # .loc instead of chained indexing (summary[PS.ERR][iteration]),
        # which only warns and may not write under pandas copy-on-write
        summary.loc[iteration, PS.ERR] = best_err

        if not improved:
            n_try += 1
            self.rel_step /= PS.STEP_DEC
            self.logger.info("Solution did not improve...")
            self.logger.debug("Step reduced to {}".format(self.rel_step))
            self.logger.debug("Tries left: {}"
                              .format(self.try_lim - n_try))
        else:
            # Solution improved, reset n_try counter
            n_try = 0
            self.rel_step *= PS.STEP_INC
            if self.rel_step > PS.STEP_CEILING:
                self.rel_step = PS.STEP_CEILING
            self.logger.info("Solution improved")
            self.logger.debug("Current step is {}".format(self.rel_step))
            self.logger.info("New error: {}".format(best_err))
            self.logger.debug("New estimates:\n{}".format(
                estpars_2_df(current_estimates)))

    # Reorder columns in summary (error column goes last)
    s_cols = summary.columns.tolist()
    s_cols.remove(PS.ERR)
    s_cols.append(PS.ERR)
    summary = summary[s_cols]

    # Start iterations from 1
    summary.index += 1
    # Rename index in summary
    summary.index = summary.index.rename(PS.ITER)
    # Add column with method name
    summary[PS.METHOD] = PS.NAME

    # Print summary
    reason = "Unknown"
    if n_try >= self.try_lim:
        reason = "Maximum number of tries to decrease the step reached"
    elif iteration >= self.max_iter:
        reason = "Maximum number of iterations reached"
    elif self.rel_step <= self.tol:
        # (typo fix: "stoping" -> "stopping")
        reason = "Relative step smaller than the stopping criterion"
    self.logger.info("Pattern search finished. Reason: {}".format(reason))
    self.logger.info("Summary:\n{}".format(summary))

    # Final assignments
    self.summary = summary
    final_estimates = estpars_2_df(best_estimates)

    return final_estimates