def inner(*args, **kwargs):
    """Call `func` and print its name and total run time.

    Timing and printing are skipped only when the caller passes
    ``print_time=False``; the keyword is read with ``kwargs.get`` and is
    forwarded to `func` unchanged.
    """
    # Evaluate the flag once; the original tested `is not False` twice.
    print_time = kwargs.get('print_time') is not False
    if print_time:
        timer = TicToc()
        timer.tic()
    output = func(*args, **kwargs)
    if print_time:
        time = str(timedelta(seconds=round(timer.elapsed_time)))
        # `func.__name__` is the robust way to get the name; the original
        # parsed `str(func)` (e.g. "<function foo at 0x...>") which breaks
        # for callables whose repr has a different shape.
        name = func.__name__
        print(f'function `{name}`')
        print(f'Total time: {time}.')
    return output
def evaluate(self, thorough=True, jit=True, notify=False):
    """
    Evaluate each loaded sample and store the metric values in `table`.

    Parameters
    ----------
    thorough : bool
        When True, every sample triggers a full-system simulation;
        when False, only the parts of the system affected by the
        sample are re-simulated.
    jit : bool
        When True, JIT compile simulation functions with Numba first.
    notify=False : bool, optional
        When True, print the elapsed time after every sample evaluation.
    """
    if jit:
        speed_up()
    loaded_samples = self._samples
    if loaded_samples is None:
        raise RuntimeError('must load samples before evaluating')
    # Pick the per-sample simulation strategy up front.
    if thorough:
        run_sample = self._evaluate_sample_thorough
    else:
        run_sample = self._evaluate_sample_smart
    table = self.table
    if notify:
        from biosteam.utils import TicToc
        timer = TicToc()
        timer.tic()
        # `count` is a one-element list so the closure can mutate it;
        # printing the list itself renders as e.g. "[12]".
        def run_and_report(sample, count=[0]):
            count[0] += 1
            values = run_sample(sample)
            print(f"{count} Elapsed time: {timer.elapsed_time:.0f} sec")
            return values
        runner = run_and_report
    else:
        runner = run_sample
    # Evaluate samples in index order and write all metric columns at once.
    results = []
    for i in self._index:
        results.append(runner(loaded_samples[i]))
    table[self._metric_indices] = results
def evaluate_across_coordinate(self, name, f_coordinate, coordinate, *,
                               xlfile=None, notify=True,
                               multi_coordinate=False):
    """
    Evaluate across coordinate and save sample metrics.

    Parameters
    ----------
    name : str or tuple[str]
        Name of the coordinate.
    f_coordinate : function
        Should change the state of the system given the coordinate.
    coordinate : array
        Coordinate values.
    xlfile : str, optional
        Name of file to save. File must end with ".xlsx".
    notify=True : bool, optional
        If True, notify elapsed time after each coordinate evaluation.
    multi_coordinate=False : bool, optional
        If True, each coordinate value is unpacked into `f_coordinate`
        (``f_coordinate(*x)`` instead of ``f_coordinate(x)``).

    Returns
    -------
    dict
        Maps each metric index to an (N_samples, N_points) array of values.
    """
    table = self.table
    N_samples, N_parameters = table.shape
    N_points = len(coordinate)
    # Initialize data containers: one (N_samples, N_points) array per metric.
    metric_data = {}
    def new_data(key, dct=metric_data):
        data = np.zeros([N_samples, N_points])
        dct[key] = data
        return data
    for i in self.metrics: new_data(i.index)
    # Initialize timer
    if notify:
        timer = TicToc()
        timer.tic()
        # NOTE: closes over the loop variable `n` below; only called
        # inside the loop, after `n` is bound, so this is safe.
        def evaluate():
            self.evaluate()
            print(f"[{n}] Elapsed time: {timer.elapsed_time:.0f} sec")
    else:
        evaluate = self.evaluate
    for n, x in enumerate(coordinate):
        # Set the system state for this coordinate, then re-evaluate samples.
        f_coordinate(*x) if multi_coordinate else f_coordinate(x)
        evaluate()
        # Copy this coordinate's metric column out of the (mutated) table.
        for metric in metric_data:
            metric_data[metric][:, n] = table[metric]
    if xlfile:
        if multi_coordinate:
            columns = pd.MultiIndex.from_tuples(coordinate, names=name)
        else:
            columns = pd.Index(coordinate, name=name)
        # Save data to excel, one sheet per metric; the DataFrame buffer is
        # reused and overwritten in-place for each metric.
        data = pd.DataFrame(data=np.zeros([N_samples, N_points]),
                            columns=columns)
        with pd.ExcelWriter(xlfile) as writer:
            for i, metric in zip(self.metrics, metric_data):
                data[:] = metric_data[metric]
                data.to_excel(writer, sheet_name=i.name)
    return metric_data
import numpy as np  # was missing: `np.random.seed` is used below
import pandas as pd
from biosteam.utils import TicToc
from biosteam.plots import plot_montecarlo_across_coordinate
from lactic import models

# Percentiles reported for the Monte Carlo metric distributions.
percentiles = [0, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 1]


# %%

# =============================================================================
# Evaluating across internal rate of return
# =============================================================================

# Initiate a timer
timer = TicToc('timer')
timer.tic()

model = models.model_IRR

'''
Note:
    Not using `evaluate_across_coordinate` function as changing IRR
    does not affect the system, using IRR as the metrics for evaluation
    will save considerable time.
'''

'''Evaluate'''
# Fixed seed so the Latin-hypercube sample set is reproducible.
np.random.seed(3221)
N_simulation = 100 # 1000
samples = model.sample(N=N_simulation, rule='L')
def evaluate_across_coordinate(self, name, f_coordinate, coordinate, *,
                               xlfile=None, notify=0,
                               notify_coordinate=True, re_evaluate=True,
                               multi_coordinate=False):
    """
    Evaluate across coordinate and save sample metrics.

    Parameters
    ----------
    name : str or tuple[str]
        Name of the coordinate.
    f_coordinate : function
        Should change the state of the system given the coordinate.
    coordinate : array
        Coordinate values.
    xlfile : str, optional
        Name of file to save. File must end with ".xlsx".
    notify=0 : int, optional
        Passed through to `evaluate`; if 1 or greater, notify elapsed
        time after the given number of sample evaluations.
    notify_coordinate=True : bool, optional
        If True, notify elapsed time after each coordinate evaluation.
    re_evaluate=True : bool, optional
        If True, re-evaluate all samples at each coordinate; if False,
        only read the current `table` values at each coordinate.
    multi_coordinate=False : bool, optional
        If True, each coordinate value is unpacked into `f_coordinate`
        (``f_coordinate(*x)`` instead of ``f_coordinate(x)``).

    Returns
    -------
    dict
        Maps each metric index to an (N_samples, N_points) array of values.
    """
    if self._samples is None:
        raise RuntimeError('must load samples before evaluating')
    table = self.table
    N_samples, N_parameters = table.shape
    N_points = len(coordinate)
    # Initialize data containers: one (N_samples, N_points) array per metric.
    metric_indices = var_indices(self.metrics)
    shape = (N_samples, N_points)
    metric_data = {i: np.zeros(shape) for i in metric_indices}
    f_evaluate = self.evaluate
    # Initialize timer
    if re_evaluate:
        if notify_coordinate:
            from biosteam.utils import TicToc
            timer = TicToc()
            timer.tic()
            # NOTE: closes over the loop variable `n` below; only called
            # inside the loop, after `n` is bound.
            def evaluate():
                f_evaluate(notify=notify)
                print(
                    f"[Coordinate {n}] Elapsed time: {timer.elapsed_time:.0f} sec"
                )
        else:
            # NOTE(review): in this branch `notify` is NOT forwarded to
            # `f_evaluate` — per-sample notification is silently disabled
            # when notify_coordinate=False; confirm this is intended.
            evaluate = f_evaluate
    else:
        # No re-evaluation: just snapshot the current table per coordinate.
        evaluate = lambda: None
    for n, x in enumerate(coordinate):
        # Set the system state for this coordinate, then (optionally)
        # re-evaluate all samples.
        f_coordinate(*x) if multi_coordinate else f_coordinate(x)
        evaluate()
        # Copy this coordinate's metric column out of the (mutated) table.
        for metric in metric_data:
            metric_data[metric][:, n] = table[metric]
    if xlfile:
        if multi_coordinate:
            columns = pd.MultiIndex.from_tuples(coordinate, names=name)
        else:
            columns = pd.Index(coordinate, name=name)
        # Save data to excel, one sheet per metric; the DataFrame buffer is
        # reused and overwritten in-place for each metric.
        data = pd.DataFrame(data=np.zeros([N_samples, N_points]),
                            columns=columns)
        with pd.ExcelWriter(xlfile) as writer:
            for metric in self.metrics:
                data[:] = metric_data[metric.index]
                data.to_excel(writer, sheet_name=metric.short_description)
    return metric_data
def evaluate(self, thorough=True, notify=0, file=None, autosave=0,
             autoload=False):
    """
    Evaluate metrics over the loaded samples and save values to `table`.

    Parameters
    ----------
    thorough : bool
        If True, simulate the whole system with each sample. If False,
        simulate only the affected parts of the system and skip
        simulation for repeated states.
    notify=0 : int, optional
        If 1 or greater, notify elapsed time after the given number of
        sample evaluations.
    file : str, optional
        Name of file to save/load pickled evaluation results.
    autosave : int, optional
        If 1 or greater, save pickled evaluation results after the given
        number of sample evaluations.
    autoload : bool, optional
        Whether to load pickled evaluation results from file.

    Warning
    -------
    Any changes made to either the model or the samples will not be
    accounted for when autoloading and may lead to misleading results.
    """
    samples = self._samples
    if samples is None:
        raise RuntimeError('must load samples before evaluating')
    evaluate_sample = self._evaluate_sample
    table = self.table
    if notify:
        from biosteam.utils import TicToc
        timer = TicToc()
        timer.tic()
        # `count` is a one-element list so the closure can mutate it;
        # printing the list itself renders as e.g. "[12] Elapsed time: ...".
        count = [0]
        def evaluate(sample, thorough, count=count):
            count[0] += 1
            values = evaluate_sample(sample, thorough)
            # Report only every `notify`-th evaluation.
            if not count[0] % notify:
                print(
                    f"{count} Elapsed time: {timer.elapsed_time:.0f} sec")
            return values
    else:
        evaluate = evaluate_sample
    if autoload:
        try:
            # Resume from a previous run: `number` samples were already
            # evaluated and their results are in `values`.
            with open(file, "rb") as f:
                number, values, table_index, table_columns = pickle.load(f)
            if (table_index != table.index).any() or \
               (table_columns != table.columns).any():
                raise ValueError(
                    'table layout does not match autoload file')
            del table_index, table_columns
            # Only the not-yet-evaluated tail remains to be run.
            index = self._index[number:]
        # NOTE(review): bare `except:` makes autoload best-effort (any
        # failure falls back to a fresh run), but it also swallows
        # KeyboardInterrupt/SystemExit — consider `except Exception:`.
        except:
            number = 0
            index = self._index
            values = [None] * len(index)
        else:
            # Keep the notification counter in sync with the resume point.
            if notify:
                count[0] = number
    else:
        number = 0
        index = self._index
        values = [None] * len(index)
    if autosave:
        # Snapshot the table layout once so saved files can be validated
        # on a later autoload.
        layout = table.index, table.columns
        for number, i in enumerate(index, number):
            values[i] = evaluate(samples[i], thorough)
            # Checkpoint every `autosave`-th evaluation.
            if not number % autosave:
                obj = (number, values, *layout)
                with open(file, 'wb') as f:
                    pickle.dump(obj, f)
    else:
        for i in index:
            values[i] = evaluate(samples[i], thorough)
    table[var_indices(self._metrics)] = values
@author: yalinli_cabbi
"""


# %%

# =============================================================================
# Simulate pretreatment efficacy
# =============================================================================

import numpy as np
import pandas as pd
from biosteam.utils import TicToc

# Timer to report how long the efficacy simulation takes.
timer_efficacy = TicToc('timer_efficacy')
timer_efficacy.tic()

# Lignin mass fractions to sweep: 0 to 0.40 in steps of 0.01.
lignin = np.arange(0, 0.41, 0.01)
# Constant arrays of ones/zeros, one entry per Monte Carlo draw —
# presumably used to clip conversions to [0, 1]; TODO confirm against
# how conversion_max/conversion_min are used later in the file.
conversion_max = np.ones(1000)
conversion_min = np.zeros(1000)

# Liquid hot water (LHW)
# Fixed seed so the 1000 Monte Carlo draws below are reproducible.
np.random.seed(3221)
intercept_LHW_1 = np.random.normal(0.84, 0.04, 1000)
intercept_LHW_2 = np.random.normal(1.32, 0.07, 1000)
slope_LHW_2 = np.random.normal(-2.33, 0.33, 1000)
df_LHW = pd.DataFrame()

# Acid
intercept_acid = np.random.normal(1.04, 0.04, 1000)