def run(filename, input_path='', ncpu=1, verbose=20, **kwargs):
    """
    Run one or more study cases found by ``filename``.

    Parameters
    ----------
    filename : str
        Case file name (or pattern) passed to ``_find_cases``.
    input_path : str, optional
        Input search path.
    ncpu : int, optional
        Number of CPU cores to use when multiple cases are found.
    verbose : int, optional
        Stream logging level applied in interactive sessions.
    kwargs
        Forwarded to ``run_case`` / ``_run_multiprocess``.

    Returns
    -------
    System or None
        The created ``System`` for a single case; ``None`` when multiple
        cases are run in parallel or when no case is found.
    """
    if is_interactive():
        config_logger(file=False, stream_level=verbose)

    cases = _find_cases(filename, input_path)
    system = None

    t0, _ = elapsed()
    if len(cases) == 1:
        system = run_case(cases[0], **kwargs)
    elif len(cases) > 1:
        # FIX: was an unconditional `else`, which also launched
        # multiprocessing for an empty case list (e.g., file not found).
        _run_multiprocess(cases, ncpu, **kwargs)

    t0, s0 = elapsed(t0)
    if len(cases) == 1:
        logger.info(f'-> Single process finished in {s0}.')
    elif len(cases) >= 2:
        logger.info(f'-> Multiple processes finished in {s0}.')

    return system
def run(self, no_pbar=False, no_summary=False, **kwargs):
    """
    Run time-domain simulation using numerical integration.

    The default method is the Implicit Trapezoidal Method (ITM).

    Parameters
    ----------
    no_pbar : bool
        True to disable progress bar
    no_summary : bool, optional
        True to disable the display of summary

    Returns
    -------
    bool
        ``True`` when the simulation reaches ``config.tf``; ``False`` on an
        unconverged power flow or a terminated run.
    """
    system = self.system
    dae = self.system.dae
    config = self.config
    succeed = False
    resume = False

    # a solved power flow is the prerequisite initial condition
    if system.PFlow.converged is False:
        logger.warning('Power flow not solved. Simulation will not continue.')
        system.exit_code += 1
        return succeed

    # load from csv is provided
    if self.from_csv is not None:
        self.data_csv = self._load_csv(self.from_csv)

    if no_summary is False and (system.dae.t == 0):
        self.summary()

    # only initializing at t=0 allows to continue when `run` is called again.
    if system.dae.t == 0:
        self.init()
    else:
        # resume simulation: recompute the first step size instead of
        # re-initializing models
        resume = True
        logger.debug("Resuming simulation from t=%.4fs.", system.dae.t)
        self._calc_h_first()
        logger.debug("Initial step size for resumed simulation is h=%.4fs.", self.h)

    self.pbar = tqdm(total=100, ncols=70, unit='%', file=sys.stdout, disable=no_pbar)

    # fast-forward the progress bar to the resume point
    if resume:
        perc = round((dae.t - config.t0) / (config.tf - config.t0) * 100, 0)
        self.next_pc = perc + 1
        self.pbar.update(perc)

    # bookkeeping for the quasi-real-time (QRT) pacing below
    self.qrt_start = time.time()
    self.headroom = 0.0

    t0, _ = elapsed()
    while (system.dae.t - self.h < self.config.tf) and (not self.busted):
        # call the user-supplied perturbation function, if any
        if self.callpert is not None:
            self.callpert(dae.t, system)

        step_status = False
        # call the stepping method of the integration method (or data replay)
        if self.data_csv is None:
            step_status = self.itm_step()  # compute for the current step
        else:
            step_status = self._csv_step()

        if step_status:
            dae.store()
            self.streaming_step()

            # check if the next step is critical time
            self.do_switch()
            self.calc_h()
            dae.t += self.h

            # show progress in percentage
            perc = max(min((dae.t - config.t0) / (config.tf - config.t0) * 100, 100), 0)
            if perc >= self.next_pc:
                self.pbar.update(1)
                self.next_pc += 1

            # quasi-real-time check and wait (except for the last step)
            if config.qrt and self.h > 0:
                rt_end = self.qrt_start + self.h * config.kqrt
                # if the ending time has passed
                if time.time() - rt_end > 0:
                    logger.debug('Simulation over-run at t=%4.4g s.', dae.t)
                else:
                    # sleep in short intervals until wall clock catches up
                    self.headroom += (rt_end - time.time())
                    while time.time() - rt_end < 0:
                        time.sleep(1e-4)
                self.qrt_start = time.time()
        else:
            # step did not converge; `calc_h` shrinks the step size and
            # returning 0 means no further reduction is possible
            if self.calc_h() == 0:
                self.err_msg = "Time step reduced to zero. Convergence is not likely."
                self.busted = True
                break

    self.pbar.close()
    delattr(self, 'pbar')  # removed `pbar` so that System object can be serialized

    if self.busted:
        logger.error(self.err_msg)
        logger.error("Simulation terminated at t=%.4f s.", system.dae.t)
        system.exit_code += 1
    elif system.dae.t == self.config.tf:
        succeed = True  # success flag
        system.exit_code += 0
    else:
        system.exit_code += 1

    _, s1 = elapsed(t0)
    logger.info('Simulation completed in %s.', s1)
    if config.qrt:
        logger.debug('QRT headroom time: %.4g s.', self.headroom)

    # need to unpack data in case of resumed simulations.
    system.dae.ts.unpack()

    if not system.files.no_output:
        self.save_output()

    # end data streaming
    if system.config.dime_enabled:
        system.streaming.finalize()

    # load data into `TDS.plotter` in a notebook or in an interactive mode
    if is_notebook() or is_interactive():
        self.load_plotter()

    return succeed
def run(self, no_pbar=False, no_summary=False, **kwargs):
    """
    Run the implicit numerical integration for TDS.

    Parameters
    ----------
    no_pbar : bool
        True to disable progress bar
    no_summary : bool, optional
        True to disable the display of summary

    Returns
    -------
    bool
        ``True`` when the simulation reaches ``config.tf``; ``False``
        otherwise.
    """
    system = self.system
    dae = self.system.dae
    config = self.config
    succeed = False
    resume = False

    # a solved power flow is the prerequisite initial condition
    if system.PFlow.converged is False:
        logger.warning('Power flow not solved. Simulation will not continue.')
        system.exit_code += 1
        return succeed

    if no_summary is False:
        self.summary()

    # only initializing at t=0 allows to continue when `run` is called again.
    if system.dae.t == 0:
        self.init()
    else:
        # resume simulation
        resume = True

    self.pbar = tqdm(total=100, ncols=70, unit='%', file=sys.stdout, disable=no_pbar)

    # fast-forward the progress bar to the resume point
    if resume:
        perc = round((dae.t - config.t0) / (config.tf - config.t0) * 100, 0)
        self.next_pc = perc + 1
        self.pbar.update(perc)

    t0, _ = elapsed()
    while (system.dae.t < self.config.tf) and (not self.busted):
        # call the user-supplied perturbation function, if any
        if self.callpert is not None:
            self.callpert(dae.t, system)

        if self._itm_step():  # simulate the current step
            # store values
            dae.ts.store_txyz(dae.t.tolist(),
                              dae.xy,
                              self.system.get_z(models=system.exist.pflow_tds),
                              )
            # check if the next step is critical time
            self.do_switch()
            self.calc_h()
            dae.t += self.h

            # show progress in percentage
            perc = max(min((dae.t - config.t0) / (config.tf - config.t0) * 100, 100), 0)
            if perc >= self.next_pc:
                self.pbar.update(1)
                self.next_pc += 1
        else:
            # step did not converge; `calc_h` shrinks the step size and
            # returning 0 means no further reduction is possible
            if self.calc_h() == 0:
                self.err_msg = "Time step reduced to zero. Convergence is not likely."
                self.busted = True
                break

    self.pbar.close()
    delattr(self, 'pbar')  # removed `pbar` so that System object can be dilled

    if self.busted:
        logger.error(self.err_msg)
        logger.error(f"Simulation terminated at t={system.dae.t:.4f}.")
        system.exit_code += 1
    elif system.dae.t == self.config.tf:
        succeed = True  # success flag
        system.exit_code += 0
    else:
        system.exit_code += 1

    _, s1 = elapsed(t0)
    logger.info(f'Simulation completed in {s1}.')

    # NOTE(review): saves output unconditionally (no `no_output` guard) and
    # reaches TDS through `system` instead of calling `self.save_output()` —
    # confirm this indirection is intended.
    system.TDS.save_output()

    # load data into `TDS.plotter` in a notebook or in an interactive mode
    if is_notebook() or is_interactive():
        self.load_plotter()

    return succeed
def run(self, no_summary=False, **kwargs):
    """
    Run time-domain simulation using numerical integration.

    The default method is the Implicit Trapezoidal Method (ITM).

    Parameters
    ----------
    no_summary : bool, optional
        True to disable the display of summary

    Returns
    -------
    bool
        ``True`` when the simulation reaches ``config.tf``; ``False`` on an
        unconverged power flow, a violated criterion, or a failed run.
    """
    system = self.system
    dae = self.system.dae
    config = self.config
    succeed = False
    resume = False

    # a solved power flow is the prerequisite initial condition
    if system.PFlow.converged is False:
        logger.warning(
            'Power flow not solved. Simulation will not continue.')
        system.exit_code += 1
        return succeed

    # load from csv is provided
    if self.from_csv is not None:
        self.data_csv = self._load_csv(self.from_csv)

    if no_summary is False and (system.dae.t == 0):
        self.summary()

    # only initializing at t=0 allows to continue when `run` is called again.
    if system.dae.t == 0:
        self.init()
    else:
        # resume simulation: recompute the first step size instead of
        # re-initializing models
        resume = True
        logger.debug("Resuming simulation from t=%.4fs.", system.dae.t)
        self._calc_h_first()
        logger.debug(
            "Initial step size for resumed simulation is h=%.4fs.", self.h)

    # init-only mode: stop here and report the initialization status
    if system.options.get("init") is True:
        logger.debug("Initialization only is requested and done")
        return self.initialized

    # pick the notebook- or terminal-flavored progress bar
    if is_notebook():
        self.pbar = tqdm_nb(total=100, unit='%', file=sys.stdout,
                            disable=self.config.no_tqdm)
    else:
        self.pbar = tqdm(total=100, unit='%', ncols=80, ascii=True,
                         file=sys.stdout, disable=self.config.no_tqdm)

    # fast-forward the progress bar to the resume point
    if resume:
        perc = round((dae.t - config.t0) / (config.tf - config.t0) * 100, 2)
        self.last_pc = perc
        self.pbar.update(perc)

    # bookkeeping for the quasi-real-time (QRT) pacing below
    self.qrt_start = time.time()
    self.headroom = 0.0

    # write variable list file at the beginning
    if not system.files.no_output:
        system.dae.write_lst(self.system.files.lst)

    t0, _ = elapsed()
    while (system.dae.t - self.h < self.config.tf) and (not self.busted):
        # call perturbation file if specified
        if self.callpert is not None:
            self.callpert(dae.t, system)

        step_status = False
        # call the stepping method of the integration method (or data replay)
        if self.data_csv is None:
            step_status = self.itm_step()  # compute for the current step
        else:
            step_status = self._csv_step()

        # record number of iterations and success flag
        if system.config.save_stats:
            self.call_stats.append(
                (system.dae.t.tolist(), self.niter, step_status))

        if step_status:
            # store results every `save_every` accepted steps (0 disables)
            if config.save_every != 0:
                if config.save_every == 1:
                    dae.store()
                else:
                    if dae.kcount % config.save_every == 0:
                        dae.store()

            # offload if exceeds `max_store`
            if self.config.limit_store and len(
                    dae.ts._ys) >= self.config.max_store:

                # write to file if enabled
                if not system.files.no_output:
                    self.save_output()
                    logger.info(
                        "Offload data from memory to file for t=%.2f - %.2f sec",
                        dae.ts.t[0], dae.ts.t[-1])

                # clear storage in memory anyway
                dae.ts.reset()

            self.streaming_step()

            # stop the run if a configured stability criterion is violated
            if self.check_criteria() is False:
                self.err_msg = 'Violated stability criteria. To turn off, set [TDS].criteria = 0.'
                self.busted = True

            # check if the next step is critical time
            self.do_switch()
            self.calc_h()
            dae.t += self.h
            dae.kcount += 1

            # show progress in percentage
            perc = max(
                min((dae.t - config.t0) / (config.tf - config.t0) * 100, 100), 0)
            perc = round(perc, 2)
            perc_diff = perc - self.last_pc
            if perc_diff >= 1:
                self.pbar.update(perc_diff)
                self.last_pc = self.last_pc + perc_diff

            # quasi-real-time check and wait (except for the last step)
            if config.qrt and self.h > 0:
                rt_end = self.qrt_start + self.h * config.kqrt

                # if the ending time has passed
                t_overrun = time.time() - rt_end
                if t_overrun > 0:
                    logger.debug(
                        'Simulation over-run for %4.4g msec at t=%4.4g s.',
                        1000 * t_overrun, dae.t)
                else:
                    # sleep in short intervals until wall clock catches up
                    self.headroom += (rt_end - time.time())
                    while time.time() - rt_end < 0:
                        time.sleep(1e-4)
                self.qrt_start = time.time()
        else:
            # step did not converge; `calc_h` shrinks the step size and
            # returning 0 means no further reduction is possible
            if self.calc_h() == 0:
                self.err_msg = "Time step reduced to zero. Convergence is not likely."
                self.busted = True
                break

    if self.busted:
        logger.error(self.err_msg)
        logger.error("Simulation terminated at t=%.4f s.", system.dae.t)
        system.exit_code += 1
    elif system.dae.t == self.config.tf:
        succeed = True  # success flag
        system.exit_code += 0
        self.pbar.update(100 - self.last_pc)
    else:
        system.exit_code += 1

    # removed `pbar` so that System object can be serialized
    self.pbar.close()
    self.pbar = None

    t1, s1 = elapsed(t0)
    self.exec_time = t1 - t0
    logger.info('Simulation to t=%.2f sec completed in %s.', config.tf, s1)
    if config.qrt:
        logger.debug('QRT headroom time: %.4g s.', self.headroom)

    # in case of resumed simulations,
    # manually unpack data to update arrays in `dae.ts`
    # disable warning in case data has just been dumped
    system.dae.ts.unpack(warn_empty=False)

    if (not system.files.no_output) and (config.save_mode == 'auto'):
        t0, _ = elapsed()
        self.save_output()
        _, s1 = elapsed(t0)

        np_file = self.system.files.npz
        logger.info('Outputs to "%s" and "%s".', self.system.files.lst, np_file)
        logger.info('Outputs written in %s.', s1)

    # end data streaming
    if system.config.dime_enabled:
        system.streaming.finalize()

    # load data into `TDS.plotter` in a notebook or in an interactive mode
    if is_notebook() or is_interactive():
        self.load_plotter()

    return succeed
def run(filename, input_path='', verbose=20, mp_verbose=30,
        ncpu=os.cpu_count(), pool=False, cli=False, codegen=False,
        shell=False, **kwargs):
    """
    Entry point to run ANDES routines.

    Parameters
    ----------
    filename : str
        file name (or pattern)
    input_path : str, optional
        input search path
    verbose : int, 10 (DEBUG), 20 (INFO), 30 (WARNING), 40 (ERROR), 50 (CRITICAL)
        Verbosity level
    mp_verbose : int
        Verbosity level for multiprocessing tasks
    ncpu : int, optional
        Number of cpu cores to use in parallel
    pool: bool, optional
        Use Pool for multiprocessing to return a list of created Systems.
    kwargs
        Other supported keyword arguments
    cli : bool, optional
        If is running from command-line. If True, returns exit code instead
        of System
    codegen : bool, optional
        Run full code generation for System before loading case.
        Only used for single test case.
    shell : bool, optional
        If True, enter IPython shell after routine.

    Returns
    -------
    System or exit_code
        An instance of system (if `cli == False`) or an exit code otherwise.
    """
    if is_interactive():
        config_logger(file=False, stream_level=verbose)

    # put `input_path` back to `kwargs`
    kwargs['input_path'] = input_path

    cases = _find_cases(filename, input_path)
    system = None
    ex_code = 0

    if len(filename) > 0 and len(cases) == 0:
        ex_code = 1  # file specified but not found

    t0, _ = elapsed()
    if len(cases) == 1:
        system = run_case(cases[0], codegen=codegen, **kwargs)
    elif len(cases) > 1:
        # suppress logging output during multiprocessing
        logger.info('-> Processing %s jobs on %s CPUs.', len(cases), ncpu)
        set_logger_level(logger, logging.StreamHandler, mp_verbose)
        set_logger_level(logger, logging.FileHandler, logging.DEBUG)
        kwargs['no_pbar'] = True

        if pool is True:
            system = _run_multiprocess_pool(cases,
                                            ncpu=ncpu,
                                            verbose=verbose,
                                            mp_verbose=mp_verbose,
                                            **kwargs)
        else:
            system = _run_multiprocess_proc(cases,
                                            ncpu=ncpu,
                                            verbose=verbose,
                                            mp_verbose=mp_verbose,
                                            **kwargs)

        # restore command line output when all jobs are done
        set_logger_level(logger, logging.StreamHandler, verbose)

        log_files = find_log_path(logger)
        if len(log_files) > 0:
            log_paths = '\n'.join(log_files)
            print(f'Log saved to "{log_paths}".')

    t0, s0 = elapsed(t0)

    # accumulate exit codes from the created System object(s)
    if len(cases) == 1:
        if system is not None:
            ex_code += system.exit_code
        else:
            ex_code += 1
    elif len(cases) > 1:
        if isinstance(system, list):
            for s in system:
                ex_code += s.exit_code

    if len(cases) == 1:
        if ex_code == 0:
            print(f'-> Single process finished in {s0}.')
        else:
            print(f'-> Single process exit with an error in {s0}.')
    elif len(cases) > 1:
        if ex_code == 0:
            print(f'-> Multiprocessing finished in {s0}.')
        else:
            print(f'-> Multiprocessing exit with an error in {s0}.')

    # IPython interactive shell
    if shell is True:
        try:
            from IPython import embed

            # load plotter before entering IPython
            if system is None:
                logger.warning(
                    "IPython: The System object has not been created.")
            elif isinstance(system, System):
                logger.info(
                    "IPython: Access System object in variable `system`.")
                system.TDS.load_plotter()
            elif isinstance(system, list):
                logger.warning(
                    "IPython: System objects stored in list `system`.\n"
                    "Call `TDS.load_plotter()` on each for plotter.")

            embed()
        except ImportError:
            logger.warning("IPython import error. Installed?")

    if cli is True:
        return ex_code

    return system
def config_logger(
    stream=True,
    file=True,
    stream_level=logging.INFO,
    log_file='andes.log',
    log_path=None,
    file_level=logging.DEBUG,
):
    """
    Set up the ``andes`` package logger, optionally attaching a
    `StreamHandler` and a `FileHandler`.

    Called at the beginning of ``andes.main.main()``. Handlers are only
    attached when the logger has none yet, so repeated calls do not
    duplicate output.

    Parameters
    ----------
    stream : bool, optional
        Attach a `StreamHandler` for `stdout` when ``True``.
    file : bool, optionsl
        True if logging to ``log_file``.
    log_file : str, optional
        Logg file name for `FileHandler`, ``'andes.log'`` by default.
        If ``None``, the `FileHandler` will not be created.
    log_path : str, optional
        Path to store the log file. By default, the path is generated by
        get_log_dir() in utils.misc.
    stream_level : {10, 20, 30, 40, 50}, optional
        `StreamHandler` verbosity level.
    file_level : {10, 20, 30, 40, 50}, optional
        `FileHandler` verbosity level.

    Returns
    -------
    None
    """
    pkg_logger = logging.getLogger('andes')
    pkg_logger.setLevel(logging.DEBUG)

    if log_path is None:
        log_path = get_log_dir()

    # `stream_level == 1` is a shortcut meaning "DEBUG with a verbose format"
    console_fmt = '%(message)s'
    if stream_level == 1:
        console_fmt = '%(name)s:%(lineno)d - %(levelname)s - %(message)s'
        stream_level = 10

    if not pkg_logger.handlers:
        if stream is True:
            console = logging.StreamHandler()
            console.setFormatter(logging.Formatter(console_fmt))
            console.setLevel(stream_level)
            pkg_logger.addHandler(console)

        # file handler for level DEBUG and up
        if file is True and (log_file is not None):
            file_handler = logging.FileHandler(os.path.join(log_path, log_file))
            file_handler.setLevel(file_level)
            file_handler.setFormatter(logging.Formatter(
                '%(process)d: %(asctime)s - %(name)s - %(levelname)s - %(message)s'
            ))
            pkg_logger.addHandler(file_handler)

    # expose the configured logger as this module's `logger` global
    globals()['logger'] = pkg_logger

    if not is_interactive():
        coloredlogs.install(logger=pkg_logger, level=stream_level,
                            fmt=console_fmt)
def run(filename, input_path='', verbose=20, mp_verbose=30,
        ncpu=os.cpu_count(), pool=False, **kwargs):
    """
    Entry point to run ANDES routines.

    Parameters
    ----------
    filename : str
        file name (or pattern)
    input_path : str, optional
        input search path
    verbose : int, 10 (DEBUG), 20 (INFO), 30 (WARNING), 40 (ERROR), 50 (CRITICAL)
        Verbosity level
    mp_verbose : int
        Verbosity level for multiprocessing tasks
    ncpu : int, optional
        Number of cpu cores to use in parallel
    pool: bool, optional
        Use Pool for multiprocessing to return a list of created Systems.
    kwargs
        Other supported keyword arguments

    Returns
    -------
    System
        An instance
    """
    if is_interactive():
        config_logger(file=False, stream_level=verbose)

    cases = _find_cases(filename, input_path)
    n_cases = len(cases)
    system = None

    t0, _ = elapsed()

    if n_cases == 1:
        system = run_case(cases[0], **kwargs)
    elif n_cases > 1:
        # quiet the console while workers run; full detail still goes to file
        logger.info('-> Processing {} jobs on {} CPUs.'.format(n_cases, ncpu))
        set_logger_level(logger, logging.StreamHandler, mp_verbose)
        set_logger_level(logger, logging.FileHandler, logging.DEBUG)
        kwargs['disable_pbar'] = True

        runner = _run_multiprocess_pool if pool is True else _run_multiprocess_proc
        system = runner(cases,
                        ncpu=ncpu,
                        verbose=verbose,
                        mp_verbose=mp_verbose,
                        **kwargs)

        # restore command line output when all jobs are done
        set_logger_level(logger, logging.StreamHandler, verbose)

        log_files = find_log_path(logger)
        if log_files:
            log_paths = '\n'.join(log_files)
            print(f'Log saved to "{log_paths}".')

    t0, s0 = elapsed(t0)

    if n_cases == 1:
        logger.info(f'-> Single process finished in {s0}.')
    elif n_cases >= 2:
        print(f'-> Multiprocessing finished in {s0}.')

    return system