def _monitor_pool(self, refresh=1.0, name=''):
    """Checks if instances within a pool have exited (failed) and restarts them."""
    while self._active:
        for index, instance in enumerate(self._instances):
            if not instance:  # encountered placeholder
                continue
            if instance._exited:
                try:
                    # use the next port after the current available port
                    port = max(self._ports) + 1
                    self._spawn_mapdl(index, port=port, name=f'Instance {index}').join()
                except Exception as e:
                    LOG.error(e, exc_info=True)
        time.sleep(refresh)

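# The monitor above is typically launched once as a background daemon thread so
# that restarting failed instances does not block the main thread.  A minimal
# sketch of how such a loop could be started; the helper name and the exact
# wiring are assumptions for illustration, not the library's API:
import threading


def _start_pool_monitor(pool, refresh=1.0):
    """Start the pool-monitoring loop in a daemon thread and return the thread."""
    thread = threading.Thread(
        target=pool._monitor_pool,
        kwargs={'refresh': refresh},
        name='PoolMonitor',
        daemon=True,  # daemon thread: it exits together with the interpreter
    )
    thread.start()
    return thread
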
def test_log_to_file(tmpdir):
    """Test writing to a log file.

    Since the default loglevel of LOG is ERROR, debug messages are not
    normally recorded to it.
    """
    file_path = os.path.join(tmpdir, 'instance.log')
    file_msg_error = 'This is an error message'
    file_msg_debug = 'This is a debug message'

    # The LOG loglevel was changed in a previous test,
    # hence making sure it is now the "default" one.
    LOG.logger.setLevel('ERROR')
    LOG.std_out_handler.setLevel('ERROR')

    if not LOG.file_handler:
        LOG.log_to_file(file_path)

    LOG.error(file_msg_error)
    LOG.debug(file_msg_debug)

    with open(file_path, 'r') as fid:
        text = ''.join(fid.readlines())

    assert file_msg_error in text
    assert file_msg_debug not in text
    assert 'ERROR' in text
    assert 'DEBUG' not in text

    LOG.logger.setLevel('DEBUG')
    for each_handler in LOG.logger.handlers:
        each_handler.setLevel('DEBUG')

    file_msg_debug = "This debug message should be recorded."
    LOG.debug(file_msg_debug)

    with open(file_path, 'r') as fid:
        text = ''.join(fid.readlines())

    assert file_msg_debug in text

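# For context, ``LOG.log_to_file`` exercised above is expected to attach a file
# handler to the underlying ``logging`` logger so that messages at the handler's
# level and above end up on disk.  A minimal, assumed sketch of that behavior
# using only the standard library (not the actual pymapdl implementation):
import logging


def log_to_file_sketch(logger, file_path, level=logging.DEBUG):
    """Attach a file handler writing messages at ``level`` and above to ``file_path``."""
    handler = logging.FileHandler(file_path)
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter('%(levelname)s - %(message)s'))
    logger.addHandler(handler)
    return handler
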
def map(
    self,
    func,
    iterable=None,
    progress_bar=True,
    close_when_finished=False,
    timeout=None,
    wait=True,
):
    """Run a function for each instance of mapdl within the pool.

    Parameters
    ----------
    func : function
        User function with an instance of ``mapdl`` as the first
        argument.  The remaining arguments should match the number of
        items in each iterable (if any).

    iterable : list, tuple, optional
        An iterable containing a set of arguments for ``func``.  If
        None, will run ``func`` once for each instance of mapdl.

    progress_bar : bool, optional
        Show a progress bar when running the batch.  Defaults to
        ``True``.

    close_when_finished : bool, optional
        Exit the MAPDL instances when the pool is finished.  Default
        ``False``.

    timeout : float, optional
        Maximum runtime in seconds for each iteration.  If ``None``,
        no timeout.  If specified, each iteration is only allowed to
        run for ``timeout`` seconds, after which it is killed and
        treated as a failure.

    wait : bool, optional
        Block execution until the batch is complete.  Default
        ``True``.

    Returns
    -------
    list
        A list containing the return values for ``func``.  Failed
        runs will not return an output.  Since the returns are not
        necessarily in the same order as ``iterable``, you may want
        to add some sort of tracker to the return of your user
        function ``func``.

    Examples
    --------
    Run several input files while storing the final routine.  Note
    how the user function to be mapped must use ``mapdl`` as the
    first argument.  The function can have any number of additional
    arguments.

    >>> completed_indices = []
    >>> def func(mapdl, input_file, index):
    ...     # input_file, index = args
    ...     mapdl.clear()
    ...     output = mapdl.input(input_file)
    ...     completed_indices.append(index)
    ...     return mapdl.parameters.routine
    >>> inputs = [(examples.vmfiles['vm%d' % i], i) for i in range(1, 10)]
    >>> output = pool.map(func, inputs, progress_bar=True, wait=True)
    ['Begin level', 'Begin level', 'Begin level', 'Begin level',
     'Begin level', 'Begin level', 'Begin level', 'Begin level',
     'Begin level']

    """
    # check if any instances are available
    if not len(self):
        # instances could still be spawning...
        if not all(v is None for v in self._instances):
            raise RuntimeError("No MAPDL instances available.")

    results = []

    if iterable is not None:
        n = len(iterable)
    else:
        n = len(self)

    pbar = None
    if progress_bar:
        pbar = tqdm(total=n, desc="MAPDL Running")

    @threaded_daemon
    def func_wrapper(obj, func, timeout, args=None, name=''):
        """Expect obj to be an instance of Mapdl"""
        complete = [False]

        @threaded_daemon
        def run(name=''):
            if args is not None:
                if isinstance(args, (tuple, list)):
                    results.append(func(obj, *args))
                else:
                    results.append(func(obj, args))
            else:
                results.append(func(obj))
            complete[0] = True

        run_thread = run(name='map.run')
        if timeout:
            tstart = time.time()
            while not complete[0]:
                time.sleep(0.01)
                if (time.time() - tstart) > timeout:
                    break

            if not complete[0]:
                LOG.error("Killed instance due to timeout of %f seconds", timeout)
                obj.exit()
        else:
            run_thread.join()

        if not complete[0]:
            try:
                obj.exit()
            except:
                pass

            # ensure that the directory is cleaned up
            if obj._cleanup:
                # allow MAPDL to die
                time.sleep(5)
                if os.path.isdir(obj.directory):
                    try:
                        shutil.rmtree(obj.directory)
                    except Exception as e:
                        LOG.warning(
                            "Unable to remove directory at %s:\n%s",
                            obj.directory,
                            str(e),
                        )

        obj.locked = False
        if pbar:
            pbar.update(1)

    threads = []
    if iterable is not None:
        for args in iterable:
            # grab the next available instance of mapdl
            instance = self.next_available()
            instance.locked = True
            threads.append(
                func_wrapper(instance, func, timeout, args, name='Map_Thread')
            )

        if close_when_finished:
            # start closing any instances that are not in execution
            while not all(v is None for v in self._instances):
                # grab the next available instance of mapdl and close it
                instance, i = self.next_available(return_index=True)
                self._instances[i] = None

                try:
                    instance.exit()
                except Exception as e:
                    LOG.error("Failed to close instance", exc_info=True)
        else:
            # wait for all threads to complete
            if wait:
                [thread.join() for thread in threads]
    else:
        # simply apply to all
        for instance in self._instances:
            if instance:
                threads.append(func_wrapper(instance, func, timeout))

        # wait for all threads to complete
        if wait:
            [thread.join() for thread in threads]

    return results

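# ``map`` above relies on the ``threaded_daemon`` decorator: calling the
# decorated function starts it on a daemon thread and returns the started
# ``Thread`` object so that it can later be joined, with the ``name`` keyword
# used as the thread name.  A minimal sketch of a decorator with that contract,
# inferred from the usage above (an assumption, not necessarily the library's
# implementation):
import threading
from functools import wraps


def threaded_daemon_sketch(func):
    """Run ``func`` on a daemon thread and return the started thread."""

    @wraps(func)
    def wrapper(*args, **kwargs):
        # Consume ``name`` so it becomes the thread name rather than an argument.
        thread_name = kwargs.pop('name', None) or func.__name__
        thread = threading.Thread(
            target=func, name=thread_name, args=args, kwargs=kwargs, daemon=True
        )
        thread.start()
        return thread

    return wrapper
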
def test_global_methods(caplog):
    LOG.logger.setLevel('DEBUG')
    LOG.std_out_handler.setLevel('DEBUG')

    msg = 'This is a debug message'
    LOG.debug(msg)
    assert msg in caplog.text

    msg = 'This is an info message'
    LOG.info(msg)
    assert msg in caplog.text

    msg = 'This is a warning message'
    LOG.warning(msg)
    assert msg in caplog.text

    msg = 'This is an error message'
    LOG.error(msg)
    assert msg in caplog.text

    msg = 'This is a critical message'
    LOG.critical(msg)
    assert msg in caplog.text

    msg = 'This is a 30 message using "log"'
    LOG.log(30, msg)
    assert msg in caplog.text

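# In the last assertion above, ``LOG.log(30, msg)`` logs at numeric level 30,
# which is the standard library's WARNING level.  For reference, the standard
# numeric levels are:
import logging

assert logging.DEBUG == 10
assert logging.INFO == 20
assert logging.WARNING == 30
assert logging.ERROR == 40
assert logging.CRITICAL == 50
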
def launch_mapdl(
    exec_file=None,
    run_location=None,
    jobname="file",
    nproc=2,
    ram=None,
    mode=None,
    override=False,
    loglevel="ERROR",
    additional_switches="",
    start_timeout=120,
    port=None,
    cleanup_on_exit=True,
    start_instance=True,
    ip=LOCALHOST,
    clear_on_connect=True,
    log_apdl=False,
    verbose_mapdl=False,
    license_server_check=True,
    license_type=None,
    **kwargs,
):
    """Start MAPDL locally in gRPC mode.

    Parameters
    ----------
    exec_file : str, optional
        The location of the MAPDL executable.  Will use the cached
        location when left at the default ``None``.

    run_location : str, optional
        MAPDL working directory.  Defaults to a temporary working
        directory.  If the directory doesn't exist, it is created.

    jobname : str, optional
        MAPDL jobname.  Defaults to ``'file'``.

    nproc : int, optional
        Number of processors.  Defaults to 2.

    ram : float, optional
        Fixed amount of memory to request for MAPDL.  If ``None``,
        then MAPDL will use as much as available on the host machine.

    mode : str, optional
        Mode to launch MAPDL.  Must be one of the following:

        - ``'grpc'``
        - ``'corba'``
        - ``'console'``

        The ``'grpc'`` mode is available on ANSYS 2021R1 or newer and
        provides the best performance and stability.  The ``'corba'``
        mode is available from v17.0 and newer and is given legacy
        support.  This mode requires the additional ``ansys_corba``
        module.  Finally, the ``'console'`` mode is for legacy use on
        Linux only, prior to v17.0.  The console mode is pending
        deprecation.

    override : bool, optional
        Attempts to delete the lock file at the run_location.  Useful
        when a prior MAPDL session has exited prematurely and the
        lock file has not been deleted.

    loglevel : str, optional
        Sets which messages are printed to the console.  ``'INFO'``
        prints out all ANSYS messages, ``'WARNING'`` prints only
        messages containing ANSYS warnings, and ``'ERROR'`` logs only
        error messages.

    additional_switches : str, optional
        Additional switches for MAPDL.  For example, ``'aa_r'``, the
        academic research license, would be added with:

        - ``additional_switches="-aa_r"``

        Avoid adding switches like ``-i``, ``-o`` or ``-b`` as these
        are already included to start up the MAPDL server.  See the
        notes section for additional details.

    start_timeout : float, optional
        Maximum allowable time to connect to the MAPDL server.

    port : int
        Port to launch MAPDL gRPC on.  Final port will be the first
        port available after (or including) this port.  Defaults to
        50052.  You can also override the default behavior of this
        keyword argument with the environment variable
        ``PYMAPDL_PORT=<VALID PORT>``.

    custom_bin : str, optional
        Path to the MAPDL custom executable.  On release 2020R2 on
        Linux, if ``None``, will check to see if you have
        ``ansys.mapdl_bin`` installed and use that executable.

    cleanup_on_exit : bool, optional
        Exit MAPDL when Python exits or the mapdl Python instance is
        garbage collected.

    start_instance : bool, optional
        When False, connect to an existing MAPDL instance at ``ip``
        and ``port``, which default to ``'127.0.0.1'`` at 50052.
        Otherwise, launch a local instance of MAPDL.  You can also
        override the default behavior of this keyword argument with
        the environment variable ``PYMAPDL_START_INSTANCE=FALSE``.

    ip : str, optional
        Used only when ``start_instance`` is ``False``.  Defaults to
        ``'127.0.0.1'``.  You can also override the default behavior
        of this keyword argument with the environment variable
        ``PYMAPDL_IP=<IP>``.

    clear_on_connect : bool, optional
        Used only when ``start_instance`` is ``False``.  Defaults to
        ``True``, giving you a fresh environment when connecting to
        MAPDL.
    log_apdl : str, optional
        Enables logging every APDL command to the local disk.  This
        can be used to "record" all the commands that are sent to
        MAPDL via PyMAPDL so a script can be run within MAPDL without
        PyMAPDL.

    remove_temp_files : bool, optional
        Removes temporary files on exit.  Default ``False``.

    verbose_mapdl : bool, optional
        Enable printing of all output when launching and running
        MAPDL.  This should be used for debugging only as output can
        be tracked within pymapdl.  Default ``False``.

    license_server_check : bool, optional
        Check if the license server is available if MAPDL fails to
        start.  Only available on ``mode='grpc'``.  Defaults ``True``.

    license_type : str, optional
        Enable license type selection.  You can input a string for
        its license name (for example ``'meba'`` or ``'ansys'``) or
        its description ("enterprise solver" or "enterprise",
        respectively).  You can also use legacy licenses (for example
        ``'aa_t_a'``), but this will raise a warning.  If it is not
        used (``None``), no specific license is requested, leaving it
        up to the license server to provide a specific license type.
        Default is ``None``.

    Notes
    -----
    These are the MAPDL switch options as of 2020R2 applicable for
    running MAPDL as a service via gRPC.  Excluded switches such as
    ``"-j"`` are either not applicable or are set via keyword
    arguments.

    -acc <device> : Enables the use of GPU hardware.  See GPU
     Accelerator Capability in the Parallel Processing Guide for more
     information.

    -amfg : Enables the additive manufacturing capability.  Requires
     an additive manufacturing license.  For general information
     about this feature, see AM Process Simulation in ANSYS Workbench.

    -ansexe <executable> : Activates a custom Mechanical APDL executable.
     In the ANSYS Workbench environment, activates a custom
     Mechanical APDL executable.

    -custom <executable> : Calls a custom Mechanical APDL executable.
     See Running Your Custom Executable in the Programmer's Reference
     for more information.

    -db value : Initial memory allocation
     Defines the portion of workspace (memory) to be used as the
     initial allocation for the database.  The default is 1024 MB.
     Specify a negative number to force a fixed size throughout the
     run; useful on small memory systems.

    -dis : Enables Distributed ANSYS
     See the Parallel Processing Guide for more information.

    -dvt : Enables ANSYS DesignXplorer advanced task (add-on).
     Requires DesignXplorer.

    -l <language> : Specifies a language file to use other than English
     This option is valid only if you have a translated message file
     in an appropriately named subdirectory in
     ``/ansys_inc/v201/ansys/docu`` or
     ``Program Files\\ANSYS\\Inc\\V201\\ANSYS\\docu``

    -m <workspace> : Specifies the total size of the workspace
     Workspace (memory) in megabytes used for the initial allocation.
     If you omit the ``-m`` option, the default is 2 GB (2048 MB).
     Specify a negative number to force a fixed size throughout the
     run.

    -machines <IP> : Specifies the distributed machines
     Machines on which to run a Distributed ANSYS analysis.  See
     Starting Distributed ANSYS in the Parallel Processing Guide for
     more information.

    -mpi <value> : Specifies the type of MPI to use.
     See the Parallel Processing Guide for more information.

    -mpifile <appfile> : Specifies an existing MPI file
     Specifies an existing MPI file (appfile) to be used in a
     Distributed ANSYS run.  See Using MPI Files in the Parallel
     Processing Guide for more information.

    -na <value> : Specifies the number of GPU accelerator devices
     Number of GPU devices per machine or compute node when running
     with the GPU accelerator feature.
     See GPU Accelerator Capability in the Parallel Processing Guide
     for more information.

    -name <value> : Defines Mechanical APDL parameters
     Set Mechanical APDL parameters at program start-up.  The
     parameter name must be at least two characters long.  For
     details about parameters, see the ANSYS Parametric Design
     Language Guide.

    -p <productname> : ANSYS session product
     Defines the ANSYS session product that will run during the
     session.  For more detailed information about the ``-p`` option,
     see Selecting an ANSYS Product via the Command Line.

    -ppf <license feature name> : HPC license
     Specifies which HPC license to use during a parallel processing
     run.  See HPC Licensing in the Parallel Processing Guide for
     more information.

    -smp : Enables shared-memory parallelism.
     See the Parallel Processing Guide for more information.

    Examples
    --------
    Launch MAPDL using the best protocol.

    >>> from ansys.mapdl.core import launch_mapdl
    >>> mapdl = launch_mapdl()

    Run MAPDL with shared memory parallel and specify the location of
    the ansys binary.

    >>> exec_file = 'C:/Program Files/ANSYS Inc/v201/ansys/bin/win64/ANSYS201.exe'
    >>> mapdl = launch_mapdl(exec_file, additional_switches='-smp')

    Connect to an existing instance of MAPDL at IP 192.168.1.30 and
    port 50001.  This is only available using the latest ``'grpc'``
    mode.

    >>> mapdl = launch_mapdl(start_instance=False, ip='192.168.1.30', port=50001)

    Force the usage of the CORBA protocol.

    >>> mapdl = launch_mapdl(mode='corba')

    Run MAPDL using the console mode (available only on Linux).

    >>> mapdl = launch_mapdl('/ansys_inc/v194/ansys/bin/ansys194', mode='console')

    """
    # These parameters are partially used for unit testing
    set_no_abort = kwargs.get("set_no_abort", True)

    ip = os.environ.get("PYMAPDL_IP", ip)
    if "PYMAPDL_PORT" in os.environ:
        port = int(os.environ.get("PYMAPDL_PORT"))
    if port is None:
        port = MAPDL_DEFAULT_PORT

    # connect to an existing instance if enabled
    if not get_start_instance(start_instance):
        mapdl = MapdlGrpc(
            ip=ip,
            port=port,
            cleanup_on_exit=False,
            loglevel=loglevel,
            set_no_abort=set_no_abort,
        )
        if clear_on_connect:
            mapdl.clear()
        return mapdl

    # verify executable
    if exec_file is None:
        # Load cached path
        exec_file = get_ansys_path()
        if exec_file is None:
            raise FileNotFoundError(
                "Invalid exec_file path or cannot load cached "
                "mapdl path.  Enter one manually by specifying "
                "exec_file="
            )
    else:
        # verify ansys exists at this location
        if not os.path.isfile(exec_file):
            raise FileNotFoundError(
                f'Invalid MAPDL executable at "{exec_file}"\n'
                "Enter one manually using exec_file="
            )

    # verify run location
    if run_location is None:
        temp_dir = tempfile.gettempdir()
        run_location = os.path.join(temp_dir, "ansys_%s" % random_string(10))
        if not os.path.isdir(run_location):
            try:
                os.mkdir(run_location)
            except:
                raise RuntimeError(
                    "Unable to create the temporary working "
                    f'directory "{run_location}"\n'
                    "Please specify run_location="
                )
    else:
        if not os.path.isdir(run_location):
            raise FileNotFoundError(f'"{run_location}" is not a valid directory')

    # verify no lock file and the mode is valid
    check_lock_file(run_location, jobname, override)
    mode = check_mode(mode, _version_from_path(exec_file))

    # cache start parameters
    additional_switches = _validate_add_sw(
        additional_switches, exec_file, kwargs.pop("force_intel", False)
    )

    if isinstance(license_type, str):
        # In newer license server versions an invalid license name just gets
        # discarded and produces no effect or warning.
        # For example:
        # ```bash
        # mapdl.exe -p meba    # works fine because 'meba' is a valid license in ALLOWABLE_LICENSES.
        # mapdl.exe -p yoyoyo  # the -p flag is ignored and it runs the default license.
        # ```
        #
        # In older versions it might raise an error instead, but this is not certain.
        license_type = license_type.lower().strip()

        if 'enterprise' in license_type and 'solver' not in license_type:
            license_type = 'ansys'
        elif 'enterprise' in license_type and 'solver' in license_type:
            license_type = 'meba'
        elif 'premium' in license_type:
            license_type = 'mech_2'
        elif 'pro' in license_type:
            license_type = 'mech_1'
        elif license_type not in ALLOWABLE_LICENSES:
            allow_lics = [f"'{each}'" for each in ALLOWABLE_LICENSES]
            warn_text = (
                f"The keyword argument 'license_type' value ('{license_type}') "
                "is not a recognized license name or has been deprecated.\n"
                "PyMAPDL will still try to use it, but in older versions you "
                "might experience problems connecting to the server.\n"
                f"Recognized license names: {' '.join(allow_lics)}"
            )
            warnings.warn(warn_text, UserWarning)

        additional_switches += ' -p ' + license_type
        LOG.debug(
            f"Using specified license name '{license_type}' in the 'license_type' keyword argument."
        )

    elif '-p ' in additional_switches:
        # There is already a license request in the additional switches.
        # Get only the first product license.
        license_type = re.findall(r'-p \b(\w*)', additional_switches)[0]

        if license_type not in ALLOWABLE_LICENSES:
            allow_lics = [f"'{each}'" for each in ALLOWABLE_LICENSES]
            warn_text = (
                f"The additional switch product value ('-p {license_type}') "
                "is not a recognized license name or has been deprecated.\n"
                "PyMAPDL will still try to use it, but in older versions you "
                "might experience problems connecting to the server.\n"
                f"Recognized license names: {' '.join(allow_lics)}"
            )
            warnings.warn(warn_text, UserWarning)
            LOG.warning(warn_text)

        LOG.debug(
            f"Using specified license name '{license_type}' in the additional switches parameter."
        )

    elif license_type is not None:
        raise TypeError("The argument 'license_type' only accepts str or None.")

    start_parm = {
        "exec_file": exec_file,
        "run_location": run_location,
        "additional_switches": additional_switches,
        "jobname": jobname,
        "nproc": nproc,
    }

    if mode in ["console", "corba"]:
        start_parm["start_timeout"] = start_timeout
    else:
        start_parm["ram"] = ram
        start_parm["override"] = override
        start_parm["timeout"] = start_timeout

    # Check the license server
    if license_server_check:
        # configure timeout to be 90% of the wait time of the startup
        # time for Ansys.
        lic_check = LicenseChecker(
            timeout=start_timeout * 0.9, verbose=verbose_mapdl
        )
        lic_check.start()

    try:
        if mode == "console":
            from ansys.mapdl.core.mapdl_console import MapdlConsole

            mapdl = MapdlConsole(loglevel=loglevel, log_apdl=log_apdl, **start_parm)
        elif mode == "corba":
            try:
                # pending deprecation to ansys-mapdl-corba
                from ansys.mapdl.core.mapdl_corba import MapdlCorba
            except ImportError:
                raise ImportError(
                    "To use this feature, install the MAPDL CORBA package"
                    " with:\n\npip install ansys_corba"
                ) from None

            broadcast = kwargs.get("log_broadcast", False)
            mapdl = MapdlCorba(
                loglevel=loglevel,
                log_apdl=log_apdl,
                log_broadcast=broadcast,
                verbose=verbose_mapdl,
                **start_parm,
            )
        elif mode == "grpc":
            port, actual_run_location = launch_grpc(
                port=port, verbose=verbose_mapdl, ip=ip, **start_parm
            )
            mapdl = MapdlGrpc(
                ip=ip,
                port=port,
                cleanup_on_exit=cleanup_on_exit,
                loglevel=loglevel,
                set_no_abort=set_no_abort,
                remove_temp_files=kwargs.pop("remove_temp_files", False),
                log_apdl=log_apdl,
                **start_parm,
            )
            if run_location is None:
                mapdl._path = actual_run_location

    except Exception as exception:
        # Failed to launch for some reason.  Check if failure was due
        # to the license check.
        if license_server_check:
            lic_check.check()
        raise exception

    return mapdl

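# As read in the body above, the ``PYMAPDL_IP`` and ``PYMAPDL_PORT`` environment
# variables override the ``ip`` and ``port`` keyword arguments, and
# ``PYMAPDL_START_INSTANCE=FALSE`` (via ``get_start_instance``) makes
# ``launch_mapdl`` connect to an already running server instead of starting one.
# A small usage sketch; the address and port are placeholders:
import os


def connect_to_remote_example():
    """Usage sketch: connect to a running MAPDL server via environment overrides."""
    os.environ['PYMAPDL_START_INSTANCE'] = 'FALSE'  # do not launch a local instance
    os.environ['PYMAPDL_IP'] = '192.168.1.30'       # placeholder address
    os.environ['PYMAPDL_PORT'] = '50052'            # placeholder port
    return launch_mapdl()
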
def handler(sig, frame):  # pragma: no cover
    """Pass signal to custom interrupt handler."""
    logger.info(
        "KeyboardInterrupt received.  Waiting until MAPDL execution finishes"
    )
    SIGINT_TRACKER.append(True)
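# The handler above only takes effect once it is installed for SIGINT.
# Registering it with ``signal.signal`` is standard library usage; whether
# pymapdl installs it exactly this way is an assumption for illustration:
import signal


def install_interrupt_handler():
    """Route Ctrl+C to ``handler`` and return the previous handler so it can be restored."""
    previous = signal.getsignal(signal.SIGINT)
    signal.signal(signal.SIGINT, handler)
    return previous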