def comment_show(user, nodes):
    """Show the comments of one or multiple nodes."""
    for node in nodes:
        title = '* Comments for Node<{}>'.format(node.pk)
        border = '*' * len(title)
        echo.echo(border)
        echo.echo(title)
        echo.echo(border)

        all_comments = node.get_comments()

        if user is not None:
            # Restrict to comments authored by the given user.
            comments = [entry for entry in all_comments if entry.user.email == user.email]

            if not comments:
                valid_users = ', '.join(set(entry.user.email for entry in all_comments))
                echo.echo_warning('no comments found for user {}'.format(user))
                echo.echo_info('valid users found for Node<{}>: {}'.format(node.pk, valid_users))
        else:
            comments = all_comments

        for comment in comments:
            comment_msg = [
                'Comment<{}> for Node<{}> by {}'.format(comment.id, node.pk, comment.user.email),
                'Created on {}'.format(timezone.localtime(comment.ctime).strftime('%Y-%m-%d %H:%M')),
                'Last modified on {}'.format(timezone.localtime(comment.mtime).strftime('%Y-%m-%d %H:%M')),
                '\n{}\n'.format(comment.content),
            ]
            echo.echo('\n'.join(comment_msg))

        if not comments:
            echo.echo_info('no comments found')
def computer_duplicate(ctx, computer, non_interactive, **kwargs):
    """Duplicate a computer allowing to change some parameters.

    :param ctx: click context carrying the pre-populated ``computer_builder``
    :param computer: the computer to duplicate
    :param non_interactive: whether the command runs without prompting
    :param kwargs: attribute overrides for the duplicated computer
    """
    from aiida import orm
    from aiida.orm.utils.builders.computer import ComputerBuilder

    if kwargs['label'] in get_computer_names():
        echo.echo_critical('A computer called {} already exists'.format(kwargs['label']))

    # The CLI passes transport/scheduler as plugin objects; the builder expects their entry point names.
    kwargs['transport'] = kwargs['transport'].name
    kwargs['scheduler'] = kwargs['scheduler'].name

    # Start from the builder seeded with the original computer's settings and apply the overrides.
    computer_builder = ctx.computer_builder
    for key, value in kwargs.items():
        if value is not None:
            setattr(computer_builder, key, value)

    try:
        computer = computer_builder.new()
    except (ComputerBuilder.ComputerValidationError, ValidationError) as e:
        echo.echo_critical('{}: {}'.format(type(e).__name__, e))

    # BUGFIX: the original printed `echo_success('stored computer ...')` here, before `computer.store()` was
    # even attempted — a premature and duplicate success message. Success is reported only after the store.
    try:
        computer.store()
    except ValidationError as err:
        echo.echo_critical('unable to store the computer: {}. Exiting...'.format(err))
    else:
        echo.echo_success('Computer<{}> {} created'.format(computer.pk, computer.label))

    is_configured = computer.is_user_configured(orm.User.objects.get_default())

    if not is_configured:
        echo.echo_info('Note: before the computer can be used, it has to be configured with the command:')
        echo.echo_info('  verdi computer configure {} {}'.format(computer.transport_type, computer.label))
def setup_code(non_interactive, **kwargs):
    """Add a Code."""
    from aiida.common.exceptions import ValidationError

    if not non_interactive:
        # Interactively edit the prepend/append scripts before building the code.
        pre, post = ensure_scripts(kwargs.pop('prepend_text', ''), kwargs.pop('append_text', ''), kwargs)
        kwargs['prepend_text'] = pre
        kwargs['append_text'] = post

    kwargs['code_type'] = (
        CodeBuilder.CodeType.ON_COMPUTER if kwargs.pop('on_computer') else CodeBuilder.CodeType.STORE_AND_UPLOAD
    )

    code = CodeBuilder(**kwargs).new()

    try:
        code.store()
        code.reveal()  # newly setup code shall not be hidden
    except ValidationError as err:
        echo.echo_critical('unable to store the code: {}. Exiting...'.format(err))

    echo.echo_success('code "{}" stored in DB.'.format(code.label))
    echo.echo_info('pk: {}, uuid: {}'.format(code.pk, code.uuid))
def _gather_imports(archives, webpages) -> List[Tuple[str, bool]]:
    """Gather archives to import and sort into local files and URLs.

    :returns: list of (archive path, whether it is web based)
    """
    from aiida.tools.importexport.common.utils import get_valid_import_links

    # Classify every explicitly given archive: web based iff it looks like an HTTP(S) URL.
    final_archives = [(archive, archive.startswith(('http://', 'https://'))) for archive in archives]

    # Discover and retrieve *.aiida files at URL(s)
    if webpages is not None:
        for webpage in webpages:
            try:
                echo.echo_info(f'retrieving archive URLS from {webpage}')
                urls = get_valid_import_links(webpage)
            except Exception as error:  # pylint: disable=broad-except
                echo.echo_critical(
                    f'an exception occurred while trying to discover archives at URL {webpage}:\n{error}'
                )
            else:
                echo.echo_success(f'{len(urls)} archive URLs discovered and added')
                final_archives.extend((url, True) for url in urls)

    return final_archives
def detect_duplicate_uuid(table, apply_patch):
    """Detect and fix entities with duplicate UUIDs.

    Before aiida-core v1.0.0, there was no uniqueness constraint on the UUID column of the node table in the database
    and a few other tables as well. This made it possible to store multiple entities with identical UUIDs in the same
    table without the database complaining. This bug was fixed in aiida-core=1.0.0 by putting an explicit uniqueness
    constraint on UUIDs on the database level. However, this would leave databases created before this patch with
    duplicate UUIDs in an inconsistent state. This command will run an analysis to detect duplicate UUIDs in a given
    table and solve it by generating new UUIDs. Note that it will not delete or merge any rows.
    """
    from aiida.manage.database.integrity.duplicate_uuid import deduplicate_uuids
    from aiida.manage.manager import get_manager

    # Load the backend without the schema check so the command also runs on not-yet-migrated databases.
    manager = get_manager()
    manager._load_backend(schema_check=False)  # pylint: disable=protected-access

    try:
        messages = deduplicate_uuids(table=table, dry_run=not apply_patch)
    except Exception as exception:  # pylint: disable=broad-except
        echo.echo_critical('integrity check failed: {}'.format(str(exception)))
    else:
        for message in messages:
            echo.echo_info(message)

        echo.echo_success('integrity patch completed' if apply_patch else 'dry-run of integrity patch completed')
def _show_vesta(exec_name, structure_list):
    """
    Plugin for VESTA
    This VESTA plugin was added by Yue-Wen FANG and Abel Carreras
    at Kyoto University in the group of Prof. Isao Tanaka's lab
    """
    import subprocess
    import tempfile

    # pylint: disable=protected-access
    with tempfile.NamedTemporaryFile(mode='w+b', suffix='.cif') as handle:
        # Concatenate the CIF export of every structure into a single temporary file.
        for structure in structure_list:
            handle.write(structure._exportcontent('cif')[0])
        handle.flush()

        try:
            subprocess.check_output([exec_name, handle.name])
        except subprocess.CalledProcessError:
            # The program died: just print a message
            echo.echo_info('the call to {} ended with an error.'.format(exec_name))
        except OSError as err:
            if err.errno == 2:
                echo.echo_critical(
                    "No executable '{}' found. Add to the path, or try with an absolute path.".format(exec_name)
                )
            else:
                raise
def print_last_process_state_change(process_type=None):
    """
    Print the last time that a process of the specified type has changed its state.
    This function will also print a warning if the daemon is not running.

    :param process_type: optional process type for which to get the latest state change timestamp.
        Valid process types are either 'calculation' or 'work'.
    """
    from aiida.cmdline.utils.echo import echo_info, echo_warning
    from aiida.common import timezone
    from aiida.common.utils import str_timedelta
    from aiida.engine.daemon.client import get_daemon_client
    from aiida.engine.utils import get_process_state_change_timestamp

    client = get_daemon_client()
    timestamp = get_process_state_change_timestamp(process_type)

    if timestamp is None:
        echo_info('last time an entry changed state: never')
    else:
        formatted = format_local_time(timestamp, format_str='at %H:%M:%S on %Y-%m-%d')
        relative = str_timedelta(timezone.delta(timestamp, timezone.now()), negative_to_zero=True, max_num_fields=1)
        echo_info('last time an entry changed state: {} ({})'.format(relative, formatted))

    if not client.is_daemon_running:
        echo_warning('the daemon is not running', bold=True)
def work_plugins(entry_point):
    """
    Print a list of registered workflow plugins or details of a specific workflow plugin
    """
    from aiida.common.exceptions import LoadingPluginFailed, MissingPluginError
    from aiida.plugins.entry_point import get_entry_point_names, load_entry_point

    if not entry_point:
        # No specific entry point requested: list everything that is registered.
        registered = get_entry_point_names('aiida.workflows')

        if not registered:
            echo.echo_error('No workflow plugins found')
        else:
            echo.echo('Registered workflow entry points:')
            for name in registered:
                echo.echo('* {}'.format(name))
            echo.echo('')
            echo.echo_info('Pass the entry point as an argument to display detailed information')

        return

    try:
        plugin = load_entry_point('aiida.workflows', entry_point)
    except (LoadingPluginFailed, MissingPluginError) as exception:
        echo.echo_critical(exception)
    else:
        echo.echo_info(entry_point)
        echo.echo(plugin.get_description())
def workflow_list(short, all_states, depth, past_days, workflows):
    """List legacy workflows"""
    from aiida.backends.utils import get_workflow_list
    from aiida.orm.workflow import get_workflow_info
    from aiida.orm.backend import construct_backend  # pylint: disable=no-name-in-module

    tab_size = 2
    current_user = construct_backend().users.get_automatic_user()

    wf_list = get_workflow_list(
        [workflow.pk for workflow in workflows], user=current_user, all_states=all_states, n_days_ago=past_days
    )

    for workflow in wf_list:
        # Subworkflows are shown only if they were explicitly requested.
        if not workflow.is_subworkflow() or workflow in workflows:
            echo.echo('\n'.join(get_workflow_info(workflow, tab_size=tab_size, short=short, depth=depth)))

    if not workflows:
        echo.echo_info('# No workflows found' if all_states else '# No running workflows found')
def _show_xcrysden(exec_name, object_list, **kwargs):
    """
    Plugin for xcrysden
    """
    import subprocess
    import tempfile

    if len(object_list) > 1:
        raise MultipleObjectsError('Visualization of multiple trajectories is not implemented')
    obj = object_list[0]

    # pylint: disable=protected-access
    with tempfile.NamedTemporaryFile(mode='w+b', suffix='.xsf') as handle:
        handle.write(obj._exportcontent('xsf', **kwargs)[0])
        handle.flush()

        try:
            subprocess.check_output([exec_name, '--xsf', handle.name])
        except subprocess.CalledProcessError:
            # The program died: just print a message
            echo.echo_info('the call to {} ended with an error.'.format(exec_name))
        except OSError as err:
            if err.errno == 2:
                echo.echo_critical(
                    "No executable '{}' found. Add to the path, or try with an absolute path.".format(exec_name)
                )
            else:
                raise
def _show_jmol(exec_name, trajectory_list, **kwargs):
    """
    Plugin for jmol
    """
    import subprocess
    import tempfile

    # pylint: disable=protected-access
    with tempfile.NamedTemporaryFile(mode='w+b') as tmpf:
        # Concatenate the CIF export of every trajectory into a single temporary file.
        for trajectory in trajectory_list:
            tmpf.write(trajectory._exportcontent('cif', **kwargs)[0])
        tmpf.flush()

        try:
            subprocess.check_output([exec_name, tmpf.name])
        except subprocess.CalledProcessError:
            # The program died: just print a message
            echo.echo_info('the call to {} ended with an error.'.format(exec_name))
        except OSError as err:
            if err.errno == 2:
                echo.echo_critical(
                    "No executable '{}' found. Add to the path, or try with an absolute path.".format(exec_name)
                )
            else:
                raise
def computer_setup(ctx, non_interactive, **kwargs):
    """Create a new computer."""
    from aiida.orm.utils.builders.computer import ComputerBuilder

    label = kwargs['label']
    if label in get_computer_names():
        echo.echo_critical(
            'A computer called {c} already exists. '
            'Use "verdi computer duplicate {c}" to set up a new '
            'computer starting from the settings of {c}.'.format(c=label)
        )

    # The CLI passes transport/scheduler as plugin objects; the builder expects their entry point names.
    kwargs['transport'] = kwargs['transport'].name
    kwargs['scheduler'] = kwargs['scheduler'].name

    try:
        computer = ComputerBuilder(**kwargs).new()
    except (ComputerBuilder.ComputerValidationError, ValidationError) as exception:
        echo.echo_critical(f'{type(exception).__name__}: {exception}')

    try:
        computer.store()
    except ValidationError as err:
        echo.echo_critical(f'unable to store the computer: {err}. Exiting...')
    else:
        echo.echo_success(f'Computer<{computer.pk}> {computer.label} created')

    echo.echo_info('Note: before the computer can be used, it has to be configured with the command:')
    echo.echo_info(f'  verdi computer configure {computer.transport_type} {computer.label}')
def cmd_mark(force):
    """Mark all `StructureData` nodes with partial occupancies.

    A `StructureData` is said to have partial occupancies if any of its kinds contain more than one symbol (an alloy)
    or the total weight is not unity (vacancies). Any `StructureData` that matches this definition will get an extra
    set with the name `partial_occupancies=True`, otherwise it will be `False`. The reason for setting this extra also
    on structures that do not have partial occupancies, is to make it easy to tell in the future which structures have
    already been considered for marking. When the command is then executed only those structures without the mark have
    to be considered.
    """
    from aiida.manage.manager import get_manager

    builder = get_unmarked_structure_builder()
    unmarked = builder.count()
    echo.echo_info('found {} unmarked structures'.format(unmarked))

    if not unmarked:
        echo.echo_success('nothing to be done')
        return

    marked_count = 0

    with click.progressbar(label='Marking structures', length=unmarked, show_pos=True) as progress:
        # Mark everything inside a single transaction so the pass is all-or-nothing.
        with get_manager().get_backend().transaction():
            for [structure] in builder.iterall():
                has_partial = structure.is_alloy or structure.has_vacancies
                structure.set_extra(KEY_PARTIAL_OCCUPANCIES, has_partial)
                if has_partial:
                    marked_count += 1
                progress.update(1)

    echo.echo_success('marked {} structures as containing partial occupancies'.format(marked_count))
def profile_list():
    """Display a list of all available profiles."""
    try:
        config = get_config()
    except (exceptions.MissingConfigurationError, exceptions.ConfigurationError) as exception:
        # This can happen for a fresh install and the `verdi setup` has not yet been run. In this case it is still nice
        # to be able to see the configuration directory, for instance for those who have set `AIIDA_PATH`. This way
        # they can at least verify that it is correctly set.
        from aiida.manage.configuration.settings import AIIDA_CONFIG_FOLDER
        echo.echo_info('configuration folder: {}'.format(AIIDA_CONFIG_FOLDER))
        echo.echo_critical(str(exception))
    else:
        echo.echo_info('configuration folder: {}'.format(config.dirpath))

        if not config.profiles:
            echo.echo_warning('no profiles configured: run `verdi setup` to create one')
        else:

            def sort(profile):
                """Sort the profiles alphabetically by name."""
                return profile.name

            def highlight(profile):
                """Highlight the default profile."""
                return profile.name == config.default_profile_name

            echo.echo_formatted_list(config.profiles, ['name'], sort=sort, highlight=highlight)
def _show_vmd(exec_name, structure_list):
    """
    Plugin for vmd
    """
    import subprocess
    import tempfile

    if len(structure_list) > 1:
        raise MultipleObjectsError('Visualization of multiple objects is not implemented')
    structure = structure_list[0]

    # pylint: disable=protected-access
    with tempfile.NamedTemporaryFile(suffix='.xsf') as handle:
        # NOTE(review): the sibling viewer plugins call `_exportcontent`; this one still uses the older
        # `_exportstring` API — confirm which is available in this code base before unifying.
        handle.write(structure._exportstring('xsf')[0])
        handle.flush()

        try:
            subprocess.check_output([exec_name, handle.name])
        except subprocess.CalledProcessError:
            # The program died: just print a message
            echo.echo_info('the call to {} ended with an error.'.format(exec_name))
        except OSError as err:
            if err.errno == 2:
                echo.echo_critical(
                    "No executable '{}' found. Add to the path, or try with an absolute path.".format(exec_name)
                )
            else:
                raise
def cmd_list(version, functional, protocol, project, raw):
    """List installed configurations of the SSSP."""
    from tabulate import tabulate

    # Family labels follow the pattern `SSSP/<version>/<functional>/<protocol>`.
    mapping_project = {
        'count': lambda family: family.count(),
        'version': lambda family: family.label.split('/')[1],
        'functional': lambda family: family.label.split('/')[2],
        'protocol': lambda family: family.label.split('/')[3],
    }

    def project_value(family, projection):
        """Compute a projection for a family, falling back to plain attribute access for unknown keys."""
        try:
            return mapping_project[projection](family)
        except KeyError:
            return getattr(family, projection)

    rows = [
        [project_value(group, projection) for projection in project]
        for [group] in get_sssp_families_builder(version, functional, protocol).iterall()
    ]

    if not rows:
        echo.echo_info('SSSP has not yet been installed: use `aiida-sssp install` to install it.')
        return

    if raw:
        echo.echo(tabulate(rows, disable_numparse=True, tablefmt='plain'))
    else:
        echo.echo(tabulate(rows, headers=[projection.capitalize() for projection in project], disable_numparse=True))
def profile_show(profile):
    """Show details for a profile."""
    if profile is None:
        echo.echo_critical('no profile to show')

    echo.echo_info('Configuration for: {}'.format(profile.name))
    # Show the profile settings sorted by lower-cased key for a stable, readable table.
    rows = sorted((key.lower(), value) for key, value in profile.dictionary.items())
    echo.echo(tabulate.tabulate(rows))
def export_workflow_data(apps, _):
    """Export existing legacy workflow data to a JSON file.

    :param apps: Django app registry providing access to the historical models.
    :param _: unused schema editor argument supplied by the migration framework.
    """
    from tempfile import NamedTemporaryFile

    DbWorkflow = apps.get_model('db', 'DbWorkflow')
    DbWorkflowData = apps.get_model('db', 'DbWorkflowData')
    DbWorkflowStep = apps.get_model('db', 'DbWorkflowStep')

    count_workflow = DbWorkflow.objects.count()
    count_workflow_data = DbWorkflowData.objects.count()
    count_workflow_step = DbWorkflowStep.objects.count()

    # Nothing to do if all tables are empty
    if count_workflow == 0 and count_workflow_data == 0 and count_workflow_step == 0:
        return

    if not configuration.PROFILE.is_test_profile:
        echo.echo('\n')
        echo.echo_warning('The legacy workflow tables contain data but will have to be dropped to continue.')
        echo.echo_warning('If you continue, the content will be dumped to a JSON file, before dropping the tables.')
        echo.echo_warning('This serves merely as a reference and cannot be used to restore the database.')
        echo.echo_warning('If you want a proper backup, make sure to dump the full database and backup your repository')
        if not click.confirm('Are you sure you want to continue', default=True):
            sys.exit(1)

    # For test profiles the dump is throwaway, so let the tempfile clean itself up.
    delete_on_close = configuration.PROFILE.is_test_profile

    data = {
        'workflow': serializers.serialize('json', DbWorkflow.objects.all()),
        'workflow_data': serializers.serialize('json', DbWorkflowData.objects.all()),
        'workflow_step': serializers.serialize('json', DbWorkflowStep.objects.all()),
    }

    # NOTE(review): the handle is opened in binary mode, so this relies on the in-scope `json` module accepting a
    # binary file object — the stdlib `json.dump` writes `str` and would fail here. Confirm which `json` is imported.
    with NamedTemporaryFile(
        prefix='legacy-workflows', suffix='.json', dir='.', delete=delete_on_close, mode='wb'
    ) as handle:
        filename = handle.name
        json.dump(data, handle)

    # If delete_on_close is False, we are running for the user and add additional message of file location
    if not delete_on_close:
        # BUGFIX: the message previously contained the literal text "(unknown)" instead of the `filename`
        # placeholder, so the location of the dump was never actually reported to the user.
        echo.echo_info(f'Exported workflow data to {filename}')
def setup( non_interactive, profile, email, first_name, last_name, institution, db_engine, db_backend, db_host, db_port, db_name, db_username, db_password, repository ): """Setup a new profile.""" # pylint: disable=too-many-arguments,too-many-locals,unused-argument from aiida import orm from aiida.manage.configuration import get_config profile.database_engine = db_engine profile.database_backend = db_backend profile.database_name = db_name profile.database_port = db_port profile.database_hostname = db_host profile.database_username = db_username profile.database_password = db_password profile.repository_uri = 'file://' + repository config = get_config() # Creating the profile config.add_profile(profile) config.set_default_profile(profile.name) # Load the profile load_profile(profile.name) echo.echo_success('created new profile `{}`.'.format(profile.name)) # Migrate the database echo.echo_info('migrating the database.') backend = get_manager()._load_backend(schema_check=False) # pylint: disable=protected-access try: backend.migrate() except Exception as exception: # pylint: disable=broad-except echo.echo_critical( 'database migration failed, probably because connection details are incorrect:\n{}'.format(exception) ) else: echo.echo_success('database migration completed.') # Optionally setting configuration default user settings config.set_option('user.email', email, override=False) config.set_option('user.first_name', first_name, override=False) config.set_option('user.last_name', last_name, override=False) config.set_option('user.institution', institution, override=False) # Create the user if it does not yet exist created, user = orm.User.objects.get_or_create( email=email, first_name=first_name, last_name=last_name, institution=institution ) if created: user.store() profile.default_user = user.email config.update_profile(profile) config.store()
def group_create(group_label):
    """Create an empty group with a given name."""
    from aiida import orm

    group, created = orm.Group.objects.get_or_create(label=group_label)

    if not created:
        echo.echo_info("Group '{}' already exists, PK = {}".format(group.label, group.id))
    else:
        echo.echo_success("Group created with PK = {} and name '{}'".format(group.id, group.label))
def check_db_name(self, dbname):
    """Looks up if a database with the name exists, prompts for using or creating a differently named one."""
    # Keep prompting while the candidate name collides with an existing database.
    while self.db_exists(dbname):
        echo.echo_info('database {} already exists!'.format(dbname))
        if click.confirm('Use it (make sure it is not used by another profile)?'):
            # Reuse the existing database: nothing needs to be created.
            return dbname, False
        dbname = click.prompt('new name', type=str, default=dbname)

    return dbname, True
def configure_computer_main(computer, user, **kwargs):
    """Configure a computer via the CLI."""
    from aiida import orm

    user = user or orm.User.objects.get_default()

    echo.echo_info('Configuring computer {} for user {}.'.format(computer.name, user.email))

    default_user_email = get_manager().get_profile().default_user
    if user.email != default_user_email:
        echo.echo_info('Configuring different user, defaults may not be appropriate.')

    computer.configure(user=user, **kwargs)
    echo.echo_success('{} successfully configured for {}'.format(computer.name, user.email))
def configure_computer_main(computer, user, **kwargs):
    """Configure a computer via the CLI."""
    from aiida.orm.backend import construct_backend
    from aiida.control.computer import configure_computer
    from aiida.common.utils import get_configured_user_email

    backend = construct_backend()
    user = user or backend.users.get_automatic_user()

    echo.echo_info('Configuring computer {} for user {}.'.format(computer.name, user.email))

    if user.email != get_configured_user_email():
        echo.echo_info('Configuring different user, defaults may not be appropriate.')

    configure_computer(computer, user=user, **kwargs)
    echo.echo_success('{} successfully configured for {}'.format(computer.name, user.email))
def enable_computer(only_for_user, computer):
    """Enable a computer"""
    from aiida.common.exceptions import NotExistent

    if only_for_user is None:
        # Toggle the global enabled state of the computer.
        if computer.is_enabled():
            echo.echo_info("Computer '{}' already enabled.".format(computer.name))
        else:
            computer.set_enabled_state(True)
            echo.echo_info("Computer '{}' enabled.".format(computer.name))
        return

    # Enable the computer only for the specified user, via its authinfo.
    try:
        authinfo = computer.get_authinfo(only_for_user)
    except NotExistent:
        echo.echo_critical(
            "User with email '{}' is not configured for computer '{}' yet.".format(only_for_user.email, computer.name)
        )

    if authinfo.enabled:
        echo.echo_info(
            "Computer '{}' was already enabled for user {} {}.".format(
                computer.name, only_for_user.first_name, only_for_user.last_name
            )
        )
    else:
        authinfo.enabled = True
        echo.echo_info("Computer '{}' enabled for user {}.".format(computer.name, only_for_user.get_full_name()))
def disable_computer(only_for_user, computer):
    """Disable a computer. Useful, for instance, when a computer is under maintenance."""
    from aiida.common.exceptions import NotExistent

    if only_for_user is None:
        # Toggle the global enabled state of the computer.
        if not computer.is_enabled():
            echo.echo_info("Computer '{}' already disabled.".format(computer.name))
        else:
            computer.set_enabled_state(False)
            echo.echo_info("Computer '{}' disabled.".format(computer.name))
        return

    # Disable the computer only for the specified user, via its authinfo.
    try:
        authinfo = computer.get_authinfo(only_for_user)
    except NotExistent:
        echo.echo_critical(
            "User with email '{}' is not configured for computer '{}' yet.".format(only_for_user.email, computer.name)
        )

    if not authinfo.enabled:
        echo.echo_info(
            "Computer '{}' was already disabled for user {} {}.".format(
                computer.name, only_for_user.first_name, only_for_user.last_name
            )
        )
    else:
        authinfo.enabled = False
        echo.echo_info("Computer '{}' disabled for user {}.".format(computer.name, only_for_user.get_full_name()))
def group_create(group_name):
    """
    Create a new empty group with the name GROUP_NAME
    """
    from aiida.orm import Group as OrmGroup

    group, created = OrmGroup.get_or_create(name=group_name)

    if not created:
        echo.echo_info("Group '{}' already exists, PK = {}".format(group.name, group.pk))
    else:
        echo.echo_success("Group created with PK = {} and name '{}'".format(group.pk, group.name))
def check_db(self, dbname):
    """Looks up if a database with the name exists, prompts for using or creating a differently named one.

    :param str dbname: Name of the database to be created or reused.
    :returns: tuple (dbname, created)
    """
    # Keep prompting while the candidate name collides with an existing database.
    while self.db_exists(dbname):
        echo.echo_info(f'database {dbname} already exists!')
        if click.confirm('Use it (make sure it is not used by another profile)?'):
            # Reuse the existing database: nothing needs to be created.
            return dbname, False
        dbname = click.prompt('new name', type=str, default=dbname)

    return dbname, True
def check_dbuser(self, dbuser):
    """Looks up if a given user already exists, prompts for using or creating a differently named one.

    :param str dbuser: Name of the user to be created or reused.
    :returns: tuple (dbuser, created)
    """
    # Keep prompting while the candidate name collides with an existing database user.
    while self.dbuser_exists(dbuser):
        echo.echo_info('Database user "{}" already exists!'.format(dbuser))
        if click.confirm('Use it? '):
            # Reuse the existing user: nothing needs to be created.
            return dbuser, False
        dbuser = click.prompt('New database user name: ', type=str, default=dbuser)

    return dbuser, True
def _import_archive(archive: str, web_based: bool, import_kwargs: dict, try_migration: bool):
    """Perform the archive import.

    :param archive: the path or URL to the archive
    :param web_based: If the archive needs to be downloaded first
    :param import_kwargs: keyword arguments to pass to the import function
    :param try_migration: whether to try a migration if the import raises IncompatibleArchiveVersionError
    """
    from aiida.common.folders import SandboxFolder
    from aiida.tools.importexport import (
        detect_archive_type, EXPORT_VERSION, import_data, IncompatibleArchiveVersionError
    )
    from aiida.tools.importexport.archive.migrators import get_migrator

    with SandboxFolder() as temp_folder:

        archive_path = archive

        if web_based:
            # Download the archive into the sandbox first and import the local copy.
            echo.echo_info(f'downloading archive: {archive}')
            try:
                response = urllib.request.urlopen(archive)
            except Exception as exception:  # pylint: disable=broad-except
                _echo_exception(f'downloading archive {archive} failed', exception)
            temp_folder.create_file_from_filelike(response, 'downloaded_archive.zip')
            archive_path = temp_folder.get_abs_path('downloaded_archive.zip')
            echo.echo_success('archive downloaded, proceeding with import')

        echo.echo_info(f'starting import: {archive}')
        try:
            import_data(archive_path, **import_kwargs)
        except IncompatibleArchiveVersionError as exception:
            if try_migration:
                # The archive is from an older export version: migrate it in the sandbox and retry.
                echo.echo_info(f'incompatible version detected for {archive}, trying migration')
                try:
                    migrator = get_migrator(detect_archive_type(archive_path))(archive_path)
                    archive_path = migrator.migrate(
                        EXPORT_VERSION, None, out_compression='none', work_dir=temp_folder.abspath
                    )
                except Exception as sub_exception:  # pylint: disable=broad-except
                    _echo_exception(f'an exception occurred while migrating the archive {archive}', sub_exception)

                echo.echo_info('proceeding with import of migrated archive')
                try:
                    import_data(archive_path, **import_kwargs)
                except Exception as sub_exception:  # pylint: disable=broad-except
                    _echo_exception(
                        f'an exception occurred while trying to import the migrated archive {archive}', sub_exception
                    )
            else:
                # BUGFIX: this message literal was broken by a stray line break in the source, which made the
                # function a syntax error; the literal is restored to a single f-string.
                _echo_exception(f'an exception occurred while trying to import the archive {archive}', exception)
        except Exception as exception:  # pylint: disable=broad-except
            _echo_exception(f'an exception occurred while trying to import the archive {archive}', exception)

        echo.echo_success(f'imported archive {archive}')
def simple_prompt_loop(self, ctx, param, value):
    """prompt until successful conversion. dispatch control sequences"""
    # Print the hint only once per context, regardless of how many prompts follow.
    if not hasattr(ctx, 'prompt_loop_info_printed'):
        echo.echo_info('enter "?" for help')
        ctx.prompt_loop_info_printed = True

    while True:
        # prompt
        value = self.prompt_func(ctx)
        if value in self._ctrl:
            # dispatch - e.g. show help
            self._ctrl[value]()
        else:
            # try to convert, if unsuccessful continue prompting
            successful, value = self.safely_convert(value, param, ctx)
            if successful:
                return value