def test_node_tree_printer(self):
    """Test the `NodeTreePrinter` utility."""
    from aiida.cmdline.utils.ascii_vis import NodeTreePrinter

    # Exercise the printer both without and with an explicit empty `follow_links`,
    # discarding the printed tree in each case.
    for extra_kwargs in ({}, {'follow_links': ()}):
        with Capturing():
            NodeTreePrinter.print_node_tree(self.node, max_depth=1, **extra_kwargs)
def test_node_tree_printer(self):
    """Test the `NodeTreePrinter` utility."""
    from aiida.cmdline.utils.ascii_vis import NodeTreePrinter

    # Ignore `AiidaDeprecationWarning` while exercising the printer, both without
    # and with an explicit empty `follow_links`; the printed tree is discarded.
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', category=AiidaDeprecationWarning)

        for extra_kwargs in ({}, {'follow_links': ()}):
            with Capturing():
                NodeTreePrinter.print_node_tree(self.node, max_depth=1, **extra_kwargs)
def setUp(self):
    """Go to a specific schema version before running tests.

    Reverses the schema to ``self.migrate_from``, prepares historical model handles and a
    default user, runs ``setUpBeforeMigration`` and then applies ``self.migrate_to``.
    If anything in the migration phase fails, the schema is reverted before re-raising.
    """
    from aiida.backends import sqlalchemy as sa
    from aiida.orm import autogroup

    # Disable the current autogroup for the duration of the test (presumably restored
    # in tearDown — not visible here).
    self.current_autogroup = autogroup.current_autogroup
    autogroup.current_autogroup = None
    assert self.migrate_from and self.migrate_to, \
        "TestCase '{}' must define migrate_from and migrate_to properties".format(type(self).__name__)

    # Wrap the plain migration names into (app, migration) tuples as the executor expects.
    self.migrate_from = [(self.app, self.migrate_from)]
    self.migrate_to = [(self.app, self.migrate_to)]
    executor = MigrationExecutor(connection)
    self.apps = executor.loader.project_state(self.migrate_from).apps
    self.schema_editor = connection.schema_editor()

    # Reset session for the migration
    sa.get_scoped_session().close()

    # Reverse to the original migration
    with Capturing():
        executor.migrate(self.migrate_from)

    # Reset session after the migration
    sa.get_scoped_session().close()

    # Historical model classes valid for the `migrate_from` project state.
    self.DbLink = self.apps.get_model('db', 'DbLink')
    self.DbNode = self.apps.get_model('db', 'DbNode')
    self.DbUser = self.apps.get_model('db', 'DbUser')
    self.DbUser.objects.all().delete()
    self.default_user = self.DbUser(1, 'aiida@localhost')
    self.default_user.save()

    try:
        self.setUpBeforeMigration()
        # Run the migration to test
        executor = MigrationExecutor(connection)
        executor.loader.build_graph()

        # Reset session for the migration
        sa.get_scoped_session().close()

        with Capturing():
            executor.migrate(self.migrate_to)

        # Reset session after the migration
        sa.get_scoped_session().close()

        self.apps = executor.loader.project_state(self.migrate_to).apps
    except Exception:
        # Bring back the DB to the correct state if this setup part fails
        import traceback
        traceback.print_stack()
        self._revert_database_schema()
        raise
def test_print_process_info(self):
    """Test the `print_process_info` method."""
    from aiida.cmdline.utils.common import print_process_info
    from aiida.common.utils import Capturing
    from aiida.engine import Process

    class TestProcessWithoutDocstring(Process):
        # pylint: disable=missing-docstring

        @classmethod
        def define(cls, spec):
            super(TestProcessWithoutDocstring, cls).define(spec)
            spec.input('some_input')

    class TestProcessWithDocstring(Process):
        """Some docstring."""

        @classmethod
        def define(cls, spec):
            super(TestProcessWithDocstring, cls).define(spec)
            spec.input('some_input')

    # We are just checking that the command does not except; its output is discarded.
    with Capturing():
        for process_class in (TestProcessWithoutDocstring, TestProcessWithDocstring):
            print_process_info(process_class)
def test_migrate_newest_version(self):
    """
    Test critical message and SystemExit is raised, when an export file with the newest export
    version is migrated.
    """
    # Initialization
    metadata = {'export_version': newest_version}
    # Pre-define the result: `migrate_recursively` is expected to raise before ever assigning
    # it. Without this, the final `assertIsNone` would fail with a `NameError` instead of
    # testing anything.
    new_version = None

    # Check
    with self.assertRaises(SystemExit) as exception:
        with Capturing(capture_stderr=True):
            new_version = migrate_recursively(metadata, {}, None)

    # Compare against the string representation of the exception: `assertIn` on the exception
    # object itself would raise `TypeError` because exceptions are not containers.
    self.assertIn(
        'Critical: Your export file is already at the newest export version {}'.format(metadata['export_version']),
        str(exception.exception),
        msg="Expected a critical statement that the export version '{}' is the newest export version '{}', "
        'instead got {}'.format(metadata['export_version'], newest_version, exception.exception)
    )
    self.assertIsNone(
        new_version,
        msg='migrate_recursively should not return anything, '
        "hence the 'return' should be None, but instead it is {}".format(new_version)
    )
def _revert_database_schema(self):
    """Migrate the database schema back to the latest migration."""
    from aiida.backends.djsite.db.migrations import LATEST_MIGRATION

    self.migrate_to = [(self.app, LATEST_MIGRATION)]
    migration_executor = MigrationExecutor(connection)

    # Run the forward migration, discarding its console output.
    with Capturing():
        migration_executor.migrate(self.migrate_to)
def setUp(self):
    """Go to a specific schema version before running tests.

    Reverses the schema to ``self.migrate_from``, prepares historical model handles and a
    default user, runs ``setUpBeforeMigration`` and then applies ``self.migrate_to``.
    If anything in the migration phase fails, the schema is reverted before re-raising.
    """
    from aiida.backends.djsite import get_scoped_session
    from aiida.orm import autogroup

    # Disable the current autogroup for the duration of the test (presumably restored
    # in tearDown — not visible here).
    self.current_autogroup = autogroup.CURRENT_AUTOGROUP
    autogroup.CURRENT_AUTOGROUP = None
    assert self.migrate_from and self.migrate_to, \
        "TestCase '{}' must define migrate_from and migrate_to properties".format(type(self).__name__)

    # Wrap the plain migration names into (app, migration) tuples as the executor expects.
    self.migrate_from = [(self.app, self.migrate_from)]
    self.migrate_to = [(self.app, self.migrate_to)]
    executor = MigrationExecutor(connection)
    self.apps = executor.loader.project_state(self.migrate_from).apps
    self.schema_editor = connection.schema_editor()

    # Before running the migration, make sure we close the querybuilder session which may still contain references
    # to objects whose mapping may be invalidated after resetting the schema to an older version. This can block
    # the migrations so we first expunge those objects by closing the session.
    get_scoped_session().close()

    # Reverse to the original migration
    with Capturing():
        executor.migrate(self.migrate_from)

    # Historical model classes valid for the `migrate_from` project state.
    self.DbLink = self.apps.get_model('db', 'DbLink')
    self.DbNode = self.apps.get_model('db', 'DbNode')
    self.DbUser = self.apps.get_model('db', 'DbUser')
    self.DbUser.objects.all().delete()
    self.default_user = self.DbUser(1, 'aiida@localhost')
    self.default_user.save()

    try:
        self.setUpBeforeMigration()
        # Run the migration to test
        executor = MigrationExecutor(connection)
        executor.loader.build_graph()

        with Capturing():
            executor.migrate(self.migrate_to)

        self.apps = executor.loader.project_state(self.migrate_to).apps
    except Exception:
        # Bring back the DB to the correct state if this setup part fails
        import traceback
        traceback.print_stack()
        self._revert_database_schema()
        raise
def run_async():
    # Wait for the process to reach its first pause point, then resume it.
    yield run_until_paused(process)
    process.play()

    # Awaiting the process future is expected to raise; swallow any output
    # produced while it does so.
    with Capturing(), self.assertRaises(RuntimeError):
        yield process.future()
def temporary_config_instance():
    """Create a temporary AiiDA instance.

    Generator: swaps the global configuration for a freshly created one rooted in a temporary
    directory, yields the new ``Config``, and restores the previous configuration, profile and
    config folder afterwards (the ``finally`` runs even if setup or the caller fails). The
    temporary directory is removed on exit.
    """
    # Pre-declare so the `finally` block can safely reference these even if setup fails early.
    current_config = None
    current_config_path = None
    current_profile_name = None
    temporary_config_directory = None

    from aiida.common.utils import Capturing
    from aiida.manage import configuration
    from aiida.manage.configuration import settings, load_profile, reset_profile

    try:
        from aiida.manage.configuration.settings import create_instance_directories

        # Store the current configuration instance and config directory path
        current_config = configuration.CONFIG
        current_config_path = current_config.dirpath
        current_profile_name = configuration.PROFILE.name
        reset_profile()
        configuration.CONFIG = None

        # Create a temporary folder, set it as the current config directory path and reset the loaded configuration
        profile_name = 'test_profile_1234'
        temporary_config_directory = tempfile.mkdtemp()
        settings.AIIDA_CONFIG_FOLDER = temporary_config_directory

        # Create the instance base directory structure, the config file and a dummy profile
        create_instance_directories()

        # The constructor of `Config` called by `load_config` will print warning messages about migrating it
        with Capturing():
            configuration.CONFIG = configuration.load_config(create=True)
        profile = create_mock_profile(
            name=profile_name, repository_dirpath=temporary_config_directory)

        # Add the created profile and set it as the default
        configuration.CONFIG.add_profile(profile)
        configuration.CONFIG.set_default_profile(profile_name, overwrite=True)
        configuration.CONFIG.store()
        load_profile()

        yield configuration.CONFIG
    finally:
        # Reset the config folder path and the config instance
        reset_profile()
        settings.AIIDA_CONFIG_FOLDER = current_config_path
        configuration.CONFIG = current_config
        load_profile(current_profile_name)

        # Destroy the temporary instance directory
        if temporary_config_directory and os.path.isdir(
                temporary_config_directory):
            shutil.rmtree(temporary_config_directory)
def set_ase(self, aseatoms):
    """
    Set the contents of the CifData starting from an ASE atoms object

    :param aseatoms: the ASE atoms object
    """
    import tempfile

    converted = cif_from_ase(aseatoms)
    with tempfile.NamedTemporaryFile(mode='w+') as handle:
        # Discard anything `WriteOut` may print while serializing the CIF content.
        with Capturing():
            handle.write(pycifrw_from_cif(converted, loops=ase_loops).WriteOut())
        handle.flush()
        self.set_file(handle.name)
def test_simple_run(self):
    """
    Run the workchain which should hit the exception and therefore end up
    in the EXCEPTED state
    """
    process = TestWorkChainAbortChildren.MainWorkChain()

    # Running is expected to raise; capture whatever is printed on the way down.
    with Capturing(), self.assertRaises(RuntimeError):
        launch.run(process)

    node = process.node
    self.assertEqual(node.is_finished_ok, False)
    self.assertEqual(node.is_excepted, True)
    self.assertEqual(node.is_killed, False)
def _revert_database_schema(self):
    """Migrate the database schema back to the latest migration."""
    from ...migrations import LATEST_MIGRATION
    from aiida.backends import sqlalchemy as sa

    self.migrate_to = [(self.app, LATEST_MIGRATION)]

    # Close the scoped session before migrating so no stale ORM state survives.
    sa.get_scoped_session().close()

    migration_executor = MigrationExecutor(connection)
    with Capturing():
        migration_executor.migrate(self.migrate_to)

    # Close the session again so subsequent queries start from a clean state.
    sa.get_scoped_session().close()
def test_create_use_destroy_profile2(self):
    """
    Test temporary test profile creation

    * The profile gets created, the dbenv loaded
    * Data can be stored in the db
    * reset_db deletes all data added after profile creation
    * destroy_all removes all traces of the test run

    Note: This test function loads the dbenv - i.e. you cannot run similar test functions
    (that create profiles) in the same test session. aiida.manage.configuration.reset_profile()
    was not yet enough, see https://github.com/aiidateam/aiida-core/issues/3482
    """
    with Capturing() as output:
        self.profile_manager.create_profile()

    # Attach the captured setup output to the assertion messages for easier debugging.
    self.assertTrue(self.profile_manager.root_dir_ok, msg=output)
    self.assertTrue(self.profile_manager.config_dir_ok, msg=output)
    self.assertTrue(self.profile_manager.repo_ok, msg=output)
    from aiida.manage.configuration.settings import AIIDA_CONFIG_FOLDER
    self.assertEqual(AIIDA_CONFIG_FOLDER, self.profile_manager.config_dir, msg=output)

    # A node can be stored and loaded back in the new profile's database.
    from aiida.orm import load_node
    from aiida.plugins import DataFactory
    data = DataFactory('dict')(dict={'key': 'value'})
    data.store()
    data_pk = data.pk
    self.assertTrue(load_node(data_pk))

    # Creating a second test DB within the same session is expected to fail.
    with self.assertRaises(TestManagerError):
        self.test_create_aiida_db()

    # Resetting the DB must remove the node stored above.
    self.profile_manager.reset_db()
    with self.assertRaises(Exception):
        load_node(data_pk)

    # After destroy_all, the database, directories and cluster handle must all be gone.
    temp_dir = self.profile_manager.root_dir
    self.profile_manager.destroy_all()
    with self.assertRaises(Exception):
        self.profile_manager.postgres.db_exists(
            self.profile_manager.dbinfo['db_name'])
    self.assertFalse(os.path.exists(temp_dir))
    self.assertIsNone(self.profile_manager.root_dir)
    self.assertIsNone(self.profile_manager.pg_cluster)
def test_query_yes_no(self):
    """
    This method tests the query_yes_no method behaves as expected. To
    perform this, a lambda function is used to simulate the user input.
    """
    from aiida.common.utils import Capturing
    from aiida.manage.backup import backup_utils

    # Capture the sysout for the following code
    with Capturing():
        # Check the yes
        backup_utils.input = lambda _: 'y'
        self.assertTrue(backup_utils.query_yes_no('', 'yes'))

        backup_utils.input = lambda _: 'yes'
        self.assertTrue(backup_utils.query_yes_no('', 'yes'))

        # Check the no
        backup_utils.input = lambda _: 'no'
        self.assertFalse(backup_utils.query_yes_no('', 'yes'))

        backup_utils.input = lambda _: 'n'
        self.assertFalse(backup_utils.query_yes_no('', 'yes'))

        # Check the empty default value that should
        # lead to an error
        with self.assertRaises(ValueError):
            backup_utils.query_yes_no('', '')

        # Check that a None default value and no answer from
        # the user should lead to the repetition of the query until
        # it is answered properly
        self.seq = -1
        answers = ['', '', '', 'yes']
        backup_utils.input = lambda _: answers[self.array_counter()]
        self.assertTrue(backup_utils.query_yes_no('', None))
        # All prepared answers must have been consumed before the query succeeded.
        self.assertEqual(self.seq, len(answers) - 1)

        # Check that the default answer is returned
        # when the user doesn't give an answer
        backup_utils.input = lambda _: ''
        self.assertTrue(backup_utils.query_yes_no('', 'yes'))

        backup_utils.input = lambda _: ''
        self.assertFalse(backup_utils.query_yes_no('', 'no'))
def set_values(self, values):
    """
    Set internal representation to `values`.

    Warning: This also writes a new CIF file.

    :param values: PyCifRW CifFile object

    .. note:: requires PyCifRW module.
    """
    import tempfile

    with tempfile.NamedTemporaryFile(mode='w+') as handle:
        # Discard anything `WriteOut` may print while serializing the CIF content.
        with Capturing():
            handle.write(values.WriteOut())
        handle.flush()
        self.set_file(handle.name)

    self._values = values
def _prepare_cif(self, trajectory_index=None, main_file_name=''):  # pylint: disable=unused-argument
    """
    Write the given trajectory to a string of format CIF.

    :param trajectory_index: if not None, export only this single step; otherwise
        export all `self.numsteps` steps.
    :param main_file_name: unused, kept for interface compatibility.
    :return: tuple of the UTF-8 encoded CIF content and an (empty) dictionary.
    """
    from aiida.orm.nodes.data.cif \
        import ase_loops, cif_from_ase, pycifrw_from_cif
    from aiida.common.utils import Capturing

    if trajectory_index is not None:
        indices = [trajectory_index]
    else:
        indices = list(range(self.numsteps))

    # Collect the per-step CIF fragments and join once at the end: repeated string
    # concatenation in the loop was accidentally quadratic in the number of steps.
    fragments = []
    for idx in indices:
        structure = self.get_step_structure(idx)
        ciffile = pycifrw_from_cif(cif_from_ase(structure.get_ase()), ase_loops)
        # `WriteOut` may print to stdout; suppress that while serializing.
        with Capturing():
            fragments.append(ciffile.WriteOut())

    return ''.join(fragments).encode('utf-8'), {}
def test_wrong_versions(self):
    """Test correct errors are raised if export files have wrong version numbers"""
    from aiida.tools.importexport.migration import MIGRATE_FUNCTIONS

    # Initialization
    wrong_versions = ['0.0', '0.1.0', '0.99']
    old_versions = list(MIGRATE_FUNCTIONS.keys())
    legal_versions = old_versions + [newest_version]
    wrong_version_metadatas = [{'export_version': version} for version in wrong_versions]

    # Checks
    # Make sure the "wrong_versions" are wrong
    for version in wrong_versions:
        self.assertNotIn(
            version,
            legal_versions,
            msg="'{}' was not expected to be a legal version, legal version: {}".format(version, legal_versions)
        )

    # Make sure migrate_recursively throws a critical message and raises SystemExit
    for metadata in wrong_version_metadatas:
        # Pre-define the result: `migrate_recursively` is expected to raise before ever
        # assigning it. Without this, `assertIsNone` below would fail with a `NameError`.
        new_version = None

        with self.assertRaises(SystemExit) as exception:
            with Capturing(capture_stderr=True):
                new_version = migrate_recursively(metadata, {}, None)

        # Compare against the string representation of the exception: `assertIn` on the
        # exception object itself would raise `TypeError` since exceptions are not containers.
        self.assertIn(
            'Critical: Cannot migrate from version {}'.format(metadata['export_version']),
            str(exception.exception),
            msg="Expected a critical statement for the wrong export version '{}', "
            'instead got {}'.format(metadata['export_version'], exception.exception)
        )
        self.assertIsNone(
            new_version,
            msg='migrate_recursively should not return anything, '
            "hence the 'return' should be None, but instead it is {}".format(new_version)
        )
def test_integration(self):
    """Test integration: create and run the backup scripts, then verify the backed-up
    repository matches the original one."""
    from aiida.common.utils import Capturing

    # Fill in the repository with data
    self.fill_repo()

    # Create the temp folder *before* entering the try-block: if `mkdtemp` itself
    # raised inside the try, the `finally` would hit a NameError on `temp_folder`.
    temp_folder = tempfile.mkdtemp()

    try:
        # Capture the sysout of the following command
        with Capturing():
            # Create the backup scripts
            backup_full_path = self.create_backup_scripts(temp_folder)

        # Put the backup folder in the path
        sys.path.append(backup_full_path)

        # Import the backup script - this action will also run it
        # It is assumed that the backup script ends with .py
        importlib.import_module(self._bs_instance._script_filename[:-3])

        # Check the backup
        import os
        from aiida.manage.configuration import get_profile
        from aiida.common.utils import are_dir_trees_equal
        dirpath_repository = get_profile().repository_path
        source_dir = os.path.join(dirpath_repository, self._repo_rel_path)
        dest_dir = os.path.join(
            backup_full_path, self._bs_instance._file_backup_folder_rel, self._repo_rel_path
        )
        res, msg = are_dir_trees_equal(source_dir, dest_dir)
        self.assertTrue(
            res, 'The backed-up repository has differences to the original one. ' + str(msg) +
            '. If the test fails, report it in issue #2134.'
        )
    finally:
        shutil.rmtree(temp_folder, ignore_errors=True)
def test_manager(backend=BACKEND_DJANGO, profile_name=None, pgtest=None):
    """ Context manager for TestManager objects.

    Sets up temporary AiiDA environment for testing or reuses existing environment,
    if `AIIDA_TEST_PROFILE` environment variable is set.

    Example pytest fixture::

        def aiida_profile():
            with test_manager(backend) as test_mgr:
                yield fixture_mgr

    Example unittest test runner::

        with test_manager(backend) as test_mgr:
            # ready for tests
        # everything cleaned up

    :param backend: database backend, either BACKEND_SQLA or BACKEND_DJANGO
    :param profile_name: name of test profile to be used or None (to use temporary profile)
    :param pgtest: a dictionary of arguments to be passed to PGTest() for starting the postgresql cluster,
      e.g. {'pg_ctl': '/somepath/pg_ctl'}. Should usually not be necessary.
    """
    from aiida.common.log import configure_logging
    from aiida.common.utils import Capturing

    try:
        if not _GLOBAL_TEST_MANAGER.has_profile_open():
            if not profile_name:
                # No profile requested: set up a throwaway one, discarding the
                # output printed during the AiiDA DB setup.
                with Capturing():
                    _GLOBAL_TEST_MANAGER.use_temporary_profile(backend=backend, pgtest=pgtest)
            else:
                _GLOBAL_TEST_MANAGER.use_profile(profile_name=profile_name)
        configure_logging(with_orm=True)
        yield _GLOBAL_TEST_MANAGER
    finally:
        # Tear down whatever environment was set up, even if the caller raised.
        _GLOBAL_TEST_MANAGER.destroy_all()
def empty_config(tmp_path) -> Config:
    """Create a temporary configuration instance.

    This creates a temporary directory with a clean `.aiida` folder and basic configuration file.
    The currently loaded configuration and profile are stored in memory and are automatically
    restored at the end of this context manager.

    :param tmp_path: temporary directory (presumably the pytest ``tmp_path`` fixture).
    :return: a new empty config instance.
    """
    from aiida.common.utils import Capturing
    from aiida.manage import configuration
    from aiida.manage.configuration import settings, reset_profile

    # Store the current configuration instance and config directory path
    current_config = configuration.CONFIG
    current_config_path = current_config.dirpath
    current_profile_name = configuration.PROFILE.name
    reset_profile()
    configuration.CONFIG = None

    # Create a temporary folder, set it as the current config directory path and reset the loaded configuration
    settings.AIIDA_CONFIG_FOLDER = str(tmp_path)

    # Create the instance base directory structure, the config file and a dummy profile
    settings.create_instance_directories()

    # The constructor of `Config` called by `load_config` will print warning messages about migrating it
    with Capturing():
        configuration.CONFIG = configuration.load_config(create=True)

    yield get_config()

    # Reset the config folder path and the config instance. Note this will always be executed after the yield no
    # matter what happened in the test that used this fixture.
    # NOTE(review): this relies on the fixture runner resuming the generator for teardown;
    # the restore code is not inside a try/finally here.
    reset_profile()
    settings.AIIDA_CONFIG_FOLDER = current_config_path
    configuration.CONFIG = current_config
    load_profile(current_profile_name)
def test_ask_backup_question(self):
    """
    This method checks that the combined use of query_string and query_yes_no
    by the ask_backup_question is done as expected.
    """
    from aiida.common.utils import Capturing
    from aiida.manage.backup import backup_utils

    # Capture the sysout for the following code
    with Capturing():
        # Test that a question that asks for an integer is working
        # The given answers are in order:
        # - a non-accepted empty answer
        # - an answer that can not be parsed based on the given type
        # - the final expected answer
        self.seq = -1
        answers = ['', '3fd43', '1', 'yes']
        backup_utils.input = lambda _: answers[self.array_counter()]
        self.assertEqual(backup_utils.ask_question('', int, False),
                         int(answers[2]))

        # Test that a question that asks for a date is working correctly.
        # The behavior is similar to the above test.
        self.seq = -1
        answers = ['', '3fd43', '2015-07-28 20:48:53.197537+02:00', 'yes']
        backup_utils.input = lambda _: answers[self.array_counter()]
        self.assertEqual(
            backup_utils.ask_question('', datetime.datetime, False),
            parse(answers[2]))

        # Check that None is not allowed as answer
        question = ''
        answer = ''
        # Reply with the empty answer to the question itself and 'y' to any other prompt.
        backup_utils.input = lambda x: answer if x == question else 'y'
        self.assertEqual(backup_utils.ask_question(question, int, True), None)
def test_full_backup_setup_script(self):
    """
    This method is a full test of the backup setup script. It launches it, replies to all
    the question as the user would do and in the end it checks that the correct files
    were created with the right content.
    """
    from aiida.common.utils import Capturing

    # Create a temp folder where the backup files will be placed
    temp_folder = tempfile.mkdtemp()

    try:
        temp_aiida_folder = os.path.join(temp_folder, '.aiida')
        # The predefined answers for the setup script
        counter = utils.ArrayCounter()
        answers = [
            temp_aiida_folder,  # the backup folder path
            '',  # should the folder be created?
            '',  # destination folder of the backup
            '',  # should the folder be created?
            'n',  # print config explanation?
            '',  # configure the backup conf file now?
            '2014-07-18 13:54:53.688484+00:00',  # start date of backup?
            '',  # is it correct?
            '',  # days to backup?
            '',  # is it correct?
            '2015-04-11 13:55:53.688484+00:00',  # end date of backup
            '',  # is it correct?
            '1',  # periodicity
            '',  # is it correct?
            '2',  # threshold?
            ''  # is it correct?
        ]
        # Feed the scripted answers in place of interactive user input.
        backup_utils.input = lambda _: answers[counter.array_counter()]

        # Run the setup script and catch the sysout
        with Capturing():
            backup_setup.BackupSetup().run()

        # Get the backup configuration files & dirs
        backup_conf_records = os.listdir(temp_aiida_folder)
        # Check if all files & dirs are there
        self.assertTrue(
            backup_conf_records is not None and len(backup_conf_records) == 4 and
            'backup_dest' in backup_conf_records and
            'backup_info.json.tmpl' in backup_conf_records and
            'start_backup.py' in backup_conf_records and
            'backup_info.json' in backup_conf_records,
            'The created backup folder does not have the expected files. It contains: {}.'
            ''.format(backup_conf_records))

        # Check the content of the main backup configuration file
        with open(os.path.join(temp_aiida_folder, 'backup_info.json'),
                  encoding='utf8') as conf_jfile:
            conf_cont = json.load(conf_jfile)
        self.assertEqual(
            conf_cont[AbstractBackup.OLDEST_OBJECT_BK_KEY],
            '2014-07-18 13:54:53.688484+00:00')
        self.assertEqual(conf_cont[AbstractBackup.DAYS_TO_BACKUP_KEY], None)
        self.assertEqual(
            conf_cont[AbstractBackup.END_DATE_OF_BACKUP_KEY],
            '2015-04-11 13:55:53.688484+00:00')
        self.assertEqual(conf_cont[AbstractBackup.PERIODICITY_KEY], 1)
        self.assertEqual(
            conf_cont[AbstractBackup.BACKUP_LENGTH_THRESHOLD_KEY], 2)
    finally:
        shutil.rmtree(temp_folder, ignore_errors=True)
def verdi_status(no_rmq): """Print status of AiiDA services.""" # pylint: disable=broad-except,too-many-statements from aiida.cmdline.utils.daemon import get_daemon_status, delete_stale_pid_file from aiida.common.utils import Capturing from aiida.manage.external.rmq import get_rmq_url from aiida.manage.manager import get_manager from aiida.manage.configuration.settings import AIIDA_CONFIG_FOLDER exit_code = ExitCode.SUCCESS print_status(ServiceStatus.UP, 'config dir', AIIDA_CONFIG_FOLDER) manager = get_manager() profile = manager.get_profile() if profile is None: print_status(ServiceStatus.WARNING, 'profile', 'no profile configured yet') echo.echo_info( 'Configure a profile by running `verdi quicksetup` or `verdi setup`.' ) return try: profile = manager.get_profile() print_status(ServiceStatus.UP, 'profile', 'On profile {}'.format(profile.name)) except Exception as exc: print_status(ServiceStatus.ERROR, 'profile', 'Unable to read AiiDA profile', exception=exc) sys.exit(ExitCode.CRITICAL ) # stop here - without a profile we cannot access anything # Getting the repository repo_folder = 'undefined' try: repo_folder = profile.repository_path except Exception as exc: print_status(ServiceStatus.ERROR, 'repository', 'Error with repo folder', exception=exc) exit_code = ExitCode.CRITICAL else: print_status(ServiceStatus.UP, 'repository', repo_folder) # Getting the postgres status by trying to get a database cursor database_data = [ profile.database_username, profile.database_hostname, profile.database_port ] try: with override_log_level(): # temporarily suppress noisy logging backend = manager.get_backend() backend.cursor() except Exception: print_status(ServiceStatus.DOWN, 'postgres', 'Unable to connect as {}@{}:{}'.format(*database_data)) exit_code = ExitCode.CRITICAL else: print_status(ServiceStatus.UP, 'postgres', 'Connected as {}@{}:{}'.format(*database_data)) # Getting the rmq status if not no_rmq: try: with Capturing(capture_stderr=True): with override_log_level( ): 
# temporarily suppress noisy logging comm = manager.create_communicator(with_orm=False) comm.stop() except Exception as exc: print_status(ServiceStatus.ERROR, 'rabbitmq', 'Unable to connect to rabbitmq', exception=exc) exit_code = ExitCode.CRITICAL else: print_status(ServiceStatus.UP, 'rabbitmq', 'Connected to {}'.format(get_rmq_url())) # Getting the daemon status try: client = manager.get_daemon_client() delete_stale_pid_file(client) daemon_status = get_daemon_status(client) daemon_status = daemon_status.split('\n')[ 0] # take only the first line if client.is_daemon_running: print_status(ServiceStatus.UP, 'daemon', daemon_status) else: print_status(ServiceStatus.WARNING, 'daemon', daemon_status) exit_code = ExitCode.SUCCESS # A daemon that is not running is not a failure except Exception as exc: print_status(ServiceStatus.ERROR, 'daemon', 'Error getting daemon status', exception=exc) exit_code = ExitCode.CRITICAL # Note: click does not forward return values to the exit code, see https://github.com/pallets/click/issues/747 sys.exit(exit_code)