def get_scoped_session(**kwargs):
    """Return a scoped session

    According to SQLAlchemy docs, this returns always the same object within a thread, and a different object in a
    different thread. Moreover, since we update the session class upon forking, different session objects will be used.

    :param kwargs: keyword argument that will be passed on to :py:func:`aiida.backends.utils.create_sqlalchemy_engine`,
        opening the possibility to change QueuePool time outs and more.
        See https://docs.sqlalchemy.org/en/13/core/engines.html?highlight=create_engine#sqlalchemy.create_engine for
        more info.
    """
    from aiida.manage.configuration import get_profile

    global ENGINE
    global SESSION_FACTORY

    # Lazily initialize the engine and factory on first use; afterwards every call in the same
    # thread hands back the same session object from the scoped factory.
    if SESSION_FACTORY is None:
        if ENGINE is None:
            ENGINE = create_sqlalchemy_engine(get_profile(), **kwargs)

        SESSION_FACTORY = create_scoped_session_factory(ENGINE, expire_on_commit=True)

    return SESSION_FACTORY()
def get_repository_folder(subfolder=None):
    """Return the top folder of the local repository.

    :param subfolder: if None, return the repository root; 'sandbox' or 'repository' return the
        corresponding sub folder; any other value raises a ``ValueError``
    :return: the absolute path of the requested folder
    :raises ImportError: if the profile's repository path is not an existing directory
    """
    try:
        # Computed paths are memoized per sub folder to avoid repeated filesystem work.
        return _repository_folder_cache[subfolder]
    except KeyError:
        from aiida.manage.configuration import get_profile
        repository_path = get_profile().repository_path

        if not os.path.isdir(repository_path):
            # `ImportError` is kept for backwards compatibility with existing callers, but it now
            # carries an explanatory message instead of being raised bare.
            raise ImportError('repository path `{}` is not an existing directory'.format(repository_path))

        if subfolder is None:
            retval = os.path.abspath(repository_path)
        elif subfolder == 'sandbox':
            retval = os.path.abspath(os.path.join(repository_path, 'sandbox'))
        elif subfolder == 'repository':
            retval = os.path.abspath(os.path.join(repository_path, 'repository'))
        else:
            raise ValueError("Invalid 'subfolder' passed to get_repository_folder: {}".format(subfolder))

        _repository_folder_cache[subfolder] = retval
        return retval
def inner(config_dict):
    """Write ``config_dict`` as the caching configuration of the current profile and load it.

    After the ``yield``, the default configuration is restored.
    """
    with tempfile.NamedTemporaryFile() as handle:
        yaml.dump({get_profile().name: config_dict}, handle, encoding='utf-8')
        # Flush the buffered YAML to disk: `configure` re-opens the file by name while the handle
        # is still open, so without the flush it could read an empty or truncated file.
        handle.flush()
        configure(config_file=handle.name)

    yield

    # reset the configuration
    configure()
def reset_session(profile=None):
    """
    Resets (global) engine and sessionmaker classes, to create a new one
    (or creates a new one from scratch if not already available)

    :param profile: the profile whose configuration to use to connect to the database
    """
    from multiprocessing.util import register_after_fork
    from urllib.parse import quote_plus

    from aiida.common import json
    from aiida.manage.configuration import get_profile

    global ENGINE
    global SCOPED_SESSION_CLASS

    if profile is None:
        profile = get_profile()

    separator = ':' if profile.database_port else ''

    # URL-quote the credentials: a password (or user name) containing characters such as
    # `@`, `:` or `/` would otherwise corrupt the connection URL.
    engine_url = 'postgresql://{user}:{password}@{hostname}{separator}{port}/{name}'.format(
        separator=separator,
        user=quote_plus(profile.database_username),
        password=quote_plus(profile.database_password),
        hostname=profile.database_hostname,
        port=profile.database_port,
        name=profile.database_name)

    ENGINE = create_engine(engine_url, json_serializer=json.dumps, json_deserializer=json.loads, encoding='utf-8')
    SCOPED_SESSION_CLASS = scoped_session(sessionmaker(bind=ENGINE, expire_on_commit=True))

    # Recreate the session machinery in child processes so they do not share connections with the parent.
    register_after_fork(ENGINE, recreate_after_fork)
def get_scoped_session(**kwargs):
    """Return a scoped session dedicated to the `QueryBuilder`.

    The `QueryBuilder` maps queries onto SqlAlchemy models to generate the SQL that is sent to the
    database, and therefore needs an :class:`sqlalchemy.orm.session.Session` purely to connect,
    run the query and fetch the results. This holds even for the Django backend, whose
    `QueryBuilder` implementation also relies on SqlAlchemy.

    The scoped session factory of the SqlAlchemy backend is deliberately not reused here: sharing
    it risks cross-talk once profiles can be switched dynamically within a single interpreter, so
    this module keeps its own engine and scoped session factory.

    :param kwargs: keyword arguments forwarded to :py:func:`aiida.backends.utils.create_sqlalchemy_engine`,
        allowing e.g. QueuePool time outs to be changed. See
        https://docs.sqlalchemy.org/en/13/core/engines.html?highlight=create_engine#sqlalchemy.create_engine
        for more info.
    :return: :class:`sqlalchemy.orm.session.Session` instance with engine configured for the given profile.
    """
    from aiida.manage.configuration import get_profile

    global ENGINE
    global SESSION_FACTORY

    # Engine and factory are created lazily on first use and reused afterwards.
    if SESSION_FACTORY is None:
        if ENGINE is None:
            ENGINE = create_sqlalchemy_engine(get_profile(), **kwargs)

        SESSION_FACTORY = create_scoped_session_factory(ENGINE)

    return SESSION_FACTORY()
def test_empty_enabled_disabled(self):  # pylint: disable=no-self-use
    """Test that `aiida.manage.caching.configure` does not except when either `enabled` or `disabled` is `None`.

    This will happen when the configuration file specifies either one of the keys but no actual values, e.g.::

        profile_name:
            default: False
            enabled:

    In this case, the dictionary parsed by yaml will contain `None` for the `enabled` key.
    Now this will be unlikely, but the same holds when all values are commented::

        profile_name:
            default: False
            enabled:
                # - aiida.calculations:templatereplacer

    which is not unlikely to occurr in the wild.
    """
    configuration = {get_profile().name: {'default': True, 'enabled': None, 'disabled': None}}

    with tempfile.NamedTemporaryFile() as handle:
        yaml.dump(configuration, handle, encoding='utf-8')
        # Flush so that `configure`, which re-opens the file by name, sees the full contents
        # instead of whatever happens to have left the write buffer.
        handle.flush()
        configure(config_file=handle.name)

        # Check that `get_use_cache` also does not except
        get_use_cache(identifier='aiida.calculations:templatereplacer')
def get_scoped_session():
    """Return a scoped session dedicated to the `QueryBuilder`.

    The `QueryBuilder` uses SqlAlchemy to map queries onto the models and generate the SQL sent to
    the database, so it needs an :class:`sqlalchemy.orm.session.Session` solely to connect, run the
    query and retrieve results. This also applies to the Django backend, whose `QueryBuilder`
    likewise relies on SqlAlchemy.

    The scoped session factory of the SqlAlchemy backend implementation is intentionally not
    reused, since sharing it risks cross-talk once profiles can be switched dynamically in a single
    interpreter. This module therefore maintains its own engine and scoped session factory.

    :return: :class:`sqlalchemy.orm.session.Session` instance with engine configured for the
        currently loaded profile.
    """
    from aiida.manage.configuration import get_profile

    global ENGINE
    global SESSION_FACTORY

    # Create the engine and session factory once, then keep handing out sessions from the factory.
    if SESSION_FACTORY is None:
        if ENGINE is None:
            ENGINE = create_sqlalchemy_engine(get_profile())

        SESSION_FACTORY = create_scoped_session_factory(ENGINE)

    return SESSION_FACTORY()
def is_dbenv_loaded():
    """Determine whether database environment is already loaded.

    :rtype: bool

    .. deprecated:: 1.0.0
        Will be removed in `v2.0.0`, use :func:`aiida.manage.configuration.load_profile` instead.
    """
    warnings.warn('function is deprecated, use `load_profile` instead', AiidaDeprecationWarning)  # pylint: disable=no-member

    # A loaded profile is the indicator that the database environment is available.
    profile_loaded = get_profile() is not None
    return profile_loaded
def _get_config(config_file):
    """Return the caching configuration.

    :param config_file: the absolute path to the caching configuration file
    :return: the configuration dictionary
    :raises ~aiida.common.exceptions.ConfigurationError: if no profile is loaded or an entry point
        identifier in the configuration is invalid or cannot be loaded
    :raises ValueError: if the configuration contains an unknown key
    """
    from aiida.manage.configuration import get_profile
    from aiida.plugins.entry_point import is_valid_entry_point_string, load_entry_point_from_string

    profile = get_profile()

    if profile is None:
        # Bug fix: the exception was previously instantiated without being raised, so a missing
        # profile silently fell through to the `profile.name` access below.
        raise exceptions.ConfigurationError('no profile has been loaded')

    try:
        with open(config_file, 'r', encoding='utf8') as handle:
            config = yaml.safe_load(handle)[profile.name]
    except (OSError, IOError, KeyError):
        # No config file, or no config for this profile
        return DEFAULT_CONFIG

    # Validate configuration
    for key in config:
        if key not in DEFAULT_CONFIG:
            raise ValueError("Configuration error: Invalid key '{}' in cache_config.yml".format(key))

    # Add defaults where key is either completely missing or specifies no values in which case it will be `None`
    for key, default_config in DEFAULT_CONFIG.items():
        if key not in config or config[key] is None:
            config[key] = default_config

    # Validate the entry point identifiers
    for key in [ConfigKeys.ENABLED.value, ConfigKeys.DISABLED.value]:

        # If the key is defined in the file but contains no values, it will be `None`
        if config[key] is None:
            continue

        for identifier in config[key]:
            if not is_valid_entry_point_string(identifier):
                raise exceptions.ConfigurationError(
                    "entry point '{}' in 'cache_config.yml' is not a valid entry point string.".format(identifier))

            try:
                load_entry_point_from_string(identifier)
            except exceptions.EntryPointError as exception:
                raise exceptions.ConfigurationError(
                    "entry point '{}' in 'cache_config.yml' can not be loaded: {}.".format(identifier, exception))

    return config
def load_configuration(identifier):
    """Write the caching file for given configuration and load it."""
    configuration = {get_profile().name: {'default': True, 'enabled': [identifier]}}

    with tempfile.NamedTemporaryFile() as handle:
        yaml.dump(configuration, handle, encoding='utf-8')
        # Flush the buffered YAML to disk before `configure` re-opens the file by name,
        # otherwise it may read an incomplete file.
        handle.flush()
        configure(config_file=handle.name)
def setUp(self):
    """Write a temporary config file, and load the configuration."""
    self.config_reference = {
        get_profile().name: {
            'default': True,
            'enabled': ['aiida.calculations:arithmetic.add'],
            'disabled': ['aiida.calculations:templatereplacer']
        }
    }

    with tempfile.NamedTemporaryFile() as handle:
        yaml.dump(self.config_reference, handle, encoding='utf-8')
        # Flush so that `configure`, which re-opens the file by name, sees the complete YAML
        # instead of whatever part of it happened to leave the write buffer.
        handle.flush()
        configure(config_file=handle.name)
def _run_cli_command(
    command: click.Command,
    options: Optional[List] = None,
    input: Optional[Union[str, bytes, IO]] = None,
    raises: bool = False,
) -> Result:
    """Run the command and check the result.

    .. note:: the `output_lines` attribute is added to return value containing list of stripped output lines.

    :param options: the list of command line options to pass to the command invocation
    :param raises: whether the command is expected to raise an exception
    :return: test result
    """
    import traceback

    from aiida.cmdline.commands.cmd_verdi import VerdiCommandGroup
    from aiida.common import AttributeDict
    from aiida.manage.configuration import get_config, get_profile

    context_obj = AttributeDict({"config": get_config(), "profile": get_profile()})

    # ``invoke`` does not support ``pathlib.Path`` objects, so cast any of them to plain strings.
    normalized_options = []
    for option in options or []:
        normalized_options.append(str(option) if isinstance(option, pathlib.Path) else option)

    # The ``VERBOSITY`` option is normally applied by the ``VerdiCommandGroup`` machinery when
    # invoked from the command line; tests fetch the command straight from the module and so
    # bypass that machinery, hence it is applied here explicitly.
    command = VerdiCommandGroup.add_verbosity_option(command)

    cli_runner = click.testing.CliRunner()
    result = cli_runner.invoke(command, args=normalized_options, obj=context_obj, input=input)

    if raises:
        assert result.exception is not None, result.output
        assert result.exit_code != 0
    else:
        assert result.exception is None, "".join(traceback.format_exception(*result.exc_info))
        assert result.exit_code == 0, result.output

    result.output_lines = [line.strip() for line in result.output.split("\n") if line.strip()]

    return result
def has_icsd_config():
    """
    :return: True if the currently loaded profile has a ICSD configuration
    """
    from aiida.manage.configuration import get_profile

    # All of these keys must be present in the profile dictionary for the ICSD tests to run.
    required_keywords = (
        'ICSD_SERVER_URL',
        'ICSD_MYSQL_HOST',
        'ICSD_MYSQL_USER',
        'ICSD_MYSQL_PASSWORD',
        'ICSD_MYSQL_DB',
    )

    settings = get_profile().dictionary
    return all(keyword in settings for keyword in required_keywords)
def get_node_repository_sub_folder(uuid, subfolder='path'):
    """Return the absolute path to the sub folder `path` within the repository of the node with the given UUID.

    :param uuid: UUID of the node
    :param subfolder: name of the sub folder within the node folder; defaults to `path` so existing
        callers are unaffected, but other sub folders can now be addressed as well
    :return: absolute path to node repository folder, i.e `/some/path/repository/node/12/ab/c123134-a123/path`
    """
    from aiida.manage.configuration import get_profile

    uuid = str(uuid)

    repo_dirpath = os.path.join(get_profile().repository_path, 'repository')
    # Nodes are sharded by the first two pairs of UUID characters.
    node_dirpath = os.path.join(repo_dirpath, 'node', uuid[:2], uuid[2:4], uuid[4:], subfolder)

    return node_dirpath
def _get_config(config_file):
    """Return the caching configuration.

    :param config_file: the absolute path to the caching configuration file
    :return: the configuration dictionary
    :raises ~aiida.common.exceptions.ConfigurationError: if no profile is loaded, the configuration
        contains an invalid key, a value has the wrong type, or an identifier pattern is invalid
    """
    from aiida.manage.configuration import get_profile

    profile = get_profile()

    if profile is None:
        # Bug fix: the exception was previously instantiated without being raised, so a missing
        # profile silently fell through to the `profile.name` access below.
        raise exceptions.ConfigurationError('no profile has been loaded')

    try:
        with open(config_file, 'r', encoding='utf8') as handle:
            config = yaml.safe_load(handle)[profile.name]
    except (OSError, IOError, KeyError):
        # No config file, or no config for this profile
        return DEFAULT_CONFIG

    # Validate configuration
    for key in config:
        if key not in DEFAULT_CONFIG:
            raise exceptions.ConfigurationError("Configuration error: Invalid key '{}' in cache_config.yml".format(key))

    # Add defaults where key is either completely missing or specifies no values in which case it will be `None`
    for key, default_config in DEFAULT_CONFIG.items():
        if key not in config or config[key] is None:
            config[key] = default_config

    try:
        type_check(config[ConfigKeys.DEFAULT.value], bool)
        type_check(config[ConfigKeys.ENABLED.value], list)
        type_check(config[ConfigKeys.DISABLED.value], list)
    except TypeError as exc:
        raise exceptions.ConfigurationError('Invalid type in caching configuration file.') from exc

    # Check validity of enabled and disabled entries
    try:
        for identifier in config[ConfigKeys.ENABLED.value] + config[ConfigKeys.DISABLED.value]:
            _validate_identifier_pattern(identifier=identifier)
    except ValueError as exc:
        raise exceptions.ConfigurationError('Invalid identifier pattern in enable or disable list.') from exc

    return config
def load_backend_if_not_loaded():
    """Load the database backend environment for the currently loaded profile.

    If no profile has been loaded yet, the default profile will be loaded first. A spinner will be
    shown during both actions to indicate that the function is working and has not crashed, since
    loading can take a second.
    """
    from aiida.manage.configuration import get_profile, load_profile
    from aiida.manage.manager import get_manager

    manager = get_manager()

    # Nothing to do when a profile is loaded and its backend is already up.
    if get_profile() is not None and manager.backend_loaded:
        return

    with spinner():
        load_profile()  # This will load the default profile if no profile has already been loaded
        manager.get_backend()  # This will load the backend of the loaded profile, if not already loaded
def load_dbenv(profile=None):
    """Alias for `load_dbenv` from `aiida.backends.utils`

    :param profile: name of the profile to load
    :type profile: str

    .. deprecated:: 1.0.0
        Will be removed in `v2.0.0`, use :func:`aiida.manage.configuration.load_profile` instead.
    """
    warnings.warn('function is deprecated, use `load_profile` instead', AiidaDeprecationWarning)  # pylint: disable=no-member

    from aiida.common import InvalidOperation

    # Loading twice is an error: a profile must not already be loaded.
    if get_profile():
        raise InvalidOperation('You cannot call load_dbenv multiple times!')

    load_profile(profile)
def __init__(self, sandbox_in_repo=True):
    """
    Initializes the object by creating a new temporary folder in the
    sandbox.

    :param bool sandbox_in_repo:
        If True (default), creates the folder in the repository.
        If false, relies on the defaults of tempfile.mkdtemp
    """
    if sandbox_in_repo:
        sandbox = os.path.join(get_profile().repository_path, 'sandbox')
        # `exist_ok=True` replaces the previous check-then-create sequence, which was subject to a
        # race condition when two processes created the sandbox folder concurrently.
        os.makedirs(sandbox, exist_ok=True)
        abspath = tempfile.mkdtemp(dir=sandbox)
    else:
        abspath = tempfile.mkdtemp()

    super().__init__(abspath=abspath)
def setUp(self):
    """
    Set up IcsdDbImporter for web and mysql db query.
    """
    from aiida.manage.configuration import get_profile

    # Read the ICSD connection settings from the profile dictionary once.
    settings = get_profile().dictionary

    self.server = settings['ICSD_SERVER_URL']
    self.host = settings['ICSD_MYSQL_HOST']
    self.user = settings['ICSD_MYSQL_USER']
    self.password = settings['ICSD_MYSQL_PASSWORD']
    self.dbname = settings['ICSD_MYSQL_DB']
    self.dbport = settings.get('ICSD_MYSQL_PORT', 3306)  # default MySQL port when not configured

    self.importerdb = icsd.IcsdDbImporter(server=self.server, host=self.host)
    self.importerweb = icsd.IcsdDbImporter(server=self.server, host=self.host, querydb=False)
def clean_repository(cls):
    """
    Cleans up file repository.
    """
    import shutil

    from aiida.common.exceptions import InvalidOperation
    from aiida.manage.configuration import get_profile

    repo_path = get_profile().repository_path
    repo_basename = os.path.basename(os.path.normpath(repo_path))

    # Refuse to wipe anything that does not look like a test profile repository.
    if TEST_KEYWORD not in repo_basename:
        raise InvalidOperation('Warning: The repository folder {} does not '
                               'seem to belong to a test profile and will therefore not be deleted.\n'
                               'Full repository path: '
                               '{}'.format(repo_basename, repo_path))

    # Clean the test repository
    shutil.rmtree(repo_path, ignore_errors=True)
    os.makedirs(repo_path)
def get_default(self):
    """
    Get the current default user

    :return: The default user
    :rtype: :class:`aiida.orm.User`
    """
    # `UNDEFINED` is the sentinel for "not resolved yet"; once resolved (even to None) the
    # cached value in `self._default_user` is returned directly.
    if self._default_user is self.UNDEFINED:
        from aiida.manage.configuration import get_profile
        profile = get_profile()
        email = profile.default_user

        if not email:
            self._default_user = None

        # NOTE(review): the lookup below runs even when `email` is falsy; presumably
        # `self.get(email=None)` then raises `NotExistent` and the cache stays None — confirm.
        try:
            self._default_user = self.get(email=email)
        except (exceptions.MultipleObjectsError, exceptions.NotExistent):
            # No unique user for this email: cache None rather than propagate.
            self._default_user = None

    return self._default_user
def test_integration(self):
    """Test integration"""
    from aiida.common.utils import Capturing

    # Fill in the repository with data
    self.fill_repo()

    try:
        # Create a temp folder where the backup files will be placed
        # and the backup will be stored
        temp_folder = tempfile.mkdtemp()
        # NOTE(review): if mkdtemp itself raised, `temp_folder` would be unbound in the
        # `finally` below.

        # Capture the sysout of the following command
        with Capturing():
            # Create the backup scripts
            backup_full_path = self.create_backup_scripts(temp_folder)

            # Put the backup folder in the path
            sys.path.append(backup_full_path)

            # Import the backup script - this action will also run it
            # It is assumed that the backup script ends with .py
            importlib.import_module(self._bs_instance._script_filename[:-3])

        # Check the backup: compare the original repository tree with the backed-up copy.
        import os
        from aiida.manage.configuration import get_profile
        from aiida.common.utils import are_dir_trees_equal
        dirpath_repository = get_profile().repository_path
        source_dir = os.path.join(dirpath_repository, self._repo_rel_path)
        dest_dir = os.path.join(backup_full_path, self._bs_instance._file_backup_folder_rel, self._repo_rel_path)
        res, msg = are_dir_trees_equal(source_dir, dest_dir)
        self.assertTrue(
            res, 'The backed-up repository has differences to the original one. ' + str(msg) +
            '. If the test fails, report it in issue #2134.')
    finally:
        # Best-effort cleanup of the temporary backup folder.
        shutil.rmtree(temp_folder, ignore_errors=True)
def get_scoped_session():
    """Return a scoped session

    According to SQLAlchemy docs, this returns always the same object within a thread, and a different object in a
    different thread. Moreover, since we update the session class upon forking, different session objects will be used.
    """
    from aiida.manage.configuration import get_profile

    global ENGINE
    global SESSION_FACTORY

    # Build the engine and session factory lazily; subsequent calls reuse the scoped factory.
    if SESSION_FACTORY is None:
        if ENGINE is None:
            ENGINE = create_sqlalchemy_engine(get_profile())

        SESSION_FACTORY = create_scoped_session_factory(ENGINE, expire_on_commit=True)

    return SESSION_FACTORY()
def __init__(self, section, uuid, subfolder=os.curdir):
    """
    Initializes the object by pointing it to a folder in the repository.

    Pass the uuid as a string.
    """
    if section not in VALID_SECTIONS:
        raise ValueError(
            f"Repository section '{section}' not allowed. Valid sections are: {','.join(VALID_SECTIONS)}"
        )

    self._section = section
    self._uuid = uuid

    # If you want to change the sharding scheme, this is the only place where changes should be
    # needed FOR NODES AND WORKFLOWS. Of course, remember to migrate data! We use a 2+2 character
    # sharding of the uuid. Note that a similar sharding should probably have to be done
    # independently for calculations sent to remote computers in the execmanager.
    # No os.path.abspath here (it internally calls normpath, which may be slow): the super class
    # does that anyway.
    uuid_str = str(uuid)
    entity_dir = os.path.join(
        get_profile().repository_path, 'repository', str(section), uuid_str[:2], uuid_str[2:4], uuid_str[4:]
    )
    dest = os.path.join(entity_dir, str(subfolder))

    # Internal variable of this class
    self._subfolder = subfolder

    # This will also do checks on the folder limits
    super().__init__(abspath=dest, folder_limit=entity_dir)
def _get_repository_path():
    """Return the repository path of the currently loaded profile."""
    from aiida.manage.configuration import get_profile

    profile = get_profile()
    return profile.repository_path
def run_materialsproject_api_tests():
    """Return whether the loaded profile enables the Materials Project API tests (False when unset)."""
    from aiida.manage.configuration import get_profile

    return get_profile().dictionary.get('run_materialsproject_api_tests', False)
# # # The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### # pylint: disable=import-error, no-name-in-module """ Django settings for the AiiDA project. """ from sqlalchemy.dialects.postgresql import JSONB from sqlalchemy.dialects.postgresql import UUID from aiida.common import exceptions from aiida.common.timezone import get_current_timezone from aiida.manage.configuration import get_profile, settings try: PROFILE = get_profile() except exceptions.MissingConfigurationError as exception: raise exceptions.MissingConfigurationError( 'the configuration could not be loaded: {}'.format(exception)) if PROFILE is None: raise exceptions.ProfileConfigurationError('no profile has been loaded') if PROFILE.database_backend != 'django': raise exceptions.ProfileConfigurationError( 'incommensurate database backend `{}` for profile `{}`'.format( PROFILE.database_backend, PROFILE.name)) PROFILE_CONF = PROFILE.dictionary DATABASES = {