Example #1
def load_documentation_profile():
    """Load a dummy profile just for the purposes of being able to build the documentation.

    The building of the documentation will require importing the `aiida` package and some code will try to access the
    loaded configuration and profile, which if not done will except. On top of that, Django will raise an exception if
    the database models are loaded before its settings are loaded. This also is taken care of by loading a Django
    profile and loading the corresponding backend. Calling this function will perform all these requirements allowing
    the documentation to be built without having to install and configure AiiDA nor having an actual database present.
    """
    import tempfile
    from aiida.manage.manager import get_manager
    from .config import Config
    from .profile import Profile

    global PROFILE
    global CONFIG

    with tempfile.NamedTemporaryFile() as handle:
        profile_name = 'readthedocs'
        profile = {
            'AIIDADB_ENGINE': 'postgresql_psycopg2',
            'AIIDADB_BACKEND': 'django',
            'AIIDADB_PORT': 5432,
            'AIIDADB_HOST': 'localhost',
            'AIIDADB_NAME': 'aiidadb',
            'AIIDADB_PASS': '******',
            'AIIDADB_USER': '******',
            'AIIDADB_REPOSITORY_URI': 'file:///dev/null',
        }
        config = {'default_profile': profile_name, 'profiles': {profile_name: profile}}
        PROFILE = Profile(profile_name, profile, from_config=True)
        CONFIG = Config(handle.name, config)
        get_manager()._load_backend(schema_check=False)  # pylint: disable=protected-access
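Judging by the relative imports, this function lives in the `aiida.manage.configuration` package; the import path below is inferred from that, not confirmed by the snippet. A minimal usage sketch for a Sphinx `conf.py`:

# In docs/source/conf.py, load the dummy profile before autodoc imports any
# aiida modules that touch the configuration or profile.
from aiida.manage.configuration import load_documentation_profile

load_documentation_profile()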
Example #2
def load_backend_if_not_loaded():
    """Load the current profile if necessary while running the spinner to show command hasn't crashed."""
    from aiida.manage.configuration import load_profile
    from aiida.manage.manager import get_manager
    with spinner():
        load_profile()
        get_manager().get_backend()
Example #3
    def store_attributes(self):
        """Store new attributes in Node extras and reset self._node"""
        if self.new_attributes:
            optimade = self._get_optimade_extras()
            if optimade:
                optimade.update(self.new_attributes)
            else:
                optimade = self.new_attributes
            extras = (self._get_unique_node_property("extras")
                      if self._get_unique_node_property("extras") else {})
            extras["optimade"] = optimade

            profile = get_manager().get_profile()
            if profile.database_backend == "django":
                from aiida.backends.djsite.db.models import DbNode

                with get_manager().get_backend().transaction():
                    DbNode.objects.filter(pk=self._pk).update(extras=extras)
            elif profile.database_backend == "sqlalchemy":
                from aiida.backends.sqlalchemy.models.node import DbNode

                with get_manager().get_backend().transaction() as session:
                    session.query(DbNode).filter(DbNode.id == self._pk).update(
                        values={"extras": extras})
            else:
                raise AiidaError(
                    f'Unknown AiiDA backend "{profile.database_backend}" '
                    f"for profile {profile}")

            # For posterity, this is how to do the same, going through AiiDA's API:
            # self._node.set_extra(self.EXTRAS_KEY, optimade)

        # Lastly, reset NODE in an attempt to remove it from memory
        self._node = None
Example #4
    def wrapper(*args, **kwargs):
        try:
            value = func(*args, **kwargs)
        finally:
            from aiida.manage.manager import get_manager

            get_manager().get_backend().get_session().close()
        return value
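This `wrapper` is the inner function of a decorator that closes the backend session after the wrapped function returns or raises. A minimal sketch of the full decorator, where the name `close_session_after` is a hypothetical placeholder:

import functools

def close_session_after(func):
    """Hypothetical decorator: run `func`, then always close the backend session."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            value = func(*args, **kwargs)
        finally:
            from aiida.manage.manager import get_manager

            get_manager().get_backend().get_session().close()
        return value
    return wrapper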
Example #5
def submit(process, **inputs):
    """Submit the process with the supplied inputs to the daemon immediately returning control to the interpreter.

    .. warning:: this should not be used within another process. Instead, one should use the `submit` method of
        the wrapping process itself, i.e. `self.submit`.

    .. warning:: submission of processes requires `store_provenance=True`

    :param process: the process class to submit
    :type process: :class:`aiida.engine.Process`

    :param inputs: the inputs to be passed to the process
    :type inputs: dict

    :return: the calculation node of the process
    :rtype: :class:`aiida.orm.ProcessNode`
    """
    assert not is_process_function(process), 'Cannot submit a process function'

    # Submitting from within another process requires `self.submit` unless it is a work function, in which case the
    # current process in the scope should be an instance of `FunctionProcess`
    if is_process_scoped() and not isinstance(Process.current(),
                                              FunctionProcess):
        raise InvalidOperation(
            'Cannot use top-level `submit` from within another process, use `self.submit` instead'
        )

    runner = manager.get_manager().get_runner()
    controller = manager.get_manager().get_process_controller()

    process = instantiate_process(runner, process, **inputs)

    # If a dry run is requested, simply forward to `run`, because it is not compatible with `submit`. We choose this
    # over raising, because this way the user does not have to change the launcher when testing.
    if process.metadata.get('dry_run', False):
        _, node = run_get_node(process)
        return node

    if not process.metadata.store_provenance:
        raise InvalidOperation(
            'cannot submit a process with `store_provenance=False`')

    runner.persister.save_checkpoint(process)
    process.close()

    # Do not wait for the future's result, because in the case of a single worker this would deadlock itself
    controller.continue_process(process.pid, nowait=False, no_reply=True)

    return process.node
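A minimal usage sketch, assuming aiida-core's bundled `arithmetic.add` calculation and an already configured code labelled `add@localhost` (both assumptions; substitute your own process class and inputs):

from aiida import load_profile, orm
from aiida.engine import submit
from aiida.plugins import CalculationFactory

load_profile()

builder = CalculationFactory('arithmetic.add').get_builder()
builder.code = orm.load_code('add@localhost')  # assumption: such a code exists
builder.x = orm.Int(1)
builder.y = orm.Int(2)

# Returns immediately with the ProcessNode; the daemon runs the process.
node = submit(builder)
print(node.pk)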
Example #6
    def setUpClass(cls, *args, **kwargs):
        # Note: this will raise an exception that will be seen as a test
        # failure. To be safe, perform the same check in the tearDownClass as
        # well, to prevent it from running.
        check_if_tests_can_run()

        # Force the loading of the backend which will load the required database environment
        get_manager().get_backend()

        cls.__backend_instance = cls.get_backend_class()()
        cls.__backend_instance.setUpClass_method(*args, **kwargs)
        cls.backend = cls.__backend_instance.backend
        cls.insert_data()

        cls._class_was_setup = True
Example #7
    def test_simple_kill_through_process(self):
        """
        Run the workchain for one step and then kill it. This should have the
        workchain and its children end up in the KILLED state.
        """
        runner = get_manager().get_runner()
        process = TestWorkChainAbortChildren.MainWorkChain(inputs={'kill': Bool(True)})

        @gen.coroutine
        def run_async():
            yield run_until_waiting(process)

            process.kill()

            with self.assertRaises(plumpy.KilledError):
                yield process.future()

        runner.schedule(process)
        runner.loop.run_sync(lambda: run_async())

        child = process.node.get_outgoing(link_type=LinkType.CALL_WORK).first().node
        self.assertEqual(child.is_finished_ok, False)
        self.assertEqual(child.is_excepted, False)
        self.assertEqual(child.is_killed, True)

        self.assertEqual(process.node.is_finished_ok, False)
        self.assertEqual(process.node.is_excepted, False)
        self.assertEqual(process.node.is_killed, True)
Example #8
    def test_simple_kill_through_process(self):
        """
        Run the workchain for one step and then kill it by calling kill
        on the workchain itself. This should have the workchain end up
        in the KILLED state.
        """
        runner = get_manager().get_runner()
        process = TestWorkChainAbort.AbortableWorkChain()

        @gen.coroutine
        def run_async():
            yield run_until_paused(process)

            self.assertTrue(process.paused)
            process.kill()

            with self.assertRaises(plumpy.ClosedError):
                launch.run(process)

        runner.schedule(process)
        runner.loop.run_sync(lambda: run_async())

        self.assertEqual(process.node.is_finished_ok, False)
        self.assertEqual(process.node.is_excepted, False)
        self.assertEqual(process.node.is_killed, True)
Example #9
    def test_if_block_persistence(self):
        """
        This test was created to capture issue #902
        """
        runner = get_manager().get_runner()
        wc = IfTest()
        runner.schedule(wc)

        @gen.coroutine
        def run_async(workchain):
            yield run_until_paused(workchain)
            self.assertTrue(workchain.ctx.s1)
            self.assertFalse(workchain.ctx.s2)

            # Now bundle the thing
            bundle = plumpy.Bundle(workchain)
            # Need to close the process before recreating a new instance
            workchain.close()

            # Load from saved state
            workchain2 = bundle.unbundle()
            self.assertTrue(workchain2.ctx.s1)
            self.assertFalse(workchain2.ctx.s2)

            bundle2 = plumpy.Bundle(workchain2)
            self.assertDictEqual(bundle, bundle2)

            workchain.play()
            yield workchain.future()
            self.assertTrue(workchain.ctx.s1)
            self.assertTrue(workchain.ctx.s2)

        runner.loop.run_sync(lambda: run_async(wc))
Example #10
def set_process_state_change_timestamp(process):
    """
    Set the global setting that reflects the last time a process changed state, for the process type
    of the given process, to the current timestamp. The process type will be determined based on
    the class of the calculation node it has as its database container.

    :param process: the Process instance that changed its state
    """
    from aiida.common import timezone
    from aiida.common.exceptions import UniquenessError
    from aiida.manage.manager import get_manager  # pylint: disable=cyclic-import
    from aiida.orm import ProcessNode, CalculationNode, WorkflowNode

    if isinstance(process.node, CalculationNode):
        process_type = 'calculation'
    elif isinstance(process.node, WorkflowNode):
        process_type = 'work'
    elif isinstance(process.node, ProcessNode):
        # This will only occur for testing, as in general users cannot launch plain Process classes
        return
    else:
        raise ValueError('unsupported calculation node type {}'.format(type(process.node)))

    key = PROCESS_STATE_CHANGE_KEY.format(process_type)
    description = PROCESS_STATE_CHANGE_DESCRIPTION.format(process_type)
    value = timezone.datetime_to_isoformat(timezone.now())

    try:
        manager = get_manager()
        manager.get_backend_manager().get_settings_manager().set(key, value, description)
    except UniquenessError as exception:
        process.logger.debug('could not update the {} setting because of a UniquenessError: {}'.format(key, exception))
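A hedged sketch of reading the timestamp back through the same settings manager, assuming its `get(key)` returns a setting object with a `value` attribute, mirroring the `set(key, value, description)` call above:

from aiida.manage.manager import get_manager

# PROCESS_STATE_CHANGE_KEY is the module-level constant used by the function above.
key = PROCESS_STATE_CHANGE_KEY.format('work')
setting = get_manager().get_backend_manager().get_settings_manager().get(key)
print(setting.value)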
Example #11
    def load_instance_state(self, saved_state, load_context):
        """Load instance state.

        :param saved_state: saved instance state

        :param load_context:
        :type load_context: :class:`!plumpy.persistence.LoadSaveContext`
        """
        from aiida.manage import manager

        if 'runner' in load_context:
            self._runner = load_context.runner
        else:
            self._runner = manager.get_manager().get_runner()

        load_context = load_context.copyextend(loop=self._runner.loop, communicator=self._runner.communicator)
        super().load_instance_state(saved_state, load_context)

        if self.SaveKeys.CALC_ID.value in saved_state:
            self._node = orm.load_node(saved_state[self.SaveKeys.CALC_ID.value])
            self._pid = self.node.pk
        else:
            self._pid = self._create_and_setup_db_record()

        self.node.logger.info(f'Loaded process<{self.node.pk}> from saved state')
Example #12
def start_daemon() -> None:
    """Start a daemon runner for the currently configured profile."""
    daemon_client = get_daemon_client()
    configure_logging(daemon=True, daemon_log_file=daemon_client.daemon_log_file)

    try:
        manager = get_manager()
        runner = manager.create_daemon_runner()
        manager.set_runner(runner)
    except Exception:
        LOGGER.exception('daemon runner failed to start')
        raise

    signals = (signal.SIGTERM, signal.SIGINT)
    for s in signals:  # pylint: disable=invalid-name
        runner.loop.add_signal_handler(s, lambda s=s: asyncio.create_task(shutdown_runner(runner)))

    try:
        LOGGER.info('Starting a daemon runner')
        runner.start()
    except SystemError as exception:
        LOGGER.info('Received a SystemError: %s', exception)
        runner.close()

    LOGGER.info('Daemon runner stopped')
Example #13
    def __init__(
        self, name, hostname, description='', transport_type='', scheduler_type='', workdir=None, backend=None
    ):
        """Construct a new computer

        :type name: str
        :type hostname: str
        :type description: str
        :type transport_type: str
        :type scheduler_type: str
        :type workdir: str
        :type backend: :class:`aiida.orm.implementation.Backend`

        :rtype: :class:`aiida.orm.Computer`
        """
        # pylint: disable=too-many-arguments
        backend = backend or get_manager().get_backend()
        model = backend.computers.create(
            name=name,
            hostname=hostname,
            description=description,
            transport_type=transport_type,
            scheduler_type=scheduler_type
        )
        super(Computer, self).__init__(model)
        if workdir is not None:
            self.set_workdir(workdir)
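A minimal usage sketch for this 1.x-era constructor; the `local` transport and `direct` scheduler entry points ship with aiida-core, while the label and workdir are placeholders:

from aiida import load_profile
from aiida.orm import Computer

load_profile()

computer = Computer(
    name='localhost',
    hostname='localhost',
    transport_type='local',
    scheduler_type='direct',
    workdir='/tmp/aiida_run',
)
computer.store()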
Example #14
def detect_invalid_nodes():
    """Scan the database for invalid nodes."""
    from tabulate import tabulate

    from aiida.manage.database.integrity.sql.nodes import INVALID_NODE_SELECT_STATEMENTS
    from aiida.manage.manager import get_manager

    integrity_violated = False

    backend = get_manager().get_backend()

    for check in INVALID_NODE_SELECT_STATEMENTS:

        result = backend.execute_prepared_statement(check.sql,
                                                    check.parameters)

        if result:
            integrity_violated = True
            echo.echo_warning(f'{check.message}:\n')
            echo.echo(tabulate(result, headers=check.headers))

    if not integrity_violated:
        echo.echo_success('no integrity violations detected')
    else:
        echo.echo_critical('one or more integrity violations detected')
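The loop above only assumes that each check exposes `sql`, `parameters`, `message` and `headers`. A hypothetical entry consistent with that usage (the real statements live in `aiida.manage.database.integrity.sql.nodes`):

from collections import namedtuple

# Hypothetical shape, inferred purely from the attributes accessed in the loop.
IntegrityCheck = namedtuple('IntegrityCheck', ['sql', 'parameters', 'headers', 'message'])

example_check = IntegrityCheck(
    sql='SELECT id, uuid FROM db_dbnode WHERE node_type = %(node_type)s',
    parameters={'node_type': 'some.invalid.type.'},
    headers=['ID', 'UUID'],
    message='detected nodes with an invalid node type',
)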
Example #15
    def load_instance_state(
            self, saved_state: MutableMapping[str, Any],
            load_context: plumpy.persistence.LoadSaveContext) -> None:
        """Load instance state.

        :param saved_state: saved instance state
        :param load_context:

        """
        from aiida.manage import manager

        if 'runner' in load_context:
            self._runner = load_context.runner
        else:
            self._runner = manager.get_manager().get_runner()

        load_context = load_context.copyextend(
            loop=self._runner.loop, communicator=self._runner.communicator)
        super().load_instance_state(saved_state, load_context)

        if self.SaveKeys.CALC_ID.value in saved_state:
            self._node = orm.load_node(
                saved_state[self.SaveKeys.CALC_ID.value])
            self._pid = self.node.pk  # pylint: disable=attribute-defined-outside-init
        else:
            self._pid = self._create_and_setup_db_record()  # pylint: disable=attribute-defined-outside-init

        self.node.logger.info(
            f'Loaded process<{self.node.pk}> from saved state')
Example #16
    def generate_calcinfo(entry_point_name, folder, inputs=None):
        """Generate a `CalcInfo` instance for testing calculation jobs.

        A new `CalcJob` process instance is instantiated,
        and `prepare_for_submission` is called to populate the supplied folder
        with the raw input files.

        Parameters
        ----------
        entry_point_name: str
        folder: aiida.common.folders.Folder
        inputs: dict or None

        """
        from aiida.engine.utils import instantiate_process
        from aiida.manage.manager import get_manager
        from aiida.plugins import CalculationFactory

        manager = get_manager()
        runner = manager.get_runner()

        process_class = CalculationFactory(entry_point_name)
        process = instantiate_process(runner, process_class, **(inputs or {}))

        calc_info = process.prepare_for_submission(folder)

        return calc_info
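A usage sketch, assuming `generate_calcinfo` is in scope (it is defined as a test fixture above), the bundled `arithmetic.add` calculation, and a configured code (`add@localhost` is a placeholder):

from aiida import load_profile, orm
from aiida.common.folders import SandboxFolder

load_profile()

inputs = {
    'code': orm.load_code('add@localhost'),
    'x': orm.Int(1),
    'y': orm.Int(2),
}

# Write the raw input files to a scratch folder and inspect the CalcInfo.
with SandboxFolder() as folder:
    calc_info = generate_calcinfo('arithmetic.add', folder, inputs=inputs)
    print(calc_info.codes_info)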
Example #17
    def generate_context(wkchain_cls, inputs, outline_steps):
        """instantiate a WorkChain,
        call a list of methods (that should be part of `spec.outline`),
        then return a sanitized version of the workchain context for testing
        """
        from aiida.common.extendeddicts import AttributeDict
        from aiida.engine import ProcessBuilder
        from aiida.engine.utils import instantiate_process
        from aiida.manage.manager import get_manager
        from aiida.orm import Node

        class ContextDumper(yaml.Dumper):
            """Custom yaml dumper for a process context."""
            def represent_data(self, data):
                if isinstance(data, Node):
                    data = str(data.__class__)
                if isinstance(data, AttributeDict):
                    data = dict(data)

                return super(ContextDumper, self).represent_data(data)

        manager = get_manager()
        runner = manager.get_runner()

        if isinstance(inputs, ProcessBuilder):
            wkchain = instantiate_process(runner, inputs)
        else:
            wkchain = instantiate_process(runner, wkchain_cls, **inputs)
        step_outcomes = []
        for step in outline_steps:
            step_outcomes.append(getattr(wkchain, step)())

        context = yaml.dump(wkchain.ctx, Dumper=ContextDumper)
        return wkchain, step_outcomes, yaml.safe_load(context)
Example #18
def detect_duplicate_uuid(table, apply_patch):
    """Detect and fix entities with duplicate UUIDs.

    Before aiida-core v1.0.0, there was no uniqueness constraint on the UUID column of the node table in the database
    and a few other tables as well. This made it possible to store multiple entities with identical UUIDs in the same
    table without the database complaining. This bug was fixed in aiida-core=1.0.0 by putting an explicit uniqueness
    constraint on UUIDs on the database level. However, this would leave databases created before this patch with
    duplicate UUIDs in an inconsistent state. This command will run an analysis to detect duplicate UUIDs in a given
    table and solve it by generating new UUIDs. Note that it will not delete or merge any rows.
    """
    from aiida.manage.database.integrity.duplicate_uuid import deduplicate_uuids
    from aiida.manage.manager import get_manager

    manager = get_manager()
    manager._load_backend(schema_check=False)  # pylint: disable=protected-access

    try:
        messages = deduplicate_uuids(table=table, dry_run=not apply_patch)
    except Exception as exception:  # pylint: disable=broad-except
        echo.echo_critical('integrity check failed: {}'.format(str(exception)))
    else:
        for message in messages:
            echo.echo_info(message)

        if apply_patch:
            echo.echo_success('integrity patch completed')
        else:
            echo.echo_success('dry-run of integrity patch completed')
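A hedged sketch of how this function body might be wired up as a click command; the command and option names simply mirror the function arguments and are assumptions, not the actual verdi definition:

import click

@click.command('detect-duplicate-uuid')
@click.option('--table', default='db_dbnode', help='The database table to check.')
@click.option('--apply-patch', is_flag=True, help='Apply the fix instead of doing a dry run.')
def cmd_detect_duplicate_uuid(table, apply_patch):
    detect_duplicate_uuid(table, apply_patch)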
Example #19
    def __init__(self, **kwargs):
        """
        Initialise the parameters.
        Create the basic query_help
        """

        # basic initialization
        super().__init__(**kwargs)

        self._default_projections = [
            'id', 'label', 'node_type', 'process_type', 'ctime', 'mtime',
            'uuid', 'user_id'
        ]

        # Inspect the subclasses of NodeTranslator, to avoid hard-coding
        # (should resemble the following tree)
        r"""
                                              /- CodeTranslator
                                             /
                                            /- KpointsTranslator
                                           /
                           /- DataTranslator -- StructureTranslator
                          /                \
                         /                  \- BandsTranslator
                        /
        NodeTranslator
                        \
                         \- CalculationTranslator
        """

        self._subclasses = self._get_subclasses()
        self._backend = get_manager().get_backend()
Example #20
def delete_nodes_and_connections_sqla(pks_to_delete):  # pylint: disable=invalid-name
    """
    Delete all nodes corresponding to pks in the input.
    :param pks_to_delete: A list, tuple or set of pks that should be deleted.
    """
    # pylint: disable=no-value-for-parameter
    from aiida.backends.sqlalchemy.models.node import DbNode, DbLink
    from aiida.backends.sqlalchemy.models.group import table_groups_nodes
    from aiida.manage.manager import get_manager

    backend = get_manager().get_backend()

    with backend.transaction() as session:
        # First delete the membership of these nodes in groups. Since table_groups_nodes is a
        # sqlalchemy.schema.Table, expression language is used to compile a statement to be
        # executed by the session. It works, but it is not nice that two different query styles
        # are mixed here; can this be changed?
        stmt = table_groups_nodes.delete().where(
            table_groups_nodes.c.dbnode_id.in_(list(pks_to_delete)))
        session.execute(stmt)
        # First delete links, then the Nodes, since we are not cascading deletions.
        # Here I delete the links coming out of the nodes marked for deletion.
        session.query(DbLink).filter(DbLink.input_id.in_(
            list(pks_to_delete))).delete(synchronize_session='fetch')
        # Here I delete the links pointing to the nodes marked for deletion.
        session.query(DbLink).filter(DbLink.output_id.in_(
            list(pks_to_delete))).delete(synchronize_session='fetch')
        # Now I am deleting the nodes
        session.query(DbNode).filter(DbNode.id.in_(
            list(pks_to_delete))).delete(synchronize_session='fetch')
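A minimal usage sketch; the pks are placeholders and the deletion is irreversible, so this assumes a SqlAlchemy-backed profile you can afford to modify:

from aiida import load_profile

load_profile()
delete_nodes_and_connections_sqla({1234, 1235})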
Example #21
    def __init__(self, inputs=None, logger=None, runner=None, parent_pid=None, enable_persistence=True):
        """ Process constructor.

        :param inputs: process inputs
        :type inputs: dict

        :param logger: aiida logger
        :type logger: :class:`logging.Logger`

        :param runner: process runner
        :type runner: :class:`aiida.engine.runners.Runner`

        :param parent_pid: id of parent process
        :type parent_pid: int

        :param enable_persistence: whether to persist this process
        :type enable_persistence: bool
        """
        from aiida.manage import manager

        self._runner = runner if runner is not None else manager.get_manager().get_runner()

        super().__init__(
            inputs=self.spec().inputs.serialize(inputs),
            logger=logger,
            loop=self._runner.loop,
            communicator=self.runner.communicator)

        self._node = None
        self._parent_pid = parent_pid
        self._enable_persistence = enable_persistence
        if self._enable_persistence and self.runner.persister is None:
            self.logger.warning('Disabling persistence, runner does not have a persister')
            self._enable_persistence = False
Example #22
def get_num_workers():
    """
    Get the number of active daemon workers from the circus client
    """
    from aiida.common.exceptions import CircusCallError
    from aiida.manage.manager import get_manager

    manager = get_manager()
    client = manager.get_daemon_client()

    if client.is_daemon_running:
        response = client.get_numprocesses()
        if response['status'] != 'ok':
            if response['status'] == client.DAEMON_ERROR_TIMEOUT:
                raise CircusCallError(
                    'verdi thought the daemon was alive, but the call to the daemon timed-out'
                )
            elif response['status'] == client.DAEMON_ERROR_NOT_RUNNING:
                raise CircusCallError(
                    'verdi thought the daemon was running, but really it is not'
                )
            else:
                raise CircusCallError
        try:
            return response['numprocesses']
        except KeyError:
            raise CircusCallError(
                'Circus did not return the number of daemon processes')
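A usage sketch that translates daemon problems into a message; note that when the daemon is not running, the function falls through the `if` and returns None:

from aiida.common.exceptions import CircusCallError

try:
    num = get_num_workers()
    print(f'daemon workers: {num}')
except CircusCallError as exc:
    print(f'could not query the daemon: {exc}')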
Example #23
    def get_creation_statistics(self, user_pk=None):
        """
        Return a dictionary with the statistics of node creation, summarized by day,
        optimized for the Django backend.

        :note: Days when no nodes were created are not present in the returned `ctime_by_day` dictionary.

        :param user_pk: If None (default), return statistics for all users.
            If user pk is specified, return only the statistics for the given user.

        :return: a dictionary as
            follows::

                {
                   "total": TOTAL_NUM_OF_NODES,
                   "types": {TYPESTRING1: count, TYPESTRING2: count, ...},
                   "ctime_by_day": {'YYYY-MM-DD': count, ...}
                }

            where in `ctime_by_day` the key is a string in the format 'YYYY-MM-DD' and the value is
            an integer with the number of nodes created that day."""
        # pylint: disable=no-member
        import sqlalchemy as sa
        import aiida.backends.djsite.db.models as djmodels
        from aiida.manage.manager import get_manager
        backend = get_manager().get_backend()

        # Get the session (uses internally aldjemy - so, sqlalchemy) also for the Djsite backend
        session = backend.get_session()

        retdict = {}

        total_query = session.query(djmodels.DbNode.sa)
        types_query = session.query(
            djmodels.DbNode.sa.node_type.label('typestring'),
            sa.func.count(djmodels.DbNode.sa.id))
        stat_query = session.query(
            sa.func.date_trunc('day', djmodels.DbNode.sa.ctime).label('cday'),
            sa.func.count(djmodels.DbNode.sa.id))

        if user_pk is not None:
            total_query = total_query.filter(
                djmodels.DbNode.sa.user_id == user_pk)
            types_query = types_query.filter(
                djmodels.DbNode.sa.user_id == user_pk)
            stat_query = stat_query.filter(
                djmodels.DbNode.sa.user_id == user_pk)

        # Total number of nodes
        retdict['total'] = total_query.count()

        # Nodes per type
        retdict['types'] = dict(types_query.group_by('typestring').all())

        # Nodes created per day
        stat = stat_query.group_by('cday').order_by('cday').all()

        ctime_by_day = {_[0].strftime('%Y-%m-%d'): _[1] for _ in stat}
        retdict['ctime_by_day'] = ctime_by_day

        return retdict
Example #24
def start_daemon():
    """Start a daemon runner for the currently configured profile."""
    daemon_client = get_daemon_client()
    configure_logging(daemon=True,
                      daemon_log_file=daemon_client.daemon_log_file)

    try:
        manager = get_manager()
        runner = manager.create_daemon_runner()
        manager.set_runner(runner)
    except Exception:
        LOGGER.exception('daemon runner failed to start')
        raise

    def shutdown_daemon(_num, _frame):
        LOGGER.info('Received signal to shut down the daemon runner')
        runner.close()

    signal.signal(signal.SIGINT, shutdown_daemon)
    signal.signal(signal.SIGTERM, shutdown_daemon)

    LOGGER.info('Starting a daemon runner')

    try:
        runner.start()
    except SystemError as exception:
        LOGGER.info('Received a SystemError: %s', exception)
        runner.close()

    LOGGER.info('Daemon runner stopped')