Example #1
    def _init_entry_points(self):
        """
        Populate entry point information that will be used later on. This should only be called
        once in the constructor, after setting self.groups, because the groups should not be
        changed after instantiation.
        """
        self._entry_points = [(group, entry_point) for group in self.groups
                              for entry_point in get_entry_points(group)]
        self._entry_point_names = [
            entry_point.name for group in self.groups
            for entry_point in get_entry_points(group)
        ]
    def test_existing_workflows(self):
        """
        Test listing all preinstalled workflows
        """
        entry_points = get_entry_points('aiida.workflows')
        self.assertIsInstance(entry_points, list)

        for entry_point in entry_points:
            cls = WorkflowFactory(entry_point.name)
            self.assertTrue(issubclass(cls, (Workflow, WorkChain)),
                'Workflow plugin class {} is neither a subclass of {} nor {}'.format(cls, Workflow, WorkChain))
    def test_existing_schedulers(self):
        """
        Test listing all preinstalled schedulers
        """
        entry_points = get_entry_points('aiida.schedulers')
        self.assertIsInstance(entry_points, list)

        for entry_point in entry_points:
            cls = SchedulerFactory(entry_point.name)
            self.assertTrue(issubclass(cls, Scheduler),
                'Scheduler plugin class {} is not subclass of {}'.format(cls, Scheduler))
    def test_existing_transports(self):
        """
        Test listing all preinstalled transports
        """
        entry_points = get_entry_points('aiida.transports')
        self.assertIsInstance(entry_points, list)

        for entry_point in entry_points:
            cls = TransportFactory(entry_point.name)
            self.assertTrue(issubclass(cls, Transport),
                'Transport plugin class {} is not subclass of {}'.format(cls, Transport))
    def test_existing_data(self):
        """
        Test listing all preinstalled data classes
        """
        entry_points = get_entry_points('aiida.data')
        self.assertIsInstance(entry_points, list)

        for entry_point in entry_points:
            cls = DataFactory(entry_point.name)
            self.assertTrue(issubclass(cls, Data),
                'Data plugin class {} is not subclass of {}'.format(cls, Data))
    def test_existing_calculations(self):
        """
        Test listing all preinstalled calculations
        """
        entry_points = get_entry_points('aiida.calculations')
        self.assertIsInstance(entry_points, list)

        for entry_point in entry_points:
            cls = CalculationFactory(entry_point.name)
            self.assertTrue(issubclass(cls, Calculation),
                'Calculation plugin class {} is not subclass of {}'.format(cls, Calculation))
    def test_existing_dbimporters(self):
        """
        Test listing all preinstalled dbimporter plugins
        """
        entry_points = get_entry_points('aiida.tools.dbimporters')
        self.assertIsInstance(entry_points, list)

        for entry_point in entry_points:
            cls = DbImporterFactory(entry_point.name)
            self.assertTrue(issubclass(cls, DbImporter),
                'DbImporter plugin class {} is not subclass of {}'.format(cls, DbImporter))
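The factories used in the tests above (WorkflowFactory, SchedulerFactory, TransportFactory, DataFactory, CalculationFactory, DbImporterFactory) each resolve an entry point name within their respective group and return the loaded class. A hedged sketch of an equivalent lookup built directly on get_entry_points (the real factories add validation on top of the entry point machinery):

from aiida.plugins.entry_point import get_entry_points

def load_plugin(group, name):
    """Illustrative stand-in for the factories: load the class registered under ``name`` in ``group``."""
    for entry_point in get_entry_points(group):
        if entry_point.name == name:
            return entry_point.load()
    raise ValueError('no entry point {} registered in group {}'.format(name, group))

# e.g. load_plugin('aiida.schedulers', 'direct') is expected to return the same class
# as SchedulerFactory('direct'), assuming the 'direct' scheduler plugin is installed.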
    def test_calculation_plugins(self):
        """Test verdi calculation plugins"""
        from aiida.plugins.entry_point import get_entry_points

        calculation_plugins = get_entry_points('aiida.calculations')

        result = self.cli_runner.invoke(command.calculation_plugins,
                                        ['non_existent'])
        self.assertIsNotNone(result.exception)

        result = self.cli_runner.invoke(command.calculation_plugins,
                                        ['simpleplugins.arithmetic.add'])
        self.assertIsNone(result.exception)
        self.assertTrue(len(get_result_lines(result)) > 0)

        result = self.cli_runner.invoke(command.calculation_plugins)
        self.assertIsNone(result.exception)
        self.assertTrue(
            len(get_result_lines(result)) > len(calculation_plugins))
Example #9
    def test_data_exporters(self):
        """Verify that the return value of the export methods of all `Data` sub classes have the correct type.

        It should be a tuple where the first should be a byte string and the second a dictionary.
        """
        from aiida.plugins.entry_point import get_entry_points

        for entry_point in get_entry_points('aiida.data'):

            data_class = entry_point.load()
            export_formats = data_class.get_export_formats()

            if not export_formats:
                continue

            instance = self.generate_class_instance(data_class)

            for fileformat in export_formats:
                content, dictionary = instance._exportcontent(fileformat)  # pylint: disable=protected-access
                self.assertIsInstance(content, bytes)
                self.assertIsInstance(dictionary, dict)
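The contract checked above can be reproduced with a concrete Data subclass. A minimal sketch, assuming a configured AiiDA profile and assuming that StructureData exposes an 'xyz' export format:

from aiida import load_profile
from aiida.orm import StructureData

load_profile()  # instantiating nodes requires a configured profile

structure = StructureData(cell=[[4.0, 0.0, 0.0], [0.0, 4.0, 0.0], [0.0, 0.0, 4.0]])
structure.append_atom(position=(0.0, 0.0, 0.0), symbols='Si')

# 'xyz' is assumed to be among structure.get_export_formats()
content, extra_files = structure._exportcontent('xyz')  # pylint: disable=protected-access
assert isinstance(content, bytes)
assert isinstance(extra_files, dict)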
Example #10
    if as_option_string:
        option_items = []
        for option in option_list:
            t_opt = transport_cls.auth_options[option.name]
            if config.get(option.name) or config.get(option.name) is False:
                if t_opt.get('switch'):
                    option_value = option.opts[-1] if config.get(
                        option.name
                    ) else f"--no-{option.name.replace('_', '-')}"
                elif t_opt.get('is_flag'):
                    is_default = config.get(
                        option.name) == transport_cli.transport_option_default(
                            option.name, computer)
                    option_value = option.opts[-1] if is_default else ''
                else:
                    option_value = f'{option.opts[-1]}={option.type(config[option.name])}'
                option_items.append(option_value)
        opt_string = ' '.join(option_items)
        echo.echo(escape_for_bash(opt_string))
    else:
        table = []
        for name in transport_cls.get_valid_auth_params():
            if name in config:
                table.append((f'* {name}', config[name]))
            else:
                table.append((f'* {name}', '-'))
        echo.echo(tabulate.tabulate(table, tablefmt='plain'))


for ep in get_entry_points('aiida.transports'):
    computer_configure.add_command(transport_cli.create_configure_cmd(ep.name))
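The final loop registers one configure subcommand per installed transport plugin. A generic sketch of the same pattern with a hypothetical command factory (the group and create_cmd below are illustrative stand-ins, not the actual transport_cli helpers):

import click
from aiida.plugins.entry_point import get_entry_points

@click.group()
def configure():
    """Parent group that receives one subcommand per installed plugin."""

def create_cmd(name):
    """Hypothetical factory returning a click command for a single entry point."""
    @click.command(name=name)
    def _cmd():
        click.echo('configuring {}'.format(name))
    return _cmd

for entry_point in get_entry_points('aiida.transports'):
    configure.add_command(create_cmd(entry_point.name))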
Example #11
    def get_projectable_properties(self):
        """
        Get the projectable properties specific to the Computer class.

        :return: dict of projectable properties and the column_order list
        """
        from aiida.plugins.entry_point import get_entry_points
        from aiida.common.exceptions import EntryPointError

        schedulers = {}
        for entry_point in get_entry_points('aiida.schedulers'):
            try:
                schedulers[entry_point.name] = {'doc': entry_point.load().__doc__}
            except EntryPointError:
                continue

        transports = {}
        for entry_point in get_entry_points('aiida.transports'):
            try:
                transports[entry_point.name] = {'doc': entry_point.load().__doc__}
            except EntryPointError:
                continue

        projectable_properties = {
            'description': {
                'display_name': 'Description',
                'help_text': 'short description of the Computer',
                'is_foreign_key': False,
                'type': 'str',
                'is_display': False
            },
            'hostname': {
                'display_name': 'Host',
                'help_text': 'Name of the host',
                'is_foreign_key': False,
                'type': 'str',
                'is_display': True
            },
            'id': {
                'display_name': 'Id',
                'help_text': 'Id of the object',
                'is_foreign_key': False,
                'type': 'int',
                'is_display': False
            },
            'name': {
                'display_name': 'Name',
                'help_text': 'Name of the object',
                'is_foreign_key': False,
                'type': 'str',
                'is_display': True
            },
            'scheduler_type': {
                'display_name': 'Scheduler',
                'help_text': 'Scheduler type',
                'is_foreign_key': False,
                'type': 'str',
                'valid_choices': schedulers,
                'is_display': True
            },
            'transport_type': {
                'display_name': 'Transport type',
                'help_text': 'Transport Type',
                'is_foreign_key': False,
                'type': 'str',
                'valid_choices': transports,
                'is_display': False
            },
            'uuid': {
                'display_name': 'Unique ID',
                'help_text': 'Universally Unique Identifier',
                'is_foreign_key': False,
                'type': 'unicode',
                'is_display': True
            }
        }

        # Note: final schema will contain details for only the fields present in column order
        column_order = ['uuid', 'name', 'hostname', 'description', 'scheduler_type', 'transport_type']

        return projectable_properties, column_order
Example #12
def infer_calculation_entry_point(type_strings):
    """Try to infer a calculation entry point name for all the calculation type strings that are found in the database.

    Before the plugin system was introduced, the `type` column of the node table was a string based on the base node
    type with the module path and class name appended. For example, the `PwCalculation` class, which was a subclass
    of `JobCalculation`, would get `calculation.job.quantumespresso.pw.PwCalculation.` as its `type` string. At that
    point, `JobCalculation` still fulfilled the role of both the `Process` class and the `Node` class. In the
    migration for `v1.0.0`, the `type` had to become that of the actual node, i.e.
    `node.process.calculation.calcjob.CalcJobNode.`, which loses the information of which sub class it actually
    represented. That information should instead be stored in the `process_type` column, where the value is the name
    of the entry point of that calculation class.

    This function will, for a given set of calculation type strings from pre `v1.0.0` databases, try to map them onto
    the known entry points of the calculation category. This is the union of the entry points registered in the AiiDA
    registry (see the mapping above) and those available in the environment in which this function is run.

    If a type string cannot be mapped onto an entry point name, a fallback `process_type` string will be generated,
    based on part of the old `type` string. For example, `calculation.job.unknown.UnknownCalculation.` would get the
    process type string `unknown.UnknownCalculation`.

    The function will return a mapping of type strings onto their inferred process type strings.

    :param type_strings: a set of type strings whose entry point is to be inferred
    :return: a mapping of current node type string to the inferred entry point name
    """
    from reentry.entrypoint import EntryPoint
    from aiida.plugins.entry_point import get_entry_points

    prefix_calc_job = 'calculation.job.'
    entry_point_group = 'aiida.calculations'

    entry_point_names = []
    mapping_node_type_to_entry_point = {}

    # Build the list of known entry points, joining those present in the environment with the hard-coded set taken
    # from the aiida-registry. Note that if entry points with the same name are found in both sets, the entry point
    # from the local environment takes precedence.
    entry_points_local = get_entry_points(group=entry_point_group)
    entry_points_registry = [EntryPoint.parse(entry_point) for entry_point in registered_calculation_entry_points]

    entry_points = entry_points_local
    entry_point_names = [entry_point.name for entry_point in entry_points]

    for entry_point in entry_points_registry:
        if entry_point.name not in entry_point_names:
            entry_point_names.append(entry_point.name)

    for type_string in type_strings:

        # If it does not start with the calculation job prefix, it cannot possibly reference a calculation plugin
        if not type_string.startswith(prefix_calc_job):
            continue

        plugin_string = type_string[len(prefix_calc_job):]
        plugin_parts = [part for part in plugin_string.split('.') if part]
        plugin_class = plugin_parts.pop()
        inferred_entry_point_name = '.'.join(plugin_parts)

        if inferred_entry_point_name in entry_point_names:
            entry_point_string = '{entry_point_group}:{entry_point_name}'.format(
                entry_point_group=entry_point_group, entry_point_name=inferred_entry_point_name
            )
        elif inferred_entry_point_name:
            entry_point_string = '{plugin_name}.{plugin_class}'.format(
                plugin_name=inferred_entry_point_name, plugin_class=plugin_class
            )
        else:
            # If there is no inferred entry point name, i.e. there is no module name, use an empty string as fall back
            # This should only be the case for the type string `calculation.job.JobCalculation.`
            entry_point_string = ''

        mapping_node_type_to_entry_point[type_string] = entry_point_string

    return mapping_node_type_to_entry_point
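A hedged usage sketch of the mapping performed above, assuming the 'quantumespresso.pw' entry point is known (either installed locally or present in the registry mapping) while no 'unknown' entry point exists:

type_strings = {
    'calculation.job.quantumespresso.pw.PwCalculation.',
    'calculation.job.unknown.UnknownCalculation.',
    'calculation.job.JobCalculation.',
}
mapping = infer_calculation_entry_point(type_strings)
# Under the assumptions above this yields roughly:
#   'calculation.job.quantumespresso.pw.PwCalculation.' -> 'aiida.calculations:quantumespresso.pw'
#   'calculation.job.unknown.UnknownCalculation.'        -> 'unknown.UnknownCalculation'
#   'calculation.job.JobCalculation.'                    -> ''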