Example #1
class FieldTester:
    field = fields.TypedField('field', ClassA)
    field_maybe_none = fields.TypedField('field_maybe_none',
                                         ClassA,
                                         allow_none=True)

    def __init__(self, value):
        self.field = value
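The snippet above exercises ReFrame's fields.TypedField descriptor, which validates the type of a value at assignment time. A minimal, self-contained sketch of what such a typed descriptor does (written from scratch for illustration; this is not ReFrame's actual implementation):

class TypedField:
    '''Accept only instances of the given types (plus None if allow_none).'''

    def __init__(self, name, *types, allow_none=False):
        self._name = name
        self._types = types
        self._allow_none = allow_none

    def __set__(self, obj, value):
        if value is None and self._allow_none:
            obj.__dict__[self._name] = value
        elif isinstance(value, self._types):
            obj.__dict__[self._name] = value
        else:
            raise TypeError(
                'cannot set field %r: expected one of %s, got %s' %
                (self._name, self._types, type(value).__name__)
            )

    def __get__(self, obj, objtype=None):
        return self if obj is None else obj.__dict__[self._name]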
Example #2
class Singularity(ContainerPlatform):
    '''Container platform backend for running containers with `Singularity
    <https://sylabs.io/>`__.'''

    #: Enable CUDA support when launching the container.
    #:
    #: :type: boolean
    #: :default: :class:`False`
    with_cuda = fields.TypedField(bool)

    def __init__(self):
        super().__init__()
        self.with_cuda = False

    def emit_prepare_commands(self, stagedir):
        return []

    def launch_command(self, stagedir):
        super().launch_command(stagedir)
        mount_points = self.mount_points + [(stagedir, _STAGEDIR_MOUNT)]
        run_opts = [f'-B"{mp[0]}:{mp[1]}"' for mp in mount_points]
        if self.with_cuda:
            run_opts.append('--nv')

        run_opts += self.options
        if self.command:
            return (f'singularity exec {" ".join(run_opts)} '
                    f'{self.image} {self.command}')

        if self.commands:
            return (f"singularity exec {' '.join(run_opts)} {self.image} "
                    f"bash -c 'cd {self.workdir}; {'; '.join(self.commands)}'")

        return f'singularity run {" ".join(run_opts)} {self.image}'
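For reference, here is the kind of command string that launch_command() above assembles. The image, command, and paths below are made up, and the in-container target of the stage directory is whatever _STAGEDIR_MOUNT is set to:

# with image = 'ubuntu.sif', command = 'hostname', with_cuda = True,
# mount_points = [('/scratch/data', '/data')], options = [] and
# stagedir = '/scratch/stage', launch_command(stagedir) returns roughly:
#
#   singularity exec -B"/scratch/data:/data" -B"/scratch/stage:<_STAGEDIR_MOUNT>" --nv ubuntu.sif hostname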
Example #3
class Singularity(ContainerPlatform):
    '''Container platform backend for running containers with `Singularity
    <https://sylabs.io/>`__.'''

    #: Enable CUDA support when launching the container.
    #:
    #: :type: boolean
    #: :default: :class:`False`
    with_cuda = fields.TypedField('with_cuda', bool)

    def __init__(self):
        super().__init__()
        self.with_cuda = False

    def emit_prepare_commands(self):
        return []

    def launch_command(self):
        super().launch_command()
        exec_opts = ['-B"%s:%s"' % mp for mp in self.mount_points]
        if self.with_cuda:
            exec_opts.append('--nv')

        run_cmd = 'singularity exec %s %s bash -c ' % (' '.join(exec_opts),
                                                       self.image)
        return run_cmd + "'" + '; '.join(['cd ' + self.workdir] +
                                         self.commands) + "'"
Example #4
class FieldTester:
    value = fields.DeprecatedField(fields.TypedField('value', int),
                                   'value field is deprecated')
    _value = fields.TypedField('value', int)
    ro = fields.DeprecatedField(fields.TypedField('ro', int),
                                'value field is deprecated',
                                fields.DeprecatedField.OP_SET)
    _ro = fields.TypedField('ro', int)
    wo = fields.DeprecatedField(fields.TypedField('wo', int),
                                'value field is deprecated',
                                fields.DeprecatedField.OP_GET)

    def __init__(self):
        self._value = 1
        self._ro = 2
        self.wo = 3
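fields.DeprecatedField wraps another field and emits a deprecation warning when it is accessed; the OP_GET/OP_SET arguments above presumably restrict the warning to reads or to writes only. A self-contained sketch of that idea (an illustration, not ReFrame's actual implementation):

import warnings


class DeprecatedField:
    '''Forward accesses to the wrapped descriptor, warning on selected ops.'''

    OP_GET = 1
    OP_SET = 2
    OP_ALL = OP_GET | OP_SET

    def __init__(self, target, message, op=OP_ALL):
        self._target, self._message, self._op = target, message, op

    def __get__(self, obj, objtype=None):
        if self._op & self.OP_GET:
            warnings.warn(self._message, DeprecationWarning)

        return self._target.__get__(obj, objtype)

    def __set__(self, obj, value):
        if self._op & self.OP_SET:
            warnings.warn(self._message, DeprecationWarning)

        self._target.__set__(obj, value)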
Example #5
class JobLauncher(abc.ABC):
    '''Abstract base class for job launchers.

    A job launcher is the executable that actually launches a distributed
    program to multiple nodes, e.g., ``mpirun``, ``srun`` etc.

    .. warning::

       Users may not create job launchers directly.

    .. note::
       .. versionchanged:: 2.8
          Job launchers do not get a reference to a job during their
          initialization.

    '''

    #: List of options to be passed to the job launcher invocation.
    #:
    #: :type: :class:`List[str]`
    #: :default: ``[]``
    options = fields.TypedField('options', typ.List[str])

    def __init__(self):
        self.options = []

    @abc.abstractmethod
    def command(self, job):
        # The launcher command to be emitted for ``job``
        pass

    def run_command(self, job):
        return ' '.join(self.command(job) + self.options)
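A hypothetical concrete launcher illustrating the contract above (the class name and options are made up; the real backends ship with ReFrame and are selected through the launcher registry):

class MpirunLauncher(JobLauncher):
    def command(self, job):
        # Build the base command from the job descriptor
        return ['mpirun', '-np', str(job.num_tasks)]

# For a job with num_tasks == 4 and launcher options ['--oversubscribe'],
# run_command(job) returns 'mpirun -np 4 --oversubscribe'.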
Example #6
class Sarus(ContainerPlatform):
    '''Container platform backend for running containers with `Sarus
    <https://sarus.readthedocs.io>`__.'''

    #: Enable MPI support when launching the container.
    #:
    #: :type: boolean
    #: :default: :class:`False`
    with_mpi = fields.TypedField('with_mpi', bool)

    def __init__(self):
        super().__init__()
        self.with_mpi = False
        self._command = 'sarus'

    def emit_prepare_commands(self):
        return [self._command + ' pull %s' % self.image]

    def launch_command(self):
        super().launch_command()
        run_opts = [
            '--mount=type=bind,source="%s",destination="%s"' % mp
            for mp in self.mount_points
        ]
        if self.with_mpi:
            run_opts.append('--mpi')

        run_cmd = self._command + ' run %s %s bash -c ' % (' '.join(run_opts),
                                                           self.image)
        return run_cmd + "'" + '; '.join(['cd ' + self.workdir] +
                                         self.commands) + "'"
Example #7
class JobLauncher(abc.ABC):
    '''A job launcher.

    A job launcher is the executable that actually launches a distributed
    program to multiple nodes, e.g., ``mpirun``, ``srun`` etc.

    .. note::

       Users cannot create job launchers directly. You may retrieve a
       registered launcher backend through the
       :func:`reframe.core.launchers.registry.getlauncher` function.

    .. note::
       .. versionchanged:: 2.8
          Job launchers do not get a reference to a job during their
          initialization.

    '''

    #: List of options to be passed to the job launcher invocation.
    #:
    #: :type: :class:`list` of :class:`str`
    #: :default: ``[]``
    options = fields.TypedField('options', typ.List[str])

    def __init__(self):
        self.options = []

    @abc.abstractmethod
    def command(self, job):
        # The launcher command to be emitted for ``job``
        pass

    def run_command(self, job):
        return ' '.join(self.command(job) + self.options)
Example #8
class ConfigureBasedBuildSystem(BuildSystem):
    '''Abstract base class for configure-based build systems.'''

    #: The top-level directory of the code.
    #:
    #: This is set automatically by the framework based on the
    #: :attr:`reframe.core.pipeline.RegressionTest.sourcepath` attribute.
    #:
    #: :type: :class:`str`
    #: :default: :class:`None`
    srcdir = fields.TypedField(str, type(None))

    #: The CMake build directory, where all the generated files will be placed.
    #:
    #: :type: :class:`str`
    #: :default: :class:`None`
    builddir = fields.TypedField(str, type(None))

    #: Additional configuration options to be passed to the CMake invocation.
    #:
    #: :type: :class:`List[str]`
    #: :default: ``[]``
    config_opts = fields.TypedField(typ.List[str])

    #: Options to be passed to the subsequent ``make`` invocation.
    #:
    #: :type: :class:`List[str]`
    #: :default: ``[]``
    make_opts = fields.TypedField(typ.List[str])

    #: Same as for the :attr:`Make` build system.
    #:
    #: :type: integer
    #: :default: ``1``
    max_concurrency = fields.TypedField(int, type(None))

    def __init__(self):
        super().__init__()
        self.srcdir = None
        self.builddir = None
        self.config_opts = []
        self.make_opts = []
        self.max_concurrency = 1
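These fields are normally set on the build system attached to a regression test rather than on a standalone object. A hypothetical snippet from a test, assuming the usual ReFrame pattern of selecting a build system by name (the specific options are made up):

self.build_system = 'CMake'
self.build_system.builddir = 'build'
self.build_system.config_opts = ['-DCMAKE_BUILD_TYPE=Release']
self.build_system.make_opts = ['VERBOSE=1']
self.build_system.max_concurrency = 4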
Example #9
class Sarus(ContainerPlatform):
    '''Container platform backend for running containers with `Sarus
    <https://sarus.readthedocs.io>`__.'''

    #: Enable MPI support when launching the container.
    #:
    #: :type: boolean
    #: :default: :class:`False`
    with_mpi = fields.TypedField(bool)

    def __init__(self):
        super().__init__()
        self.with_mpi = False
        self._command = 'sarus'

    def emit_prepare_commands(self, stagedir):
        # The format that Sarus uses to call the images is
        # <reposerver>/<user>/<image>:<tag>. If an image was loaded
        # locally from a tar file, the <reposerver> is 'load'.
        if (not self.pull_image or not self.image
                or self.image.startswith('load/')):
            return []
        else:
            return [f'{self._command} pull {self.image}']

    def launch_command(self, stagedir):
        super().launch_command(stagedir)
        mount_points = self.mount_points + [(stagedir, _STAGEDIR_MOUNT)]
        run_opts = [
            f'--mount=type=bind,source="{mp[0]}",destination="{mp[1]}"'
            for mp in mount_points
        ]
        if self.with_mpi:
            run_opts.append('--mpi')

        if self.workdir:
            run_opts.append(f'-w {self.workdir}')

        run_opts += self.options
        if self.command:
            return (f'{self._command} run {" ".join(run_opts)} {self.image} '
                    f'{self.command}')

        if self.commands:
            return (f"{self._command} run {' '.join(run_opts)} {self.image} "
                    f"bash -c '{'; '.join(self.commands)}'")

        return f'{self._command} run {" ".join(run_opts)} {self.image}'
Example #10
class JobLauncher(abc.ABC):
    '''Abstract base class for job launchers.

    A job launcher is the executable that actually launches a distributed
    program to multiple nodes, e.g., ``mpirun``, ``srun`` etc.

    .. warning::

       Users may not create job launchers directly.

    .. note::
       .. versionchanged:: 2.8
          Job launchers do not get a reference to a job during their
          initialization.

    '''

    #: List of options to be passed to the job launcher invocation.
    #:
    #: :type: :class:`List[str]`
    #: :default: ``[]``
    options = fields.TypedField(typ.List[str])

    def __init__(self):
        self.options = []

    @abc.abstractmethod
    def command(self, job):
        '''The launcher command to be emitted for a specific job.

        Launcher backends provide concrete implementations of this method.

        :param job: A job descriptor.
        :returns: the basic launcher command as a list of tokens.
        '''

    def run_command(self, job):
        '''The full launcher command to be emitted for a specific job.

        This includes any user options.

        :param job: a job descriptor.
        :returns: the launcher command as a string.
        '''
        return ' '.join(self.command(job) + self.options)
Example #11
class JobLauncher(abc.ABC):
    '''A job launcher.

    A job launcher is the executable that actually launches a distributed
    program to multiple nodes, e.g., ``mpirun``, ``srun`` etc.

    .. note::
       This is an abstract class.
       Regression tests may not instantiate this class directly.

    .. note::
       .. versionchanged:: 2.8
          Job launchers do not get a reference to a job during their
          initialization.
    '''

    #: List of options to be passed to the job launcher invocation.
    #:
    #: :type: :class:`list` of :class:`str`
    #: :default: ``[]``
    options = fields.TypedField('options', typ.List[str])

    def __init__(self, options=[]):
        self.options = list(options)

    @abc.abstractmethod
    def command(self, job):
        '''The launcher command.

        :arg job: A :class:`reframe.core.schedulers.Job` that will be used by
            this launcher to properly emit its options.
            Subclasses may override this method and emit options according to
            the number of tasks associated with the job etc.
        :returns: a list of command line arguments (including the launcher
            executable).
        '''

    def run_command(self, job):
        return ' '.join(self.command(job) + self.options)
Example #12
class Sarus(ContainerPlatform):
    '''Container platform backend for running containers with `Sarus
    <https://sarus.readthedocs.io>`__.'''

    #: Enable MPI support when launching the container.
    #:
    #: :type: boolean
    #: :default: :class:`False`
    with_mpi = fields.TypedField('with_mpi', bool)

    def __init__(self):
        super().__init__()
        self.with_mpi = False
        self._command = 'sarus'

    def emit_prepare_commands(self):
        # The format that Sarus uses to call the images is
        # <reposerver>/<user>/<image>:<tag>. If an image was loaded
        # locally from a tar file, the <reposerver> is 'load'.
        if self.image.startswith('load/'):
            return []

        return [self._command + ' pull %s' % self.image]

    def launch_command(self):
        super().launch_command()
        run_opts = [
            '--mount=type=bind,source="%s",destination="%s"' % mp
            for mp in self.mount_points
        ]
        if self.with_mpi:
            run_opts.append('--mpi')

        run_opts += self.options
        run_cmd = self._command + ' run %s %s bash -c ' % (' '.join(run_opts),
                                                           self.image)
        return run_cmd + "'" + '; '.join(['cd ' + self.workdir] +
                                         self.commands) + "'"
Example #13
class ProgEnvironment(Environment):
    '''A class representing a programming environment.

    This type of environment also adds attributes for setting the compiler and
    compilation flags.

    If compilation flags are set to :class:`None` (the default, if not set
    otherwise in ReFrame's `configuration
    <configure.html#environments-configuration>`__), they are not passed to the
    ``make`` invocation.

    If you want to completely disable the propagation of the compilation flags
    to the ``make`` invocation, even if they are set, you should set the
    :attr:`propagate` attribute to :class:`False`.
    '''

    _cc = fields.TypedField('_cc', str)
    _cxx = fields.TypedField('_cxx', str)
    _ftn = fields.TypedField('_ftn', str)
    _cppflags = fields.TypedField('_cppflags', typ.List[str])
    _cflags = fields.TypedField('_cflags', typ.List[str])
    _cxxflags = fields.TypedField('_cxxflags', typ.List[str])
    _fflags = fields.TypedField('_fflags', typ.List[str])
    _ldflags = fields.TypedField('_ldflags', typ.List[str])

    def __init__(self,
                 name,
                 modules=None,
                 variables=None,
                 cc='cc',
                 cxx='CC',
                 ftn='ftn',
                 nvcc='nvcc',
                 cppflags=None,
                 cflags=None,
                 cxxflags=None,
                 fflags=None,
                 ldflags=None,
                 **kwargs):
        super().__init__(name, modules, variables)
        self._cc = cc
        self._cxx = cxx
        self._ftn = ftn
        self._nvcc = nvcc
        self._cppflags = cppflags or []
        self._cflags = cflags or []
        self._cxxflags = cxxflags or []
        self._fflags = fflags or []
        self._ldflags = ldflags or []

    @property
    def cc(self):
        '''The C compiler of this programming environment.

        :type: :class:`str`
        '''
        return self._cc

    @property
    def cxx(self):
        '''The C++ compiler of this programming environment.

        :type: :class:`str`
        '''
        return self._cxx

    @property
    def ftn(self):
        '''The Fortran compiler of this programming environment.

        :type: :class:`str`
        '''
        return self._ftn

    @property
    def cppflags(self):
        '''The preprocessor flags of this programming environment.

        :type: :class:`List[str]`
        '''
        return self._cppflags

    @property
    def cflags(self):
        '''The C compiler flags of this programming environment.

        :type: :class:`List[str]`
        '''
        return self._cflags

    @property
    def cxxflags(self):
        '''The C++ compiler flags of this programming environment.

        :type: :class:`List[str]`
        '''
        return self._cxxflags

    @property
    def fflags(self):
        '''The Fortran compiler flags of this programming environment.

        :type: :class:`List[str]`
        '''
        return self._fflags

    @property
    def ldflags(self):
        '''The linker flags of this programming environment.

        :type: :class:`List[str]`
        '''
        return self._ldflags

    @property
    def nvcc(self):
        return self._nvcc

    def details(self):
        def format_flags(flags):
            if not flags:
                return '<None>'
            else:
                return ' '.join(flags)

        base_details = super().details()
        extra_details = [
            '    CC: %s' % self.cc,
            '    CXX: %s' % self.cxx,
            '    FTN: %s' % self.ftn,
            '    NVCC: %s' % self.nvcc,
            '    CFLAGS: %s' % format_flags(self.cflags),
            '    CXXFLAGS: %s' % format_flags(self.cxxflags),
            '    FFLAGS: %s' % format_flags(self.fflags),
            '    CPPFLAGS: %s' % format_flags(self.cppflags),
            '    LDFLAGS: %s' % format_flags(self.ldflags)
        ]

        return '\n'.join([base_details, '\n'.join(extra_details)])
Example #14
class ProgEnvironment(Environment):
    """A class representing a programming environment.

    This type of environment also adds attributes for setting the compiler and
    compilation flags.

    If compilation flags are set to :class:`None` (the default, if not set
    otherwise in ReFrame's `configuration
    <configure.html#environments-configuration>`__), they are not passed to the
    ``make`` invocation.

    If you want to completely disable the propagation of the compilation flags
    to the ``make`` invocation, even if they are set, you should set the
    :attr:`propagate` attribute to :class:`False`.
    """

    _cc = fields.TypedField('_cc', str)
    _cxx = fields.TypedField('_cxx', str)
    _ftn = fields.TypedField('_ftn', str)
    _cppflags = fields.TypedField('_cppflags', typ.List[str], type(None))
    _cflags = fields.TypedField('_cflags', typ.List[str], type(None))
    _cxxflags = fields.TypedField('_cxxflags', typ.List[str], type(None))
    _fflags = fields.TypedField('_fflags', typ.List[str], type(None))
    _ldflags = fields.TypedField('_ldflags', typ.List[str], type(None))

    def __init__(self,
                 name,
                 modules=[],
                 variables={},
                 cc='cc',
                 cxx='CC',
                 ftn='ftn',
                 nvcc='nvcc',
                 cppflags=None,
                 cflags=None,
                 cxxflags=None,
                 fflags=None,
                 ldflags=None,
                 **kwargs):
        super().__init__(name, modules, variables)
        self._cc = cc
        self._cxx = cxx
        self._ftn = ftn
        self._nvcc = nvcc
        self._cppflags = cppflags
        self._cflags = cflags
        self._cxxflags = cxxflags
        self._fflags = fflags
        self._ldflags = ldflags

    @property
    def cc(self):
        """The C compiler of this programming environment.

        :type: :class:`str`
        """
        return self._cc

    @property
    def cxx(self):
        """The C++ compiler of this programming environment.

        :type: :class:`str` or :class:`None`
        """
        return self._cxx

    @property
    def ftn(self):
        """The Fortran compiler of this programming environment.

        :type: :class:`str` or :class:`None`
        """
        return self._ftn

    @property
    def cppflags(self):
        """The preprocessor flags of this programming environment.

        :type: :class:`str` or :class:`None`
        """
        return self._cppflags

    @property
    def cflags(self):
        """The C compiler flags of this programming environment.

        :type: :class:`str` or :class:`None`
        """
        return self._cflags

    @property
    def cxxflags(self):
        """The C++ compiler flags of this programming environment.

        :type: :class:`str` or :class:`None`
        """
        return self._cxxflags

    @property
    def fflags(self):
        """The Fortran compiler flags of this programming environment.

        :type: :class:`str` or :class:`None`
        """
        return self._fflags

    @property
    def ldflags(self):
        """The linker flags of this programming environment.

        :type: :class:`str` or :class:`None`
        """
        return self._ldflags

    @property
    def nvcc(self):
        return self._nvcc
Example #15
class ContainerPlatform(abc.ABC):
    '''The abstract base class of any container platform.

    Concrete container platforms inherit from this class and must override the
    :func:`emit_prepare_cmds` and :func:`emit_launch_cmds` abstract functions.
    '''

    registry = fields.TypedField('registry', str, type(None))
    image = fields.TypedField('image', str, type(None))
    requires_mpi = fields.TypedField('requires_mpi', bool)
    commands = fields.TypedField('commands', typ.List[str])
    mount_points = fields.TypedField('mount_points',
                                     typ.List[typ.Tuple[str, str]])
    workdir = fields.TypedField('workdir', str, type(None))

    def __init__(self):
        self.registry = None
        self.image = None
        self.requires_mpi = False
        self.commands = []
        self.mount_points  = []
        self.workdir = None

    @abc.abstractmethod
    def emit_prepare_cmds(self):
        '''Returns commands that are necessary before running with this
        container platform.

        :raises: `ContainerError` in case of errors.

        .. note::
            This method is relevant only to developers of new container
            platforms.
        '''

    @abc.abstractmethod
    def emit_launch_cmds(self):
        '''Returns the command for running with this container platform.

        :raises: `ContainerError` in case of errors.

        .. note::
            This method is relevant only to developers of new container
            platforms.
        '''
        if self.registry:
            self.image = '/'.join([self.registry, self.image])

    @abc.abstractmethod
    def validate(self):
        '''Validates this container platform.

        :raises: `ContainerError` in case of errors.

        .. note::
            This method is relevant only to developers of new container
            platforms.
        '''
        if self.image is None:
            raise ContainerError('no image specified')

        if not self.commands:
            raise ContainerError('no commands specified')
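A minimal concrete platform written against this (older) interface could look roughly as follows; mycp is a made-up container runtime used purely for illustration:

class MyContainerPlatform(ContainerPlatform):
    def emit_prepare_cmds(self):
        # Nothing to prepare for this toy platform
        return []

    def emit_launch_cmds(self):
        # The base class prefixes the registry to the image, if one is set
        super().emit_launch_cmds()
        cmds = '; '.join(['cd ' + self.workdir] + self.commands)
        return "mycp run %s bash -c '%s'" % (self.image, cmds)

    def validate(self):
        # The base class checks that an image and commands were given
        super().validate()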
Example #16
class Job:
    '''A job descriptor.

    A job descriptor is created by the framework after the "setup" phase and
    is associated with the test.

    .. warning::
       Users may not create a job descriptor directly.

    '''

    num_tasks = fields.TypedField('num_tasks', int)
    num_tasks_per_node = fields.TypedField('num_tasks_per_node', int,
                                           type(None))
    num_tasks_per_core = fields.TypedField('num_tasks_per_core', int,
                                           type(None))
    num_tasks_per_socket = fields.TypedField('num_tasks_per_socket', int,
                                             type(None))

    num_cpus_per_task = fields.TypedField('num_cpus_per_task', int, type(None))
    use_smt = fields.TypedField('use_smt', bool, type(None))
    time_limit = fields.TimerField('time_limit', type(None))

    #: Options to be passed to the backend job scheduler.
    #:
    #: :type: :class:`List[str]`
    #: :default: ``[]``
    options = fields.TypedField('options', typ.List[str])

    #: The (parallel) program launcher that will be used to launch the
    #: (parallel) executable of this job.
    #:
    #: Users are allowed to explicitly set the current job launcher, but this
    #: is only relevant in rare situations, such as when you want to wrap the
    #: current launcher command. For this specific scenario, you may have a
    #: look at the :class:`reframe.core.launchers.LauncherWrapper` class.
    #:
    #: The following example shows how you can replace the current partition's
    #: launcher for this test with the "local" launcher:
    #:
    #: .. code-block:: python
    #:
    #:    from reframe.core.backends import getlauncher
    #:
    #:    @rfm.run_after('setup')
    #:    def set_launcher(self):
    #:        self.job.launcher = getlauncher('local')()
    #:
    #: :type: :class:`reframe.core.launchers.JobLauncher`
    launcher = fields.TypedField('launcher', JobLauncher)
    scheduler = fields.TypedField('scheduler', JobScheduler)

    #: .. versionadded:: 2.21
    #:
    #: The ID of the current job.
    #:
    #: :type: :class:`int` or :class:`None`.
    jobid = fields.TypedField('jobid', int, type(None))

    #: .. versionadded:: 2.21
    #:
    #: The exit code of the job.
    #:
    #: This may or may not be set depending on the scheduler backend.
    #:
    #: :type: :class:`int` or :class:`None`.
    exitcode = fields.TypedField('exitcode', int, type(None))

    #: .. versionadded:: 2.21
    #:
    #: The state of the job.
    #:
    #: The value of this field is scheduler-specific.
    #:
    #: :type: :class:`str` or :class:`None`.
    state = fields.TypedField('state', str, type(None))

    #: .. versionadded:: 2.17
    #:
    #: The list of node names assigned to this job.
    #:
    #: This attribute is :class:`None` if no nodes are assigned to the job
    #: yet.
    #: This attribute is set reliably only for the ``slurm`` backend, i.e.,
    #: Slurm *with* accounting enabled.
    #: The ``squeue`` scheduler backend, i.e., Slurm *without* accounting,
    #: might not set this attribute for jobs that finish very quickly.
    #: For the ``local`` scheduler backend, this returns a one-element list
    #: containing the hostname of the current host.
    #:
    #: This attribute might be useful in a flexible regression test for
    #: determining the actual nodes that were assigned to the test.
    #: For more information on flexible node allocation, see the
    #: |--flex-alloc-nodes|_ command-line option.
    #:
    #: This attribute is *not* supported by the ``pbs`` scheduler backend.
    nodelist = fields.TypedField('nodelist', typ.List[str], type(None))

    # The sched_* arguments are exposed also to the frontend
    def __init__(self,
                 name,
                 workdir='.',
                 script_filename=None,
                 stdout=None,
                 stderr=None,
                 max_pending_time=None,
                 sched_flex_alloc_nodes=None,
                 sched_access=[],
                 sched_account=None,
                 sched_partition=None,
                 sched_reservation=None,
                 sched_nodelist=None,
                 sched_exclude_nodelist=None,
                 sched_exclusive_access=None,
                 sched_options=None):

        # Mutable fields
        self.num_tasks = 1
        self.num_tasks_per_node = None
        self.num_tasks_per_core = None
        self.num_tasks_per_socket = None
        self.num_cpus_per_task = None
        self.use_smt = None
        self.time_limit = None
        self.options = sched_options or []

        # Live job information; to be filled during job's lifetime by the
        # scheduler
        self.jobid = None
        self.exitcode = None
        self.state = None
        self.nodelist = None

        self._name = name
        self._workdir = workdir
        self._script_filename = script_filename or '%s.sh' % name
        self._stdout = stdout or '%s.out' % name
        self._stderr = stderr or '%s.err' % name
        self._max_pending_time = max_pending_time
        self._completion_time = None

        # Backend scheduler related information
        self._sched_flex_alloc_nodes = sched_flex_alloc_nodes
        self._sched_access = sched_access
        self._sched_nodelist = sched_nodelist
        self._sched_exclude_nodelist = sched_exclude_nodelist
        self._sched_partition = sched_partition
        self._sched_reservation = sched_reservation
        self._sched_account = sched_account
        self._sched_exclusive_access = sched_exclusive_access

    @classmethod
    def create(cls, scheduler, launcher, *args, **kwargs):
        ret = Job(*args, **kwargs)
        ret.scheduler, ret.launcher = scheduler, launcher
        return ret

    @property
    def name(self):
        return self._name

    @property
    def workdir(self):
        return self._workdir

    @property
    def max_pending_time(self):
        return self._max_pending_time

    @property
    def script_filename(self):
        return self._script_filename

    @property
    def stdout(self):
        return self._stdout

    @property
    def stderr(self):
        return self._stderr

    @property
    def sched_flex_alloc_nodes(self):
        return self._sched_flex_alloc_nodes

    @property
    def sched_access(self):
        return self._sched_access

    @property
    def sched_nodelist(self):
        return self._sched_nodelist

    @property
    def sched_exclude_nodelist(self):
        return self._sched_exclude_nodelist

    @property
    def sched_partition(self):
        return self._sched_partition

    @property
    def sched_reservation(self):
        return self._sched_reservation

    @property
    def sched_account(self):
        return self._sched_account

    @property
    def sched_exclusive_access(self):
        return self._sched_exclusive_access

    @property
    def completion_time(self):
        return self.scheduler.completion_time(self) or self._completion_time

    def prepare(self, commands, environs=None, **gen_opts):
        environs = environs or []
        if self.num_tasks <= 0:
            num_tasks_per_node = self.num_tasks_per_node or 1
            min_num_tasks = (-self.num_tasks
                             if self.num_tasks else num_tasks_per_node)

            try:
                guessed_num_tasks = self.guess_num_tasks()
            except NotImplementedError as e:
                raise JobError('flexible node allocation is not supported by '
                               'this backend') from e

            if guessed_num_tasks < min_num_tasks:
                raise JobError(
                    'could not satisfy the minimum task requirement: '
                    'required %s, found %s' %
                    (min_num_tasks, guessed_num_tasks))

            self.num_tasks = guessed_num_tasks
            getlogger().debug('flex_alloc_nodes: setting num_tasks to %s' %
                              self.num_tasks)

        with shell.generate_script(self.script_filename,
                                   **gen_opts) as builder:
            builder.write_prolog(self.scheduler.emit_preamble(self))
            builder.write(runtime.emit_loadenv_commands(*environs))
            for c in commands:
                builder.write_body(c)

    def guess_num_tasks(self):
        num_tasks_per_node = self.num_tasks_per_node or 1
        if isinstance(self.sched_flex_alloc_nodes, int):
            if self.sched_flex_alloc_nodes <= 0:
                raise JobError('invalid number of flex_alloc_nodes: %s' %
                               self.sched_flex_alloc_nodes)

            return self.sched_flex_alloc_nodes * num_tasks_per_node

        available_nodes = self.scheduler.allnodes()
        getlogger().debug('flex_alloc_nodes: total available nodes %s ' %
                          len(available_nodes))

        # Try to guess the number of tasks now
        available_nodes = self.scheduler.filternodes(self, available_nodes)
        if self.sched_flex_alloc_nodes == 'idle':
            available_nodes = {n for n in available_nodes if n.is_available()}
            getlogger().debug('flex_alloc_nodes: selecting idle nodes: '
                              'available nodes now: %s' % len(available_nodes))

        return len(available_nodes) * num_tasks_per_node

    def submit(self):
        return self.scheduler.submit(self)

    def wait(self):
        if self.jobid is None:
            raise JobNotStartedError('cannot wait an unstarted job')

        self.scheduler.wait(self)
        self._completion_time = self._completion_time or time.time()

    def cancel(self):
        if self.jobid is None:
            raise JobNotStartedError('cannot cancel an unstarted job')

        return self.scheduler.cancel(self)

    def finished(self):
        if self.jobid is None:
            raise JobNotStartedError('cannot poll an unstarted job')

        done = self.scheduler.finished(self)
        if done:
            self._completion_time = self._completion_time or time.time()

        return done
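A worked example of the flexible-allocation arithmetic in prepare() and guess_num_tasks() above (the numbers are made up):

# num_tasks = 0, num_tasks_per_node = 2, sched_flex_alloc_nodes = 4
#   guess_num_tasks() == 4 * 2 == 8, so prepare() sets num_tasks = 8
# With sched_flex_alloc_nodes = 'idle' instead, the guess becomes
#   len(<idle nodes surviving filternodes()>) * 2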
Example #17
class Environment:
    """This class abstracts away an environment to run regression tests.

    It is simply a collection of modules to be loaded and environment variables
    to be set when this environment is loaded by the framework.
    Users may not create or modify environments directly.
    """
    name = fields.TypedField('name', typ.Str[r'(\w|-)+'])
    modules = fields.TypedField('modules', typ.List[str])
    variables = fields.TypedField('variables', typ.Dict[str, str])

    def __init__(self, name, modules=[], variables=[]):
        self._name = name
        self._modules = list(modules)
        self._variables = collections.OrderedDict(variables)
        self._loaded = False
        self._saved_variables = {}
        self._conflicted = []
        self._preloaded = set()
        self._module_ops = []

    @property
    def name(self):
        """The name of this environment.

        :type: :class:`str`
        """
        return self._name

    @property
    def modules(self):
        """The modules associated with this environment.

        :type: :class:`list` of :class:`str`
        """
        return util.SequenceView(self._modules)

    @property
    def variables(self):
        """The environment variables associated with this environment.

        :type: dictionary of :class:`str` keys/values.
        """
        return util.MappingView(self._variables)

    @property
    def is_loaded(self):
        """:class:`True` if this environment is loaded,
        :class:`False` otherwise.
        """
        is_module_loaded = runtime().modules_system.is_module_loaded
        return (all(map(is_module_loaded, self._modules)) and
                all(os.environ.get(k, None) == os_ext.expandvars(v)
                    for k, v in self._variables.items()))

    def load(self):
        # conflicted module list must be filled at the time of load
        rt = runtime()
        for m in self._modules:
            if rt.modules_system.is_module_loaded(m):
                self._preloaded.add(m)

            conflicted = rt.modules_system.load_module(m, force=True)
            for c in conflicted:
                self._module_ops.append(('u', c))

            self._module_ops.append(('l', m))
            self._conflicted += conflicted

        for k, v in self._variables.items():
            if k in os.environ:
                self._saved_variables[k] = os.environ[k]

            os.environ[k] = os_ext.expandvars(v)

        self._loaded = True

    def unload(self):
        if not self._loaded:
            return

        for k, v in self._variables.items():
            if k in self._saved_variables:
                os.environ[k] = self._saved_variables[k]
            elif k in os.environ:
                del os.environ[k]

        # Unload modules in reverse order
        for m in reversed(self._modules):
            if m not in self._preloaded:
                runtime().modules_system.unload_module(m)

        # Reload the conflicting modules that were previously unloaded
        for m in self._conflicted:
            runtime().modules_system.load_module(m)

        self._loaded = False

    def emit_load_commands(self):
        rt = runtime()
        emit_fn = {
            'l': rt.modules_system.emit_load_commands,
            'u': rt.modules_system.emit_unload_commands
        }
        module_ops = self._module_ops or [('l', m) for m in self._modules]

        # Emit module commands
        ret = []
        for op, m in module_ops:
            ret += emit_fn[op](m)

        # Emit variable set commands
        for k, v in self._variables.items():
            ret.append('export %s=%s' % (k, v))

        return ret

    def emit_unload_commands(self):
        rt = runtime()

        # Invert the logic of module operations, since we are unloading the
        # environment
        emit_fn = {
            'l': rt.modules_system.emit_unload_commands,
            'u': rt.modules_system.emit_load_commands
        }

        ret = []
        for var in self._variables.keys():
            ret.append('unset %s' % var)

        if self._module_ops:
            module_ops = reversed(self._module_ops)
        else:
            module_ops = (('l', m) for m in reversed(self._modules))

        for op, m in module_ops:
            ret += emit_fn[op](m)

        return ret

    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return NotImplemented

        return (self._name == other._name and
                set(self._modules) == set(other._modules) and
                self._variables == other._variables)

    def details(self):
        """Return a detailed description of this environment."""
        variables = '\n'.join(' '*8 + '- %s=%s' % (k, v)
                              for k, v in self.variables.items())
        lines = [
            self._name + ':',
            '    modules: ' + ', '.join(self.modules),
            '    variables:' + ('\n' if variables else '') + variables
        ]
        return '\n'.join(lines)

    def __str__(self):
        return self.name

    def __repr__(self):
        ret = "{0}(name='{1}', modules={2}, variables={3})"
        return ret.format(type(self).__name__, self.name,
                          self.modules, self.variables)
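A rough usage sketch of emit_load_commands() (the module and variable names are made up; the exact module commands depend on the configured modules system, and a ReFrame runtime must already be initialized):

env = Environment('myenv', modules=['gcc'], variables=[('FOO', 'bar')])
env.emit_load_commands()
# -> [<load command(s) for module 'gcc'>, 'export FOO=bar']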
Example #18
class B:
    z = fields.TypedField(int)

    def __init__(self, x, y):
        self.x = x
        self.y = y
Example #19
class ModulesSystem:
    '''A modules system.'''

    module_map = fields.TypedField(types.Dict[str, types.List[str]])

    @classmethod
    def create(cls, modules_kind=None):
        getlogger().debug(f'Initializing modules system {modules_kind!r}')
        if modules_kind is None or modules_kind == 'nomod':
            return ModulesSystem(NoModImpl())
        elif modules_kind == 'tmod31':
            return ModulesSystem(TMod31Impl())
        elif modules_kind == 'tmod':
            return ModulesSystem(TModImpl())
        elif modules_kind == 'tmod32':
            return ModulesSystem(TModImpl())
        elif modules_kind == 'tmod4':
            return ModulesSystem(TMod4Impl())
        elif modules_kind == 'lmod':
            return ModulesSystem(LModImpl())
        elif modules_kind == 'spack':
            return ModulesSystem(SpackImpl())
        else:
            raise ConfigError('unknown module system: %s' % modules_kind)

    def __init__(self, backend):
        self._backend = backend
        self.module_map = {}

    def resolve_module(self, name):
        '''Resolve module ``name`` in the registered module map.

        :returns: the list of real module names pointed to by ``name``.
        :raises: :class:`reframe.core.exceptions.ConfigError` if the mapping
            contains a cycle.

        :meta private:
        '''
        ret = OrderedSet()
        visited = set()
        unvisited = [(name, None)]
        path = []
        while unvisited:
            node, parent = unvisited.pop()
            # Adjust the path
            while path and path[-1] != parent:
                path.pop()

            # Handle modules mappings with self loops
            if node == parent:
                ret.add(node)
                continue

            try:
                # We insert the adjacent nodes in reverse order, so as to
                # preserve the DFS access order
                adjacent = reversed(self.module_map[node])
            except KeyError:
                # We have reached a terminal node
                ret.add(node)
            else:
                path.append(node)
                for m in adjacent:
                    if m in path and m != node:
                        raise EnvironError('module cyclic dependency: ' +
                                           '->'.join(path + [m]))
                    if m not in visited:
                        unvisited.append((m, node))

            visited.add(node)

        return list(ret)

    @property
    def backend(self):
        return self._backend

    def available_modules(self, substr=None):
        '''Return a list of available modules that contain ``substr`` in their
        name.

        :rtype: List[str]
        '''
        return [str(m) for m in self._backend.available_modules(substr or '')]

    def loaded_modules(self):
        '''Return a list of loaded modules.

        :rtype: List[str]
        '''
        return [str(m) for m in self._backend.loaded_modules()]

    def conflicted_modules(self, name, collection=False, path=None):
        '''Return the list of the modules conflicting with module ``name``.

        If module ``name`` resolves to multiple real modules, then the returned
        list will be the concatenation of the conflict lists of all the real
        modules.

        :arg name: The name of the module.
        :arg collection: The module is a "module collection" (TMod4/LMod only).
        :arg path: The path where the module resides if not in the default
            ``MODULEPATH``.
        :returns: A list of conflicting module names.

        .. versionchanged:: 3.3
           The ``collection`` argument is added.

        .. versionchanged:: 3.5.0
           The ``path`` argument is added.

        '''
        ret = []
        for m in self.resolve_module(name):
            ret += self._conflicted_modules(m, collection, path)

        return ret

    def _conflicted_modules(self, name, collection=False, path=None):
        return [
            str(m)
            for m in self._backend.conflicted_modules(
                Module(name, collection, path)
            )
        ]

    def execute(self, cmd, *args):
        '''Execute an arbitrary module command.

        :arg cmd: The command to execute, e.g., ``load``, ``restore`` etc.
        :arg args: The arguments to pass to the command.
        :returns: The command output.
        '''
        return self._backend.execute(cmd, *args)

    def load_module(self, name, collection=False, path=None, force=False):
        '''Load the module ``name``.

        :arg collection: The module is a "module collection" (TMod4/Lmod only)
        :arg path: The path where the module resides if not in the default
            ``MODULEPATH``.
        :arg force: If set, forces the loading, unloading first any
            conflicting modules currently loaded. If module ``name`` refers to
            multiple real modules, all of the target modules will be loaded.
        :returns: A list of two-element tuples, where each tuple contains the
            module that was loaded and the list of modules that had to be
            unloaded first due to conflicts. This list will normally be of
            size one, but it can be longer if there is a mapping that maps
            module ``name`` to multiple other modules.

        .. versionchanged:: 3.3
           - The ``collection`` argument is added.
           - This function now returns a list of tuples.

        .. versionchanged:: 3.5.0
           - The ``path`` argument is added.
           - The ``force`` argument is now the last argument.

        '''
        ret = []
        for m in self.resolve_module(name):
            ret.append((m, self._load_module(m, collection, path, force)))

        return ret

    def _load_module(self, name, collection=False, path=None, force=False):
        module = Module(name, collection, path)
        loaded_modules = self._backend.loaded_modules()
        if module in loaded_modules:
            # Do not try to load the module if it is already present
            return []

        # Get the list of the modules that need to be unloaded
        unload_list = set()
        if force:
            conflict_list = self._backend.conflicted_modules(module)
            unload_list = set(loaded_modules) & set(conflict_list)

        for m in unload_list:
            self._backend.unload_module(m)

        self._backend.load_module(module)
        return [str(m) for m in unload_list]

    def unload_module(self, name, collection=False, path=None):
        '''Unload module ``name``.

        :arg name: The name of the module to unload. If module ``name``
            resolves to multiple real modules, all of the referenced modules
            will be unloaded in reverse order.
        :arg collection: The module is a "module collection" (TMod4 only)
        :arg path: The path where the module resides if not in the default
            ``MODULEPATH``.

        .. versionchanged:: 3.3
           The ``collection`` argument was added.

        .. versionchanged:: 3.5.0
           The ``path`` argument is added.

        '''
        for m in reversed(self.resolve_module(name)):
            self._unload_module(m, collection, path)

    def _unload_module(self, name, collection=False, path=None):
        self._backend.unload_module(Module(name, collection, path))

    def is_module_loaded(self, name):
        '''Check if module ``name`` is loaded.

        If module ``name`` refers to multiple real modules, this method will
        return :class:`True` only if all the referees are loaded.
        '''
        return all(self._is_module_loaded(m)
                   for m in self.resolve_module(name))

    def _is_module_loaded(self, name):
        return self._backend.is_module_loaded(Module(name))

    def load_mapping(self, mapping):
        '''Update the internal module mappings using a single mapping.

        :arg mapping: a string specifying the module mapping.
            Example syntax: ``'m0: m1 m2'``.

        :meta private:
        '''
        key, *rest = mapping.split(':')
        if len(rest) != 1:
            raise ConfigError('invalid mapping syntax: %s' % mapping)

        key = key.strip()
        values = rest[0].split()
        if not key:
            raise ConfigError('no key found in mapping: %s' % mapping)

        if not values:
            raise ConfigError('no mapping defined for module: %s' % key)

        self.module_map[key] = list(OrderedDict.fromkeys(values))

    def load_mapping_from_file(self, filename):
        '''Update the internal module mappings from mappings read from file.

        :meta private:
        '''
        with open(filename) as fp:
            for lineno, line in enumerate(fp, start=1):
                line = line.strip().split('#')[0]
                if not line:
                    continue

                try:
                    self.load_mapping(line)
                except ConfigError as e:
                    raise ConfigError('%s:%s' % (filename, lineno)) from e

    @property
    def name(self):
        '''The name of this module system.'''
        return self._backend.name()

    @property
    def version(self):
        '''The version of this module system.'''
        return self._backend.version()

    def unload_all(self):
        '''Unload all loaded modules.'''
        return self._backend.unload_all()

    @property
    def searchpath(self):
        '''The module system search path as a list of directories.'''
        return self._backend.searchpath()

    def searchpath_add(self, *dirs):
        '''Add ``dirs`` to the module system search path.'''
        return self._backend.searchpath_add(*dirs)

    def searchpath_remove(self, *dirs):
        '''Remove ``dirs`` from the module system search path.'''
        return self._backend.searchpath_remove(*dirs)

    def change_module_path(self, *dirs):
        return self._backend.change_module_path(*dirs)

    def emit_load_commands(self, name, collection=False, path=None):
        '''Return the appropriate shell commands for loading a module.

        Module mappings are not taken into account by this function.

        :arg name: The name of the module to load.
        :arg collection: The module is a "module collection" (TMod4/LMod only)
        :arg path: The path where the module resides if not in the default
            ``MODULEPATH``.
        :returns: A list of shell commands.

        .. versionchanged:: 3.3
           The ``collection`` argument was added and module mappings are no
           longer taken into account by this function.

        .. versionchanged:: 3.5.0
           The ``path`` argument is added.

        '''

        # We don't consider module mappings here, because we cannot treat
        # correctly possible conflicts
        return self._backend.emit_load_instr(Module(name, collection, path))

    def emit_unload_commands(self, name, collection=False, path=None):
        '''Return the appropriate shell commands for unloading a module.

        Module mappings are not taken into account by this function.

        :arg name: The name of the module to unload.
        :arg collection: The module is a "module collection" (TMod4/LMod only)
        :arg path: The path where the module resides if not in the default
            ``MODULEPATH``.
        :returns: A list of shell commands.

        .. versionchanged:: 3.3
           The ``collection`` argument was added and module mappings are no
           longer taken into account by this function.

        .. versionchanged:: 3.5.0
           The ``path`` argument is added.

        '''

        # See comment in emit_load_commands()
        return self._backend.emit_unload_instr(Module(name, collection, path))

    def __str__(self):
        return str(self._backend)
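A short usage sketch of the module-mapping resolution implemented above (the module names are made up; 'nomod' is the no-op backend handled by create()):

ms = ModulesSystem.create('nomod')
ms.load_mapping('gcc: gcc/10.1.0 binutils')
ms.resolve_module('gcc')       # -> ['gcc/10.1.0', 'binutils']

# Self-loops are allowed and simply terminate the search at that module:
ms.load_mapping('gcc/10.1.0: gcc/10.1.0')
ms.resolve_module('gcc')       # still -> ['gcc/10.1.0', 'binutils']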
Example #20
class SystemPartition:
    '''A representation of a system partition inside ReFrame.

    This class is immutable.
    '''

    _name      = fields.TypedField('_name', typ.Str[r'(\w|-)+'])
    _descr     = fields.TypedField('_descr', str)
    _access    = fields.TypedField('_access', typ.List[str])
    _environs  = fields.TypedField('_environs', typ.List[Environment])
    _resources = fields.TypedField('_resources', typ.Dict[str, typ.List[str]])
    _local_env = fields.TypedField('_local_env', Environment, type(None))
    _container_environs = fields.TypedField('_container_environs',
                                            typ.Dict[str, Environment])

    # maximum concurrent jobs
    _max_jobs  = fields.TypedField('_max_jobs', int)

    def __init__(self, name, descr=None, scheduler=None, launcher=None,
                 access=[], environs=[], resources={}, local_env=None,
                 max_jobs=1):
        self._name  = name
        self._descr = descr or name
        self._scheduler = scheduler
        self._launcher  = launcher
        self._access    = list(access)
        self._environs  = list(environs)
        self._resources = dict(resources)
        self._max_jobs  = max_jobs
        self._local_env = local_env
        self._container_environs = {}

        # Parent system
        self._system = None

    @property
    def access(self):
        return utility.SequenceView(self._access)

    @property
    def descr(self):
        '''A detailed description of this partition.'''
        return self._descr

    @property
    def environs(self):
        return utility.SequenceView(self._environs)

    @property
    def container_environs(self):
        return utility.MappingView(self._container_environs)

    @property
    def fullname(self):
        '''Return the fully-qualified name of this partition.

        The fully-qualified name is of the form
        ``<parent-system-name>:<partition-name>``.

        :type: `str`
        '''
        if self._system is None:
            return self._name
        else:
            return '%s:%s' % (self._system.name, self._name)

    @property
    def local_env(self):
        return self._local_env

    @property
    def max_jobs(self):
        return self._max_jobs

    @property
    def name(self):
        '''The name of this partition.

        :type: `str`
        '''
        return self._name

    @property
    def resources(self):
        return utility.MappingView(self._resources)

    @property
    def scheduler(self):
        '''The type of the backend scheduler of this partition.

        :returns: a subclass of :class:`reframe.core.schedulers.Job`.

        .. note::
           .. versionchanged:: 2.8

           Prior versions returned a string representing the scheduler and job
           launcher combination.
        '''
        return self._scheduler

    @property
    def launcher(self):
        '''The type of the backend launcher of this partition.

        :returns: a subclass of :class:`reframe.core.launchers.JobLauncher`.

        .. note::
           .. versionadded:: 2.8
        '''
        return self._launcher

    def add_container_env(self, env_name, environ):
        self._container_environs[env_name] = environ

    # Instantiate managed resource `name` with `value`.
    def get_resource(self, name, **values):
        ret = []
        for r in self._resources.get(name, []):
            try:
                ret.append(r.format(**values))
            except KeyError:
                pass

        return ret

    def environment(self, name):
        for e in self._environs:
            if e.name == name:
                return e

        return None

    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return NotImplemented

        return (self._name      == other.name and
                self._scheduler == other._scheduler and
                self._launcher  == other._launcher and
                self._access    == other._access and
                self._environs  == other._environs and
                self._resources == other._resources and
                self._local_env == other._local_env)

    def __str__(self):
        local_env = re.sub('(?m)^', 6*' ', ' - ' + self._local_env.details())
        lines = [
            '%s [%s]:' % (self._name, self._descr),
            '    fullname: ' + self.fullname,
            '    scheduler: ' + self._scheduler.registered_name,
            '    launcher: '  + self._launcher.registered_name,
            '    access: ' + ' '.join(self._access),
            '    local_env:\n' + local_env,
            '    environs: ' + ', '.join(str(e) for e in self._environs)
        ]
        return '\n'.join(lines)

    def __repr__(self):
        return debug.repr(self)
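For reference, get_resource() above simply formats the option templates registered for a named resource; a hypothetical example (the resource name and option template are made up):

# _resources = {'gpu': ['--gres=gpu:{num_gpus_per_node}']}
# partition.get_resource('gpu', num_gpus_per_node=2)  # -> ['--gres=gpu:2']
# Templates whose placeholders cannot be filled are silently skipped.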
Example #21
class System:
    '''A representation of a system inside ReFrame.'''
    _name  = fields.TypedField('_name', typ.Str[r'(\w|-)+'])
    _descr = fields.TypedField('_descr', str)
    _hostnames  = fields.TypedField('_hostnames', typ.List[str])
    _partitions = fields.TypedField('_partitions', typ.List[SystemPartition])
    _modules_system = fields.TypedField('_modules_system',
                                        typ.Str[r'(\w|-)+'], type(None))
    _preload_env = fields.TypedField('_preload_env', Environment, type(None))
    _prefix = fields.TypedField('_prefix', str)
    _stagedir  = fields.TypedField('_stagedir', str, type(None))
    _outputdir = fields.TypedField('_outputdir', str, type(None))
    _perflogdir = fields.TypedField('_perflogdir', str, type(None))
    _resourcesdir = fields.TypedField('_resourcesdir', str)

    def __init__(self, name, descr=None, hostnames=[], partitions=[],
                 preload_env=None, prefix='.', stagedir=None, outputdir=None,
                 perflogdir=None, resourcesdir='.', modules_system=None):
        self._name  = name
        self._descr = descr or name
        self._hostnames  = list(hostnames)
        self._partitions = list(partitions)
        self._modules_system = modules_system
        self._preload_env = preload_env
        self._prefix = prefix
        self._stagedir = stagedir
        self._outputdir = outputdir
        self._perflogdir = perflogdir
        self._resourcesdir = resourcesdir

        # Set parent system for the given partitions
        for p in partitions:
            p._system = self

    @property
    def name(self):
        '''The name of this system.'''
        return self._name

    @property
    def descr(self):
        '''The description of this system.'''
        return self._descr

    @property
    def hostnames(self):
        '''The hostname patterns associated with this system.'''
        return self._hostnames

    @property
    def modules_system(self):
        '''The modules system name associated with this system.'''
        return self._modules_system

    @property
    def preload_environ(self):
        '''The environment to load whenever ReFrame runs on this system.

        .. note::
           .. versionadded:: 2.19
        '''
        return self._preload_env

    @property
    def prefix(self):
        '''The ReFrame prefix associated with this system.'''
        return self._prefix

    @property
    def stagedir(self):
        '''The ReFrame stage directory prefix associated with this system.'''
        return self._stagedir

    @property
    def outputdir(self):
        '''The ReFrame output directory prefix associated with this system.'''
        return self._outputdir

    @property
    def perflogdir(self):
        '''The ReFrame log directory prefix associated with this system.'''
        return self._perflogdir

    @property
    def resourcesdir(self):
        '''Global resources directory for this system.

        You may use this directory for storing large resource files of your
        regression tests.
        See `here <configure.html#system-configuration>`__ on how to configure
        this.

        :type: :class:`str`
        '''
        return self._resourcesdir

    @property
    def partitions(self):
        '''All the system partitions associated with this system.'''
        return utility.SequenceView(self._partitions)

    def add_partition(self, partition):
        partition._system = self
        self._partitions.append(partition)

    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return NotImplemented

        return (self._name == other._name and
                self._hostnames  == other._hostnames and
                self._partitions == other._partitions)

    def __repr__(self):
        return debug.repr(self)
Exemplo n.º 22
class ProgEnvironment(Environment):
    '''A class representing a programming environment.

    This type of environment adds also properties for retrieving the compiler
    and compilation flags.

    .. warning::
       Users may not create :class:`ProgEnvironment` objects directly.
    '''

    _cc = fields.TypedField('_cc', str)
    _cxx = fields.TypedField('_cxx', str)
    _ftn = fields.TypedField('_ftn', str)
    _cppflags = fields.TypedField('_cppflags', typ.List[str])
    _cflags = fields.TypedField('_cflags', typ.List[str])
    _cxxflags = fields.TypedField('_cxxflags', typ.List[str])
    _fflags = fields.TypedField('_fflags', typ.List[str])
    _ldflags = fields.TypedField('_ldflags', typ.List[str])

    def __init__(self,
                 name,
                 modules=None,
                 variables=None,
                 cc='cc',
                 cxx='CC',
                 ftn='ftn',
                 nvcc='nvcc',
                 cppflags=None,
                 cflags=None,
                 cxxflags=None,
                 fflags=None,
                 ldflags=None,
                 **kwargs):
        super().__init__(name, modules, variables)
        self._cc = cc
        self._cxx = cxx
        self._ftn = ftn
        self._nvcc = nvcc
        self._cppflags = cppflags or []
        self._cflags   = cflags   or []
        self._cxxflags = cxxflags or []
        self._fflags   = fflags   or []
        self._ldflags  = ldflags  or []

    @property
    def cc(self):
        '''The C compiler of this programming environment.

        :type: :class:`str`
        '''
        return self._cc

    @property
    def cxx(self):
        '''The C++ compiler of this programming environment.

        :type: :class:`str`
        '''
        return self._cxx

    @property
    def ftn(self):
        '''The Fortran compiler of this programming environment.

        :type: :class:`str`
        '''
        return self._ftn

    @property
    def cppflags(self):
        '''The preprocessor flags of this programming environment.

        :type: :class:`List[str]`
        '''
        return self._cppflags

    @property
    def cflags(self):
        '''The C compiler flags of this programming environment.

        :type: :class:`List[str]`
        '''
        return self._cflags

    @property
    def cxxflags(self):
        '''The C++ compiler flags of this programming environment.

        :type: :class:`List[str]`
        '''
        return self._cxxflags

    @property
    def fflags(self):
        '''The Fortran compiler flags of this programming environment.

        :type: :class:`List[str]`
        '''
        return self._fflags

    @property
    def ldflags(self):
        '''The linker flags of this programming environment.

        :type: :class:`List[str]`
        '''
        return self._ldflags

    @property
    def nvcc(self):
        return self._nvcc
Exemplo n.º 23
class Job(abc.ABC):
    """A job descriptor.

    .. caution::
       This is an abstract class.
       Users may not create jobs directly.
    """

    #: Options to be passed to the backend job scheduler.
    #:
    #: :type: :class:`List[str]`
    #: :default: ``[]``
    options = fields.TypedField('options', typ.List[str])

    #: The parallel program launcher that will be used to launch the parallel
    #: executable of this job.
    #:
    #: :type: :class:`reframe.core.launchers.JobLauncher`
    launcher = fields.TypedField('launcher', JobLauncher)

    _jobid = fields.TypedField('_jobid', int, type(None))
    _exitcode = fields.TypedField('_exitcode', int, type(None))
    _state = fields.TypedField('_state', JobState, type(None))

    # The sched_* arguments are exposed also to the frontend
    def __init__(self,
                 name,
                 launcher,
                 workdir='.',
                 num_tasks=1,
                 num_tasks_per_node=None,
                 num_tasks_per_core=None,
                 num_tasks_per_socket=None,
                 num_cpus_per_task=None,
                 use_smt=None,
                 time_limit=None,
                 script_filename=None,
                 stdout=None,
                 stderr=None,
                 pre_run=[],
                 post_run=[],
                 sched_flex_alloc_tasks=None,
                 sched_access=[],
                 sched_account=None,
                 sched_partition=None,
                 sched_reservation=None,
                 sched_nodelist=None,
                 sched_exclude_nodelist=None,
                 sched_exclusive_access=None,
                 sched_options=[]):

        # Mutable fields
        self.options = list(sched_options)
        self.launcher = launcher

        self._name = name
        self._workdir = workdir
        self._num_tasks = num_tasks
        self._num_tasks_per_node = num_tasks_per_node
        self._num_tasks_per_core = num_tasks_per_core
        self._num_tasks_per_socket = num_tasks_per_socket
        self._num_cpus_per_task = num_cpus_per_task
        self._use_smt = use_smt
        self._script_filename = script_filename or '%s.sh' % name
        self._stdout = stdout or os.path.join(workdir, '%s.out' % name)
        self._stderr = stderr or os.path.join(workdir, '%s.err' % name)
        self._time_limit = time_limit
        self._nodelist = None

        # Backend scheduler related information
        self._sched_flex_alloc_tasks = sched_flex_alloc_tasks
        self._sched_access = sched_access
        self._sched_nodelist = sched_nodelist
        self._sched_exclude_nodelist = sched_exclude_nodelist
        self._sched_partition = sched_partition
        self._sched_reservation = sched_reservation
        self._sched_account = sched_account
        self._sched_exclusive_access = sched_exclusive_access

        # Live job information; to be filled during job's lifetime by the
        # scheduler
        self._jobid = None
        self._exitcode = None
        self._state = None

    def __repr__(self):
        return debug.repr(self)

    # Read-only properties
    @property
    def exitcode(self):
        return self._exitcode

    @property
    def jobid(self):
        return self._jobid

    @property
    def state(self):
        return self._state

    @property
    def name(self):
        return self._name

    @property
    def workdir(self):
        return self._workdir

    @property
    def num_tasks(self):
        """The number of tasks assigned to this job.

        This attribute is useful in a flexible regression test for determining
        the actual number of tasks that ReFrame assigned to the test.

        For more information on flexible task allocation, please refer to the
        `tutorial <advanced.html#flexible-regression-tests>`__.
        """
        return self._num_tasks

    @property
    def script_filename(self):
        return self._script_filename

    @property
    def stdout(self):
        return self._stdout

    @property
    def stderr(self):
        return self._stderr

    @property
    def time_limit(self):
        return self._time_limit

    @property
    def num_cpus_per_task(self):
        return self._num_cpus_per_task

    @property
    def num_tasks_per_core(self):
        return self._num_tasks_per_core

    @property
    def num_tasks_per_node(self):
        return self._num_tasks_per_node

    @property
    def num_tasks_per_socket(self):
        return self._num_tasks_per_socket

    @property
    def use_smt(self):
        return self._use_smt

    @property
    def sched_flex_alloc_tasks(self):
        return self._sched_flex_alloc_tasks

    @property
    def sched_access(self):
        return self._sched_access

    @property
    def sched_nodelist(self):
        return self._sched_nodelist

    @property
    def sched_exclude_nodelist(self):
        return self._sched_exclude_nodelist

    @property
    def sched_partition(self):
        return self._sched_partition

    @property
    def sched_reservation(self):
        return self._sched_reservation

    @property
    def sched_account(self):
        return self._sched_account

    @property
    def sched_exclusive_access(self):
        return self._sched_exclusive_access

    def prepare(self, commands, environs=None, **gen_opts):
        environs = environs or []
        if self.num_tasks <= 0:
            num_tasks_per_node = self.num_tasks_per_node or 1
            min_num_tasks = (-self.num_tasks
                             if self.num_tasks else num_tasks_per_node)

            try:
                guessed_num_tasks = self.guess_num_tasks()
            except NotImplementedError as e:
                raise JobError('flexible task allocation is not supported by '
                               'this backend') from e

            if guessed_num_tasks < min_num_tasks:
                nodes_required = min_num_tasks // num_tasks_per_node
                nodes_found = guessed_num_tasks // num_tasks_per_node
                raise JobError('could not find enough nodes: '
                               'required %s, found %s' %
                               (nodes_required, nodes_found))

            self._num_tasks = guessed_num_tasks
            getlogger().debug('flex_alloc_tasks: setting num_tasks to %s' %
                              self._num_tasks)

        with shell.generate_script(self.script_filename,
                                   **gen_opts) as builder:
            builder.write_prolog(self.emit_preamble())
            for e in environs:
                builder.write(e.emit_load_commands())

            for c in commands:
                builder.write_body(c)

    @abc.abstractmethod
    def emit_preamble(self):
        pass

    def guess_num_tasks(self):
        if isinstance(self.sched_flex_alloc_tasks, int):
            if self.sched_flex_alloc_tasks <= 0:
                raise JobError('invalid number of flex_alloc_tasks: %s' %
                               self.sched_flex_alloc_tasks)

            return self.sched_flex_alloc_tasks

        available_nodes = self.get_partition_nodes()
        getlogger().debug('flex_alloc_tasks: total available nodes in current '
                          'virtual partition: %s' % len(available_nodes))

        # Try to guess the number of tasks now
        available_nodes = self.filter_nodes(available_nodes, self.options)

        if self.sched_flex_alloc_tasks == 'idle':
            available_nodes = {n for n in available_nodes if n.is_available()}
            getlogger().debug('flex_alloc_tasks: selecting idle nodes: '
                              'available nodes now: %s' % len(available_nodes))

        num_tasks_per_node = self.num_tasks_per_node or 1
        num_tasks = len(available_nodes) * num_tasks_per_node
        return num_tasks

    @abc.abstractmethod
    def get_partition_nodes(self):
        # Get all nodes of the current virtual partition
        pass

    @abc.abstractmethod
    def filter_nodes(self, nodes, options):
        # Filter nodes according to the scheduler options
        pass

    @abc.abstractmethod
    def submit(self):
        pass

    @abc.abstractmethod
    def wait(self):
        if self._jobid is None:
            raise JobNotStartedError('cannot wait an unstarted job')

    @abc.abstractmethod
    def cancel(self):
        if self._jobid is None:
            raise JobNotStartedError('cannot cancel an unstarted job')

    @abc.abstractmethod
    def finished(self):
        if self._jobid is None:
            raise JobNotStartedError('cannot poll an unstarted job')

    @property
    def nodelist(self):
        """The list of node names assigned to this job.

        This attribute is :class:`None` if no nodes are assigned to the job
        yet.
        This attribute is set reliably only for the ``slurm`` backend, i.e.,
        Slurm *with* accounting enabled.
        The ``squeue`` scheduler backend, i.e., Slurm *without* accounting,
        might not set this attribute for jobs that finish very quickly.
        For the ``local`` scheduler backend, this returns a one-element list
        containing the hostname of the current host.

        This attribute might be useful in a flexible regression test for
        determining the actual nodes that were assigned to the test.

        For more information on flexible task allocation, please refer to the
        corresponding `section <advanced.html#flexible-regression-tests>`__ of
        the tutorial.

        This attribute is *not* supported by the ``pbs`` scheduler backend.

        .. versionadded:: 2.17

        """
        return self._nodelist
Exemplo n.º 24
class RegressionTest:
    """Base class for regression tests.

    All regression tests must eventually inherit from this class.
    This class provides the implementation of the pipeline phases that the
    regression test goes through during its lifetime.

    :arg name: The name of the test.
        If :class:`None`, the framework will try to assign a unique and
        human-readable name to the test.

    :arg prefix: The directory prefix of the test.
        If :class:`None`, the framework will set it to the directory containing
        the test file.

    .. note::
        The ``name`` and ``prefix`` arguments are just maintained for backward
        compatibility with the old (prior to 2.13) syntax of regression tests.
        Users are advised to use the new simplified syntax for writing
        regression tests.
        Refer to the :doc:`ReFrame Tutorial </tutorial>` for more information.

        This class is also directly available under the top-level
        :mod:`reframe` module.

        .. versionchanged:: 2.13

    """
    #: The name of the test.
    #:
    #: :type: string that can contain any character except ``/``
    name = fields.TypedField('name', typ.Str[r'[^\/]+'])

    #: List of programming environments supported by this test.
    #:
    #: If ``*`` is in the list then all programming environments are supported
    #: by this test.
    #:
    #: :type: :class:`List[str]`
    #: :default: ``[]``
    #:
    #: .. note::
    #:     .. versionchanged:: 2.12
    #:        Programming environments can now be specified using wildcards.
    #:
    #:     .. versionchanged:: 2.17
    #:        Support for wildcards is dropped.
    valid_prog_environs = fields.TypedField('valid_prog_environs',
                                            typ.List[str])

    #: List of systems supported by this test.
    #: The general syntax for systems is ``<sysname>[:<partname>]``.
    #:
    #: :type: :class:`List[str]`
    #: :default: ``[]``
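    #:
    #: For example (the system and partition names are hypothetical):
    #:
    #: ::
    #:
    #:     self.valid_systems = ['sys0', 'sys1:gpu']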
    valid_systems = fields.TypedField('valid_systems', typ.List[str])

    #: A detailed description of the test.
    #:
    #: :type: :class:`str`
    #: :default: ``self.name``
    descr = fields.TypedField('descr', str)

    #: The path to the source file or source directory of the test.
    #:
    #: It must be a path relative to the :attr:`sourcesdir`, pointing to a
    #: subfolder or a file contained in :attr:`sourcesdir`. This applies also
    #: in the case where :attr:`sourcesdir` is a Git repository.
    #:
    #: If it refers to a regular file, this file will be compiled using the
    #: :class:`SingleSource <reframe.core.buildsystems.SingleSource>` build
    #: system.
    #: If it refers to a directory, ReFrame will try to infer the build system
    #: to use for the project and will fall back in using the :class:`Make
    #: <reframe.core.buildsystems.Make>` build system, if it cannot find a more
    #: specific one.
    #:
    #: :type: :class:`str`
    #: :default: ``''``
    sourcepath = fields.TypedField('sourcepath', str)

    #: The directory containing the test's resources.
    #:
    #: This directory may be specified with an absolute path or with a path
    #: relative to the location of the test. Its contents will always be copied
    #: to the stage directory of the test.
    #:
    #: This attribute may also accept a URL, in which case ReFrame will treat
    #: it as a Git repository and will try to clone its contents in the stage
    #: directory of the test.
    #:
    #: If set to :class:`None`, the test has no resources and no action is
    #: taken.
    #:
    #: :type: :class:`str` or :class:`None`
    #: :default: ``'src'``
    #:
    #: .. note::
    #:     .. versionchanged:: 2.9
    #:        Allow :class:`None` values to be set also in regression tests
    #:        with a compilation phase
    #:
    #:     .. versionchanged:: 2.10
    #:        Support for Git repositories was added.
    sourcesdir = fields.TypedField('sourcesdir', str, type(None))

    #: The build system to be used for this test.
    #: If not specified, the framework will try to figure it out automatically
    #: based on the value of :attr:`sourcepath`.
    #:
    #: This field may be set using either a string referring to a concrete
    #: build system class name
    #: (see `build systems <reference.html#build-systems>`__) or an instance of
    #: :class:`reframe.core.buildsystems.BuildSystem`. The former is the
    #: recommended way.
    #:
    #:
    #: :type: :class:`str` or :class:`reframe.core.buildsystems.BuildSystem`.
    #: :default: :class:`None`.
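    #:
    #: For example:
    #:
    #: ::
    #:
    #:     self.build_system = 'Make'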
    #:
    #: .. versionadded:: 2.14
    build_system = BuildSystemField('build_system', type(None))

    #: List of shell commands to be executed before compiling.
    #:
    #: These commands are executed during the compilation phase and from
    #: inside the stage directory. **Each entry in the list spawns a new
    #: shell.**
    #:
    #: :type: :class:`List[str]`
    #: :default: ``[]``
    prebuild_cmd = fields.TypedField('prebuild_cmd', typ.List[str])

    #: List of shell commands to be executed after a successful compilation.
    #:
    #: These commands are executed during the compilation phase and from inside
    #: the stage directory. **Each entry in the list spawns a new shell.**
    #:
    #: :type: :class:`List[str]`
    #: :default: ``[]``
    postbuild_cmd = fields.TypedField('postbuild_cmd', typ.List[str])

    #: The name of the executable to be launched during the run phase.
    #:
    #: :type: :class:`str`
    #: :default: ``os.path.join('.', self.name)``
    executable = fields.TypedField('executable', str)

    #: List of options to be passed to the :attr:`executable`.
    #:
    #: :type: :class:`List[str]`
    #: :default: ``[]``
    executable_opts = fields.TypedField('executable_opts', typ.List[str])

    #: List of shell commands to execute before launching this job.
    #:
    #: These commands do not execute in the context of ReFrame.
    #: Instead, they are emitted in the generated job script just before the
    #: actual job launch command.
    #:
    #: :type: :class:`List[str]`
    #: :default: ``[]``
    #:
    #: .. note::
    #:    .. versionadded:: 2.10
    pre_run = fields.TypedField('pre_run', typ.List[str])

    #: List of shell commands to execute after launching this job.
    #:
    #: See :attr:`pre_run` for a more detailed description of the semantics.
    #:
    #: :type: :class:`List[str]`
    #: :default: ``[]``
    #:
    #: .. note::
    #:    .. versionadded:: 2.10
    post_run = fields.TypedField('post_run', typ.List[str])

    #: List of files to be kept after the test finishes.
    #:
    #: By default, the framework saves the standard output, the standard error
    #: and the generated shell script that was used to run this test.
    #:
    #: These files will be copied over to the framework’s output directory
    #: during the :func:`cleanup` phase.
    #:
    #: Directories are also accepted in this field.
    #:
    #: Relative path names are resolved against the stage directory.
    #:
    #: :type: :class:`List[str]`
    #: :default: ``[]``
    keep_files = fields.TypedField('keep_files', typ.List[str])

    #: List of files or directories (relative to the :attr:`sourcesdir`) that
    #: will be symlinked in the stage directory and not copied.
    #:
    #: You can use this variable to avoid copying very large files to the stage
    #: directory.
    #:
    #: :type: :class:`List[str]`
    #: :default: ``[]``
    readonly_files = fields.TypedField('readonly_files', typ.List[str])

    #: Set of tags associated with this test.
    #:
    #: This test can be selected from the frontend using any of these tags.
    #:
    #: :type: :class:`Set[str]`
    #: :default: an empty set
    tags = fields.TypedField('tags', typ.Set[str])

    #: List of people responsible for this test.
    #:
    #: When the test fails, this contact list will be printed out.
    #:
    #: :type: :class:`List[str]`
    #: :default: ``[]``
    maintainers = fields.TypedField('maintainers', typ.List[str])

    #: Mark this test as a strict performance test.
    #:
    #: If a test is marked as non-strict, the performance checking phase will
    #: always succeed, unless the ``--strict`` command-line option is passed
    #: when invoking ReFrame.
    #:
    #: :type: boolean
    #: :default: :class:`True`
    strict_check = fields.TypedField('strict_check', bool)

    #: Number of tasks required by this test.
    #:
    #: If the number of tasks is set to a number ``<=0``, ReFrame will try
    #: to flexibly allocate the number of tasks, based on the command line
    #: option ``--flex-alloc-tasks``.
    #: A negative number is used to indicate the minimum number of tasks
    #: required for the test.
    #: In this case, the minimum number of tasks is the absolute value of
    #: the number.
    #: Setting ``num_tasks`` to ``0`` is equivalent to setting it to
    #: ``-num_tasks_per_node``.
    #:
    #: :type: integral
    #: :default: ``1``
    #:
    #: .. note::
    #:     .. versionchanged:: 2.15
    #:        Added support for flexible allocation of the number of tasks
    #:        according to the ``--flex-alloc-tasks`` command line option
    #:        (see `Flexible task allocation
    #:        <running.html#flexible-task-allocation>`__)
    #:        if the number of tasks is set to ``0``.
    #:     .. versionchanged:: 2.16
    #:        Negative ``num_tasks`` is allowed for specifying the minimum
    #:        number of required tasks by the test.
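    #:
    #: For illustration (the values below are arbitrary):
    #:
    #: ::
    #:
    #:     self.num_tasks = -4   # flexible allocation: at least 4 tasks
    #:     self.num_tasks = 0    # equivalent to -num_tasks_per_node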
    num_tasks = fields.TypedField('num_tasks', int)

    #: Number of tasks per node required by this test.
    #:
    #: Ignored if :class:`None`.
    #:
    #: :type: integral or :class:`None`
    #: :default: :class:`None`
    num_tasks_per_node = fields.TypedField('num_tasks_per_node', int,
                                           type(None))

    #: Number of GPUs per node required by this test.
    #:
    #: :type: integral
    #: :default: ``0``
    num_gpus_per_node = fields.TypedField('num_gpus_per_node', int)

    #: Number of CPUs per task required by this test.
    #:
    #: Ignored if :class:`None`.
    #:
    #: :type: integral or :class:`None`
    #: :default: :class:`None`
    num_cpus_per_task = fields.TypedField('num_cpus_per_task', int, type(None))

    #: Number of tasks per core required by this test.
    #:
    #: Ignored if :class:`None`.
    #:
    #: :type: integral or :class:`None`
    #: :default: :class:`None`
    num_tasks_per_core = fields.TypedField('num_tasks_per_core', int,
                                           type(None))

    #: Number of tasks per socket required by this test.
    #:
    #: Ignored if :class:`None`.
    #:
    #: :type: integral or :class:`None`
    #: :default: :class:`None`
    num_tasks_per_socket = fields.TypedField('num_tasks_per_socket', int,
                                             type(None))

    #: Specify whether this test needs simultaneous multithreading enabled.
    #:
    #: Ignored if :class:`None`.
    #:
    #: :type: boolean or :class:`None`
    #: :default: :class:`None`
    use_multithreading = fields.TypedField('use_multithreading', bool,
                                           type(None))

    #: Specify whether this test needs exclusive access to nodes.
    #:
    #: :type: boolean
    #: :default: :class:`False`
    exclusive_access = fields.TypedField('exclusive_access', bool)

    #: Always execute this test locally.
    #:
    #: :type: boolean
    #: :default: :class:`False`
    local = fields.TypedField('local', bool)

    #: The set of reference values for this test.
    #:
    #: The reference values are specified as a scoped dictionary keyed on the
    #: performance variables defined in :attr:`perf_patterns` and scoped under
    #: the system/partition combinations.
    #: The reference itself is a three- or four-tuple that contains the
    #: reference value, the lower and upper thresholds and, optionally, the
    #: measurement unit.
    #: An example follows:
    #:
    #: .. code:: python
    #:
    #:    self.reference = {
    #:        'sys0:part0': {
    #:            'perfvar0': (50, -0.1, 0.1, 'Gflop/s'),
    #:            'perfvar1': (20, -0.1, 0.1, 'GB/s')
    #:        },
    #:        'sys0:part1': {
    #:            'perfvar0': (100, -0.1, 0.1, 'Gflop/s'),
    #:            'perfvar1': (40, -0.1, 0.1, 'GB/s')
    #:        }
    #:    }
    #:
    #: :type: A scoped dictionary with system names as scopes or :class:`None`
    #: :default: ``{}``
    reference = fields.ScopedDictField('reference', typ.Tuple[object])
    # FIXME: There is currently no way to express tuples of `float`s or
    # `None`s, so we just use the very generic `object`

    #: Patterns for verifying the sanity of this test.
    #:
    #: Refer to the :doc:`ReFrame Tutorial </tutorial>` for concrete usage
    #: examples.
    #:
    #: If set to :class:`None`, a sanity error will be raised during sanity
    #: checking.
    #:
    #: :type: A deferrable expression (i.e., the result of a :doc:`sanity
    #:     function </sanity_functions_reference>`) or :class:`None`
    #: :default: :class:`None`
    #:
    #: .. note::
    #:    .. versionchanged:: 2.9
    #:       The default behaviour has changed and it is now considered a
    #:       sanity failure if this attribute is set to :class:`None`.
    #:
    #:       If a test doesn't care about its output, this must be stated
    #:       explicitly as follows:
    #:
    #:       ::
    #:
    #:           self.sanity_patterns = sn.assert_found(r'.*', self.stdout)
    #:
    sanity_patterns = fields.TypedField('sanity_patterns', _DeferredExpression,
                                        type(None))

    #: Patterns for verifying the performance of this test.
    #:
    #: Refer to the :doc:`ReFrame Tutorial </tutorial>` for concrete usage
    #: examples.
    #:
    #: If set to :class:`None`, no performance checking will be performed.
    #:
    #: :type: A dictionary with keys of type :class:`str` and deferrable
    #:     expressions (i.e., the result of a :doc:`sanity function
    #:     </sanity_functions_reference>`) as values.
    #:     :class:`None` is also allowed.
    #: :default: :class:`None`
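    #:
    #: A minimal hedged sketch (the output pattern and variable name are made
    #: up; assumes ``import reframe.utility.sanity as sn``):
    #:
    #: ::
    #:
    #:     self.perf_patterns = {
    #:         'gflops': sn.extractsingle(r'Performance:\s+(\S+) Gflop/s',
    #:                                    self.stdout, 1, float)
    #:     }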
    perf_patterns = fields.TypedField('perf_patterns',
                                      typ.Dict[str, _DeferredExpression],
                                      type(None))

    #: List of modules to be loaded before running this test.
    #:
    #: These modules will be loaded during the :func:`setup` phase.
    #:
    #: :type: :class:`List[str]`
    #: :default: ``[]``
    modules = fields.TypedField('modules', typ.List[str])

    #: Environment variables to be set before running this test.
    #:
    #: These variables will be set during the :func:`setup` phase.
    #:
    #: :type: :class:`Dict[str, str]`
    #: :default: ``{}``
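    #:
    #: For example (the variable shown is just an illustration):
    #:
    #: ::
    #:
    #:     self.variables = {'OMP_NUM_THREADS': '4'}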
    variables = fields.TypedField('variables', typ.Dict[str, str])

    #: Time limit for this test.
    #:
    #: Time limit is specified as a three-tuple in the form ``(hh, mm, ss)``,
    #: with ``hh >= 0``, ``0 <= mm <= 59`` and ``0 <= ss <= 59``.
    #: If set to :class:`None`, no time limit will be set.
    #: The default time limit of the system partition's scheduler will be used.
    #:
    #:
    #: :type: :class:`tuple[int]`
    #: :default: ``(0, 10, 0)``
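    #:
    #: For example:
    #:
    #: ::
    #:
    #:     self.time_limit = (0, 30, 0)    # 30 minutes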
    #:
    #: .. note::
    #:    .. versionchanged:: 2.15
    #:
    #:    This attribute may be set to :class:`None`.
    #:
    time_limit = fields.TimerField('time_limit', type(None))

    #: Extra resources for this test.
    #:
    #: This field is for specifying custom resources needed by this test.
    #: These resources are defined in the :doc:`configuration </configure>`
    #: of a system partition.
    #: For example, assume that two additional resources, named ``gpu`` and
    #: ``datawarp``, are defined in the configuration file as follows:
    #:
    #: ::
    #:
    #:     'resources': {
    #:         'gpu': [
    #:             '--gres=gpu:{num_gpus_per_node}'
    #:         ],
    #:         'datawarp': [
    #:             '#DW jobdw capacity={capacity}',
    #:             '#DW stage_in source={stagein_src}'
    #:         ]
    #:     }
    #:
    #: A regression test then may instantiate the above resources by setting
    #: the :attr:`extra_resources` attribute as follows:
    #:
    #: ::
    #:
    #:     self.extra_resources = {
    #:         'gpu': {'num_gpus_per_node': 2},
    #:         'datawarp': {
    #:             'capacity': '100GB',
    #:             'stagein_src': '/foo'
    #:         }
    #:     }
    #:
    #: The generated batch script (for Slurm) will then contain the following
    #: lines:
    #:
    #: ::
    #:
    #:     #SBATCH --gres=gpu:2
    #:     #DW jobdw capacity=100GB
    #:     #DW stage_in source=/foo
    #:
    #: Notice that if the resource specified in the configuration uses an
    #: alternative directive prefix (in this case ``#DW``), this will replace
    #: the standard prefix of the backend scheduler (in this case ``#SBATCH``)
    #:
    #: If the resource name specified in this variable does not match a
    #: resource name in the partition configuration, it will be simply ignored.
    #: The :attr:`num_gpus_per_node` attribute translates internally to the
    #: ``_rfm_gpu`` resource, so that setting
    #: ``self.num_gpus_per_node = 2`` is equivalent to the following:
    #:
    #: ::
    #:
    #:     self.extra_resources = {'_rfm_gpu': {'num_gpus_per_node': 2}}
    #:
    #: :type: :class:`Dict[str, Dict[str, object]]`
    #: :default: ``{}``
    #:
    #: .. note::
    #:    .. versionadded:: 2.8
    #:    .. versionchanged:: 2.9
    #:
    #:    A new, more powerful syntax was introduced that also allows
    #:    custom job script directive prefixes.
    #:
    extra_resources = fields.TypedField('extra_resources',
                                        typ.Dict[str, typ.Dict[str, object]])

    # Private properties
    _prefix = fields.TypedField('_prefix', str)
    _stagedir = fields.TypedField('_stagedir', str, type(None))
    _stdout = fields.TypedField('_stdout', str, type(None))
    _stderr = fields.TypedField('_stderr', str, type(None))
    _current_partition = fields.TypedField('_current_partition',
                                           SystemPartition, type(None))
    _current_environ = fields.TypedField('_current_environ', Environment,
                                         type(None))
    _user_environ = fields.TypedField('_user_environ', Environment, type(None))
    _job = fields.TypedField('_job', Job, type(None))
    _build_job = fields.TypedField('_build_job', Job, type(None))

    def __new__(cls, *args, **kwargs):
        obj = super().__new__(cls)

        # Create a test name from the class name and the constructor's
        # arguments
        name = cls.__qualname__
        if args or kwargs:
            arg_names = map(lambda x: util.toalphanum(str(x)),
                            itertools.chain(args, kwargs.values()))
            name += '_' + '_'.join(arg_names)

        obj._rfm_init(name,
                      os.path.abspath(os.path.dirname(inspect.getfile(cls))))
        return obj
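
    # For example (hypothetical class and arguments): a subclass `MyTest`
    # instantiated as `MyTest(8, 'foo')` gets the auto-generated name
    # 'MyTest_8_foo'.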

    def __init__(self):
        pass

    def _rfm_init(self, name=None, prefix=None):
        if name is not None:
            self.name = name

        self.descr = self.name
        self.valid_prog_environs = []
        self.valid_systems = []
        self.sourcepath = ''
        self.prebuild_cmd = []
        self.postbuild_cmd = []
        self.executable = os.path.join('.', self.name)
        self.executable_opts = []
        self.pre_run = []
        self.post_run = []
        self.keep_files = []
        self.readonly_files = []
        self.tags = set()
        self.maintainers = []
        self._perfvalues = {}

        # Strict performance check, if applicable
        self.strict_check = True

        # Default is a single node check
        self.num_tasks = 1
        self.num_tasks_per_node = None
        self.num_gpus_per_node = 0
        self.num_cpus_per_task = None
        self.num_tasks_per_core = None
        self.num_tasks_per_socket = None
        self.use_multithreading = None
        self.exclusive_access = False

        # True only if check is to be run locally
        self.local = False

        # Static directories of the regression check
        if prefix is not None:
            self._prefix = os.path.abspath(prefix)

        self.sourcesdir = 'src'

        # Output patterns
        self.sanity_patterns = None

        # Performance patterns: None -> no performance checking
        self.perf_patterns = None
        self.reference = {}

        # Environment setup
        self.modules = []
        self.variables = {}

        # Time limit for the check
        self.time_limit = (0, 10, 0)

        # Runtime information of the test
        self._current_partition = None
        self._current_environ = None
        self._user_environ = None

        # Associated job
        self._job = None
        self.extra_resources = {}

        # Dynamic paths of the regression check; will be set in setup()
        self._stagedir = None
        self._outputdir = None
        self._stdout = None
        self._stderr = None

        # Compilation process output
        self._build_job = None
        self._compile_proc = None
        self.build_system = None

        # Performance logging
        self._perf_logger = logging.null_logger

        # List of dependencies specified by the user
        self._userdeps = []

        # Weak reference to the test case associated with this check
        self._case = None

    # Export read-only views to interesting fields
    @property
    def current_environ(self):
        """The programming environment that the regression test is currently
        executing with.

        This is set by the framework during the :func:`setup` phase.

        :type: :class:`reframe.core.environments.Environment`.
        """
        return self._current_environ

    @property
    def current_partition(self):
        """The system partition the regression test is currently executing on.

        This is set by the framework during the :func:`setup` phase.

        :type: :class:`reframe.core.systems.SystemPartition`.
        """
        return self._current_partition

    @property
    def current_system(self):
        """The system the regression test is currently executing on.

        This is set by the framework during the initialization phase.

        :type: :class:`reframe.core.runtime.HostSystem`.
        """
        return rt.runtime().system

    @property
    def perfvalues(self):
        return util.MappingView(self._perfvalues)

    @property
    def job(self):
        """The job descriptor associated with this test.

        This is set by the framework during the :func:`setup` phase.

        :type: :class:`reframe.core.schedulers.Job`.
        """
        return self._job

    @property
    def logger(self):
        """A logger associated with the this test.

        You can use this logger to log information for your test.
        """
        return logging.getlogger()

    @property
    def prefix(self):
        """The prefix directory of the test.

        :type: :class:`str`.
        """
        return self._prefix

    @property
    def stagedir(self):
        """The stage directory of the test.

        This is set during the :func:`setup` phase.

        :type: :class:`str`.
        """
        return self._stagedir

    @property
    def outputdir(self):
        """The output directory of the test.

        This is set during the :func:`setup` phase.

        .. versionadded:: 2.13

        :type: :class:`str`.
        """
        return self._outputdir

    @property
    @deferrable
    def stdout(self):
        """The name of the file containing the standard output of the test.

        This is set during the :func:`setup` phase.

        This attribute is evaluated lazily, so it can be used inside sanity
        expressions.

        :type: :class:`str`.
        """
        return self._job.stdout

    @property
    @deferrable
    def stderr(self):
        """The name of the file containing the standard error of the test.

        This is set during the :func:`setup` phase.

        This attribute is evaluated lazily, so it can be used inside sanity
        expressions.

        :type: :class:`str`.
        """
        return self._job.stderr

    @property
    @deferrable
    def build_stdout(self):
        return self._build_job.stdout

    @property
    @deferrable
    def build_stderr(self):
        return self._build_job.stderr

    def info(self):
        """Provide live information of a running test.

        This method is used by the front-end to print the status message during
        the test's execution.
        This function is also called to provide the message for the
        ``check_info`` `logging attribute <running.html#logging>`__.
        By default, it returns a message reporting the test name, the current
        partition and the current programming environment that the test is
        executing on.

        :returns: a string with an informational message about this test

        .. note::
           When overriding this method, you should pay extra attention to how
           you use the :class:`RegressionTest`'s attributes, because this
           method may be called at any point of the test's lifetime.

           .. versionadded:: 2.10

        """
        ret = self.name
        if self.current_partition:
            ret += ' on %s' % self.current_partition.fullname

        if self.current_environ:
            ret += ' using %s' % self.current_environ.name

        return ret

    def supports_system(self, partition_name):
        if '*' in self.valid_systems:
            return True

        if self.current_system.name in self.valid_systems:
            return True

        # Check if this is a relative name
        if partition_name.find(':') == -1:
            partition_name = '%s:%s' % (self.current_system.name,
                                        partition_name)

        return partition_name in self.valid_systems
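
    # For example (hypothetical names): with valid_systems == ['sys0:gpu'],
    # supports_system('gpu') returns True only when the current system is
    # 'sys0', since the relative partition name is expanded to 'sys0:gpu'.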

    def supports_environ(self, env_name):
        if '*' in self.valid_prog_environs:
            return True

        return env_name in self.valid_prog_environs

    def is_local(self):
        """Check if the test will execute locally.

        A test executes locally if the :attr:`local` attribute is set or if the
        current partition's scheduler does not support job submission.
        """
        if self._current_partition is None:
            return self.local

        return self.local or self._current_partition.scheduler.is_local

    def _setup_environ(self, environ):
        """Setup the current environment and load it."""

        self._current_environ = environ

        # Set up user environment
        self._user_environ = Environment(
            type(self).__name__, self.modules, self.variables.items())

        # Temporarily load the test's environment to record the actual module
        # load/unload sequence
        environ_save = EnvironmentSnapshot()
        # First load the local environment of the partition
        self.logger.debug('loading environment for the current partition')
        self._current_partition.local_env.load()

        self.logger.debug("loading current programming environment")
        self._current_environ.load()

        self.logger.debug("loading user's environment")
        self._user_environ.load()
        environ_save.load()

    def _setup_paths(self):
        """Setup the check's dynamic paths."""
        self.logger.debug('setting up paths')
        try:
            resources = rt.runtime().resources
            self._stagedir = resources.make_stagedir(
                self.current_system.name, self._current_partition.name,
                self._current_environ.name, self.name)
            self._outputdir = resources.make_outputdir(
                self.current_system.name, self._current_partition.name,
                self._current_environ.name, self.name)
        except OSError as e:
            raise PipelineError('failed to set up paths') from e

    def _setup_job(self, **job_opts):
        """Setup the job related to this check."""

        self.logger.debug('setting up the job descriptor')

        msg = 'job scheduler backend: {0}'
        self.logger.debug(
            msg.format('local' if self.is_local() else
                       self._current_partition.scheduler.registered_name))

        # num_gpus_per_node is a managed resource
        if self.num_gpus_per_node > 0:
            self.extra_resources.setdefault(
                '_rfm_gpu', {'num_gpus_per_node': self.num_gpus_per_node})

        if self.local:
            scheduler_type = getscheduler('local')
            launcher_type = getlauncher('local')
        else:
            scheduler_type = self._current_partition.scheduler
            launcher_type = self._current_partition.launcher

        self._job = scheduler_type(
            name='rfm_%s_job' % self.name,
            launcher=launcher_type(),
            workdir=self._stagedir,
            num_tasks=self.num_tasks,
            num_tasks_per_node=self.num_tasks_per_node,
            num_tasks_per_core=self.num_tasks_per_core,
            num_tasks_per_socket=self.num_tasks_per_socket,
            num_cpus_per_task=self.num_cpus_per_task,
            use_smt=self.use_multithreading,
            time_limit=self.time_limit,
            sched_access=self._current_partition.access,
            sched_exclusive_access=self.exclusive_access,
            **job_opts)

        # Get job options from managed resources and prepend them to
        # job_opts. We want any user supplied options to be able to
        # override those set by the framework.
        resources_opts = []
        for r, v in self.extra_resources.items():
            resources_opts.extend(self._current_partition.get_resource(r, **v))

        self._job.options = resources_opts + self._job.options

    def _setup_perf_logging(self):
        self.logger.debug('setting up performance logging')
        self._perf_logger = logging.getperflogger(self)

    def setup(self, partition, environ, **job_opts):
        """The setup phase of the regression test pipeline.

        :arg partition: The system partition to set up this test for.
        :arg environ: The environment to set up this test for.
        :arg job_opts: Options to be passed through to the backend scheduler.
            When overriding this method users should always pass through
            ``job_opts`` to the base class method.
        :raises reframe.core.exceptions.ReframeError: In case of errors.
        """
        self._current_partition = partition
        self._setup_environ(environ)
        self._setup_paths()
        self._setup_job(**job_opts)
        if self.perf_patterns is not None:
            self._setup_perf_logging()

    def _copy_to_stagedir(self, path):
        self.logger.debug('copying %s to stage directory (%s)' %
                          (path, self._stagedir))
        self.logger.debug('symlinking files: %s' % self.readonly_files)
        try:
            os_ext.copytree_virtual(path, self._stagedir, self.readonly_files)
        except (OSError, ValueError, TypeError) as e:
            raise PipelineError('virtual copying of files failed') from e

    def _clone_to_stagedir(self, url):
        self.logger.debug('cloning URL %s to stage directory (%s)' %
                          (url, self._stagedir))
        os_ext.git_clone(self.sourcesdir, self._stagedir)

    def compile(self):
        """The compilation phase of the regression test pipeline.

        :raises reframe.core.exceptions.ReframeError: In case of errors.
        """
        if not self._current_environ:
            raise PipelineError('no programming environment set')

        # Copy the check's resources to the stage directory
        if self.sourcesdir:
            try:
                commonpath = os.path.commonpath(
                    [self.sourcesdir, self.sourcepath])
            except ValueError:
                commonpath = None

            if commonpath:
                self.logger.warn(
                    "sourcepath `%s' seems to be a subdirectory of "
                    "sourcesdir `%s', but it will be interpreted "
                    "as relative to it." % (self.sourcepath, self.sourcesdir))

            if os_ext.is_url(self.sourcesdir):
                self._clone_to_stagedir(self.sourcesdir)
            else:
                self._copy_to_stagedir(
                    os.path.join(self._prefix, self.sourcesdir))

        # Verify the sourcepath and determine the sourcepath in the stagedir
        if (os.path.isabs(self.sourcepath)
                or os.path.normpath(self.sourcepath).startswith('..')):
            raise PipelineError(
                'self.sourcepath is an absolute path or does not point to a '
                'subfolder or a file contained in self.sourcesdir: ' +
                self.sourcepath)

        staged_sourcepath = os.path.join(self._stagedir, self.sourcepath)
        self.logger.debug('Staged sourcepath: %s' % staged_sourcepath)
        if os.path.isdir(staged_sourcepath):
            if not self.build_system:
                # Try to guess the build system
                cmakelists = os.path.join(staged_sourcepath, 'CMakeLists.txt')
                configure_ac = os.path.join(staged_sourcepath, 'configure.ac')
                configure_in = os.path.join(staged_sourcepath, 'configure.in')
                if os.path.exists(cmakelists):
                    self.build_system = 'CMake'
                    self.build_system.builddir = 'rfm_build'
                elif (os.path.exists(configure_ac)
                      or os.path.exists(configure_in)):
                    self.build_system = 'Autotools'
                    self.build_system.builddir = 'rfm_build'
                else:
                    self.build_system = 'Make'

            self.build_system.srcdir = self.sourcepath
        else:
            if not self.build_system:
                self.build_system = 'SingleSource'

            self.build_system.srcfile = self.sourcepath
            self.build_system.executable = self.executable

        # Prepare build job
        build_commands = [
            *self.prebuild_cmd,
            *self.build_system.emit_build_commands(self._current_environ),
            *self.postbuild_cmd
        ]
        environs = [
            self._current_partition.local_env, self._current_environ,
            self._user_environ
        ]
        self._build_job = getscheduler('local')(
            name='rfm_%s_build' % self.name,
            launcher=getlauncher('local')(),
            workdir=self._stagedir)

        with os_ext.change_dir(self._stagedir):
            try:
                self._build_job.prepare(build_commands,
                                        environs,
                                        login=True,
                                        trap_errors=True)
            except OSError as e:
                raise PipelineError('failed to prepare build job') from e

            self._build_job.submit()

    def compile_wait(self):
        """Wait for compilation phase to finish.

        .. versionadded:: 2.13
        """
        self._build_job.wait()
        self.logger.debug('compilation finished')

        # FIXME: this check is not reliable for certain scheduler backends
        if self._build_job.exitcode != 0:
            raise BuildError(self._build_job.stdout, self._build_job.stderr)

    def run(self):
        """The run phase of the regression test pipeline.

        This call is non-blocking.
        It simply submits the job associated with this test and returns.
        """
        if not self.current_system or not self._current_partition:
            raise PipelineError('no system or system partition is set')

        exec_cmd = [
            self.job.launcher.run_command(self.job), self.executable,
            *self.executable_opts
        ]
        commands = [*self.pre_run, ' '.join(exec_cmd), *self.post_run]
        environs = [
            self._current_partition.local_env, self._current_environ,
            self._user_environ
        ]
        with os_ext.change_dir(self._stagedir):
            try:
                self._job.prepare(commands, environs, login=True)
            except OSError as e:
                raise PipelineError('failed to prepare job') from e

            self._job.submit()

        msg = ('spawned job (%s=%s)' %
               ('pid' if self.is_local() else 'jobid', self._job.jobid))
        self.logger.debug(msg)

    def poll(self):
        """Poll the test's state.

        :returns: :class:`True` if the associated job has finished,
            :class:`False` otherwise.

            If no job descriptor is yet associated with this test,
            :class:`True` is returned.
        :raises reframe.core.exceptions.ReframeError: In case of errors.
        """
        if not self._job:
            return True

        return self._job.finished()

    def wait(self):
        """Wait for this test to finish.

        :raises reframe.core.exceptions.ReframeError: In case of errors.
        """
        self._job.wait()
        self.logger.debug('spawned job finished')

    def sanity(self):
        self.check_sanity()

    def performance(self):
        try:
            self.check_performance()
        except PerformanceError:
            if self.strict_check:
                raise

    def check_sanity(self):
        """The sanity checking phase of the regression test pipeline.

        :raises reframe.core.exceptions.SanityError: If the sanity check fails.
        """
        if self.sanity_patterns is None:
            raise SanityError('sanity_patterns not set')

        with os_ext.change_dir(self._stagedir):
            success = evaluate(self.sanity_patterns)
            if not success:
                raise SanityError()

    def check_performance(self):
        """The performance checking phase of the regression test pipeline.

        :raises reframe.core.exceptions.SanityError: If the performance check
            fails.
        """
        if self.perf_patterns is None:
            return

        with os_ext.change_dir(self._stagedir):
            # Check if default reference perf values are provided and
            # store all the variables tested in the performance check
            has_default = False
            variables = set()
            for key, ref in self.reference.items():
                keyparts = key.split(self.reference.scope_separator)
                system = keyparts[0]
                varname = keyparts[-1]
                try:
                    unit = ref[3]
                except IndexError:
                    unit = None

                variables.add((varname, unit))
                if system == '*':
                    has_default = True
                    break

            if not has_default:
                if not variables:
                    # If empty, it means that self.reference was empty, so try
                    # to infer their name from perf_patterns
                    variables = {(name, None)
                                 for name in self.perf_patterns.keys()}

                for var in variables:
                    name, unit = var
                    ref_tuple = (0, None, None)
                    if unit:
                        ref_tuple += (unit, )

                    self.reference.update({'*': {name: ref_tuple}})

            # We first evaluate and log all performance values and then we
            # check them against the reference. This way we always log them
            # even if they don't meet the reference.
            for tag, expr in self.perf_patterns.items():
                value = evaluate(expr)
                key = '%s:%s' % (self._current_partition.fullname, tag)
                if key not in self.reference:
                    raise SanityError(
                        "tag `%s' not resolved in references for `%s'" %
                        (tag, self._current_partition.fullname))

                self._perfvalues[key] = (value, *self.reference[key])
                self._perf_logger.log_performance(logging.INFO, tag, value,
                                                  *self.reference[key])

            for key, values in self._perfvalues.items():
                val, ref, low_thres, high_thres, *_ = values
                tag = key.split(':')[-1]
                try:
                    evaluate(
                        assert_reference(
                            val,
                            ref,
                            low_thres,
                            high_thres,
                            msg=('failed to meet reference: %s={0}, '
                                 'expected {1} (l={2}, u={3})' % tag)))
                except SanityError as e:
                    raise PerformanceError(e)

    def _copy_job_files(self, job, dst):
        if job is None:
            return

        stdout = os.path.join(self._stagedir, job.stdout)
        stderr = os.path.join(self._stagedir, job.stderr)
        script = os.path.join(self._stagedir, job.script_filename)
        shutil.copy(stdout, dst)
        shutil.copy(stderr, dst)
        shutil.copy(script, dst)

    def _copy_to_outputdir(self):
        """Copy checks interesting files to the output directory."""
        self.logger.debug('copying interesting files to output directory')
        self._copy_job_files(self._job, self.outputdir)
        self._copy_job_files(self._build_job, self.outputdir)

        # Copy files specified by the user
        for f in self.keep_files:
            f_orig = f
            if not os.path.isabs(f):
                f = os.path.join(self._stagedir, f)

            if os.path.isfile(f):
                shutil.copy(f, self.outputdir)
            elif os.path.isdir(f):
                shutil.copytree(f, os.path.join(self.outputdir, f_orig))

    def cleanup(self, remove_files=False, unload_env=True):
        """The cleanup phase of the regression test pipeline.

        :arg remove_files: If :class:`True`, the stage directory associated
            with this test will be removed.
        :arg unload_env: If :class:`True`, the environment that was used to run
            this test will be unloaded.
        """
        aliased = os.path.samefile(self._stagedir, self._outputdir)
        if aliased:
            self.logger.debug('skipping copy to output dir '
                              'since they alias each other')
        else:
            self._copy_to_outputdir()

        if remove_files:
            self.logger.debug('removing stage directory')
            os_ext.rmtree(self._stagedir)

        if unload_env:
            self.logger.debug("unloading test's environment")
            self._user_environ.unload()
            self._current_environ.unload()
            self._current_partition.local_env.unload()

    # Dependency API
    def user_deps(self):
        return util.SequenceView(self._userdeps)

    def depends_on(self, target, how=DEPEND_BY_ENV, subdeps=None):
        if not isinstance(target, str):
            raise TypeError("target argument must be of type: `str'")

        if not isinstance(how, int):
            raise TypeError("how argument must be of type: `int'")

        if (subdeps is not None
                and not isinstance(subdeps, typ.Dict[str, typ.List[str]])):
            raise TypeError("subdeps argument must be of type "
                            "`Dict[str, List[str]]' or `None'")

        self._userdeps.append((target, how, subdeps))

    def getdep(self, target, environ):
        if self._case is None or self._case() is None:
            raise DependencyError('no test case is associated with this test')

        for d in self._case().deps:
            if d.check.name == target and d.environ.name == environ:
                return d.check

        raise DependencyError('could not resolve dependency to (%s, %s)' %
                              (target, environ))

    def __str__(self):
        return "%s(name='%s', prefix='%s')" % (type(self).__name__, self.name,
                                               self.prefix)
Exemplo n.º 25
0
 class FieldTester:
     x = fields.DeprecatedField(fields.TypedField(int), 'deprecated',
                                from_version=str(next_version))
Exemplo n.º 26
0
    class FieldTester:
        field = fields.TypedField('field', ClassA)
        field_any = fields.TypedField('field_any', ClassA, str, type(None))

        def __init__(self, value):
            self.field = value
Exemplo n.º 27
0
 class FieldTester:
     fieldA = fields.TypedField(int, str)
     fieldB = fields.TypedField(str, int)
     fieldC = fields.TypedField(int)
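Snippets like the ones above exercise the type checking that ``TypedField`` descriptors perform on assignment: a value whose type is not among the declared types is rejected with a ``TypeError``. A small usage sketch, assuming the field classes come from ``reframe.core.fields``:

import reframe.core.fields as fields


class Config:
    # Accepts either an int or a str
    tag = fields.TypedField(int, str)


c = Config()
c.tag = 42        # OK: int is allowed
c.tag = 'fast'    # OK: str is allowed
try:
    c.tag = 3.14  # float is not among the declared types
except TypeError as err:
    print('rejected:', err)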
Exemplo n.º 28
0
#: The default mount location of the test case stage directory inside the
#: container
_STAGEDIR_MOUNT = '/rfm_workdir'


class ContainerPlatform(abc.ABC):
    '''The abstract base class of any container platform.'''

    #: The container image to be used for running the test.
    #:
    #: :type: :class:`str` or :class:`None`
    #: :default: :class:`None`
    image = fields.TypedField(str, type(None))

    #: The command to be executed within the container.
    #:
    #: If no command is given, then the default command of the corresponding
    #: container image is going to be executed.
    #:
    #: .. versionadded:: 3.5.0
    #:    Changed the attribute name from `commands` to `command` and its type
    #:    to a string.
    #:
    #: :type: :class:`str` or :class:`None`
    #: :default: :class:`None`
    command = fields.TypedField(str, type(None))

    _commands = fields.TypedField(typ.List[str])
    #: The commands to be executed within the container.
    #:
    #: .. deprecated:: 3.5.0
    #:    Please use the `command` field instead.
    #:
    #: :type: :class:`list[str]`
    #: :default: ``[]``
    commands = fields.DeprecatedField(
        _commands,
        'The `commands` field is deprecated, please use the `command` field '
        'to set the command to be executed by the container.',
        fields.DeprecatedField.OP_SET,
        from_version='3.5.0')

    #: Pull the container image before running.
    #:
    #: This does not have any effect for the `Singularity` container platform.
    #:
    #: .. versionadded:: 3.5
    #:
    #: :type: :class:`bool`
    #: :default: ``True``
    pull_image = fields.TypedField(bool)

    #: List of mount point pairs for directories to mount inside the container.
    #:
    #: Each mount point is specified as a tuple of
    #: ``(/path/in/host, /path/in/container)``. The stage directory of the
    #: ReFrame test is always mounted under ``/rfm_workdir`` inside the
    #: container, independently of this field.
    #:
    #: :type: :class:`list[tuple[str, str]]`
    #: :default: ``[]``
    mount_points = fields.TypedField(typ.List[typ.Tuple[str, str]])

    #: Additional options to be passed to the container runtime when executed.
    #:
    #: :type: :class:`list[str]`
    #: :default: ``[]``
    options = fields.TypedField(typ.List[str])

    #: The working directory of ReFrame inside the container.
    #:
    #: This is the directory where the test's stage directory is mounted inside
    #: the container. This directory is always mounted, regardless of whether
    #: :attr:`mount_points` is set.
    #:
    #: :type: :class:`str`
    #: :default: ``/rfm_workdir``
    #:
    #: .. versionchanged:: 3.12.0
    #:    This attribute is no longer deprecated.
    workdir = fields.TypedField(str, type(None))

    def __init__(self):
        self.image = None
        self.command = None

        # NOTE: Here we set the target fields directly to avoid the deprecation
        # warnings
        self._commands = []

        self.workdir = _STAGEDIR_MOUNT
        self.mount_points = []
        self.options = []
        self.pull_image = True

    @abc.abstractmethod
    def emit_prepare_commands(self, stagedir):
        '''Returns commands for preparing this container for running.

        Such a command could be for pulling the container image from a
        repository.

        .. note:

            This method is relevant only to developers of new container
            platform backends.

        :meta private:

        :arg stagedir: The stage directory of the test.
        '''

    @abc.abstractmethod
    def launch_command(self, stagedir):
        '''Returns the command for running :attr:`commands` with this container
        platform.

        .. note:
            This method is relevant only to developers of new container
            platforms.

        :meta private:

        :arg stagedir: The stage directory of the test.
        '''

    @classmethod
    def create(cls, name):
        '''Factory method to create a new container by name.'''
        name = name.capitalize()
        try:
            return globals()[name]()
        except KeyError:
            raise ValueError(f'unknown container platform: {name}') from None

    @classmethod
    def create_from(cls, name, other):
        new = cls.create(name)
        new.image = other.image
        new.command = other.command
        new.mount_points = other.mount_points
        new.options = other.options
        new.pull_image = other.pull_image
        new.workdir = other.workdir

        # Update deprecated fields
        with warn.suppress_deprecations():
            new.commands = other.commands

        return new

    @property
    def name(self):
        return type(self).__name__

    def __str__(self):
        return self.name

    def __rfm_json_encode__(self):
        return str(self)
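The ``create`` and ``create_from`` factories above look the capitalized platform name up in the module's globals. A short usage sketch, assuming a concrete ``Singularity`` backend is defined in the same module; the image, command and mount point are made-up placeholders:

platform = ContainerPlatform.create('singularity')
platform.image = 'docker://ubuntu:20.04'
platform.command = 'cat /etc/os-release'
platform.mount_points = [('/scratch/data', '/data')]

# Compose the final launch command for a given stage directory; the stage
# directory is always mounted under the default workdir inside the container
print(platform.launch_command('/path/to/stagedir'))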
Exemplo n.º 29
0
class ModulesSystem:
    '''A modules system.'''

    module_map = fields.TypedField('module_map',
                                   types.Dict[str, types.List[str]])

    @classmethod
    def create(cls, modules_kind=None):
        if modules_kind is None or modules_kind == 'nomod':
            return ModulesSystem(NoModImpl())
        elif modules_kind == 'tmod31':
            return ModulesSystem(TMod31Impl())
        elif modules_kind == 'tmod':
            return ModulesSystem(TModImpl())
        elif modules_kind == 'tmod32':
            return ModulesSystem(TModImpl())
        elif modules_kind == 'tmod4':
            return ModulesSystem(TMod4Impl())
        elif modules_kind == 'lmod':
            return ModulesSystem(LModImpl())
        else:
            raise ConfigError('unknown module system: %s' % modules_kind)

    def __init__(self, backend):
        self._backend = backend
        self.module_map = {}

    def resolve_module(self, name):
        '''Resolve module ``name`` in the registered module map.

        :returns: the list of real module names pointed to by ``name``.
        :raises: :class:`reframe.core.exceptions.EnvironError` if the mapping
            contains a cycle.

        :meta private:
        '''
        ret = OrderedSet()
        visited = set()
        unvisited = [(name, None)]
        path = []
        while unvisited:
            node, parent = unvisited.pop()
            # Adjust the path
            while path and path[-1] != parent:
                path.pop()

            # Handle modules mappings with self loops
            if node == parent:
                ret.add(node)
                continue

            try:
                # We insert the adjacent nodes in reverse order, so as to
                # preserve the DFS access order
                adjacent = reversed(self.module_map[node])
            except KeyError:
                # We have reached a terminal node
                ret.add(node)
            else:
                path.append(node)
                for m in adjacent:
                    if m in path and m != node:
                        raise EnvironError('module cyclic dependency: ' +
                                           '->'.join(path + [m]))
                    if m not in visited:
                        unvisited.append((m, node))

            visited.add(node)

        return list(ret)

    @property
    def backend(self):
        return self._backend

    def loaded_modules(self):
        '''Return a list of loaded modules.

        :rtype: List[str]
        '''
        return [str(m) for m in self._backend.loaded_modules()]

    def conflicted_modules(self, name):
        '''Return the list of the modules conflicting with module ``name``.

        If module ``name`` resolves to multiple real modules, then the returned
        list will be the concatenation of the conflict lists of all the real
        modules.

        :rtype: List[str]
        '''
        ret = []
        for m in self.resolve_module(name):
            ret += self._conflicted_modules(m)

        return ret

    def _conflicted_modules(self, name):
        return [str(m) for m in self._backend.conflicted_modules(Module(name))]

    def load_module(self, name, force=False):
        '''Load the module ``name``.

        If ``force`` is set, forces the loading, unloading first any
        conflicting modules currently loaded. If module ``name`` refers to
        multiple real modules, all of the target modules will be loaded.

        :returns: the list of unloaded modules as strings.
        :rtype: List[str]
        '''
        ret = []
        for m in self.resolve_module(name):
            ret += self._load_module(m, force)

        return ret

    def _load_module(self, name, force=False):
        module = Module(name)
        loaded_modules = self._backend.loaded_modules()
        if module in loaded_modules:
            # Do not try to load the module if it is already present
            return []

        # Get the list of the modules that need to be unloaded
        unload_list = set()
        if force:
            conflict_list = self._backend.conflicted_modules(module)
            unload_list = set(loaded_modules) & set(conflict_list)

        for m in unload_list:
            self._backend.unload_module(m)

        self._backend.load_module(module)
        return [str(m) for m in unload_list]

    def unload_module(self, name):
        '''Unload module ``name``.

        If module ``name`` refers to multiple real modules, all the referenced
        modules will be unloaded in reverse order.
        '''
        for m in reversed(self.resolve_module(name)):
            self._unload_module(m)

    def _unload_module(self, name):
        self._backend.unload_module(Module(name))

    def is_module_loaded(self, name):
        '''Check if module ``name`` is loaded.

        If module ``name`` refers to multiple real modules, this method will
        return :class:`True` only if all the referenced modules are loaded.
        '''
        return all(self._is_module_loaded(m)
                   for m in self.resolve_module(name))

    def _is_module_loaded(self, name):
        return self._backend.is_module_loaded(Module(name))

    def load_mapping(self, mapping):
        '''Update the internal module mappings using a single mapping.

        :arg mapping: a string specifying the module mapping.
            Example syntax: ``'m0: m1 m2'``.

        :meta private:
        '''
        key, *rest = mapping.split(':')
        if len(rest) != 1:
            raise ConfigError('invalid mapping syntax: %s' % mapping)

        key = key.strip()
        values = rest[0].split()
        if not key:
            raise ConfigError('no key found in mapping: %s' % mapping)

        if not values:
            raise ConfigError('no mapping defined for module: %s' % key)

        self.module_map[key] = list(OrderedDict.fromkeys(values))

    def load_mapping_from_file(self, filename):
        '''Update the internal module mappings from mappings read from file.

        :meta private:
        '''
        with open(filename) as fp:
            for lineno, line in enumerate(fp, start=1):
                line = line.strip().split('#')[0]
                if not line:
                    continue

                try:
                    self.load_mapping(line)
                except ConfigError as e:
                    raise ConfigError('%s:%s' % (filename, lineno)) from e

    @property
    def name(self):
        '''The name of this module system.'''
        return self._backend.name()

    @property
    def version(self):
        '''The version of this module system.'''
        return self._backend.version()

    def unload_all(self):
        '''Unload all loaded modules.'''
        return self._backend.unload_all()

    @property
    def searchpath(self):
        '''The module system search path as a list of directories.'''
        return self._backend.searchpath()

    def searchpath_add(self, *dirs):
        '''Add ``dirs`` to the module system search path.'''
        return self._backend.searchpath_add(*dirs)

    def searchpath_remove(self, *dirs):
        '''Remove ``dirs`` from the module system search path.'''
        return self._backend.searchpath_remove(*dirs)

    def emit_load_commands(self, name):
        '''Return the appropriate shell commands for loading module ``name``.

        :rtype: List[str]
        '''
        return [self._backend.emit_load_instr(Module(name))
                for name in self.resolve_module(name)]

    def emit_unload_commands(self, name):
        '''Return the appropriate shell commands for unloading module
        ``name``.

        :rtype: List[str]
        '''
        return [self._backend.emit_unload_instr(Module(name))
                for name in reversed(self.resolve_module(name))]

    def __str__(self):
        return str(self._backend)
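Since ``resolve_module`` operates purely on ``module_map``, the mapping machinery above can be exercised without a real modules system. A brief sketch using the default (``nomod``) backend returned by ``ModulesSystem.create()``; the module names are hypothetical:

ms = ModulesSystem.create()   # falls back to the 'nomod' backend
ms.load_mapping('gromacs: gromacs/2021 cuda/11.0')
ms.load_mapping('cuda/11.0: cuda/11.0.2')

# 'gromacs' resolves transitively through the map:
# ['gromacs/2021', 'cuda/11.0.2']
print(ms.resolve_module('gromacs'))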
Exemplo n.º 30
0
class ContainerPlatform(abc.ABC):
    '''The abstract base class of any container platform.

    Concrete container platforms inherit from this class and must override the
    :func:`emit_prepare_commands` and :func:`launch_command` abstract methods.
    '''

    #: The container image to be used for running the test.
    #:
    #: :type: :class:`str` or :class:`None`
    #: :default: :class:`None`
    image = fields.TypedField('image', str, type(None))

    #: The commands to be executed within the container.
    #:
    #: :type: :class:`list[str]`
    #: :default: ``[]``
    commands = fields.TypedField('commands', typ.List[str])

    #: List of mount point pairs for directories to mount inside the container.
    #:
    #: Each mount point is specified as a tuple of
    #: ``(/path/in/host, /path/in/container)``.
    #:
    #: :type: :class:`list[tuple[str, str]]`
    #: :default: ``[]``
    mount_points = fields.TypedField('mount_points', typ.List[typ.Tuple[str,
                                                                        str]])

    #: The working directory of ReFrame inside the container.
    #:
    #: This is the directory where the test's stage directory is mounted inside
    #: the container. This directory is always mounted, regardless of whether
    #: :attr:`mount_points` is set.
    #:
    #: :type: :class:`str`
    #: :default: ``/rfm_workdir``
    workdir = fields.TypedField('workdir', str, type(None))

    def __init__(self):
        self.image = None
        self.commands = []
        self.mount_points = []
        self.workdir = '/rfm_workdir'

    @abc.abstractmethod
    def emit_prepare_commands(self):
        '''Returns commands for preparing this container for running.

        Such a command could be for pulling the container image from a
        repository.

        .. note:

            This method is relevant only to developers of new container
            platform backends.

        '''

    @abc.abstractmethod
    def launch_command(self):
        '''Returns the command for running :attr:`commands` with this container
        platform.

        .. note:
            This method is relevant only to developers of new container
            platforms.

        '''

    def validate(self):
        if self.image is None:
            raise ContainerError('no image specified')

        if not self.commands:
            raise ContainerError('no commands specified')
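As the class docstring notes, concrete platforms must override both abstract methods. A minimal, hypothetical backend written against this older no-argument API; ``fakectr`` is a placeholder CLI, not a real container runtime:

class Fakectr(ContainerPlatform):
    '''Toy backend that launches containers with a fictional `fakectr` CLI.'''

    def emit_prepare_commands(self):
        # Pull the image before running
        return [f'fakectr pull {self.image}']

    def launch_command(self):
        mounts = ' '.join('-v %s:%s' % mp for mp in self.mount_points)
        script = '; '.join(['cd ' + self.workdir] + self.commands)
        return f"fakectr run {mounts} {self.image} bash -c '{script}'"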