Ejemplo n.º 1
0
def make_osroot(root_dir, app, data):
    """Creates mandatory system directories in the container chroot.

    :param ``str`` root_dir:
        Container root directory.
    :param app:
        Container app manifest (``app.proid`` is used for device ownership).
    :param ``dict`` data:
        Local (node) configuration data.
    """
    _LOGGER.info('Creating os root in: %s', root_dir)
    # Mount .../tickets .../keytabs on tempfs, so that they will be cleaned
    # up when the container exits.
    #
    for tmpfsdir in [
            '/var/spool/tickets', '/var/spool/keytabs', '/var/spool/tokens'
    ]:
        fs_linux.mount_tmpfs(root_dir, tmpfsdir)

    # /var/empty must be owned by root and not group or world-writable.
    os.chmod(os.path.join(root_dir, 'var/empty'), 0o711)

    # Mount a new sysfs for the container, bring in the /sys/fs subtree from
    # the host.
    fs_linux.mount_sysfs(root_dir)
    fs_linux.mount_bind(root_dir,
                        os.path.join(os.sep, 'sys', 'fs'),
                        recursive=True,
                        read_only=False)

    make_dev(root_dir)
    # Passthrough node devices per the node config data
    extra_devices = data.get('runtime', {}).get('passthrough_devices', [])
    make_extra_dev(root_dir, extra_devices, app.proid)

    # Per FHS3 /var/run should be a symlink to /run which should be tmpfs
    fs.symlink_safe(os.path.join(root_dir, 'var', 'run'), '/run')
    # We create an unbounded tmpfs mount so that runtime data can be written to
    # it, counting against the memory limit of the container.
    fs_linux.mount_tmpfs(root_dir, '/run')

    # /etc/docker is a file necessary for docker daemon
    _docker.prepare_docker_daemon_path(root_dir, app, data)
Ejemplo n.º 2
0
def store_ticket(tkt, tkt_spool_dir):
    """Store ticket received from ticket locker.

    :param tkt:
        Ticket object received from the locker.
    :param ``str`` tkt_spool_dir:
        Directory where tickets are spooled.
    :returns ``bool``:
        True if the ticket was written and spooled, False if the ticket is
        still pending or could not be written.
    """
    _LOGGER.info('store ticket: %s', tkt.princ)
    # Check if locker was able to get all tickets or there are
    # some pending.
    if not tkt.ticket:
        _LOGGER.info('Ticket pending for %r', tkt.princ)
        return False

    _LOGGER.info('Refreshing ticket for %r', tkt.princ)
    if not tkt.write():
        return False

    tkt_spool_path = os.path.join(tkt_spool_dir, tkt.princ)
    tkt.copy(tkt_spool_path)

    # Tickets are stored as fully qualified princ
    # files: foo@krbrealm.
    #
    # For backward compatablity, create "short"
    # ticket link:
    #
    # foo -> foo@krbrealm
    realm_sep = tkt_spool_path.rfind('@')
    if realm_sep != -1:
        # Compute the link name only when a realm separator is present -
        # slicing with -1 would silently drop the last character.
        tkt_spool_link = tkt_spool_path[:realm_sep]
        # Create relative link without full path.
        _LOGGER.info('Creating link: %s => %s', tkt_spool_link,
                     os.path.basename(tkt_spool_path))
        fs.symlink_safe(tkt_spool_link, os.path.basename(tkt_spool_path))
    return True
Ejemplo n.º 3
0
    def _add_cleanup_app(self, path):
        """Configure a new cleanup app.

        Creates a supervised service that runs ``treadmill sproc cleanup``
        for the app and links it into the cleaning directory.
        """
        name = os.path.basename(path)

        # Hidden (dot-prefixed) entries are never cleanup candidates.
        if name.startswith('.'):
            _LOGGER.warning('Ignore %s', name)
            return

        cleaning_link = os.path.join(self.tm_env.cleaning_dir, name)
        cleanup_link = os.path.join(self.tm_env.cleanup_dir, name)

        # Already being cleaned - nothing to do.
        if os.path.islink(cleaning_link):
            _LOGGER.warning('Cleaning app already configured %s', name)
            return

        # Only process entries that are actual cleanup links.
        if not os.path.islink(cleanup_link):
            _LOGGER.info('Ignore - not a link: %s', cleanup_link)
            return

        _LOGGER.info('Configure cleaning app: %s', name)

        # Windows installs keep the launcher under 'scripts', posix in 'bin'.
        bin_name = 'scripts' if os.name == 'nt' else 'bin'
        command = (
            '{treadmill}/{bin}/treadmill sproc cleanup instance'
            ' --approot {tm_root}'
            ' {instance}'
        ).format(
            treadmill=subproc.resolve('treadmill'),
            bin=bin_name,
            tm_root=self.tm_env.root,
            instance=name
        )
        if os.name == 'posix':
            # Replace the shell with the cleanup process.
            command = 'exec ' + command

        supervisor.create_service(
            self.tm_env.cleanup_apps_dir,
            name=name,
            app_run_script=command,
            userid='root',
            monitor_policy={
                'limit': 5,
                'interval': 60,
                'tombstone': {
                    'path': self.tm_env.cleanup_tombstone_dir,
                    'id': name,
                },
                'skip_path': os.path.join(self.tm_env.cleanup_dir, name)
            },
            log_run_script=None,
        )

        fs.symlink_safe(
            cleaning_link,
            os.path.join(self.tm_env.cleanup_apps_dir, name)
        )

        _LOGGER.debug('Cleanup app %s ready', name)

        self._refresh_supervisor()
Ejemplo n.º 4
0
def make_dev(newroot_norm):
    """Make and populate /dev in the container root.

    Mounts a private tmpfs on /dev, creates the standard character device
    nodes, symlinks the /proc backed pseudo devices, and mounts /dev/shm,
    /dev/pts and /dev/mqueue.

    :param ``str`` newroot_norm:
        Normalized path of the container root directory.
    """
    fs_linux.mount_tmpfs(newroot_norm,
                         '/dev',
                         nodev=False,
                         noexec=False,
                         nosuid=True,
                         relatime=False,
                         mode='0755')

    # Standard character devices: (path, mode, major, minor).
    devices = [
        ('/dev/null', 0o666, 1, 3),
        ('/dev/zero', 0o666, 1, 5),
        ('/dev/full', 0o666, 1, 7),
        ('/dev/tty', 0o666, 5, 0),
        ('/dev/random', 0o444, 1, 8),
        ('/dev/urandom', 0o444, 1, 9),
    ]
    # Clear the umask so node permissions are exactly as specified; restore
    # it even if mknod fails part-way through (otherwise the process would
    # keep running with umask 0).
    prev_umask = os.umask(0o000)
    try:
        for device, permissions, major, minor in devices:
            os.mknod(newroot_norm + device, permissions | stat.S_IFCHR,
                     os.makedev(major, minor))
    finally:
        os.umask(prev_umask)

    # Mirror the host's /dev/tty ownership inside the container.
    st = os.stat('/dev/tty')
    os.chown(newroot_norm + '/dev/tty', st.st_uid, st.st_gid)

    # /proc backed pseudo devices: (link, target).
    symlinks = [
        ('/dev/fd', '/proc/self/fd'),
        ('/dev/stdin', '/proc/self/fd/0'),
        ('/dev/stdout', '/proc/self/fd/1'),
        ('/dev/stderr', '/proc/self/fd/2'),
        ('/dev/core', '/proc/kcore'),
    ]
    for link, target in symlinks:
        fs.symlink_safe(newroot_norm + link, target)

    for directory in ['/dev/shm', '/dev/pts', '/dev/mqueue']:
        fs.mkdir_safe(newroot_norm + directory)
    fs_linux.mount_tmpfs(newroot_norm,
                         '/dev/shm',
                         nodev=True,
                         noexec=False,
                         nosuid=True,
                         relatime=False)
    # gid of /dev/pts slaves matches the host's /dev/tty group.
    fs_linux.mount_devpts(newroot_norm,
                          '/dev/pts',
                          gid=st.st_gid,
                          mode='0620',
                          ptmxmode='0666')
    fs.symlink_safe(newroot_norm + '/dev/ptmx', 'pts/ptmx')
    fs_linux.mount_mqueue(newroot_norm, '/dev/mqueue')

    # Passthrough container log to host system logger.
    fs_linux.mount_bind(newroot_norm, '/dev/log', read_only=False)
Ejemplo n.º 5
0
def _create_docker_log_symlink(svc_data_dir):
    """Create app/data/services/docker/data/log pointing at app/data/log.

    :param ``str`` svc_data_dir:
        Path to the app data directory (app/data).
    """
    docker_data_dir = os.path.join(
        os.path.realpath(svc_data_dir), 'services', 'docker', 'data'
    )
    fs.mkdir_safe(docker_data_dir)

    link = os.path.join(docker_data_dir, 'log')
    target = os.path.join(svc_data_dir, 'log')
    _LOGGER.info('linking %s -> %s', link, target)
    fs.symlink_safe(link, target)
Ejemplo n.º 6
0
    def _configure(self, instance_name):
        """Configures and starts the instance based on instance cached event.

        - Runs app_configure --approot <rootdir> cache/<instance>

        :param ``str`` instance_name:
            Name of the instance to configure
        :returns ``bool``:
            True for successfully configured container.
        """
        event_file = os.path.join(
            self.tm_env.cache_dir,
            instance_name
        )

        with lc.LogContext(_LOGGER, instance_name):
            try:
                _LOGGER.info('Configuring')
                container_dir = app_cfg.configure(self.tm_env, event_file,
                                                  self._runtime)
                if container_dir is None:
                    # configure step failed, skip.
                    fs.rm_safe(event_file)
                    return False

                # symlink_safe(link, target)
                # Publish the configured container under running_dir.
                fs.symlink_safe(
                    os.path.join(self.tm_env.running_dir, instance_name),
                    container_dir
                )
                return True

            except exc.ContainerSetupError as err:  # pylint: disable=W0703
                # Known setup failure - abort with the specific reason.
                _LOGGER.exception('Error configuring (%r)', instance_name)
                app_abort.report_aborted(self.tm_env, instance_name,
                                         why=err.reason,
                                         payload=traceback.format_exc())
                fs.rm_safe(event_file)
                return False
            except Exception as err:  # pylint: disable=W0703
                # Unexpected failure - abort with UNKNOWN reason; the cached
                # event is removed on every failure path.
                _LOGGER.exception('Error configuring (%r)', instance_name)
                app_abort.report_aborted(self.tm_env, instance_name,
                                         why=app_abort.AbortedReason.UNKNOWN,
                                         payload=traceback.format_exc())
                fs.rm_safe(event_file)
                return False
Ejemplo n.º 7
0
    def _create_instance(self, path):
        """Create an spawn instance.

        Builds the job directory (run/finish scripts, manifest, finish
        timeout) and links the running dir to the job.

        :param ``str`` path:
            Path to the spawn instance definition.
        """
        job, bucket, running = spawn_utils.get_instance_path(path, self.paths)

        _LOGGER.debug('Creating - (%r, %r)', job, running)

        # Idempotency guard - the running link already exists.
        if os.path.exists(running):
            _LOGGER.debug('Create %r failed - already exists', running)
            return

        inst = instance.Instance(path)
        data_dir = os.path.join(job, spawn.JOB_DATA_DIR)

        fs.mkdir_safe(job)
        fs.mkdir_safe(data_dir)

        utils.create_script(
            os.path.join(job, 'run'),
            'spawn.run',
            id=inst.id,
            name=inst.name,
            service_exit=inst.settings.get('service_exit', False),
            **subproc.get_aliases()
        )

        utils.create_script(
            os.path.join(job, 'finish'),
            'spawn.finish',
            id=inst.id,
            stop=inst.settings.get('stop', False),
            reconnect=inst.settings.get('reconnect', False),
            reconnect_timeout=inst.settings.get('reconnect_timeout', 0),
            **subproc.get_aliases()
        )

        # Persist the manifest as JSON in the job data directory.
        with io.open(os.path.join(data_dir, 'manifest'), 'w') as f:
            f.writelines(
                utils.json_genencode(inst.manifest)
            )

        with io.open(os.path.join(job, 'timeout-finish'), 'w') as f:
            f.write(six.text_type(spawn.JOB_FINISH_TIMEOUT))

        # Activate the instance by linking running -> job.
        fs.symlink_safe(running, job)

        self._scan(bucket)
Ejemplo n.º 8
0
    def _create_instance(self, path):
        """Create an spawn instance.

        Builds the job directory (run/finish scripts, manifest, finish
        timeout) and links the running dir to the job.

        :param ``str`` path:
            Path to the spawn instance definition.
        """
        job, bucket, running = spawn_utils.get_instance_path(path, self.paths)

        _LOGGER.debug('Creating - (%r, %r)', job, running)

        # Idempotency guard - the running link already exists.
        if os.path.exists(running):
            _LOGGER.debug('Create %r failed - already exists', running)
            return

        inst = instance.Instance(path)
        data_dir = os.path.join(job, spawn.JOB_DATA_DIR)

        fs.mkdir_safe(job)
        fs.mkdir_safe(data_dir)

        utils.create_script(
            os.path.join(job, 'run'),
            'spawn.run',
            id=inst.id,
            name=inst.name,
            cellapi=self.paths.cellapi_sock,
            zk2fs=self.paths.zk_mirror_dir)

        utils.create_script(
            os.path.join(job, 'finish'),
            'spawn.finish',
            id=inst.id,
            cellapi=self.paths.cellapi_sock,
            cleanup=self.paths.cleanup_dir,
            # Use defaults for optional settings instead of raising
            # KeyError when they are absent.
            stop=inst.settings.get('stop', False),
            reconnect=inst.settings.get('reconnect', False),
            reconnect_timeout=inst.settings.get('reconnect_timeout', 0))

        # Persist the manifest as JSON in the job data directory.
        with open(os.path.join(data_dir, 'manifest'), 'w') as f:
            json.dump(inst.manifest, f)

        with open(os.path.join(job, 'timeout-finish'), 'w') as f:
            f.write(str(spawn.JOB_FINISH_TIMEOUT))

        # Activate the instance by linking running -> job.
        fs.symlink_safe(running, job)

        self._scan(bucket)
Ejemplo n.º 9
0
    def setUp(self):
        """Build a scratch approot with running apps and archive files."""
        self.root = tempfile.mkdtemp()
        os.environ['TREADMILL_APPROOT'] = self.root
        self.tm_env = appenv.AppEnvironment(root=self.root)

        fs.mkdir_safe(self.tm_env.apps_dir)
        fs.mkdir_safe(self.tm_env.archives_dir)

        # Each app gets a running link pointing at its app directory.
        for appname, inst_id, uniq in (
                ('proid.simplehttp', '0001025686', 'ymweWiRm86C7A'),
                ('proid.myapi.test', '0001027473', 'kJoV4j0DU6dtJ')):
            link = appname + '#' + inst_id
            fs.mkfile_safe(os.path.join(self.tm_env.running_dir, link))

            target = '-'.join((appname, inst_id, uniq))
            fs.mkdir_safe(os.path.join(self.tm_env.apps_dir, target, 'data'))

            fs.symlink_safe(
                os.path.join(self.tm_env.running_dir, link),
                os.path.join(self.tm_env.apps_dir, target),
            )

        # Mix of malformed and well-formed archive file names.
        archive_files = (
            # incorrect file
            'proid.app-foo-bar#123.sys.tar.gz',
            'proid.app#123.sys.tar.gz',
            # correct file
            'proid.app-123-uniq.sys.tar.gz',
            'proid.test.sleep-901-uniq.sys.tar.gz',
        )
        for fname in archive_files:
            fs.mkfile_safe(os.path.join(self.tm_env.archives_dir, fname))

        self.api = local.API()
Ejemplo n.º 10
0
def _prepare_certs(confdir, registries):
    """Prepare certificates for the docker daemon.

    Symlinks each secure registry's CA cert, client cert and client key
    under ``<confdir>/certs.d/<host>/``.
    """
    certs_dir = os.path.join(confdir, 'certs.d')

    for registry in registries:
        # Insecure registries need no TLS material.
        if registry.get('insecure', False):
            continue

        cert_dir = os.path.join(certs_dir, registry['host'])
        fs.mkdir_safe(cert_dir)

        # symlink ca/cert/key in /etc dir
        for key, filename in (('ca_cert', 'ca.crt'),
                              ('client_cert', 'client.cert'),
                              ('client_key', 'client.key')):
            if key in registry:
                fs.symlink_safe(os.path.join(cert_dir, filename),
                                registry[key])
Ejemplo n.º 11
0
def make_fsroot(root_dir, app, data):
    """Initializes directory structure for the container in a new root.

    The container uses pretty much a blank a FHS 3 layout.

     - Bind directories in parent / (with exceptions - see below.)
     - Skip /tmp, create /tmp in the new root with correct permissions.
     - Selectively create / bind /var.
       - /var/tmp (new)
       - /var/log (new)
       - /var/spool - create empty with dirs.
     - Bind everything in /var, skipping /spool/tickets

    :param ``str`` root_dir:
        Container root directory.
    :param app:
        Container app manifest.
    :param ``dict`` data:
        Local configuration data.
    """
    newroot_norm = fs.norm_safe(root_dir)

    # Directories created empty inside the new root.
    emptydirs = [
        '/bin',
        '/dev',
        '/etc',
        '/home',
        '/lib',
        '/lib64',
        '/opt',
        '/proc',
        '/root',
        '/run',
        '/sbin',
        '/sys',
        '/tmp',
        '/usr',
        '/var/cache',
        '/var/empty',
        '/var/empty/sshd',
        '/var/lib',
        '/var/lock',
        '/var/log',
        '/var/opt',
        '/var/spool',
        '/var/tmp',
        '/var/spool/keytabs',
        '/var/spool/tickets',
        '/var/spool/tokens',
        # for SSS
        '/var/lib/sss',
    ]

    # World-writable directories that get the sticky bit (like /tmp).
    stickydirs = [
        '/opt',
        '/run',
        '/tmp',
        '/var/cache',
        '/var/lib',
        '/var/lock',
        '/var/log',
        '/var/opt',
        '/var/tmp',
        '/var/spool/keytabs',
        '/var/spool/tickets',
        '/var/spool/tokens',
    ]

    # these folders are shared with underlying host and other containers,
    mounts = [
        '/bin',
        '/etc',  # TODO: Add /etc/opt
        '/lib',
        '/lib64',
        '/root',
        '/sbin',
        '/usr',
        # for SSS
        '/var/lib/sss',
        # TODO: Remove below once PAM UDS is implemented
        os.path.expandvars('${TREADMILL_APPROOT}/env'),
        os.path.expandvars('${TREADMILL_APPROOT}/spool'),
    ]

    for directory in emptydirs:
        fs.mkdir_safe(newroot_norm + directory)

    for directory in stickydirs:
        os.chmod(newroot_norm + directory, 0o777 | stat.S_ISVTX)

    # /var/empty must be owned by root and not group or world-writable.
    os.chmod(os.path.join(newroot_norm, 'var/empty'), 0o711)

    # Mount a new sysfs for the container, bring in the /sys/fs subtree from
    # the host.
    fs_linux.mount_sysfs(newroot_norm)
    fs_linux.mount_bind(newroot_norm,
                        os.path.join(os.sep, 'sys', 'fs'),
                        recursive=True,
                        read_only=False)

    make_dev(newroot_norm)
    # Passthrough node devices per the node config data
    extra_devices = data.get('runtime', {}).get('passthrough_devices', [])
    make_extra_dev(newroot_norm, extra_devices, app.proid)

    # Per FHS3 /var/run should be a symlink to /run which should be tmpfs
    fs.symlink_safe(os.path.join(newroot_norm, 'var', 'run'), '/run')
    # We create an unbounded tmpfs mount so that runtime data can be written to
    # it, counting against the memory limit of the container.
    fs_linux.mount_tmpfs(newroot_norm, '/run')

    # Make shared directories/files readonly to container
    for mount in mounts:
        if os.path.exists(mount):
            fs_linux.mount_bind(newroot_norm,
                                mount,
                                recursive=True,
                                read_only=True)

    # /etc/docker is a file necessary for docker daemon
    _docker.mount_docker_daemon_path(newroot_norm, app)
Ejemplo n.º 12
0
def make_fsroot(root_dir, app):
    """Initializes directory structure for the container in a new root.

    The container uses pretty much a blank a FHS 3 layout.

     - Bind directories in parent / (with exceptions - see below.)
     - Skip /tmp, create /tmp in the new root with correct permissions.
     - Selectively create / bind /var.
       - /var/tmp (new)
       - /var/log (new)
       - /var/spool - create empty with dirs.
     - Bind everything in /var, skipping /spool/tickets

     :param ``str`` root_dir:
         Container root directory.
     :param app:
         Container app manifest (``app.docker`` enables docker tmpfs).
     :raises exc.ContainerSetupError:
         If the docker tmpfs mount point is missing.
     """
    newroot_norm = fs.norm_safe(root_dir)

    # Directories created empty inside the new root.
    emptydirs = [
        '/bin',
        '/dev',
        '/etc',
        '/home',
        '/lib',
        '/lib64',
        '/opt',
        '/proc',
        '/root',
        '/run',
        '/sbin',
        '/sys',
        '/tmp',
        '/usr',
        '/var/cache',
        '/var/empty',
        '/var/empty/sshd',
        '/var/lib',
        '/var/lock',
        '/var/log',
        '/var/opt',
        '/var/spool',
        '/var/tmp',
        '/var/spool/keytabs',
        '/var/spool/tickets',
        '/var/spool/tokens',
        # for SSS
        '/var/lib/sss',
    ]

    # World-writable directories that get the sticky bit (like /tmp).
    stickydirs = [
        '/opt',
        '/run',
        '/tmp',
        '/var/cache',
        '/var/lib',
        '/var/lock',
        '/var/log',
        '/var/opt',
        '/var/tmp',
        '/var/spool/keytabs',
        '/var/spool/tickets',
        '/var/spool/tokens',
    ]

    # these folders are shared with underlying host and other containers,
    mounts = [
        '/bin',
        '/etc',  # TODO: Add /etc/opt
        '/lib',
        '/lib64',
        '/root',
        '/sbin',
        '/usr',
        # for SSS
        '/var/lib/sss',
        # TODO: Remove below once PAM UDS is implemented
        os.path.expandvars('${TREADMILL_APPROOT}/env'),
        os.path.expandvars('${TREADMILL_APPROOT}/spool'),
    ]

    for directory in emptydirs:
        fs.mkdir_safe(newroot_norm + directory)

    for directory in stickydirs:
        os.chmod(newroot_norm + directory, 0o777 | stat.S_ISVTX)

    # /var/empty must be owned by root and not group or world-writable.
    os.chmod(os.path.join(newroot_norm, 'var/empty'), 0o711)

    # Bring the host's /sys subtree into the container.
    fs_linux.mount_bind(newroot_norm,
                        os.path.join(os.sep, 'sys'),
                        source='/sys',
                        recursive=True,
                        read_only=False)

    make_dev(newroot_norm)

    # Per FHS3 /var/run should be a symlink to /run which should be tmpfs
    fs.symlink_safe(os.path.join(newroot_norm, 'var', 'run'), '/run')
    # We create an unbounded tmpfs mount so that runtime data can be written to
    # it, counting against the memory limit of the container.
    fs_linux.mount_tmpfs(newroot_norm, '/run')

    # Make shared directories/files readonly to container
    for mount in mounts:
        if os.path.exists(mount):
            fs_linux.mount_bind(newroot_norm,
                                mount,
                                recursive=True,
                                read_only=True)

    if hasattr(app, 'docker') and app.docker:
        # If unable to mount docker directory, we throw Aborted events
        try:
            _mount_docker_tmpfs(newroot_norm)
        except FileNotFoundError as err:
            _LOGGER.error('Failed to mount docker tmpfs: %s', err)
            # this exception is caught by sproc run to generate abort event
            raise exc.ContainerSetupError(
                msg=str(err),
                reason=app_abort.AbortedReason.UNSUPPORTED,
            )
Ejemplo n.º 13
0
    def _synchronize(self):
        """Synchronize apps to running/cleanup.

        We need to re-validate three things on startup:

          - All configured apps should have an associated cache entry.
            Otherwise, create a link to cleanup.

          - All configured apps with a cache entry and with a cleanup file
            should be linked to cleanup. Otherwise, link to running.

          - Additional cache entries should be configured to run.

        On restart we need to validate another three things:

          - All configured apps that have a running link should be checked
            if in the cache. If not then terminate the app.

          - All configured apps that have a cleanup link should be left
            alone as this is handled.

          - Additional cache entries should be configured to run.

        On startup run.sh will clear running and cleanup which simplifies
        the logic for us as we can check running/cleanup first. Then check
        startup conditions and finally non-configured apps that are in cache.

        NOTE: a link cannot exist in running and cleanup at the same time.

        """
        # Disable R0912(too-many-branches)
        # pylint: disable=R0912
        # All app container directories currently configured on the node.
        configured = {
            os.path.basename(filename)
            for filename in glob.glob(os.path.join(self.tm_env.apps_dir, '*'))
        }
        # Cached scheduler events, keyed by instance name.
        cached = {
            os.path.basename(filename): appcfg.eventfile_unique_name(filename)
            for filename in glob.glob(os.path.join(self.tm_env.cache_dir, '*'))
        }

        for container in configured:
            appname = appcfg.app_name(container)
            if os.path.exists(os.path.join(self.tm_env.running_dir, appname)):
                # App already running.. check if in cache.
                # No need to check if needs cleanup as that is handled
                if appname not in cached or cached[appname] != container:
                    self._terminate(appname)
                else:
                    _LOGGER.info('Ignoring %s as it is running', appname)

                cached.pop(appname, None)

            elif os.path.exists(os.path.join(self.tm_env.cleanup_dir,
                                             appname)):
                # Already in the process of being cleaned up
                _LOGGER.info('Ignoring %s as it is in cleanup', appname)
                cached.pop(appname, None)

            else:
                # Not running and not in cleanup - decide between restart
                # and cleanup.
                needs_cleanup = True
                if appname in cached and cached[appname] == container:
                    data_dir = os.path.join(self.tm_env.apps_dir, container,
                                            'data')
                    # Any of these marker files means the container already
                    # exited/aborted and must not be restarted.
                    for cleanup_file in ['exitinfo', 'aborted', 'oom']:
                        path = os.path.join(data_dir, cleanup_file)
                        if os.path.exists(path):
                            _LOGGER.debug('Found cleanup file %r', path)
                            break
                    else:
                        # No marker file found - safe to (re)configure.
                        if self._configure(appname):
                            needs_cleanup = False
                            _LOGGER.debug('Added existing app %r', appname)

                    cached.pop(appname, None)

                if needs_cleanup:
                    fs.symlink_safe(
                        os.path.join(self.tm_env.cleanup_dir, appname),
                        os.path.join(self.tm_env.apps_dir, container))
                    _LOGGER.debug('Removed %r', appname)

        # Remaining cache entries are new apps to configure.
        for appname in six.iterkeys(cached):
            if self._configure(appname):
                _LOGGER.debug('Added new app %r', appname)

        self._refresh_supervisor()
Ejemplo n.º 14
0
def make_fsroot(root_dir):
    """Initializes directory structure for the container in a new root.

    The container uses pretty much a blank a FHS 3 layout.

     - Bind directories in parent / (with exceptions - see below.)
     - Skip /tmp, create /tmp in the new root with correct permissions.
     - Selectively create / bind /var.
       - /var/tmp (new)
       - /var/log (new)
       - /var/spool - create empty with dirs.
     - Bind everything in /var, skipping /spool/tickets

     :param ``str`` root_dir:
         Container root directory.
     """
    newroot_norm = fs.norm_safe(root_dir)

    # Directories created empty inside the new root.
    emptydirs = [
        '/bin',
        '/dev',
        '/etc',
        '/home',
        '/lib',
        '/lib64',
        '/opt',
        '/proc',
        '/root',
        '/run',
        '/sbin',
        '/sys',
        '/tmp',
        '/usr',
        '/var/cache',
        '/var/empty',
        '/var/lib',
        '/var/lock',
        '/var/log',
        '/var/opt',
        '/var/spool',
        '/var/tmp',
        '/var/spool/keytabs',
        '/var/spool/tickets',
        '/var/spool/tokens',
        # for SSS
        '/var/lib/sss',
    ]

    # World-writable directories that get the sticky bit (like /tmp).
    stickydirs = [
        '/opt',
        '/run',
        '/tmp',
        '/var/cache',
        '/var/lib',
        '/var/lock',
        '/var/log',
        '/var/opt',
        '/var/tmp',
        '/var/spool/keytabs',
        '/var/spool/tickets',
        '/var/spool/tokens',
    ]

    # these folders are shared with underlying host and other containers,
    mounts = [
        '/bin',
        '/etc',  # TODO: Add /etc/opt
        '/lib',
        '/lib64',
        '/root',
        '/sbin',
        '/usr',
        # for SSS
        '/var/lib/sss',
        # TODO: Remove below once PAM UDS is implemented
        '/var/tmp/treadmill/env',
        '/var/tmp/treadmill/spool',
    ]

    # Add everything under /opt
    mounts += glob.glob('/opt/*')

    for directory in emptydirs:
        fs.mkdir_safe(newroot_norm + directory)

    for directory in stickydirs:
        os.chmod(newroot_norm + directory, 0o777 | stat.S_ISVTX)

    # /var/empty must be owned by root and not group or world-writable.
    os.chmod(os.path.join(newroot_norm, 'var/empty'), 0o711)

    # Bring the host's /sys subtree into the container.
    fs_linux.mount_bind(newroot_norm,
                        os.path.join(os.sep, 'sys'),
                        source='/sys',
                        recursive=True,
                        read_only=False)
    # TODO: For security, /dev/ should be minimal and separated to each
    #       container.
    fs_linux.mount_bind(newroot_norm,
                        os.path.join(os.sep, 'dev'),
                        source='/dev',
                        recursive=True,
                        read_only=False)
    # Per FHS3 /var/run should be a symlink to /run which should be tmpfs
    fs.symlink_safe(os.path.join(newroot_norm, 'var', 'run'), '/run')
    # We create an unbounded tmpfs mount so that runtime data can be written to
    # it, counting against the memory limit of the container.
    fs_linux.mount_tmpfs(newroot_norm, '/run')

    # Make shared directories/files readonly to container
    for mount in mounts:
        if os.path.exists(mount):
            fs_linux.mount_bind(newroot_norm,
                                mount,
                                recursive=True,
                                read_only=True)
Ejemplo n.º 15
0
    def _add_cleanup_app(self, path):
        """Configure a new cleanup app.

        Creates a supervised service running ``sproc cleanup instance`` for
        the app and links it into the cleaning directory.
        """
        name = os.path.basename(path)

        # Hidden (dot-prefixed) entries are never cleanup candidates.
        if name.startswith('.'):
            _LOGGER.warning('Ignore %s', name)
            return

        cleaning_link = os.path.join(self.tm_env.cleaning_dir, name)
        if os.path.islink(cleaning_link):
            _LOGGER.warning('Cleaning app already configured %s', name)
            return

        cleanup_link = os.path.join(self.tm_env.cleanup_dir, name)
        if not os.path.islink(cleanup_link):
            _LOGGER.info('Ignore - not a link: %s', cleanup_link)
            return

        _LOGGER.info('Configure cleaning app: %s', name)

        # Build the platform-specific cleanup command.
        if os.name == 'posix':
            command = (
                'exec {tm} sproc cleanup instance'
                ' --approot {tm_root}'
                ' {instance}'
            ).format(
                tm=dist.TREADMILL_BIN,
                tm_root=self.tm_env.root,
                instance=name
            )
        else:
            # On Windows, invoke the module through the interpreter.
            command = (
                '{python} -m treadmill.ms sproc cleanup instance'
                ' --approot {tm_root}'
                ' {instance}'
            ).format(
                python=sys.executable,
                tm_root=self.tm_env.root,
                instance=name
            )

        supervisor.create_service(
            self.tm_env.cleanup_apps_dir,
            name=name,
            app_run_script=command,
            userid='root',
            monitor_policy={
                'limit': 5,
                'interval': 60,
            },
            log_run_script=None,
        )

        fs.symlink_safe(
            cleaning_link,
            os.path.join(self.tm_env.cleanup_apps_dir, name)
        )

        _LOGGER.debug('Cleanup app %s ready', name)

        self._refresh_supervisor()