Ejemplo n.º 1
0
def _install(package, src_dir, dst_dir, params, prefix_len=None, rec=None):
    """Recursively render package resources from src_dir into dst_dir.

    Directories are created (and recorded in ``rec`` with a trailing
    separator); regular files are rendered through ``_render`` with
    ``params``, except editor swap files (``.swp``), which are skipped.
    """
    pkg_name = package.__name__
    if prefix_len is None:
        # Strip '<src_dir>/' from resource paths to get relative targets.
        prefix_len = len(src_dir) + 1

    for entry in pkg_resources.resource_listdir(pkg_name, src_dir):
        src_path = os.path.join(src_dir, entry)
        target = os.path.join(dst_dir, src_path[prefix_len:])

        if pkg_resources.resource_isdir(pkg_name, src_path):
            fs.mkdir_safe(target)
            if rec:
                # Trailing separator marks directories in the record file.
                rec.write('%s\n' % os.path.join(target, ''))
            _install(package, src_path, dst_dir, params,
                     prefix_len=prefix_len, rec=rec)
            continue

        if src_path.endswith('.swp'):
            continue

        _LOGGER.info('Render: %s => %s', src_path, target)
        raw = pkg_resources.resource_string(pkg_name, src_path)
        if rec:
            rec.write('%s\n' % target)
        _update(target, _render(raw.decode('utf8'), params))
Ejemplo n.º 2
0
 def _write_app_yaml(self, event, manifest_str):
     """Atomically create the app.yaml manifest at the *event* path.

     Writes to a temp file in the event's directory first, then renames
     it over the final path so readers never observe a partial file.
     """
     event_dir = os.path.dirname(event)
     fs.mkdir_safe(event_dir)
     with tempfile.NamedTemporaryFile(dir=event_dir, delete=False) as tmp:
         tmp.write(manifest_str)
     os.rename(tmp.name, event)
Ejemplo n.º 3
0
    def zk2fs_cmd(root, endpoints, identity_groups, identity_groups_meta,
                  appgroups, running, scheduled, servers, servers_data,
                  placement, trace, server_trace, app_monitors, once):
        """Mirror selected Zookeeper subtrees to the filesystem under *root*.

        Each boolean flag enables syncing of one subtree; ``once`` makes
        the command exit after the initial sync instead of running forever.
        NOTE(review): the original docstring said "Starts appcfgmgr
        process" -- presumably a copy/paste artifact.
        """

        fs.mkdir_safe(root)

        # Staging area used by the syncer for atomic file updates.
        tmp_dir = os.path.join(root, '.tmp')
        fs.mkdir_safe(tmp_dir)

        zk2fs_sync = zk2fs.Zk2Fs(context.GLOBAL.zk.conn, root, tmp_dir)

        if servers or servers_data:
            zk2fs_sync.sync_children(z.path.server(), watch_data=servers_data)

        if running:
            # Running are ephemeral, and will be added/remove automatically.
            zk2fs_sync.sync_children(z.path.running())

        if endpoints:
            # Endpoints use custom add/del hooks to maintain per-proid trees.
            zk2fs_sync.sync_children(
                z.ENDPOINTS,
                on_add=lambda p: _on_add_endpoint_proid(zk2fs_sync, p),
                on_del=lambda p: _on_del_endpoint_proid(zk2fs_sync, p))

        if identity_groups:
            zk2fs_sync.sync_children(
                z.IDENTITY_GROUPS,
                on_add=lambda p: _on_add_identity(zk2fs_sync, p,
                                                  identity_groups_meta),
                on_del=lambda p: _on_del_identity(zk2fs_sync, p))

        if scheduled:
            zk2fs_sync.sync_children(z.path.scheduled())

        if appgroups:
            zk2fs_sync.sync_children(z.path.appgroup(), watch_data=True)

        if app_monitors:
            zk2fs_sync.sync_children(z.path.appmonitor(), watch_data=True)

        if placement:
            zk2fs_sync.sync_children(
                z.path.placement(),
                on_add=lambda p: _on_add_placement_server(zk2fs_sync, p),
                on_del=lambda p: _on_del_placement_server(zk2fs_sync, p))

        if trace:
            _sync_trace(zk2fs_sync, z.TRACE, z.TRACE_HISTORY,
                        app_zk.TRACE_SOW_DIR)

        if server_trace:
            _sync_trace(zk2fs_sync, z.SERVER_TRACE, z.SERVER_TRACE_HISTORY,
                        server_zk.SERVER_TRACE_SOW_DIR)

        # Signal to consumers that the initial sync is complete.
        zk2fs_sync.mark_ready()

        if not once:
            # Keep the process (and thus its ZK watches) alive indefinitely.
            while True:
                time.sleep(100000)
Ejemplo n.º 4
0
 def write(self):
     """Write down the service definition.

     Persists the run/finish/log-run scripts plus the optional knobs
     (``down``, ``timeout-finish``, ``notification-fd``) into the
     service directory.

     :raises ValueError: when neither a run script nor an existing run
         file is available.
     """
     super(LongrunService, self).write()
     # Mandatory settings
     if self._run_script is None and not os.path.exists(self._run_file):
         raise ValueError('Invalid LongRun service: not run script')
     elif self._run_script is not None:
         script_write(self._run_file, self._run_script)
         # Handle the case where the run script is a generator
         if not isinstance(self._run_script, str):
             self._run_script = None
     # Optional settings
     if self._finish_script is not None:
         script_write(self._finish_file, self._finish_script)
         # Handle the case where the finish script is a generator
         if not isinstance(self._finish_script, str):
             self._finish_script = None
     if self._log_run_script is not None:
         # Create the log dir on the spot
         fs.mkdir_safe(os.path.dirname(self._log_run_file))
         script_write(self._log_run_file, self._log_run_script)
         # Handle the case where the run script is a generator
         if not isinstance(self._log_run_script, str):
             self._log_run_script = None
     if self._default_down:
         # An empty 'down' file tells the supervisor not to auto-start.
         data_write(os.path.join(self._dir, 'down'), None)
     else:
         fs.rm_safe(os.path.join(self._dir, 'down'))
     if self._timeout_finish is not None:
         value_write(os.path.join(self._dir, 'timeout-finish'),
                     self._timeout_finish)
     if self._notification_fd is not None:
         value_write(os.path.join(self._dir, 'notification-fd'),
                     self._notification_fd)
Ejemplo n.º 5
0
 def write(self):
     """Persist the control directory and every registered service."""
     fs.mkdir_safe(self._control_dir)
     if self._services is None:
         return
     for service in self._services.values():
         service.write()
Ejemplo n.º 6
0
    def test_sync_children_datawatch(self):
        """Children sync with data watches creates files and data watches."""
        # Exercising protected members.
        # pylint: disable=W0212
        tree = {
            'a': {
                'x': '1',
                'y': '2',
                'z': '3',
            },
        }

        self.make_mock_zk(tree)

        sync = zksync.Zk2Fs(kazoo.client.KazooClient(), self.root)
        fs.mkdir_safe(os.path.join(self.root, 'a'))
        sync._children_watch('/a', ['x', 'y', 'z'],
                             True,
                             sync._default_on_add,
                             sync._default_on_del)

        # Each child node is mirrored to a file with its data ...
        for node, data in (('x', '1'), ('y', '2'), ('z', '3')):
            self._check_file('a/' + node, data)

        # ... and a data watch is registered for it.
        for node in ('x', 'y', 'z'):
            self.assertIn('/a/' + node, sync.watches)
Ejemplo n.º 7
0
    def test_wait(self):
        """Test waiting for service status change."""
        # Disable W0212: accessing protected member
        # pylint: disable=W0212

        svcroot = os.path.join(self.root, 'xxx')
        fs.mkdir_safe(os.path.join(svcroot, 'a'))
        fs.mkdir_safe(os.path.join(svcroot, 'b'))
        # No subset: both service dirs are passed to s6-svwait.
        supervisor._service_wait(svcroot, '-u', '-o')
        expected_cmd = [
            's6-svwait', '-u', '-t', '0', '-o', svcroot + '/a', svcroot + '/b'
        ]
        actual_cmd = treadmill.subproc.check_call.call_args[0][0]
        # NOTE(review): assertItemsEqual exists only in Python 2 unittest
        # (renamed assertCountEqual in Python 3) -- confirm target runtime.
        self.assertItemsEqual(expected_cmd, actual_cmd)
        treadmill.subproc.check_call.assert_called_with(actual_cmd)

        # Subset given as a list restricts the wait to the named services.
        treadmill.subproc.check_call.reset_mock()
        supervisor._service_wait(svcroot, '-u', '-o', subset=['a'])
        treadmill.subproc.check_call.assert_called_with(
            ['s6-svwait', '-u', '-t', '0', '-o', svcroot + '/a'])

        # Subset given as a dict behaves the same (its keys are used).
        treadmill.subproc.check_call.reset_mock()
        supervisor._service_wait(svcroot, '-u', '-o', subset={'a': 1})
        treadmill.subproc.check_call.assert_called_with(
            ['s6-svwait', '-u', '-t', '0', '-o', svcroot + '/a'])

        # Empty subset: nothing to wait for, s6-svwait is not invoked.
        treadmill.subproc.check_call.reset_mock()
        supervisor._service_wait(svcroot, '-u', '-o', subset=[])
        self.assertFalse(treadmill.subproc.check_call.called)
Ejemplo n.º 8
0
    def configure(self, container_dir, app):
        """Lay out empty and sticky directories inside the container root."""
        newroot_norm = fs.norm_safe(os.path.join(container_dir, 'root'))

        # No host paths are bind-mounted by this layout.
        mounts = []

        emptydirs = [
            '/u',
            '/var/account',
            '/var/empty',
            '/var/lock',
            '/var/log',
            '/var/run',
        ]

        stickydirs = ['/opt']

        for mount in mounts:
            if os.path.exists(mount):
                fs.mount_bind(newroot_norm, mount)

        for directory in emptydirs:
            fs.mkdir_safe(newroot_norm + directory)

        # World-writable with the sticky bit, like /tmp.
        for directory in stickydirs:
            os.chmod(newroot_norm + directory, 0o777 | stat.S_ISVTX)
Ejemplo n.º 9
0
    def test_sync_data(self):
        """Data watch events create, delete and re-create the synced file."""
        # Accessing protected members.
        # pylint: disable=W0212
        zk_content = {
            'a': {
                'x': '1',
                'y': '2',
                'z': '3',
            },
        }

        self.make_mock_zk(zk_content)
        zk2fs_sync = zksync.Zk2Fs(kazoo.client.KazooClient(), self.root)
        fs.mkdir_safe(os.path.join(self.root, 'a'))

        # CREATED event writes the node data to the mirrored file.
        event = kazoo.protocol.states.WatchedEvent(
            'CREATED', 'CONNECTED', '/a/x')
        zk2fs_sync._data_watch('/a/x', 'aaa', None, event)

        self._check_file('a/x', 'aaa')

        # DELETED event removes the file even though data is still passed.
        event = kazoo.protocol.states.WatchedEvent(
            'DELETED', 'CONNECTED', '/a/x')
        zk2fs_sync._data_watch('/a/x', 'aaa', None, event)
        self.assertFalse(os.path.exists(os.path.join(self.root, 'a/x')))

        # Re-creation restores the file.
        event = kazoo.protocol.states.WatchedEvent(
            'CREATED', 'CONNECTED', '/a/x')
        zk2fs_sync._data_watch('/a/x', 'aaa', None, event)
        self._check_file('a/x', 'aaa')

        # No event and no data means the node is gone: file is removed.
        zk2fs_sync._data_watch('/a/x', None, None, None)
        self.assertFalse(os.path.exists(os.path.join(self.root, 'a/x')))
Ejemplo n.º 10
0
def make_fsroot(root_dir, emptydirs, stickydirs, mounts):
    """Initializes directory structure for the container in a new root.

    Creates the requested empty dirs, applies the sticky bit to sticky
    dirs, and bind mounts everything in ``mounts`` except the reserved
    paths (those are mounted on memory in make_osroot).
    """
    _LOGGER.info('Creating fs root in: %s', root_dir)
    for path in sorted(emptydirs):
        fs.mkdir_safe(root_dir + path)

    for path in sorted(stickydirs):
        os.chmod(root_dir + path, 0o777 | stat.S_ISVTX)

    # Make shared directories/files readonly to container
    reserved = frozenset([
        '/run',
        '/sys/fs',
        '/var/spool/tickets',
        '/var/spool/keytabs',
        '/var/spool/tokens',
    ])

    for target, mount_args in mounts.items():
        # These are reserved, mounted on memory in make_osroot.
        if target in reserved:
            continue

        if mount_args:
            fs_linux.mount_bind(root_dir, target, **mount_args)
        else:
            # Default: recursive read-only bind.
            fs_linux.mount_bind(
                root_dir, target,
                recursive=True, read_only=True
            )
Ejemplo n.º 11
0
    def test_execute_pid1_aborted(self):
        """Test shutting down of the node.

        Verifies that a SIGABRT exit of a container's pid1 flags the app
        as aborted and stops/cleans up its supervision tree.
        """
        # Minimal stand-in for the Treadmill environment: only the two
        # directories the cleanup action touches.
        mock_tm_env_class = collections.namedtuple(
            'MockTMEnv', ['running_dir', 'cleanup_dir'])
        mock_tm_env = mock_tm_env_class(os.path.join(self.root, 'running'),
                                        os.path.join(self.root, 'cleanup'))

        service_dir = os.path.join(mock_tm_env.running_dir, 'mock_service')
        fs.mkdir_safe(service_dir)

        with io.open(os.path.join(service_dir, 'type'), 'w') as f:
            f.write('longrun')

        mock_container_cleanup_action =\
            monitor.MonitorContainerCleanup(mock_tm_env, {})

        # Signal 6 == SIGABRT, i.e. pid1 aborted.
        res = mock_container_cleanup_action.execute({
            'signal': 6,
            'id': 'mock_service',
        })

        # This MonitorContainerCleanup stops the monitor.
        self.assertEqual(res, True)

        treadmill.appcfg.abort.flag_aborted.assert_called_with(
            os.path.join(service_dir, 'data'),
            why=treadmill.appcfg.abort.AbortedReason.PID1)
        os.replace.assert_called()

        # The running svscan tree is alarmed and nuked.
        supervisor.control_svscan.assert_called_with(
            os.path.join(self.root, 'running'), [
                supervisor.SvscanControlAction.alarm,
                supervisor.SvscanControlAction.nuke
            ])
Ejemplo n.º 12
0
def configure(_approot, newroot, _app):
    """Configure layout in chroot.

    Creates the standard empty directories, marks /opt sticky and bind
    mounts any configured host paths into the normalized new root.
    """
    newroot_norm = fs.norm_safe(newroot)

    # No host paths are bind-mounted by this layout.
    mounts = [
    ]

    emptydirs = [
        '/u',
        '/var/account',
        '/var/empty',
        '/var/lock',
        '/var/log',
        '/var/run',
    ]

    stickydirs = [
        '/opt',
    ]

    for mount in mounts:
        if os.path.exists(mount):
            fs.mount_bind(newroot_norm, mount)

    for directory in emptydirs:
        fs.mkdir_safe(newroot_norm + directory)

    for directory in stickydirs:
        # BUGFIX: '0777' is Python 2 octal syntax and a SyntaxError on
        # Python 3; use the 0o777 literal (same value) instead.
        os.chmod(newroot_norm + directory, 0o777 | stat.S_ISVTX)
Ejemplo n.º 13
0
def request_keytabs(zkclient, app_name, spool_dir, pattern):
    """Fetch the container's VIP keytabs from one of the keytab lockers.

    :param zkclient: Existing zk connection.
    :param app_name: Appname of container.
    :param spool_dir: Path to keep keytabs fetched from keytab locker.
    :param pattern: App pattern for discovery endpoint of locker.
    :raises keytabs2.KeytabClientError: when no locker could serve keytabs.
    """
    hostports = _get_locker_hostports(zkclient, pattern)
    fs.mkdir_safe(spool_dir)

    # Try each locker in turn; first success wins.
    for host, port in hostports:
        try:
            with connect_endpoint(host, port) as client:
                keytabs = get_app_keytabs(client, app_name)
                _write_keytabs(keytabs, spool_dir)
            return
        # pylint: disable=broad-except
        except Exception as err:
            _LOGGER.warning('Failed to get keytab from %s:%d: %r', host, port,
                            err)

    # if no host, port can provide keytab
    raise keytabs2.KeytabClientError(
        'Failed to get keytabs from {}'.format(hostports))
Ejemplo n.º 14
0
    def register(self, service):
        """Load the service's exit policy and prepare its exits log dir.

        :returns: Real path of the exits directory, or ``None`` when the
            service has no policy file (i.e. should not be monitored).
        """
        self._service = service
        policy_file = os.path.join(service.data_dir, supervisor.POLICY_JSON)
        try:
            with io.open(policy_file) as f:
                policy_conf = json.load(f)
            self._policy_limit = policy_conf['limit']
            self._policy_interval = policy_conf['interval']

        except IOError as err:
            # A missing policy simply means "do not monitor".
            if err.errno != errno.ENOENT:
                raise
            _LOGGER.warning('No policy file found for %r', service)
            return None

        exits_log = os.path.join(service.data_dir, supervisor.EXITS_DIR)
        fs.mkdir_safe(exits_log)
        self._service_exits_log = exits_log

        _LOGGER.info('monitoring %r with limit:%d interval:%d', self._service,
                     self._policy_limit, self._policy_interval)

        return os.path.realpath(exits_log)
Ejemplo n.º 15
0
    def configure(self, container_dir, app):
        """Provision Treadmill tooling dirs and read-only binds in the root."""
        newroot_norm = fs.norm_safe(os.path.join(container_dir, 'root'))

        # Same paths are first created empty, then bind-mounted read-only.
        tool_dirs = [
            '/opt/s6',
            '/opt/treadmill',
            '/opt/treadmill-bind',
        ]

        for directory in tool_dirs:
            fs.mkdir_safe(newroot_norm + directory)

        # Note: the original also had an (empty) stickydirs list whose
        # chmod loop never executed; it is omitted here.
        for mount in tool_dirs:
            if os.path.exists(mount):
                fs_linux.mount_bind(newroot_norm,
                                    mount,
                                    recursive=True,
                                    read_only=True)
Ejemplo n.º 16
0
def _get_app_metrics(endpoint, instance, uniq='running', outdir=None):
    """Retrieve app metrics rrd file into *outdir*.

    :param endpoint: Discovery endpoint dict with a ``hostport`` key.
    :param instance: Instance name whose metrics to fetch.
    :param uniq: Unique id of the container ('running' by default).
    :param outdir: Output directory; must be provided by the caller.
    :raises ValueError: when *outdir* is not given.
    """
    # ``outdir`` only looks optional: mkdir_safe(None) would fail with an
    # obscure TypeError, so fail fast with a clear message instead.
    if outdir is None:
        raise ValueError('outdir is required')

    fs.mkdir_safe(outdir)

    api = 'http://{}'.format(endpoint['hostport'])
    _download_rrd(api, _metrics_url(instance, uniq),
                  _rrdfile(outdir, instance, uniq))
Ejemplo n.º 17
0
def request_keytabs(zkclient, proid, vips, spool_dir):
    """Request VIP keytabs from the keytab locker.

    :param zkclient: Existing zk connection.
    :param proid: Proid in container appname.
    :param vips: VIP host list defined in manifest.
    :param spool_dir: Path to keep keytabs fetched from keytab locker.
    :returns: ``True`` if some locker served the keytabs, else ``False``.
    """
    pattern = "{0}.keytabs-v2".format(os.environ['TREADMILL_ID'])

    # Collect (host, port) candidates from discovery, skipping empties.
    candidates = []
    for _app, hostport in discovery.iterator(zkclient, pattern, 'keytabs',
                                             False):
        if not hostport:
            continue
        host, port = hostport.split(':')
        candidates.append((host, int(port)))

    # Randomize to spread load across lockers.
    random.shuffle(candidates)

    for host, port in candidates:
        fs.mkdir_safe(spool_dir)
        if _get_keytabs_from(host, port, proid, vips, spool_dir):
            return True

    return False
Ejemplo n.º 18
0
def make_extra_dev(newroot_norm, extra_devices, owner):
    """Create all the configured "extra" passthrough devices.

    :param ``str`` newroot_norm:
        Path to the container root directory.
    :param ``list`` extra_devices:
        List of extra device specification.
    :param ``str`` owner:
        Username of the owner of the new devices.
    """
    (uid, gid) = utils.get_uid_gid(owner)
    for extra_dev in extra_devices:
        # Only nodes under /dev may be passed through.
        if not extra_dev.startswith('/dev'):
            _LOGGER.warning('Bad passthrough device %r.', extra_dev)
            continue

        try:
            dev_stat = os.stat(extra_dev)
        except OSError as err:
            # BUGFIX: include the actual error in the log message --
            # ``err`` was captured but never used.
            _LOGGER.warning('Failed to stat() %r: %r. Skipping.',
                            extra_dev, err)
            continue

        if stat.S_ISDIR(dev_stat.st_mode):
            _LOGGER.warning('Cannot Passthrough directory %r', extra_dev)
            continue

        passthrough_dev = os.path.join(newroot_norm, extra_dev[1:])

        if os.path.dirname(extra_dev) != '/dev':
            # We have to create more directories under '/dev'
            fs.mkdir_safe(os.path.dirname(passthrough_dev))

        # Recreate the node with the same type/rdev, owner-only access.
        os.mknod(passthrough_dev,
                 stat.S_IFMT(dev_stat.st_mode) | 0o600, dev_stat.st_rdev)
        os.chown(passthrough_dev, uid, gid)
Ejemplo n.º 19
0
def request_keytabs(zkclient, app_name, spool_dir, pattern):
    """Request VIP keytabs from the keytab locker.

    :param zkclient: Existing zk connection.
    :param app_name: Appname of container.
    :param spool_dir: Path to keep keytabs fetched from keytab locker.
    :param pattern: App pattern for discovery endpoint of locker.
    :raises keytabs2.KeytabClientError: when no locker can provide keytabs.
    """
    # Collect (host, port) candidates from discovery, skipping empties.
    candidates = []
    for _app, hostport in discovery.iterator(zkclient, pattern, 'keytabs',
                                             False):
        if not hostport:
            continue
        host, port = hostport.split(':')
        candidates.append((host, int(port)))

    # Randomize to spread load across lockers.
    random.shuffle(candidates)

    for host, port in candidates:
        fs.mkdir_safe(spool_dir)
        try:
            with connect_endpoint(host, port) as client:
                dump_keytabs(client, app_name, spool_dir)
            return
        # pylint: disable=broad-except
        except Exception as err:
            _LOGGER.warning('Failed to get keytab from %s:%d: %r', host, port,
                            err)

    # if no host, port can provide keytab
    raise keytabs2.KeytabClientError(
        'Failed to get keytabs from {}'.format(candidates))
Ejemplo n.º 20
0
    def sync_children(self,
                      zkpath,
                      watch_data=False,
                      on_add=None,
                      on_del=None):
        """Sync children of zkpath to fpath.

        :param zkpath: Zookeeper path whose children are mirrored.
        :param watch_data: Also place data watches on each child.
        :param on_add: Optional callback for added children (defaults to
            ``self._default_on_add``).
        :param on_del: Optional callback for removed children (defaults
            to ``self._default_on_del``).
        """

        _LOGGER.info('sync children: zk = %s, watch_data: %s', zkpath,
                     watch_data)

        fpath = self.fpath(zkpath)
        fs.mkdir_safe(fpath)

        if not on_del:
            on_del = self._default_on_del
        if not on_add:
            on_add = self._default_on_add

        # Registering the watch via decorator; it re-fires on membership
        # changes and its return value decides whether the watch renews.
        @self.zkclient.ChildrenWatch(zkpath)
        @exc.exit_on_unhandled
        def _children_watch(children):
            """Callback invoked on children watch."""
            renew = self._children_watch(zkpath, children, watch_data, on_add,
                                         on_del)

            self._update_last()
            return renew
Ejemplo n.º 21
0
    def get(self, url):
        """Download (or copy) a tar image from *url* and verify its hash.

        :param url: http URL or local-path URL; may carry a ``sha256``
            query parameter used to verify the fetched file.
        :returns: ``TarImage`` wrapping the fetched tarball.
        :raises Exception: if the file is not a tarball or the hash
            does not match.
        """
        images_dir = os.path.join(self.tm_env.images_dir, TAR_DIR)
        fs.mkdir_safe(images_dir)

        image = urllib.parse.urlparse(url)
        sha256 = urllib.parse.parse_qs(image.query).get('sha256', None)

        with tempfile.NamedTemporaryFile(dir=images_dir,
                                         delete=False,
                                         prefix='.tmp') as temp:
            if image.scheme == 'http':
                _download(url, temp)
            else:
                _copy(image.path, temp)

        if not tarfile.is_tarfile(temp.name):
            _LOGGER.error('File %r is not a tar file.', url)
            # BUGFIX: the message was never formatted (url was passed as a
            # second positional arg to Exception); also remove the
            # leftover temp file before raising.
            os.unlink(temp.name)
            raise Exception('File {0} is not a tar file.'.format(url))

        new_sha256 = _sha256sum(temp.name)

        if sha256 is not None and sha256[0] != new_sha256:
            _LOGGER.error('Hash does not match %r - %r', sha256[0], new_sha256)
            # BUGFIX: same unformatted-message problem; include both the
            # url and actual hash, and clean up the temp file.
            os.unlink(temp.name)
            raise Exception(
                'Hash of {0} does not match {1}.'.format(url, new_sha256))

        # TODO: rename tar file to sha256 to allow for caching.
        return TarImage(self.tm_env, temp.name)
Ejemplo n.º 22
0
    def test__archive_cleanup(self):
        """Tests cleanup of local logs."""
        # Access protected module _ARCHIVE_LIMIT, _cleanup_archive_dir
        #
        # pylint: disable=W0212
        fs.mkdir_safe(self.app_env.archives_dir)

        # Cleanup does not care about file extensions, it will cleanup
        # oldest file if threshold is exceeded.
        app_finish._ARCHIVE_LIMIT = 20
        file1 = os.path.join(self.app_env.archives_dir, '1')
        with open(file1, 'w+') as f:
            f.write('x' * 10)

        # 10 bytes total: under the 20-byte limit, nothing is removed.
        app_finish._cleanup_archive_dir(self.app_env)
        self.assertTrue(os.path.exists(file1))

        # Backdate file1 so it is the oldest, then add a second file.
        os.utime(file1, (time.time() - 1, time.time() - 1))
        file2 = os.path.join(self.app_env.archives_dir, '2')
        with open(file2, 'w+') as f:
            f.write('x' * 10)

        # 20 bytes total: at (not over) the limit, still nothing removed.
        app_finish._cleanup_archive_dir(self.app_env)
        self.assertTrue(os.path.exists(file1))

        # Growing file2 pushes the total over the limit: the oldest file
        # (file1) is evicted, the newer one survives.
        with open(os.path.join(self.app_env.archives_dir, '2'), 'w+') as f:
            f.write('x' * 15)
        app_finish._cleanup_archive_dir(self.app_env)
        self.assertFalse(os.path.exists(file1))
        self.assertTrue(os.path.exists(file2))
Ejemplo n.º 23
0
def install(package, dst_dir, params, run=None, profile=None):
    """Installs the services.

    Renders the bootstrap templates of *package* (and of the optional
    *profile* extension package) into *dst_dir*, records every installed
    path in ``.install``, and optionally executes *run* afterwards.

    :param package: Name of the bootstrap plugin package to install.
    :param dst_dir: Target installation directory.
    :param params: Template parameters overriding package defaults.
    :param run: Optional command to execute after the install.
    :param profile: Optional profile extension name.
    """
    _LOGGER.info('install: %s - %s, profile: %s', package, dst_dir, profile)

    packages = [package]

    module = plugin_manager.load('treadmill.bootstrap', package)
    extension_module = None
    if profile:
        _LOGGER.info('Installing profile: %s', profile)
        extension_name = '{}.{}'.format(package, profile)
        packages.append(extension_name)

        # A missing profile extension is not fatal: base package only.
        try:
            extension_module = plugin_manager.load('treadmill.bootstrap',
                                                   extension_name)
        except KeyError:
            _LOGGER.info('Extension not defined: %s, profile: %s', package,
                         profile)

    subproc.load_packages(packages, lazy=False)

    # Store resolved aliases
    aliases_path = os.path.join(dst_dir, '.aliases.json')
    aliases = subproc.get_aliases()
    with io.open(aliases_path, 'w') as f_aliases:
        f_aliases.write(json.dumps(aliases))

    # Defaults: base package first, then profile extension overrides.
    defaults = {}
    defaults.update(getattr(module, 'DEFAULTS', {}))

    if extension_module:
        defaults.update(getattr(extension_module, 'DEFAULTS', {}))

    # TODO: this is ugly, error prone and should go away.
    #       aliases should be in default scope, everything else in _args.
    defaults['_alias'] = aliases
    defaults.update(aliases)
    defaults.update(params)

    defaults['aliases_path'] = aliases_path
    os.environ['TREADMILL_ALIASES_PATH'] = defaults['aliases_path']

    # Resolve templated values inside the defaults themselves.
    interpolated = _interpolate(defaults, defaults)

    fs.mkdir_safe(dst_dir)
    with io.open(os.path.join(dst_dir, '.install'), 'w') as rec:

        _install(module, PLATFORM, dst_dir, interpolated, rec=rec)

        if extension_module:
            _install(extension_module,
                     '.'.join([profile, PLATFORM]),
                     dst_dir,
                     interpolated,
                     rec=rec)

    if run:
        _run(run)
Ejemplo n.º 24
0
def _prepare_ldpreload(container_dir, app):
    """Add mandatory ldpreloads to the container environment.
    """
    etc_dir = os.path.join(container_dir, 'overlay', 'etc')
    fs.mkdir_safe(etc_dir)
    new_ldpreload = os.path.join(etc_dir, 'ld.so.preload')

    # Seed from the host's preload file; an absent file is not an error.
    try:
        shutil.copyfile('/etc/ld.so.preload', new_ldpreload)
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
        _LOGGER.info('/etc/ld.so.preload not found, creating empty.')
        utils.touch(new_ldpreload)

    preloads = []
    if app.ephemeral_ports.tcp or app.ephemeral_ports.udp:
        preloads.append(subproc.resolve('treadmill_bind_preload.so'))

    if not preloads:
        return

    _LOGGER.info('Configuring /etc/ld.so.preload: %r', preloads)
    with open(new_ldpreload, 'a') as f:
        f.write('\n'.join(preloads) + '\n')
Ejemplo n.º 25
0
def _install(package, src_dir, dst_dir, params, prefix_len=None, rec=None):
    """Interpolate source directory into target directory with params.

    :param package: Python package whose resources are installed.
    :param src_dir: Resource directory within the package.
    :param dst_dir: Target directory on disk.
    :param params: Parameters for template rendering.
    :param prefix_len: Length of the path prefix stripped from resource
        paths when computing destinations (derived on the first call).
    :param rec: Optional open file where installed paths are recorded
        (directories with a trailing separator).
    """
    package_name = package.__name__
    _LOGGER.info('Installing package: %s %s %s',
                 package_name, src_dir, dst_dir)

    contents = pkg_resources.resource_listdir(package_name, src_dir)

    if prefix_len is None:
        prefix_len = len(src_dir) + 1

    for item in contents:
        resource_path = os.path.join(src_dir, item)
        dst_path = os.path.join(dst_dir, resource_path[prefix_len:])
        if pkg_resources.resource_isdir(package_name,
                                        os.path.join(src_dir, item)):
            fs.mkdir_safe(dst_path)
            # A '.owner' marker inside the created directory transfers
            # ownership to that user; its absence (ENOENT) is fine.
            try:
                with io.open(os.path.join(dst_path, '.owner'), 'r') as f:
                    owner = str(f.read().strip())
                    _LOGGER.info('Setting owner: %r - %r', dst_path, owner)
                    owner_pw = pwd.getpwnam(owner)
                    os.chown(dst_path, owner_pw.pw_uid, owner_pw.pw_gid)
            except (IOError, OSError) as err:
                if err.errno != errno.ENOENT:
                    raise

            if rec:
                rec.write('%s\n' % os.path.join(dst_path, ''))

            install_fn = _install

            # Test if is a scan dir first
            if _is_scan_dir(package, os.path.join(src_dir, item), dst_path):
                _LOGGER.info('Scan dir found: %s => %s', resource_path,
                             dst_path)
                install_fn = _install_scan_dir

            install_fn(
                package,
                os.path.join(src_dir, item),
                dst_dir,
                params,
                prefix_len=prefix_len,
                rec=rec
            )
        else:
            # Skip editor swap files.
            if resource_path.endswith('.swp'):
                continue

            resource_str = pkg_resources.resource_string(
                package_name,
                resource_path
            )

            if rec:
                rec.write('%s\n' % dst_path)
            _update(dst_path, _render(resource_str.decode('utf8'), params))
Ejemplo n.º 26
0
    def __init__(self, root, buckets=spawn.BUCKETS):
        """Create the spawn paths and world-writable manifest directories."""
        self.paths = spawn.SpawnPaths(root, buckets)

        # Manifest dir and its '.tmp' staging area are mode 1777 (sticky,
        # world-writable) so any user can drop manifests.
        for path in (self.paths.manifest_dir,
                     os.path.join(self.paths.manifest_dir, '.tmp')):
            fs.mkdir_safe(path)
            os.chmod(path, 0o1777)
Ejemplo n.º 27
0
 def ensure_exists(self, path):
     """Ensure storage path exists (parent dirs plus the file itself)."""
     full_path = _fpath(self.fsroot, path)
     try:
         fs.mkdir_safe(os.path.dirname(full_path))
         utils.touch(full_path)
     except OSError:
         # Map filesystem failures onto the backend's error type.
         raise backend.ObjectNotFoundError()
Ejemplo n.º 28
0
def create_environ_dir(env_dir, env):
    """Create environment directory for s6-envdir.

    One file per variable; a ``None`` value yields an empty file.
    """
    fs.mkdir_safe(env_dir)

    for name, value in env.items():
        with open(os.path.join(env_dir, name), 'w+') as envfile:
            if value is not None:
                envfile.write(str(value))
Ejemplo n.º 29
0
def _create_sysrun(sys_dir, name, command, down=False):
    """Create system script.

    Sets up the service directory with a 'run' script, a log runner, and
    an optional 'down' marker so the service does not auto-start.
    """
    svc_dir = os.path.join(sys_dir, name)
    fs.mkdir_safe(svc_dir)
    utils.create_script(os.path.join(svc_dir, 'run'),
                        'supervisor.run_sys',
                        cmd=command)
    _create_logrun(svc_dir)
    if down:
        utils.touch(os.path.join(svc_dir, 'down'))
Ejemplo n.º 30
0
 def put(self, path, value):
     """Store object at a given path (serialized as YAML)."""
     full_path = _fpath(self.fsroot, path)
     try:
         fs.mkdir_safe(os.path.dirname(full_path))
         with io.open(full_path, 'w') as node:
             node.write(yaml.dump(value))
     except OSError:
         # Map filesystem failures onto the backend's error type.
         raise backend.ObjectNotFoundError()