Example #1
    @classmethod
    def setUpClass(cls):
        td_ctx = temp_dir()
        cls._shadow = td_ctx.__enter__()
        # NB: This may leak on SystemExit et al
        cls.addClassCleanup(td_ctx.__exit__, None, None, None)

        os.environ['FS_IMAGE_SHADOWED_PATHS_ROOT'] = f'{cls._shadow}'

        lib_ctx = Path.resource(__package__,
                                'librename_shadowed.so',
                                exe=False)
        lib_path = lib_ctx.__enter__()
        # NB: This may leak a tempfile on SystemExit et al
        cls.addClassCleanup(lib_ctx.__exit__, None, None, None)

        lib = ctypes.cdll.LoadLibrary(lib_path)

        cls._get_shadowed_original = lib.get_shadowed_original
        cls._get_shadowed_original.restype = ctypes.c_char_p
        cls._get_shadowed_original.argtypes = [ctypes.c_char_p]

        cls._get_shadowed_rename_dest = lib.get_shadowed_rename_dest
        cls._get_shadowed_rename_dest.restype = ctypes.c_char_p
        cls._get_shadowed_rename_dest.argtypes = [
            ctypes.c_char_p, ctypes.c_char_p
        ]

        cls._rename = lib.rename
        cls._rename.restype = ctypes.c_int
        cls._rename.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
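
On Python 3.11+, the same setup can avoid calling `__enter__`/`__exit__` by hand. A minimal sketch, assuming `temp_dir` and `Path.resource` behave as above, using `unittest.TestCase.enterClassContext`, which enters a context manager and registers its exit as a class cleanup:

    @classmethod
    def setUpClass(cls):
        # enterClassContext enters each context manager and schedules its
        # __exit__ via addClassCleanup; the SystemExit caveat above still applies.
        cls._shadow = cls.enterClassContext(temp_dir())
        lib_path = cls.enterClassContext(
            Path.resource(__package__, 'librename_shadowed.so', exe=False))
        lib = ctypes.cdll.LoadLibrary(lib_path)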
Example #2
def populate_versionlock_conf(
    yum_dnf: YumDnf,
    out_dir: Path,
    install_dir: Path,
):
    with create_ro(out_dir / 'versionlock.conf', 'w') as outf:
        outf.write(
            textwrap.dedent(f'''\
            [main]
            enabled = 1
            locklist = {install_dir.decode()}/versionlock.list
        '''))

    # Write an empty lock-list. This will be bind-mounted in at runtime.
    with create_ro(out_dir / 'versionlock.list', 'w'):
        pass

    # Side-load the appropriate versionlock plugin; we currently don't have
    # a good way to install this via an RPM.
    with Path.resource(
        __package__, f'{yum_dnf.value}_versionlock.gz', exe=False,
    ) as p, \
            gzip.open(p) as rf, \
            create_ro(out_dir / 'versionlock.py', 'wb') as wf:
        wf.write(rf.read())
Example #3
def gold_demo_sendstreams():
    with Path.resource(
            __package__,
            'gold_demo_sendstreams.pickle',
            exe=False,
    ) as pickle_path, open(pickle_path, 'rb') as f:
        return pickle.load(f)
Example #4
def build_rpm(package_dir: Path, arch: str, rpm: Rpm,
              gpg_signing_key: str) -> Path:
    'Returns the filename of the built RPM.'
    with temp_dir(dir=package_dir) as td, tempfile.NamedTemporaryFile() as tf, \
            Path.resource(__package__, 'busybox', exe=True) as busybox_path:
        tf.write(rpm.spec(busybox_path).encode())
        tf.flush()

        work_dir = Path(generate_work_dir())

        format_kwargs = {
            "quoted_arch": shlex.quote(arch),
            "quoted_buildroot": Path(work_dir / 'build').shell_quote(),
            "quoted_home": Path(work_dir / 'home').shell_quote(),
            "quoted_spec_file": shlex.quote(tf.name),
            "quoted_work_dir": work_dir.shell_quote(),
            # We get the uid of the current user so that we can chown the
            # work_dir *inside* the running container.  The nspawn'd build
            # appliance container needs to run as root so that it can mkdir
            # the `work_dir` which exists at /.  If we don't chown the
            # resulting tree that `rpmbuild` creates, the rename below would
            # fail.
            "current_uid": os.getuid(),
        }

        opts = new_nspawn_opts(
            cmd=[
                'sh',
                '-uec',
                '''\
                /usr/bin/rpmbuild \
                -bb \
                --target {quoted_arch} \
                --buildroot {quoted_buildroot} \
                {quoted_spec_file} \
                && chown -R {current_uid} {quoted_work_dir} \
                '''.format(**format_kwargs),
            ],
            layer=_build_appliance(),
            bindmount_ro=[(tf.name, tf.name), (busybox_path, busybox_path)],
            bindmount_rw=[(td, work_dir)],
            user=pwd.getpwnam('root'),
            setenv=['HOME={quoted_home}'.format(**format_kwargs)],
        )
        run_non_booted_nspawn(opts, PopenArgs())

        # `rpmbuild` has a non-configurable output layout, so
        # we'll move the resulting rpm into our package dir.
        rpms_dir = td / 'home/rpmbuild/RPMS' / arch
        rpm_name, = rpms_dir.listdir()
        os.rename(rpms_dir / rpm_name, package_dir / rpm_name)
        sign_rpm(package_dir / rpm_name, gpg_signing_key)
        return rpm_name
Example #5
def get_volume_for_current_repo(min_free_bytes, artifacts_dir):
    '''
    Multiple repos need to be able to concurrently build images on the same
    host.  The cleanest way to achieve such isolation is to supply each repo
    with its own volume, which will store the repo's image build outputs.

    It is easiest to back this volume with a loop device. The appropriate
    size of the loop device depends on the expected size of the target being
    built.  We address this by ensuring that, prior to every build, the
    volume has at least a specified amount of free space.  The default in
    `image_layer` is large enough for most builds, but really huge
    `image_layer` targets can further increase their requested
    `min_free_bytes`.

    Image-build tooling **must never** access paths in this volume without
    going through this function.  Otherwise, the volume will not get
    remounted correctly if the host containing the repo is rebooted.

    PRE-CONDITION: `artifacts_dir` exists and is writable by `root`.
    '''
    if not os.path.exists(artifacts_dir):  # pragma: no cover
        raise RuntimeError(f'{artifacts_dir} must exist')

    volume_dir = os.path.join(artifacts_dir, VOLUME_DIR)
    with Path.resource(__package__, 'set_up_volume.sh', exe=True) as binary:
        subprocess.check_call([
            # While Buck probably does not call this concurrently under normal
            # circumstances, the worst-case outcome is that we lose or corrupt
            # the whole build cache, so add some locking to be on the safe side.
            'flock',
            os.path.join(
                artifacts_dir, '.lock.set_up_volume.sh.never.rm.or.mv',
            ),
            'sudo',
            binary,
            str(int(min_free_bytes)),  # Accepts floats & ints
            os.path.join(artifacts_dir, IMAGE_FILE),
            volume_dir,
        ])
    # We prefer to have the volume owned by the repo user, instead of root:
    #  - The trusted repo user has to be able to access the built
    #    subvolumes, but nobody else should be able to (they might contain
    #    setuid binaries & similar).  Thus, subvols ought to have wrapper
    #    directories owned by the user, with mode 0700.
    #  - This reduces the number of places we have to `sudo` to create
    #    directories inside the subvolume.
    subprocess.check_call([
        'sudo', 'chown', f'{os.getuid()}:{os.getgid()}', volume_dir,
    ])
    return volume_dir
Example #6
    def mock(*args, **kwargs):
        with temp_dir() as td, Path.resource(
            __package__, 'mock-s3-cli', exe=True,
        ) as mock_s3_cli_path:
            # We mock `_path_for_storage_id` such that the base dir
            # is always going to be the TempDir we created
            def _mock_path_for_storage_id(sid):
                return (td / sid).decode()

            # Instead of calls to `aws s3`, we want to call `mock-s3-cli`
            with unittest.mock.patch.object(
                S3Storage, '_base_cmd',
                return_value=[mock_s3_cli_path],
            ), unittest.mock.patch.object(
                S3Storage, '_path_for_storage_id',
                side_effect=_mock_path_for_storage_id,
            ):
                return fn(*args, **kwargs)
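
The body above references `fn`, so it is presumably the inner function of a decorator. A plausible enclosing wrapper is sketched below; the decorator name `mock_s3_storage` is an assumption, not the library's actual API:

def mock_s3_storage(fn):  # hypothetical name for the enclosing decorator
    def mock(*args, **kwargs):
        ...  # body as in Example #6
    return mock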
Example #7
def get_test_signing_key() -> str:
    with Path.resource(__package__, 'signing_key', exe=False) as keypath:
        with open(keypath, 'r') as keyfile:
            return keyfile.read()
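
For comparison, a small text resource like this can also be read with the standard library. A minimal stdlib-only sketch (assuming Python 3.9+ and that `signing_key` ships as package data), using `importlib.resources` instead of `Path.resource`:

from importlib import resources

def get_test_signing_key() -> str:
    # Read the packaged signing key directly, without materializing a path.
    return resources.files(__package__).joinpath('signing_key').read_text()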