def main(argv, from_file: BytesIO, to_file: BytesIO):
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    Storage.add_argparse_arg(
        parser, '--storage', required=True,
        help='JSON blob for creating a Storage instance.',
    )
    parser.add_argument('--debug', action='store_true', help='Log more?')
    subparsers = parser.add_subparsers(help='Sub-commands have help.')

    parser_get = subparsers.add_parser('get', help='Download blob to stdout')
    parser_get.add_argument('storage_id', help='String of the form KEY:ID')
    parser_get.set_defaults(to_file=to_file)
    parser_get.set_defaults(func=get)

    parser_put = subparsers.add_parser(
        'put', help='Write a blob from stdin, print its ID to stdout',
    )
    parser_put.set_defaults(from_file=from_file)
    parser_put.set_defaults(to_file=to_file)  # For the storage ID
    parser_put.set_defaults(func=put)

    args = Path.parse_args(parser, argv)
    init_logging(debug=args.debug)
    args.func(args)
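# A hypothetical usage sketch for the CLI above: the storage JSON keys
# depend on your `Storage` implementation, and `put`/`get` are the
# handlers wired in via `set_defaults`.
from io import BytesIO

out = BytesIO()
main(
    ['--storage', '{"kind": "filesystem", "base_dir": "/tmp/blobs"}', 'put'],
    from_file=BytesIO(b'some blob'),
    to_file=out,
)
print(out.getvalue())  # The new blob's storage ID, of the form KEY:ID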
def main(
    argv, get_db_info_factory, *, how_to_generate, overview_doc, options_doc,
):
    '''
    Implements the "update DB" CLI using your custom logic for obtaining
    `DbInfo` objects for package:tag pairs.

    `get_db_info_factory` is a context manager so that it can establish a
    single connection (or pool) to an external service, and reuse it for
    all `GetDbInfoFn` queries.

    To implement the "identity" example from the `GetDbInfoFn` docblock
    above:

        main(
            sys.argv[1:],
            contextlib.nullcontext(lambda _pkg, _tag, opts: opts),
            how_to_generate='buck run //your-project:manually-update-db',
            overview_doc='',
            options_doc='OPTIONS are written directly into the DB as '
                'the "how to fetch" info for this PACKAGE/TAG.',
        )

    In reality, you would want your `GetDbInfoFn` to do some schema
    validation, and to check that the "how to fetch" info does actually
    refer to a valid package in your package store.
    '''
    args = _parse_args(
        argv, overview_doc=overview_doc, options_doc=options_doc,
    )
    init_logging(debug=args.debug)
    db, write_db_in_same_format = _read_db_and_get_writer(args.db)
    write_db = (
        _FORMAT_NAME_TO_WRITER[args.out_format] if args.out_format
        else write_db_in_same_format
    )
    with get_db_info_factory as get_db_info_fn:
        write_db(
            _get_updated_db(
                existing_db=db,
                update_existing=args.update_existing,
                create_items=_parse_updates('create', args.create),
                replace_items=_parse_updates('replace', args.replace),
                get_db_info_fn=get_db_info_fn,
            ),
            args.out_db or args.db,
            how_to_generate,
        )
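# A sketch of the schema validation the docstring recommends -- the
# "address" key and the factory name are made up for illustration. Note
# that `main` takes an already-constructed context manager, so you would
# pass `_validating_db_info_factory()`, not the function itself.
import contextlib

@contextlib.contextmanager
def _validating_db_info_factory():
    # Establish a connection (or pool) to your package store here; it is
    # reused for every query, and torn down when `main`'s `with` exits.
    def get_db_info(pkg, tag, options):
        assert 'address' in options, f'{pkg}:{tag} options need "address"'
        return options  # Stored in the DB as the "how to fetch" info
    yield get_db_info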
@contextmanager
def _set_up_run_cli(argv: Iterable[str]) -> Iterator[_CliSetup]:
    args = _parse_cli_args(argv, allow_debug_only_opts=True)
    init_logging(debug=args.opts.debug_only_opts.debug)
    with (
        # By default, we send the `systemd` console to `stderr`.
        open(args.append_boot_console, 'ab')
        if args.boot and args.append_boot_console
        else nullcontext()
    ) as boot_console:
        yield _CliSetup(
            boot=args.boot,
            boot_console=boot_console,
            opts=args.opts,
            popen_wrappers=[
                functools.partial(
                    inject_yum_dnf_versionlock, args.snapshot_to_versionlock,
                ),
                functools.partial(
                    inject_repo_servers, args.serve_rpm_snapshots,
                ),
            ] if args.serve_rpm_snapshots else [],
        )
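# Assumed usage of the helper above as a context manager (the field
# names come from the `_CliSetup` constructed at the `yield`):
import sys

with _set_up_run_cli(sys.argv[1:]) as cli_setup:
    # `popen_wrappers` chain versionlock and repo-server injection around
    # the container `Popen` whenever RPM snapshots are being served.
    for wrapper in cli_setup.popen_wrappers:
        ...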
def snapshot_repos_from_args(argv: List[str]):
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    add_standard_args(parser)
    parser.add_argument(
        '--dnf-conf', type=Path.from_argparse,
        help='Snapshot this `dnf.conf`, and all the repos that it lists. '
            'Can be set together with `--yum-conf`, in which case repos '
            'from both configs must be identical. At least one of these '
            '`--*-conf` options is required.',
    )
    parser.add_argument(
        '--yum-conf', type=Path.from_argparse,
        help='Snapshot this `yum.conf`; see help for `--dnf-conf`',
    )
    parser.add_argument(
        '--exclude', action='append', default=[],
        help='Repos to be excluded from the snapshot.',
    )
    universe_warning = (
        'Described in the `repo_db.py` docblock. In production, it is '
        'important for the universe name to match existing conventions -- '
        'DO NOT JUST MAKE ONE UP.'
    )
    universe_group = parser.add_mutually_exclusive_group(required=True)
    universe_group.add_argument(
        '--repo-to-universe-json', type=Path.from_argparse,
        help='JSON dict of repo name to universe name. ' + universe_warning,
    )
    universe_group.add_argument(
        '--one-universe-for-all-repos',
        help='Snapshot all repos under this universe name. '
            + universe_warning,
    )
    args = Path.parse_args(parser, argv)

    init_logging(debug=args.debug)

    if args.one_universe_for_all_repos:
        def repo_to_universe(_repo):
            return args.one_universe_for_all_repos
    elif args.repo_to_universe_json:
        with open(args.repo_to_universe_json) as ru_json:
            repo_to_universe_json = json.load(ru_json)

        def repo_to_universe(repo):
            return repo_to_universe_json[repo.name]
    else:  # pragma: no cover
        raise AssertionError(args)

    with populate_temp_dir_and_rename(args.snapshot_dir, overwrite=True) as td:
        snapshot_repos(
            dest=td,
            repo_to_universe=repo_to_universe,
            yum_conf_content=args.yum_conf.read_text()
                if args.yum_conf else None,
            dnf_conf_content=args.dnf_conf.read_text()
                if args.dnf_conf else None,
            db_cfg=args.db,
            storage_cfg=args.storage,
            rpm_shard=args.rpm_shard,
            gpg_key_whitelist_dir=args.gpg_key_whitelist_dir,
            exclude=frozenset(args.exclude),
            threads=args.threads,
        )
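# A hypothetical invocation -- the paths and universe name are made up,
# and `--snapshot-dir`, `--db`, `--storage`, etc. come from
# `add_standard_args`:
snapshot_repos_from_args([
    '--dnf-conf', '/etc/dnf/dnf.conf',
    '--one-universe-for-all-repos', 'centos8',
    '--exclude', 'debuginfo',
    # ... plus the standard args: --snapshot-dir, --db, --storage, ...
])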
import tempfile

from contextlib import contextmanager
from unittest import mock

from fs_image.fs_utils import create_ro, Path, temp_dir
from fs_image.common import init_logging
from fs_image.rpm.find_snapshot import snapshot_install_dir
from fs_image.rpm.yum_dnf_conf import YumDnf

from ..common import has_yum, yum_is_dnf
from .. import yum_dnf_from_snapshot

_INSTALL_ARGS = ['install', '--assumeyes', 'rpm-test-carrot', 'rpm-test-milk']
_SNAPSHOT_DIR = snapshot_install_dir('//fs_image/rpm:repo-snapshot-for-tests')

init_logging()


class YumFromSnapshotTestImpl:

    @contextmanager
    def _install(self, *, protected_paths, install_args=None):
        if install_args is None:
            install_args = _INSTALL_ARGS
        install_root = Path(tempfile.mkdtemp())
        try:
            # IMAGE_ROOT/meta/ is always required since it's always protected
            for p in set(protected_paths) | {'meta/'}:
                if p.endswith('/'):
def snapshot_repo(argv):
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    add_standard_args(parser)
    parser.add_argument(
        '--repo-universe', required=True,
        help='This is explained in the `repo_db.py` docblock. In '
            'production, it is important for the universe name to match '
            'existing conventions -- DO NOT JUST MAKE ONE UP.',
    )
    parser.add_argument(
        '--repo-name', required=True,
        help="Used to distinguish this repo's metadata from others' in "
            'the DB.',
    )
    parser.add_argument(
        '--repo-url', required=True,
        help='The base URL of the repo -- the part before '
            'repodata/repomd.xml. Supported protocols include file://, '
            'https://, and http://.',
    )
    parser.add_argument(
        '--gpg-url', required=True, action='append',
        help='(May be repeated) Yum will need to import this key to '
            'gpgcheck the repo. To avoid placing blind trust in these '
            'keys (e.g. in case this is an HTTP URL), they are verified '
            'against `--gpg-key-whitelist-dir`',
    )
    args = Path.parse_args(parser, argv)

    init_logging(debug=args.debug)

    with populate_temp_dir_and_rename(
        args.snapshot_dir, overwrite=True,
    ) as td, RepoSnapshot.add_sqlite_to_storage(
        Storage.from_json(args.storage), td,
    ) as sqlite_db:
        sizer = RepoSizer()
        snapshot_gpg_keys(
            key_urls=args.gpg_url,
            whitelist_dir=args.gpg_key_whitelist_dir,
            snapshot_dir=td,
        )
        repo = YumDnfConfRepo(
            name=args.repo_name,
            base_url=args.repo_url,
            gpg_key_urls=args.gpg_url,
        )
        _, snapshot = next(download_repos(
            repos_and_universes=[(repo, args.repo_universe)],
            cfg=DownloadConfig(
                db_cfg=args.db,
                storage_cfg=args.storage,
                rpm_shard=args.rpm_shard,
                threads=args.threads,
            ),
        ))
        snapshot.visit(sizer).to_sqlite(args.repo_name, sqlite_db)
        log.info(sizer.get_report(f'This {args.rpm_shard} snapshot weighs'))
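# A hypothetical single-repo invocation; the names and URLs are
# illustrative, and the standard args from `add_standard_args` are elided:
snapshot_repo([
    '--repo-universe', 'centos8',
    '--repo-name', 'os-base',
    '--repo-url', 'https://example.com/centos8/os-base',
    '--gpg-url', 'https://example.com/RPM-GPG-KEY-example',
    # ... plus --snapshot-dir, --db, --storage, --gpg-key-whitelist-dir, ...
])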
    # We cannot validate or sort `ImageItem`s until the phases are
    # materialized, since the items may depend on the output of the phases.
    for item in dep_graph.gen_dependency_order_items(PhasesProvideItem(
        from_target=args.child_layer_target,
        subvol=subvol,
    )):
        item.build(subvol, layer_opts)

    # Build artifacts should never change. Run this BEFORE the exit_stack
    # cleanup to enforce that the cleanup does not touch the image.
    subvol.set_readonly(True)

    try:
        return SubvolumeOnDisk.from_subvolume_path(
            # Converting to a path here does not seem too risky since this
            # class shouldn't have a reason to follow symlinks in the subvol.
            subvol.path().decode(),
            args.subvolumes_dir,
        )
    # The complexity of covering this is high, but the only thing that can
    # go wrong is a typo in the f-string.
    except Exception as ex:  # pragma: no cover
        raise RuntimeError(
            f'Failed to serialize subvolume {subvol.path()}'
        ) from ex


if __name__ == '__main__':  # pragma: no cover
    from fs_image.common import init_logging

    args = parse_args(sys.argv[1:])
    init_logging(debug=args.debug)
    build_image(args).to_json_file(sys.stdout)
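# A sketch of consuming the compiler's output: `to_json_file` writes one
# `SubvolumeOnDisk` JSON blob to stdout. The module path and flags below
# are assumptions for illustration only.
import json
import subprocess
import sys

compiler_args = ['--subvolumes-dir', '/tmp/subvols']  # illustrative flags
proc = subprocess.run(
    [sys.executable, '-m', 'fs_image.compiler', *compiler_args],
    stdout=subprocess.PIPE,
    check=True,
)
subvol_info = json.loads(proc.stdout)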