def remove(
    self,
    x: Union[ValidImage, List[ValidImage]],
    force: bool = False,
    prune: bool = True,
):
    """Delete one or more Docker images from the local daemon.

    # Arguments
        x: A single image or a list of images to remove. Tags and
            `python_on_whales.Image` objects are both accepted.
        force: Force removal of the image
        prune: Delete untagged parents

    # Raises
        `python_on_whales.exceptions.NoSuchImage` if one of the images does
            not exists.
    """
    cmd = self.docker_cmd + ["image", "remove"]
    cmd.add_flag("--force", force)
    # the CLI flag is the negation of the `prune` argument
    cmd.add_flag("--no-prune", not prune)
    cmd += to_list(x)
    run(cmd)
def ps(
    self, x: Union[ValidNode, List[ValidNode]] = []
) -> List[python_on_whales.components.task.cli_wrapper.Task]:
    """Returns the list of swarm tasks running on one or more nodes.

    ```python
    from python_on_whales import docker

    tasks = docker.node.ps("my-node-name")
    print(tasks[0].desired_state)
    # running
    ```

    # Arguments
        x: One or more nodes (can be id, name or `python_on_whales.Node` object.).
            If the argument is not provided, it defaults to the current node.

    # Returns
        `List[python_on_whales.Task]`
    """
    cmd = self.docker_cmd + ["node", "ps", "--quiet", "--no-trunc"]
    cmd += to_list(x)
    # --quiet prints one task id per line
    tasks = []
    for task_id in run(cmd).splitlines():
        tasks.append(
            python_on_whales.components.task.cli_wrapper.Task(
                self.client_config, task_id, is_immutable_id=True
            )
        )
    return tasks
def push(self, x: Union[str, List[str]], quiet: bool = False):
    """Push a tag or a repository to a registry

    Alias: `docker.push(...)`

    # Arguments
        x: Tag(s) or repo(s) to push. Can be a string or a list of strings.
            If it's a list of string, python-on-whales will push all the images with
            multiple threads. The progress bars might look strange as multiple
            processes are drawing on the terminal at the same time.
        quiet: If you don't want to see the progress bars.

    # Raises
        `python_on_whales.exceptions.NoSuchImage` if one of the images does
            not exists.
    """
    tags = to_list(x)
    # inspect first so that a missing image raises NoSuchImage right away
    self.inspect(tags)
    if not tags:
        return
    if len(tags) == 1:
        self._push_single_tag(tags[0], quiet=quiet)
        return
    # two or more tags: push them in parallel with a small thread pool
    pool = ThreadPool(4)
    pool.starmap(self._push_single_tag, self._generate_args_push_pull(tags, quiet))
    pool.close()
    pool.join()
def ps(
    self, x: Union[ValidService, List[ValidService]]
) -> List[python_on_whales.components.task.Task]:
    """Returns the list of swarm tasks associated with this service.

    You can pass multiple services at once at this function.

    ```python
    from python_on_whales import docker

    tasks = docker.service.ps("my-service-name")
    print(tasks[0].desired_state)
    # running
    ```

    # Arguments
        x: One or more services (can be id, name or `python_on_whales.Service` object.)

    # Returns
        `List[python_on_whales.Task]`
    """
    cmd = self.docker_cmd + ["service", "ps", "--quiet", "--no-trunc"]
    cmd += to_list(x)
    # --quiet prints one task id per line
    tasks = []
    for task_id in run(cmd).splitlines():
        tasks.append(
            python_on_whales.components.task.Task(
                self.client_config, task_id, is_immutable_id=True
            )
        )
    return tasks
def create(
    self,
    services: Union[str, List[str]] = [],
    build: bool = False,
    force_recreate: bool = False,
    no_build: bool = False,
    no_recreate=False,
):
    """Creates containers for a service.

    # Arguments
        build: Build images before starting containers.
        force_recreate: Recreate containers even if their configuration and
            image haven't changed.
        no_build: Don't build an image, even if it's missing.
        no_recreate: If containers already exist, don't recreate them.
            Incompatible with `force_recreate=True`.
    """
    cmd = self.docker_compose_cmd + ["create"]
    for flag, value in [
        ("--build", build),
        ("--force-recreate", force_recreate),
        ("--no-build", no_build),
        ("--no-recreate", no_recreate),
    ]:
        cmd.add_flag(flag, value)
    cmd += to_list(services)
    run(cmd, capture_stdout=False)
def save(
    self,
    images: Union[ValidImage, List[ValidImage]],
    output: Optional[ValidPath] = None,
) -> Optional[Iterator[bytes]]:
    """Save one or more images to a tar archive. Returns a stream if output is `None`

    Alias: `docker.save(...)`

    # Arguments
        images: Single docker image or list of docker images to save
        output: Path of the tar archive to produce. If `output` is None, a generator
            of bytes is produced. It can be used to stream those bytes elsewhere,
            to another Docker daemon for example.

    # Returns
        `Optional[Iterator[bytes]]`. If output is a path, nothing is returned.

    # Raises
        `python_on_whales.exceptions.NoSuchImage` if one of the images does
            not exists.

    # Example

    An example of transfer of an image from a local Docker daemon to a remote
    Docker daemon. We assume that the remote machine has an ssh access:

    ```python
    from python_on_whales import DockerClient

    local_docker = DockerClient()
    remote_docker = DockerClient(host="ssh://[email protected]")

    image_name = "busybox:1"
    local_docker.pull(image_name)
    bytes_iterator = local_docker.image.save(image_name)

    remote_docker.image.load(bytes_iterator)
    ```

    Of course the best solution is to use a registry to transfer image but
    it's a cool example nonetheless.
    """
    images = to_list(images)
    # inspect first: a missing image raises NoSuchImage before the CLI runs
    self.inspect(images)

    cmd = self.docker_cmd + ["image", "save"]
    if output is None:
        # no destination file: stream the tar archive bytes back to the caller
        return self._save_generator(cmd + images)
    cmd += ["--output", str(output)]
    cmd += images
    run(cmd)
def start(self, services: Union[str, List[str]] = []):
    """Start the specified services.

    # Arguments
        services: The names of one or more services to start
    """
    run(self.docker_compose_cmd + ["start"] + to_list(services))
def promote(self, x: Union[ValidNode, List[ValidNode]]):
    """Promote one or more nodes to manager in the swarm

    # Arguments
        x: One or a list of nodes.
    """
    cmd = self.docker_cmd + ["node", "promote"]
    for node in to_list(x):
        cmd.append(node)
    run(cmd)
def remove(self, x: Union[ValidSecret, List[ValidSecret]]) -> None:
    """Removes one or more secrets

    # Arguments
        x: One or more secrets. Name, ids or `python_on_whales.Secret` objects
            are valid inputs.
    """
    cmd = self.docker_cmd + ["secret", "remove"]
    cmd += to_list(x)
    run(cmd)
def remove(self, x: Union[ValidStack, List[ValidStack]]) -> None:
    """Removes one or more stacks.

    # Arguments
        x: One or more stacks
    """
    cmd = self.docker_cmd + ["stack", "remove"]
    for stack in to_list(x):
        cmd.append(stack)
    run(cmd)
def remove(self, x: Union[ValidConfig, List[ValidConfig]]):
    """Remove one or more configs.

    # Arguments
        x: One or a list of configs. Valid values are the id of the config or
            a `python_on_whales.Config` object.
    """
    run(self.docker_cmd + ["config", "rm"] + to_list(x))
def remove(self, networks: Union[ValidNetwork, List[ValidNetwork]]):
    """Removes a Docker network

    # Arguments
        networks: One or more networks.
    """
    if networks == []:
        # nothing to remove; running `docker network remove` with no
        # argument would make the CLI error out
        return
    full_cmd = self.docker_cmd + ["network", "remove"]
    full_cmd += to_list(networks)
    run(full_cmd)
def remove(self, networks: Union[ValidNetwork, List[ValidNetwork]]):
    """Removes a Docker network

    # Arguments
        networks: One or more networks.
    """
    if networks == []:
        # nothing to do: the CLI errors out when given zero networks
        return
    cmd = self.docker_cmd + ["network", "remove"]
    for network in to_list(networks):
        cmd.append(network)
    run(cmd)
def remove(self, services: Union[ValidService, List[ValidService]]) -> None:
    """Removes a service

    # Arguments
        services: One or a list of services to remove.

    # Raises
        `python_on_whales.exceptions.NoSuchService` if one of the services
            doesn't exists.
    """
    full_cmd = self.docker_cmd + ["service", "remove"]
    for service in to_list(services):
        full_cmd.append(service)
    run(full_cmd)
def remove(self, x: Union[ValidVolume, List[ValidVolume]]):
    """Removes one or more volumes

    # Arguments
        x: A volume or a list of volumes.
    """
    cmd = self.docker_cmd + ["volume", "remove"]
    # volumes may be `python_on_whales.Volume` objects: stringify each one
    cmd += [str(volume) for volume in to_list(x)]
    run(cmd)
def remove(self, x: Union[ValidContext, List[ValidContext]], force: bool = False):
    """Removes one or more contexts

    # Arguments
        x: One or more contexts
        force: Force the removal of this context
    """
    cmd = self.docker_cmd + ["context", "remove"]
    cmd.add_flag("--force", force)
    for context in to_list(x):
        cmd.append(context)
    run(cmd)
def remove(self, x: Union[ValidPlugin, List[ValidPlugin]], force: bool = False) -> None:
    """Removes one or more plugins

    # Arguments
        x: One or more plugins to remove.
        force: Force the removal of this plugin.
    """
    # docstring fixed: it previously documented a non-existent `plugin`
    # parameter; the actual parameter is `x`.
    full_cmd = self.docker_cmd + ["plugin", "remove"]
    full_cmd.add_flag("--force", force)
    full_cmd += to_list(x)
    run(full_cmd)
def remove(self, x: Union[ValidNode, List[ValidNode]], force: bool = False):
    """Remove one or more nodes from the swarm

    # Arguments
        x: One node or a list of nodes. You can use the id or the hostname of
            a node. You can also use a `python_on_whales.Node`.
        force: Force remove a node from the swarm
    """
    cmd = self.docker_cmd + ["node", "remove"]
    cmd.add_flag("--force", force)
    for node in to_list(x):
        cmd.append(node)
    run(cmd)
def kill(self, services: Union[str, List[str]] = [], signal: Optional[str] = None):
    """Kills the container(s) of a service

    # Arguments
        services: One or more service(s) to kill
        signal: the signal to send to the container. Default is `"SIGKILL"`
    """
    cmd = self.docker_compose_cmd + ["kill"]
    cmd.add_simple_arg("--signal", signal)
    cmd += to_list(services)
    run(cmd)
def logs(
    self,
    services: Union[str, List[str]] = [],
    tail: Optional[str] = None,
    follow: bool = False,
    no_log_prefix: bool = False,
    timestamps: bool = False,
    since: Optional[str] = None,
    until: Optional[str] = None,
    stream: bool = False,
):
    """View output from containers

    # Arguments
        services: One or more service(s) to view
        tail: Number of lines to show from the end of the logs for each
            container. (default "all")
        follow: Follow log output ***WARNING***: With this option,
            `docker.compose.logs()` will not return at all. Use it exclusively with
            `stream=True`. You can loop on the logs but the loop will never end.
        no_log_prefix: Don't print prefix in logs
        timestamps: Show timestamps
        since: Show logs since timestamp (e.g. 2013-01-02T13:23:37Z) or
            relative (e.g. 42m for 42 minutes)
        until: Show logs before a timestamp (e.g. 2013-01-02T13:23:37Z) or
            relative (e.g. 42m for 42 minutes)
        stream: Similar to the `stream` argument of `docker.run()`.
            This function will then returns and iterator that will yield a
            tuple `(source, content)` with `source` being `"stderr"` or
            `"stdout"`. `content` is the content of the line as bytes. Take
            a look at [the user guide](https://gabrieldemarmiesse.github.io/python-on-whales/user_guide/docker_run/#stream-the-output)
            to have an example of the output.

    # Returns
        `str` if `stream=False` (the default), `Iterable[Tuple[str, bytes]]`
        if `stream=True`.
    """
    cmd = self.docker_compose_cmd + ["logs", "--no-color"]
    cmd.add_simple_arg("--tail", tail)
    cmd.add_flag("--follow", follow)
    cmd.add_flag("--no-log-prefix", no_log_prefix)
    cmd.add_flag("--timestamps", timestamps)
    cmd.add_simple_arg("--since", since)
    cmd.add_simple_arg("--until", until)
    cmd += to_list(services)
    output = stream_stdout_and_stderr(cmd)
    if stream:
        return output
    # concatenate both stdout and stderr chunks into a single string
    return "".join(chunk.decode() for _source, chunk in output)
def remove(self, services: Union[ValidService, List[ValidService]]) -> None:
    """Removes a service

    # Arguments
        services: One or a list of services to remove.

    # Raises
        `python_on_whales.exceptions.NoSuchService` if one of the services
            doesn't exists.
    """
    cmd = self.docker_cmd + ["service", "remove"]
    cmd += to_list(services)
    run(cmd)
def stop(
    self,
    services: Union[str, List[str]] = [],
    timeout: Union[int, timedelta, None] = None,
):
    """Stop services

    # Arguments
        services: The names of one or more services to stop (str or list of str)
        timeout: Number of seconds or timedelta (will be converted to seconds).
            Specify a shutdown timeout. Default is 10s.
    """
    cmd = self.docker_compose_cmd + ["stop"]
    if isinstance(timeout, timedelta):
        # the CLI only understands whole seconds
        timeout = int(timeout.total_seconds())
    cmd.add_simple_arg("--timeout", timeout)
    cmd += to_list(services)
    run(cmd)
def restart(
    self,
    services: Union[str, List[str]] = [],
    timeout: Union[int, timedelta, None] = None,
):
    """Restart containers

    # Arguments
        services: The names of one or more services to restart (str or list of str)
        timeout: The shutdown timeout (`int` are interpreted as seconds).
            `None` means the CLI default value (10s).
            See [the docker stop docs](https://docs.docker.com/engine/reference/commandline/stop/)
            for more details about this argument.
    """
    if isinstance(timeout, timedelta):
        # the CLI only understands whole seconds
        timeout = int(timeout.total_seconds())
    cmd = self.docker_compose_cmd + ["restart"]
    cmd.add_simple_arg("--timeout", timeout)
    cmd += to_list(services)
    run(cmd)
def rm(
    self,
    services: Union[str, List[str]] = [],
    stop: bool = False,
    volumes: bool = False,
):
    """Removes stopped service containers

    By default, anonymous volumes attached to containers will not be removed. You
    can override this with `volumes=True`.

    Any data which is not in a volume will be lost.

    # Arguments
        services: The names of one or more services to remove (str or list of str)
        stop: Stop the containers, if required, before removing
        volumes: Remove any anonymous volumes attached to containers
    """
    # --force is always passed so the CLI never prompts for confirmation
    cmd = self.docker_compose_cmd + ["rm", "--force"]
    for flag, value in (("--stop", stop), ("--volumes", volumes)):
        cmd.add_flag(flag, value)
    cmd += to_list(services)
    run(cmd)
def create(
    self,
    image: str,
    command: Union[str, List[str], None],
):
    """Creates a Docker swarm service.

    Consider using 'docker stack deploy' instead as it's idempotent and
    easier to read for complex applications.
    docker stack deploy is basically docker compose for swarm clusters.

    # Arguments:
        image: The image to use as the base for the service.
        command: The command to execute in the container(s).
    """
    cmd = self.docker_cmd + ["service", "create", "--quiet", image]
    if command is not None:
        cmd += to_list(command)
    # --quiet makes the CLI print only the created service id
    return Service(self.client_config, run(cmd), is_immutable_id=True)
def build(
    self,
    context_path: ValidPath,
    add_hosts: Dict[str, str] = {},
    allow: List[str] = [],
    build_args: Dict[str, str] = {},
    builder: Optional[ValidBuilder] = None,
    cache: bool = True,
    cache_from: Union[str, Dict[str, str], None] = None,
    cache_to: Union[str, Dict[str, str], None] = None,
    file: Optional[ValidPath] = None,
    labels: Dict[str, str] = {},
    load: bool = False,
    network: Optional[str] = None,
    output: Dict[str, str] = {},
    platforms: Optional[List[str]] = None,
    progress: Union[str, bool] = "auto",
    pull: bool = False,
    push: bool = False,
    secrets: Union[str, List[str]] = [],
    ssh: Optional[str] = None,
    tags: Union[str, List[str]] = [],
    target: Optional[str] = None,
) -> Optional[python_on_whales.components.image.Image]:
    """Build a Docker image with builkit as backend.

    Alias: `docker.build(...)`

    A `python_on_whales.Image` is returned, even when using multiple tags.
    That is because it will produce a single image with multiple tags.
    If no image is loaded into the Docker daemon (if `push=True` for ex),
    then `None` is returned.

    # Arguments
        context_path: The path of the build context.
        add_hosts: Hosts to add. `add_hosts={"my_host1": "192.168.32.35"}`
        allow: List of extra privileges.
            Eg `allow=["network.host", "security.insecure"]`
        build_args: The build arguments.
            ex `build_args={"PY_VERSION": "3.7.8", "UBUNTU_VERSION": "20.04"}`.
        builder: Specify which builder to use.
        cache: Whether or not to use the cache
        cache_from: Works only with the container driver. Loads the cache
            (if needed) from a registry `cache_from="user/app:cache"` or
            a directory on the client `cache_from="type=local,src=path/to/dir"`.
            It's also possible to use a dict form for this argument. e.g.
            `cache_from=dict(type="local", src="path/to/dir")`
        cache_to: Works only with the container driver. Sends the resulting
            docker cache either to a registry `cache_to="user/app:cache"`,
            or to a local directory `cache_to="type=local,dest=path/to/dir"`.
            It's also possible to use a dict form for this argument. e.g.
            `cache_to=dict(type="local", dest="path/to/dir", mode="max")`
        file: The path of the Dockerfile
        labels: Dict of labels to add to the image.
            `labels={"very-secure": "1", "needs-gpu": "0"}` for example.
        load: Shortcut for `output=dict(type="docker")` If `True`,
            `docker.buildx.build` will return a `python_on_whales.Image`.
        network: which network to use when building the Docker image
        output: Output destination (format: `output={"type": "local", "dest": "path"}`
            Possible output types are
            `["local", "tar", "oci", "docker", "image", "registry"]`.
            See [this link](https://github.com/docker/buildx#-o---outputpath-typetypekeyvalue)
            for more details about each exporter.
        platforms: List of target platforms when building the image. Ex:
            `platforms=["linux/amd64", "linux/arm64"]`
        progress: Set type of progress output (auto, plain, tty, or False).
            Use plain to keep the container output on screen
        pull: Always attempt to pull a newer version of the image
        push: Shorthand for `output=dict(type="registry")`.
        secrets: One or more secrets passed as string(s). For example
            `secrets="id=aws,src=/home/my_user/.aws/credentials"`
        ssh: SSH agent socket or keys to expose to the build
            (format is `default|<id>[=<socket>|<key>[,<key>]]` as a string)
        tags: Tag or tags to put on the resulting image.
        target: Set the target build stage to build.

    # Returns
        A `python_on_whales.Image` if a Docker image is loaded
        in the daemon after the build (the default behavior when
        calling `docker.build(...)`). Otherwise, `None`.
    """
    tags = to_list(tags)
    full_cmd = self.docker_cmd + ["buildx", "build"]

    # `progress=False` disables the progress output entirely (stderr is
    # captured below); any other string is forwarded to the CLI as-is.
    if progress != "auto" and isinstance(progress, str):
        full_cmd += ["--progress", progress]

    full_cmd.add_args_list("--add-host", format_dict_for_cli(add_hosts, separator=":"))
    full_cmd.add_args_list("--allow", allow)
    full_cmd.add_args_list("--build-arg", format_dict_for_cli(build_args))
    full_cmd.add_simple_arg("--builder", builder)
    full_cmd.add_args_list("--label", format_dict_for_cli(labels))
    full_cmd.add_simple_arg("--ssh", ssh)
    full_cmd.add_flag("--pull", pull)
    full_cmd.add_flag("--push", push)
    full_cmd.add_flag("--load", load)
    full_cmd.add_simple_arg("--file", file)
    full_cmd.add_simple_arg("--target", target)
    # cache_from/cache_to accept either a raw CLI string or a dict that
    # gets formatted into the `key=value,...` form buildx expects.
    if isinstance(cache_from, dict):
        full_cmd.add_simple_arg("--cache-from", format_dict_for_buildx(cache_from))
    else:
        full_cmd.add_simple_arg("--cache-from", cache_from)
    if isinstance(cache_to, dict):
        full_cmd.add_simple_arg("--cache-to", format_dict_for_buildx(cache_to))
    else:
        full_cmd.add_simple_arg("--cache-to", cache_to)
    full_cmd.add_args_list("--secret", to_list(secrets))
    if output != {}:
        full_cmd += ["--output", format_dict_for_buildx(output)]
    if platforms is not None:
        full_cmd += ["--platform", ",".join(platforms)]
    full_cmd.add_simple_arg("--network", network)
    full_cmd.add_flag("--no-cache", not cache)
    full_cmd.add_args_list("--tag", tags)

    will_load_image = self._build_will_load_image(builder, push, load, output)
    # very special_case, must be fixed https://github.com/docker/buildx/issues/420
    if (
        will_load_image
        and not tags
        and self.inspect(builder).driver == "docker-container"
    ):
        # we have no way of fetching the image because iidfile is wrong in this case.
        will_load_image = False

    if not will_load_image:
        # no image will end up in the daemon: just run the build and return None
        full_cmd.append(context_path)
        run(full_cmd, capture_stderr=progress is False)
        return

    docker_image = python_on_whales.components.image.ImageCLI(self.client_config)
    if self._method_to_get_image(builder) == GetImageMethod.TAG:
        # the image can be retrieved by its tag after the build
        full_cmd.append(context_path)
        run(full_cmd, capture_stderr=progress is False)
        return docker_image.inspect(tags[0])
    else:
        # no usable tag: ask buildx to write the image id to a temp file
        # (--iidfile) and inspect the image by id afterwards
        with tempfile.TemporaryDirectory() as tmp_dir:
            tmp_dir = Path(tmp_dir)
            iidfile = tmp_dir / "id_file.txt"
            full_cmd.add_simple_arg("--iidfile", iidfile)
            full_cmd.append(context_path)
            run(full_cmd, capture_stderr=progress is False)
            image_id = iidfile.read_text()
            return docker_image.inspect(image_id)
def unpause(self, services: Union[str, List[str]] = []):
    """Unpause one or more services"""
    run(self.docker_compose_cmd + ["unpause"] + to_list(services))
def add_args_list(self, arg_name: str, list_values: list):
    """Appends `arg_name` followed by the value, once for each value."""
    for value in to_list(list_values):
        self.append(arg_name)
        self.append(value)
def create(
    self,
    image: str,
    command: Union[str, List[str], None],
    cap_add: List[str] = [],
    cap_drop: List[str] = [],
    constraints: List[str] = [],
    detach: bool = False,
    dns: List[str] = [],
    dns_options: List[str] = [],
    dns_search: List[str] = [],
    endpoint_mode: Optional[str] = None,
    entrypoint: Optional[str] = None,
    envs: Dict[str, str] = {},
    env_files: Union[ValidPath, List[ValidPath]] = [],
    generic_resources: List[str] = [],
    groups: List[str] = [],
    healthcheck: bool = True,
    health_cmd: Optional[str] = None,
    health_interval: Union[None, int, timedelta] = None,
    health_retries: Optional[int] = None,
    health_start_period: Union[None, int, timedelta] = None,
    health_timeout: Union[None, int, timedelta] = None,
    hosts: Dict[str, str] = {},
    hostname: Optional[str] = None,
    init: bool = False,
    isolation: Optional[str] = None,
    labels: Dict[str, str] = {},
    limit_cpu: Optional[float] = None,
    limit_memory: Optional[str] = None,
    limit_pids: Optional[int] = None,
    log_driver: Optional[str] = None,
):
    """Creates a Docker swarm service.

    Consider using 'docker stack deploy' instead as it's idempotent and
    easier to read for complex applications.
    docker stack deploy is basically docker compose for swarm clusters.

    # Arguments:
        image: The image to use as the base for the service.
        command: The command to execute in the container(s).
    """
    full_cmd = self.docker_cmd + ["service", "create", "--quiet"]

    full_cmd.add_args_list("--cap-add", cap_add)
    full_cmd.add_args_list("--cap-drop", cap_drop)
    full_cmd.add_args_list("--constraint", constraints)
    full_cmd.add_flag("--detach", detach)
    full_cmd.add_args_list("--dns", dns)
    full_cmd.add_args_list("--dns-option", dns_options)
    full_cmd.add_args_list("--dns-search", dns_search)
    full_cmd.add_simple_arg("--endpoint-mode", endpoint_mode)
    full_cmd.add_simple_arg("--entrypoint", entrypoint)
    full_cmd.add_args_list("--env", format_dict_for_cli(envs))
    full_cmd.add_args_list("--env-file", env_files)
    full_cmd.add_args_list("--generic-resource", generic_resources)
    full_cmd.add_args_list("--group", groups)
    # `healthcheck` is inverted into the CLI's --no-healthcheck flag
    full_cmd.add_flag("--no-healthcheck", not healthcheck)
    full_cmd.add_simple_arg("--health-cmd", health_cmd)
    full_cmd.add_simple_arg("--health-interval", to_seconds(health_interval))
    full_cmd.add_simple_arg("--health-retries", health_retries)
    full_cmd.add_simple_arg("--health-start-period", to_seconds(health_start_period))
    full_cmd.add_simple_arg("--health-timeout", to_seconds(health_timeout))
    # bug fix: iterate key/value pairs, not the dict's keys
    # (`for key, value in hosts` raised on any host name != 2 chars)
    for key, value in hosts.items():
        full_cmd += ["--host", f"{key}:{value}"]
    full_cmd.add_simple_arg("--hostname", hostname)
    full_cmd.add_flag("--init", init)
    full_cmd.add_simple_arg("--isolation", isolation)
    full_cmd.add_args_list("--label", format_dict_for_cli(labels))
    full_cmd.add_simple_arg("--limit-cpu", limit_cpu)
    full_cmd.add_simple_arg("--limit-memory", limit_memory)
    full_cmd.add_simple_arg("--limit-pids", limit_pids)
    full_cmd.add_simple_arg("--log-driver", log_driver)

    full_cmd.append(image)
    if command is not None:
        for string in to_list(command):
            full_cmd.append(string)

    # --quiet makes the CLI print only the created service id
    service_id = run(full_cmd)
    return Service(self.client_config, service_id, is_immutable_id=True)
def bake(
    self,
    targets: Union[str, List[str]] = [],
    builder: Optional[ValidBuilder] = None,
    files: Union[ValidPath, List[ValidPath]] = [],
    load: bool = False,
    cache: bool = True,
    print: bool = False,
    progress: Union[str, bool] = "auto",
    pull: bool = False,
    push: bool = False,
    set: Dict[str, str] = {},
    variables: Dict[str, str] = {},
) -> Dict[str, Dict[str, Dict[str, Any]]]:
    """Bake is similar to make, it allows you to build things declared in a file.

    For example it allows you to build multiple docker image in parallel.

    The CLI docs is [here](https://github.com/docker/buildx#buildx-bake-options-target)
    and it contains a lot more information.

    # Arguments
        targets: Targets or groups of targets to build.
        builder: The builder to use.
        files: Build definition file(s)
        load: Shorthand for `set=["*.output=type=docker"]`
        cache: Whether to use the cache or not.
        print: Do nothing, just returns the config.
        progress: Set type of progress output (`"auto"`, `"plain"`, `"tty"`,
            or `False`). Use plain to keep the container output on screen
        pull: Always try to pull the newer version of the image
        push: Shorthand for `set=["*.output=type=registry"]`
        set: A list of overrides in the form `"targetpattern.key=value"`.
        variables: A dict containing the values of the variables defined in the
            hcl file. See <https://github.com/docker/buildx#hcl-variables-and-functions>

    # Returns
        The configuration used for the bake (files merged + override with
        the arguments used in the function). It's the loaded json you would
        obtain by running `docker buildx bake --print --load my_target` if
        your command was `docker buildx bake --load my_target`. Some example here.

    ```python
    from python_on_whales import docker

    # returns the config used and runs the builds
    config = docker.buildx.bake(["my_target1", "my_target2"], load=True)
    assert config == {
        "target": {
            "my_target1": {
                "context": "./",
                "dockerfile": "Dockerfile",
                "tags": ["pretty_image1:1.0.0"],
                "target": "out1",
                "output": ["type=docker"]
            },
            "my_target2": {
                "context": "./",
                "dockerfile": "Dockerfile",
                "tags": ["pretty_image2:1.0.0"],
                "target": "out2",
                "output": ["type=docker"]
            }
        }
    }

    # returns the config only, doesn't run the builds
    config = docker.buildx.bake(["my_target1", "my_target2"], load=True, print=True)
    ```
    """
    cmd = self.docker_cmd + ["buildx", "bake"]
    cmd.add_flag("--no-cache", not cache)
    cmd.add_simple_arg("--builder", builder)
    cmd.add_flag("--load", load)
    cmd.add_flag("--pull", pull)
    cmd.add_flag("--push", push)
    cmd.add_flag("--print", print)
    if progress != "auto" and isinstance(progress, str):
        cmd += ["--progress", progress]
    for definition_file in to_list(files):
        cmd.add_simple_arg("--file", definition_file)
    cmd.add_args_list("--set", format_dict_for_cli(set))
    targets = to_list(targets)
    # the variables of the hcl file are passed through the environment
    env = dict(os.environ)
    env.update(variables)
    if print:
        return json.loads(run(cmd + targets, env=env))
    # run the builds first, then ask the CLI for the merged config
    run(cmd + targets, capture_stderr=progress is False, env=env)
    return json.loads(run(cmd + ["--print"] + targets, env=env))