def tuning(
    service: SupportedServices = typer.Argument(..., help="Service name"),
    cpu: int = typer.Option(None, "--cpu", help="Force the number of CPUs", min=1),
    ram: int = typer.Option(None, "--ram", help="Force the amount of RAM", min=1),
) -> None:
    Application.print_command(
        Application.serialize_parameter("--cpu", cpu, IF=cpu),
        Application.serialize_parameter("--ram", ram, IF=ram),
        Application.serialize_parameter("", service),
    )

    Application.get_controller().controller_init()

    if not cpu:
        cpu = os.cpu_count() or 1

    if not ram:
        ram = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES")

    log.info("Number of CPU(s): {}", cpu)
    log.info("Amount of RAM: {}", system.bytes_to_str(ram))

    log.info("Suggested settings:")

    module = TUNING_MODULES.get(service.value)

    if not module:  # pragma: no cover
        print_and_exit(f"{service.value} misconfiguration, module not found")

    module.tuning(ram, cpu)
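# A self-contained sketch of the interface a TUNING_MODULES entry is expected to
# expose, inferred only from the call above (module.tuning(ram, cpu)). The class,
# the mapping and the suggested values are illustrative assumptions, not the
# controller's real tuning modules.
from typing import Dict, Type


class PostgresTuning:
    """Hypothetical tuning module printing classic PostgreSQL rules of thumb."""

    @staticmethod
    def tuning(ram: int, cpu: int) -> None:
        # ~25% of RAM for shared_buffers, ~75% for effective_cache_size,
        # one worker process per available core
        print(f"shared_buffers: {ram // 4} bytes")
        print(f"effective_cache_size: {ram * 3 // 4} bytes")
        print(f"max_worker_processes: {cpu}")


TUNING_MODULES: Dict[str, Type[PostgresTuning]] = {"postgres": PostgresTuning}
TUNING_MODULES["postgres"].tuning(ram=8 * 1024**3, cpu=4)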
def check_resources(self) -> None:
    total_cpus = 0.0
    total_memory = 0.0
    for service in Application.data.active_services:
        config = Application.data.compose_config[service]

        # The frontend container has no deploy options
        if not config.deploy:
            continue

        if config.deploy.resources.reservations:
            # int() is needed because python-on-whales 0.25 extended the type of
            # cpus and replicas to Union[float, str], following compose-cli typing.
            # The `or 0` guards against missing reservation values
            cpus = int(config.deploy.resources.reservations.cpus or 0)
            memory = config.deploy.resources.reservations.memory or 0

            # The proxy container is now defined as global and without any replicas
            # => replicas is None => defaulted to 1
            replicas = int(config.deploy.replicas or 1)

            total_cpus += replicas * cpus
            total_memory += replicas * memory

    nodes_cpus = 0.0
    nodes_memory = 0.0
    for node in self.docker.node.list():
        nodes_cpus += round(node.description.resources.nano_cpus / 1_000_000_000)
        nodes_memory += node.description.resources.memory_bytes

    if total_cpus > nodes_cpus:
        log.critical(
            "Your deployment requires {} cpus but your nodes only have {}",
            total_cpus,
            nodes_cpus,
        )

    if total_memory > nodes_memory:
        log.critical(
            "Your deployment requires {} of RAM but your nodes only have {}",
            system.bytes_to_str(total_memory),
            system.bytes_to_str(nodes_memory),
        )
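# A self-contained sketch of the reservation math performed by check_resources():
# every service contributes replicas * reserved resources, and the totals are
# compared against the aggregated node capacity. The dataclass, helper name and
# sample numbers are illustrative assumptions.
from dataclasses import dataclass
from typing import List


@dataclass
class Reservation:
    cpus: float
    memory: int  # bytes
    replicas: int


def fits(requested: List[Reservation], node_cpus: float, node_memory: int) -> bool:
    total_cpus = sum(r.replicas * r.cpus for r in requested)
    total_memory = sum(r.replicas * r.memory for r in requested)
    return total_cpus <= node_cpus and total_memory <= node_memory


# Two replicas reserving 1 cpu / 2 GB each fit on a 4-cpu / 8 GB cluster
print(fits([Reservation(cpus=1, memory=2 * 1024**3, replicas=2)], 4, 8 * 1024**3))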
def images(
    remove_images: List[str] = typer.Option(
        [],
        "--rm",
        "--remove",
        help="Remove the specified image(s)",
        show_default=False,
        shell_complete=Application.autocomplete_submodule,
    ),
) -> None:
    Application.print_command(
        Application.serialize_parameter("--remove", remove_images, IF=remove_images),
    )
    Application.get_controller().controller_init()

    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)  # type: ignore

    # https://docs.docker.com/registry/spec/api/
    docker = Docker()
    docker.registry.ping()
    registry = docker.registry.get_host()
    host = f"https://{registry}"

    # Docker Registry API Reference
    # https://docs.docker.com/registry/spec/api/

    # Retrieve a sorted, json list of repositories available in the registry
    r = docker.registry.send_request(f"{host}/v2/_catalog")

    catalog = r.json()

    images: List[Tuple[str, str, str, int, Optional[datetime]]] = []
    for repository in catalog.get("repositories", {}):
        # Fetch the tags under the repository identified by <name>
        r = docker.registry.send_request(f"{host}/v2/{repository}/tags/list")

        # tags can be None if all the tags of a repository have been deleted;
        # the `or {}` converts a None into an empty iterable
        tags = r.json().get("tags") or {}

        for tag in tags:
            # Fetch the manifest identified by name and reference
            r = docker.registry.send_request(f"{host}/v2/{repository}/manifests/{tag}")
            manifest = r.json()
            size = 0
            for layer in manifest.get("layers", []):
                size += layer.get("size", 0)

            headers = r.headers
            _id = cast(str, headers.get("Docker-Content-Digest", "N/A"))

            # Creation date is only available on schema version 1 :\
            r = docker.registry.send_request(
                f"{host}/v2/{repository}/manifests/{tag}", version="1"
            )
            manifest = r.json()
            layers = manifest.get("history", [])
            created: Optional[datetime] = None
            if len(layers) > 0:
                first_layer = json.loads(layers[0].get("v1Compatibility", "{}"))
                creation_date = first_layer.get("created", "N/A")
                if creation_date != "N/A":
                    creation_date = creation_date[0:19]
                    created = datetime.strptime(creation_date, "%Y-%m-%dT%H:%M:%S")

            images.append((_id, cast(str, repository), cast(str, tag), size, created))

    if not images:
        log.warning("This registry contains no images")
    else:
        log.info("This registry contains {} image(s):", len(images))

        images_to_be_removed: List[Tuple[str, str, str]] = []
        table: List[List[str]] = []
        for img in images:
            digest = img[0]
            # to be replaced with removeprefix starting from py39
            if digest.startswith("sha256:"):
                digest = digest[7:]
            _id = digest[0:12]

            repository = img[1]
            tag = img[2]
            SIZE = system.bytes_to_str(img[3])
            d = img[4]

            to_be_removed = (
                _id in remove_images or f"{repository}:{tag}" in remove_images
            )

            creation_date = d.strftime("%Y-%m-%d %H:%M:%S") if d else "N/A"

            image_line: List[str] = []

            if to_be_removed:
                creation_date = "DELETING ..."
                image_line.append(RED(repository))
                image_line.append(RED(tag))
                image_line.append(RED(_id))
                image_line.append(RED(creation_date))
                image_line.append(RED(SIZE))
                images_to_be_removed.append((repository, digest, tag))
            else:
                image_line.append(repository)
                image_line.append(tag)
                image_line.append(_id)
                image_line.append(creation_date)
                image_line.append(SIZE)

            table.append(image_line)

        print("")
        print(
            tabulate(
                table,
                tablefmt=TABLE_FORMAT,
                headers=["REPOSITORY", "TAG", "IMAGE ID", "CREATED", "SIZE"],
            )
        )

        if len(remove_images) != len(images_to_be_removed):
            log.error(
                "Some of the images that you specified were not found in this registry"
            )

        # DELETE /v2/<name>/manifests/<reference>
        for image in images_to_be_removed:
            repository = image[0]
            reference = image[1]  # the digest, without the sha256: prefix
            tag = image[2]
            # For deletes the reference must be a digest, otherwise the delete will fail
            r = docker.registry.send_request(
                f"{host}/v2/{repository}/manifests/sha256:{reference}",
                method="DELETE",
            )

            log.info("Image {}:{} deleted from {}", repository, tag, host)

        if images_to_be_removed:
            log.info("Executing registry garbage collector...")
            command = "/bin/registry garbage-collect -m /etc/docker/registry/config.yml"
            docker.exec_command("registry", user="******", command=command)
            log.info("Registry garbage collector successfully executed")

            # A restart is needed to prevent clashes between gc and cache
            # https://gist.github.com/jaytaylor/86d5efaddda926a25fa68c263830dac1#gistcomment-3653760
            # The garbage collector doesn't communicate with the cache, nor unlink layers
            # from the repository, so if you immediately try to repush a layer that was
            # just deleted, the registry will find it for stat calls, but actually
            # serving the blob will fail.
            docker.client.container.restart("registry")
            log.info("Registry restarted to clean the layers cache")
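# A sketch of what a send_request() helper like the one used above could look like:
# a thin requests wrapper that sets the Accept header needed to select manifest
# schema v2 (the default) or v1, which is where the creation date lives. The real
# docker.registry.send_request helper may differ; the credentials handling and
# verify=False are illustrative assumptions (disable_warnings above hints at
# self-signed registry certificates).
import requests

MANIFEST_V2 = "application/vnd.docker.distribution.manifest.v2+json"
MANIFEST_V1 = "application/vnd.docker.distribution.manifest.v1+json"


def send_request(
    url: str, method: str = "GET", version: str = "2", user: str = "", pwd: str = ""
) -> requests.Response:
    accept = MANIFEST_V1 if version == "1" else MANIFEST_V2
    response = requests.request(
        method,
        url,
        headers={"Accept": accept},
        auth=(user, pwd) if user else None,
        verify=False,
        timeout=10,
    )
    response.raise_for_status()
    return response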
def status(self, services: List[str]) -> None:
    nodes: Dict[str, str] = {}
    nodes_table: List[List[str]] = []
    headers = ["Role", "State", "Name", "IP", "CPUs", "RAM", "LABELS", "Version"]

    for node in self.docker.node.list():
        nodes[node.id] = node.description.hostname

        state = f"{node.status.state.title()}+{node.spec.availability.title()}"
        cpu = str(round(node.description.resources.nano_cpus / 1_000_000_000))
        ram = system.bytes_to_str(node.description.resources.memory_bytes)

        if state == "Ready+Active":
            color_fn = GREEN
        else:
            color_fn = RED

        nodes_table.append(
            [
                color_fn(node.spec.role.title()),
                color_fn(state),
                color_fn(node.description.hostname),
                color_fn(node.status.addr),
                color_fn(cpu),
                color_fn(ram),
                color_fn(",".join(node.spec.labels)),
                color_fn(f"v{node.description.engine.engine_version}"),
            ]
        )

    print(tabulate(nodes_table, tablefmt=TABLE_FORMAT, headers=headers))

    stack_services = self.docker.service.list()

    print("")

    if not stack_services:
        log.info("No service is running")
        return

    prefix = f"{Configuration.project}_"
    for service in stack_services:
        service_name = service.spec.name

        tmp_service_name = service_name
        if tmp_service_name.startswith(prefix):
            # to be replaced with removeprefix
            tmp_service_name = tmp_service_name[len(prefix):]

        if tmp_service_name not in services:
            continue

        print(f"{colors.RESET}Inspecting {service_name}...", end="\r")

        tasks_lines: List[str] = []
        running_tasks = 0

        for task in self.docker.service.ps(service_name):
            if task.status.state == "shutdown" or task.status.state == "complete":
                COLOR = colors.BLUE
            elif task.status.state == "running":
                COLOR = colors.GREEN
                running_tasks += 1
            elif task.status.state == "starting" or task.status.state == "ready":
                COLOR = colors.YELLOW
            elif task.status.state == "failed":
                COLOR = colors.RED
            else:
                COLOR = colors.RESET

            if task.slot:
                slot = f" \\_ [{task.slot}]"
                container_name = f"{service_name}.{task.slot}.{task.id}"
            else:
                slot = " \\_ [H]"
                container_name = f"{service_name}.{task.node_id}.{task.id}"

            node_name = nodes.get(task.node_id, "")
            status = f"{COLOR}{task.status.state:8}{colors.RESET}"
            errors = f"err={task.status.err}" if task.status.err else ""
            labels = ",".join(task.labels)
            ts = task.status.timestamp.strftime("%d-%m-%Y %H:%M:%S")

            tasks_lines.append(
                "\t".join(
                    (
                        slot,
                        status,
                        ts,
                        node_name,
                        container_name,
                        errors,
                        labels,
                    )
                )
            )

        # Very ugly, to reset the color with \r
        print(" ", end="\r")

        replicas = self.get_replicas(service)

        if replicas == 0:
            COLOR = colors.YELLOW
        elif replicas != running_tasks:
            COLOR = colors.RED
        else:
            COLOR = colors.GREEN

        if service.endpoint.ports:
            ports_list = [
                f"{p.published_port}->{p.target_port}" for p in service.endpoint.ports
            ]
        else:
            ports_list = []

        image = service.spec.task_template.container_spec.image.split("@")[0]
        ports = ",".join(ports_list)
        print(f"{COLOR}{service_name:23}{colors.RESET} [{replicas}] {image}\t{ports}")
        for line in tasks_lines:
            print(line)

        print("")
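# get_replicas() is not shown above; a sketch of how the replica count could be
# derived from the raw Docker Engine API ServiceSpec, where Mode is either
# {"Replicated": {"Replicas": N}} or {"Global": {}}. The function name and the
# dict-based input are assumptions: the controller reads the same information
# through the python-on-whales service model instead.
from typing import Any, Dict


def get_replicas(service_spec: Dict[str, Any]) -> int:
    mode = service_spec.get("Mode", {})
    if "Global" in mode:
        # one task per node; callers may compare against the node count instead
        return 1
    return int(mode.get("Replicated", {}).get("Replicas", 0))


print(get_replicas({"Mode": {"Replicated": {"Replicas": 3}}}))  # 3
print(get_replicas({"Mode": {"Global": {}}}))  # 1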
def test_bytes_to_str() -> None:
    assert system.bytes_to_str(0) == "0"
    assert system.bytes_to_str(1) == "1"
    assert system.bytes_to_str(1023) == "1023"
    assert system.bytes_to_str(1024) == "1KB"
    assert system.bytes_to_str(1424) == "1KB"
    assert system.bytes_to_str(1824) == "2KB"
    assert system.bytes_to_str(18248) == "18KB"
    assert system.bytes_to_str(1024 * 1024 - 1) == "1024KB"
    assert system.bytes_to_str(1024 * 1024) == "1MB"
    assert system.bytes_to_str(18248377) == "17MB"
    assert system.bytes_to_str(418248377) == "399MB"
    assert system.bytes_to_str(1024 * 1024 * 1024 - 1) == "1024MB"
    assert system.bytes_to_str(1024 * 1024 * 1024) == "1GB"
    assert system.bytes_to_str(1024 * 1024 * 1024 * 1024 - 1) == "1024GB"
    assert system.bytes_to_str(1024 * 1024 * 1024 * 1024) == "1024GB"
    assert system.bytes_to_str(1024 * 1024 * 1024 * 1024 * 1024) == "1048576GB"
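# A sketch of a bytes_to_str() implementation consistent with the assertions above
# (base 1024, rounded, capped at GB). The real system.bytes_to_str helper may be
# written differently; this only illustrates the behavior the test expects.
def bytes_to_str(value: float) -> str:
    # Values below 1 KB are printed as-is, without a unit
    if value < 1024:
        return str(round(value))
    unit = "KB"
    for unit in ("KB", "MB", "GB"):
        value /= 1024
        if value < 1024:
            break
    # The loop never divides past GB, hence 1024**5 bytes -> "1048576GB"
    return f"{round(value)}{unit}"


assert bytes_to_str(1024 * 1024) == "1MB"
assert bytes_to_str(1024**5) == "1048576GB"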