def validate_env(env: Dict[str, EnvType]) -> None:
    """Validate the environment variables against the BaseEnvModel schema.

    On validation failure every offending field is logged together with its
    current value, then the process is terminated.
    """
    try:
        BaseEnvModel(**env)
    except ValidationError as e:
        error_text = str(e)
        # Field names sit on alternating lines of the pydantic error report;
        # the [1::2] slice extracts them — presumably lines 1, 3, 5, ...
        # carry field names and the even lines carry the reasons (verify
        # against the pydantic version in use)
        for invalid_field in error_text.split("\n")[1::2]:
            log.error(
                "Invalid value for {}: {}",
                invalid_field,
                env.get(invalid_field, "N/A"),
            )
        print_and_exit(error_text)
def get_active_branch(gitobj):
    """Return the active branch of the given git object as a string.

    Returns None when no git object is provided or when the repository has
    no active branch (GitPython raises TypeError in that case, e.g. on a
    detached HEAD — confirm against the GitPython version in use).
    """
    if gitobj is None:
        log.error("git object is None, cannot retrieve active branch")
        return None

    try:
        active = gitobj.active_branch
    except TypeError as e:
        log.warning(e)
        return None

    return str(active)
def get_active_branch(gitobj: Optional[Repo]) -> Optional[str]:
    """Return the name of the repository's active branch, or None on failure."""
    if not gitobj:
        log.error("git object is None, cannot retrieve active branch")
        return None

    try:
        name = gitobj.active_branch.name
    except AttributeError as e:  # pragma: no cover
        log.warning(e)
        return None

    return name
def install_compose() -> None:
    """Download the docker compose CLI plugin and install it for the current user.

    The binary is stored as ~/.docker/cli-plugins/docker-compose, made
    executable, and the installation is then verified through the docker
    client.
    """
    plugins_dir = Path.home().joinpath(".docker", "cli-plugins")
    plugins_dir.mkdir(parents=True, exist_ok=True)
    target = plugins_dir.joinpath("docker-compose")

    url = (
        "https://github.com/docker/compose/releases/download/"
        f"{COMPOSE_VERSION}/docker-compose-linux-x86_64"
    )
    log.info("Downloading compose binary: {}", url)

    # The download is verified against the expected MD5 checksum
    downloaded = Packages.download(url, EXPECTED_COMPOSE_BIN_MD5)
    downloaded.rename(target)
    # Add the executable bit on top of the file's current permissions
    target.chmod(target.stat().st_mode | stat.S_IEXEC)

    if docker.compose.is_installed():
        log.info("Docker compose is installed")
    else:  # pragma: no cover
        log.error("Docker compose is NOT installed")
def get_bin_version(
    cls,
    exec_cmd: str,
    option: Optional[List[str]] = None,
    clean_output: bool = True,
) -> Optional[str]:
    """
    Retrieve the version of a binary.

    Args:
        exec_cmd: name of the executable to query
        option: command line arguments used to ask for the version;
            defaults to ["--version"]
        clean_output: when True, try to extract the bare version number
            from the command output

    Returns:
        The (optionally cleaned) version string, or None when the command
        fails to execute.
    """
    # Fixed: the original used a mutable default argument (["--version"]),
    # which is shared across calls; use a None sentinel instead.
    if option is None:
        option = ["--version"]

    try:
        if os.name == "nt":  # pragma: no cover
            exec_cmd = cls.convert_bin_to_win32(exec_cmd)

        output = Packages.execute_command(exec_cmd, option)

        if clean_output:
            # get up to the first open round bracket if any,
            # or return the whole string
            output = output.split("(")[0]
            # get up to the first comma if any, or return the whole string
            output = output.split(",")[0]
            # then split on spaces and take the last element
            output = output.split()[-1]
            # Remove trailing spaces
            output = output.strip()
            # Removed single quotes
            output = output.replace("'", "")

        # That's all... this magic receipt is able to extract
        # version information from most of outputs, e.g.
        # Python 3.8.2
        # Docker version 19.03.8, build afacb8b7f0
        # git version 2.25.1
        # rapydo version 0.7.x
        return output

        # Note that in may other cases it fails...
        # but we are interested in a very small list of programs, so it's ok
        # echo --version -> --version
        # ls --version -> ls
        # pip3 --version -> a path
    except ExecutionException as e:
        log.error(e)

    return None
def validate_configuration(conf: Configuration, core: bool) -> None:
    """Validate a project configuration against the proper pydantic model.

    Core projects are checked with CoreConfigurationModel, custom projects
    with CustomConfigurationModel. On failure every invalid field is logged
    with its current value, then the process exits. Empty configurations
    are silently accepted.
    """
    if not conf:
        return

    model = CoreConfigurationModel if core else CustomConfigurationModel
    try:
        model(**conf)
    except ValidationError as e:
        errors = str(e)
        for field in errors.split("\n")[1::2]:
            # field is like:
            # "variables -> env -> XYZ"
            # this way it is converted in key = variables.env.XYZ
            key = ".".join(field.split(" -> "))
            log.error(
                "Invalid value for {}: {}", field, glom(conf, key, default=None)
            )
        print_and_exit(errors)
def update(path: str, gitobj: Repo) -> None:
    """Pull the latest commits for the repository at *path* from origin.

    Only the 'origin' remote is considered. Before pulling, the commits the
    local branch is behind (up to MAX_FETCHED_COMMITS) are listed in the log.
    """
    if not gitobj.active_branch:  # pragma: no cover
        log.error("Can't update {}, no active branch found", path)
        return None
    for remote in gitobj.remotes:
        if remote.name == "origin":
            try:
                branch = gitobj.active_branch.name
                log.info("Updating {} {}@{}", remote, path, branch)
                fetch(path, gitobj)
                # Commits reachable from origin/<branch> but not from the
                # local <branch>, i.e. what the pull below will bring in
                commits_behind = gitobj.iter_commits(
                    f"{branch}..origin/{branch}", max_count=MAX_FETCHED_COMMITS
                )
                try:
                    commits_behind_list = list(commits_behind)
                except GitCommandError:  # pragma: no cover
                    log.info(
                        "Remote branch {} not found for {}. Is it a local branch?",
                        branch,
                        path,
                    )
                else:
                    if commits_behind_list:  # pragma: no cover
                        for c in commits_behind_list:
                            message = str(c.message).strip().replace("\n", "")
                            # Skip comment-like commit messages
                            if message.startswith("#"):
                                continue
                            sha = c.hexsha[0:7]
                            # Truncate long messages to keep the log readable
                            if len(message) > 60:
                                message = message[0:57] + "..."
                            log.info("... pulling commit {}: {}", sha, message)
                remote.pull(branch)
            except GitCommandError as e:  # pragma: no cover
                log.error("Unable to update {} repo\n{}", path, e)
            except TypeError as e:  # pragma: no cover
                # NOTE(review): presumably raised by active_branch on a
                # detached HEAD — confirm against GitPython docs
                print_and_exit("Unable to update {} repo, {}", path, str(e))
def install_controller_from_folder(version: str) -> None:
    """Install the rapydo controller in editable mode from submodules/do.

    The local 'do' repository is switched to the requested version (unless
    already there) and then installed in editable mode via Packages.install.
    Exits on an invalid version.
    """
    do_path = SUBMODULES_DIR.joinpath("do")
    try:
        # Ensure the git submodules (including 'do') are initialized;
        # git_submodules raises SystemExit when they are missing
        Application.git_submodules()
    except SystemExit:
        log.info(
            """You asked to install rapydo {ver} in editable mode, but {p} is missing. You can force the installation by disabling the editable mode: rapydo install {ver} --no-editable """,
            ver=version,
            p=do_path,
        )
        raise
    log.info(
        "You asked to install rapydo {}. It will be installed in editable mode",
        version,
    )
    do_repo = Application.gits.get("do")
    b = git.get_active_branch(do_repo)
    if b is None:
        log.error("Unable to read local controller repository")  # pragma: no cover
    elif b == version:
        log.info("Controller repository already at {}", version)
    elif git.switch_branch(do_repo, version):
        log.info("Controller repository switched to {}", version)
    else:
        print_and_exit("Invalid version")
    # Editable install so local changes in submodules/do are picked up
    Packages.install(do_path, editable=True)
    log.info("Controller version {} installed from local folder", version)
def switch_branch(gitobj, branch_name='master', remote=True):
    """Switch the repository to the given branch.

    Args:
        gitobj: GitPython Repo object
        branch_name: name of the branch to switch to
        remote: when True look the branch up among the refs fetched from
            the first remote, otherwise among local branches

    Returns:
        True when the checkout succeeded; False otherwise (None branch,
        already on that branch, branch not found, or checkout error).
    """
    if branch_name is None:
        log.error("Unable to switch to a none branch")
        return False

    if gitobj.active_branch.name == branch_name:
        path = os.path.basename(gitobj.working_dir)
        log.info("You are already on branch {} on {}", branch_name, path)
        return False

    if remote:
        # fetch() refreshes the remote refs and returns them for the lookup
        branches = gitobj.remotes[0].fetch()
    else:
        branches = gitobj.branches

    # Find the requested branch among the candidates; remote refs are
    # matched by suffix (e.g. "origin/master" for "master")
    branch = None
    for candidate in branches:
        if remote:
            matched = candidate.name.endswith('/' + branch_name)
        else:
            matched = candidate.name == branch_name
        if matched:
            branch = candidate
            break

    if branch is None:
        log.warning("Branch {} not found", branch_name)
        return False

    try:
        gitobj.git.checkout(branch_name)
    except GitCommandError as e:
        log.warning(e)
        return False

    path = os.path.basename(gitobj.working_dir)
    # Fixed: log the branch name requested by the caller instead of the
    # internal ref object (which for remote refs renders as "origin/<name>")
    log.info("Switched branch to {} on {}", branch_name, path)
    return True
def update(path, gitobj):
    """Pull the latest changes for *path* from its origin remote.

    Refuses to update when the working tree has modified or untracked
    files: in that case the diff is printed and the process exits.
    """
    unstaged = get_unstaged_files(gitobj)
    dirty = len(unstaged['changed']) > 0 or len(unstaged['untracked']) > 0

    if dirty:
        log.critical("Unable to update {} repo, you have unstaged files", path)
        print_diff(gitobj, unstaged)
        sys.exit(1)

    for remote in gitobj.remotes:
        if remote.name != 'origin':
            continue
        try:
            branch = gitobj.active_branch
            log.info("Updating {} {} (branch {})", remote, path, branch)
            remote.pull(branch)
        except GitCommandError as e:
            log.error("Unable to update {} repo\n{}", path, e)
        except TypeError as e:
            # NOTE(review): presumably raised by active_branch on a
            # detached HEAD — confirm against GitPython docs
            if TESTING:
                log.warning("Unable to update {} repo, {}", path, e)
            else:
                log.exit("Unable to update {} repo, {}", path, e)
def switch_branch(gitobj: Optional[Repo], branch_name: str) -> bool:
    """Check out *branch_name* on the given repository.

    Returns True when the repository ends up on the requested branch
    (including when it already was), False on any failure.
    """
    if not gitobj:
        log.error("git object is None, cannot switch the active branch")
        return False

    current_branch = gitobj.active_branch.name

    path: str = "N/A"
    if gitobj.working_dir:
        path = Path(gitobj.working_dir).name

    if current_branch == branch_name:
        log.info("{} already set on branch {}", path, branch_name)
        return True

    # fetch() refreshes the remote refs and returns them for the lookup;
    # remote refs are matched by suffix (e.g. "origin/master" for "master")
    remote_refs = gitobj.remotes[0].fetch()
    branch = next(
        (ref for ref in remote_refs if ref.name.endswith(f"/{branch_name}")),
        None,
    )

    if not branch:
        log.warning("Branch {} not found", branch_name)
        return False

    try:
        gitobj.git.checkout(branch_name)
    except GitCommandError as e:  # pragma: no cover
        log.error(e)
        return False

    log.info("Switched {} branch from {} to {}", path, current_branch, branch_name)
    return True
def remove(
    services: List[str] = typer.Argument(
        None,
        help="Services to be removed",
        shell_complete=Application.autocomplete_service,
    ),
) -> None:
    """Remove services from the running stack (swarm mode).

    Extra services (registry, adminer, swaggerui) are removed as plain
    containers; the remaining services are removed either by tearing down
    the whole swarm stack (when all active services are selected) or by
    scaling the selected services down to zero replicas.
    """
    Application.print_command(Application.serialize_parameter("", services))
    remove_extras: List[str] = []
    for extra in (
        REGISTRY,
        "adminer",
        "swaggerui",
    ):
        if services and extra in services:
            # services is a tuple, even if defined as List[str] ...
            services = list(services)
            services.pop(services.index(extra))
            remove_extras.append(extra)
    Application.get_controller().controller_init(services)
    docker = Docker()
    if remove_extras:
        for extra_service in remove_extras:
            if not docker.client.container.exists(extra_service):
                log.error("Service {} is not running", extra_service)
                continue
            docker.client.container.remove(extra_service, force=True)
            log.info("Service {} removed", extra_service)
    # Nothing more to do
    if not services:
        return
    all_services = Application.data.services == Application.data.active_services
    if all_services:
        docker.swarm.remove()
        # This is needed because docker stack remove does not support a --wait flag
        # To make the remove command sync and chainable with a start command
        engine = Application.env.get("DEPLOY_ENGINE", "swarm")
        network_name = f"{Configuration.project}_{engine}_default"
        wait_network_removal(docker, network_name)
        log.info("Stack removed")
    else:
        if not docker.swarm.stack_is_running():
            print_and_exit(
                "Stack {} is not running, deploy it with {command}",
                Configuration.project,
                command=RED("rapydo start"),
            )
        # Scale the selected services down to zero replicas instead of
        # removing the whole stack
        scales: Dict[Union[str, Service], int] = {}
        for service in Application.data.services:
            service_name = Docker.get_service(service)
            scales[service_name] = 0
        docker.client.service.scale(scales, detach=False)
        log.info("Services removed")
def test(
    test: str = typer.Argument(None, help="Name of the test to be executed"),
    swarm_mode: bool = typer.Option(
        False,
        "--swarm",
        help="Execute the test in swarm mode",
        show_default=False,
    ),
    no_remove: bool = typer.Option(
        False,
        "--no-rm",
        help="Do not remove the container",
        show_default=False,
    ),
    # I have no need to test a command to locally execute tests
    # and I would like to preventany recursive test execution!
) -> None:  # pragma: no cover
    """Run one of the controller's own test modules inside a docker container.

    Without a test name, the list of available tests is printed. Otherwise a
    privileged container is started from the controller image (with the
    controller source mounted at /code) and py.test is executed inside it.
    """
    Application.print_command(
        Application.serialize_parameter("--swarm", swarm_mode, IF=swarm_mode),
        Application.serialize_parameter("--no-rm", no_remove, IF=no_remove),
        Application.serialize_parameter("", test),
    )
    controller_path = Packages.get_installation_path("rapydo")
    # Can't really happen...
    if not controller_path:  # pragma: no cover
        print_and_exit("Controller path not found")
    if not test:
        # No test requested: list the available test modules and stop
        log.info("Choose a test to be executed:")
        for f in sorted(controller_path.joinpath("tests").glob("test_*.py")):
            test_name = f.with_suffix("").name.replace("test_", "")
            print(f" - {test_name}")
        return None
    test_file = Path("tests", f"test_{test}.py")
    if not controller_path.joinpath(test_file).exists():
        print_and_exit("Invalid test name {}", test)
    image_name = f"rapydo/controller:{__version__}"
    container_name = "controller"
    docker.image.pull(image_name)
    # Replace any leftover container from a previous run
    if docker.container.exists(container_name):
        docker.container.remove(container_name, force=True, volumes=True)
    docker.container.run(
        image_name,
        detach=True,
        privileged=True,
        remove=True,
        volumes=[(controller_path, "/code")],
        name=container_name,
        envs={
            "TESTING": "1",
            "SWARM_MODE": "1" if swarm_mode else "0",
        },
    )
    # Start syslogd detached inside the container before running the tests
    docker.container.execute(
        container_name,
        command="syslogd",
        interactive=False,
        tty=False,
        stream=False,
        detach=True,
    )
    # Wait few seconds to let the docker daemon to start
    log.info("Waiting for docker daemon to start...")
    time.sleep(3)
    command = ["py.test", "-s", "-x", f"/code/{test_file}"]
    log.info("Executing command: {}", " ".join(command))
    try:
        docker.container.execute(
            container_name,
            command=command,
            workdir="/tmp",
            interactive=True,
            tty=True,
            stream=False,
            detach=False,
        )
    except DockerException as e:
        log.error(e)
    # With --no-rm the container is kept alive to allow some debugging
    if not no_remove:
        docker.container.remove(container_name, force=True, volumes=True)
        log.info("Test container ({}) removed", container_name)
def images(
    remove_images: List[str] = typer.Option(
        [],
        "--rm",
        "--remove",
        help="Remove the specified image(s)",
        show_default=False,
        shell_complete=Application.autocomplete_submodule,
    ),
) -> None:
    """List (and optionally delete) the images stored in the local registry.

    All repositories/tags are fetched through the Docker Registry HTTP API
    and printed as a table. Images matching --rm/--remove (by id or by
    repository:tag) are deleted, after which the registry garbage collector
    is executed and the registry container restarted.
    """
    Application.print_command(
        Application.serialize_parameter("--remove", remove_images, IF=remove_images),
    )
    Application.get_controller().controller_init()
    # The registry uses a self-signed certificate: silence urllib3 warnings
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)  # type: ignore
    # https://docs.docker.com/registry/spec/api/
    docker = Docker()
    docker.registry.ping()
    registry = docker.registry.get_host()
    host = f"https://{registry}"
    # Docker Registry API Reference
    # https://docs.docker.com/registry/spec/api/
    # Retrieve a sorted, json list of repositories available in the registry
    r = docker.registry.send_request(f"{host}/v2/_catalog")
    catalog = r.json()
    # Each entry: (digest, repository, tag, size in bytes, creation date)
    images: List[Tuple[str, str, str, int, Optional[datetime]]] = []
    for repository in catalog.get("repositories", {}):
        # Fetch the tags under the repository identified by <name>
        r = docker.registry.send_request(f"{host}/v2/{repository}/tags/list")
        # tags can be None if all the tags of a repository have deleted
        # this or ensure that every None will be converted in an empty dictionary
        tags = r.json().get("tags") or {}
        for tag in tags:
            # Fetch the manifest identified by name and reference
            r = docker.registry.send_request(
                f"{host}/v2/{repository}/manifests/{tag}"
            )
            manifest = r.json()
            # Total image size = sum of all layer sizes
            size = 0
            for layer in manifest.get("layers", []):
                size += layer.get("size", 0)
            headers = r.headers
            _id = cast(str, headers.get("Docker-Content-Digest", "N/A"))
            # Creation date is only available on schema version 1 :\
            r = docker.registry.send_request(
                f"{host}/v2/{repository}/manifests/{tag}", version="1"
            )
            manifest = r.json()
            layers = manifest.get("history", [])
            created: Optional[datetime] = None
            if len(layers) > 0:
                first_layer = json.loads(layers[0].get("v1Compatibility", {}))
                creation_date = first_layer.get("created", "N/A")
                if creation_date != "N/A":
                    # Keep only the "YYYY-MM-DDTHH:MM:SS" prefix (drop
                    # fractional seconds / timezone suffix)
                    creation_date = creation_date[0:19]
                    created = datetime.strptime(creation_date, "%Y-%m-%dT%H:%M:%S")
            images.append(
                (_id, cast(str, repository), cast(str, tag), size, created)
            )
    if not images:
        log.warning("This registry contains no images")
    else:
        log.info("This registry contains {} image(s):", len(images))
        images_to_be_removed: List[Tuple[str, str, str]] = []
        table: List[List[str]] = []
        for img in images:
            digest = img[0]
            # to be replaced with removeprefix starting from py39
            if digest.startswith("sha256:"):
                digest = digest[7:]
            _id = digest[0:12]
            repository = img[1]
            tag = img[2]
            SIZE = system.bytes_to_str(img[3])
            d = img[4]
            # An image is selected for removal by short id or repository:tag
            to_be_removed = (
                _id in remove_images or f"{repository}:{tag}" in remove_images
            )
            creation_date = d.strftime("%Y-%m-%d %H:%M:%S") if d else "N/A"
            image_line: List[str] = []
            if to_be_removed:
                # Highlight rows that are about to be deleted
                image_line.append(RED(repository))
                image_line.append(RED(tag))
                image_line.append(RED(_id))
                image_line.append(RED(creation_date))
                image_line.append(RED(SIZE))
                # NOTE(review): this assignment appears unused — the row was
                # already appended above; confirm whether it can be dropped
                creation_date = "DELETING ..."
                images_to_be_removed.append((repository, digest, tag))
            else:
                image_line.append(repository)
                image_line.append(tag)
                image_line.append(_id)
                image_line.append(creation_date)
                image_line.append(SIZE)
            table.append(image_line)
        print("")
        print(
            tabulate(
                table,
                tablefmt=TABLE_FORMAT,
                headers=["REPOSITORY", "TAG", "IMAGE ID", "CREATED", "SIZE"],
            )
        )
        if len(remove_images) != len(images_to_be_removed):
            log.error(
                "Some of the images that you specified are not found in this registry"
            )
        # DELETE /v2/<name>/manifests/<reference>
        for image in images_to_be_removed:
            repository = image[0]
            reference = image[1]  # digest without sha256:
            tag = image[2]
            # For deletes reference must be a digest or the delete will fail
            r = docker.registry.send_request(
                f"{host}/v2/{repository}/manifests/sha256:{reference}",
                method="DELETE",
            )
            log.info("Image {}:{} deleted from {}", repository, tag, host)
        if images_to_be_removed:
            log.info("Executing registry garbage collector...")
            command = "/bin/registry garbage-collect -m /etc/docker/registry/config.yml"
            docker.exec_command("registry", user="******", command=command)
            log.info("Registry garbage collector successfully executed")
            # A restart is needed to prevent clashes beetween gc and cache
            # https://gist.github.com/jaytaylor/86d5efaddda926a25fa68c263830dac1#gistcomment-3653760
            # The garbage collector doesn't communicate with the cache, or unlink layers
            # from the repository so if you immediately try to repush a layer that was
            # just deleted, the registry will find it for stat calls, but actually
            # serving the blob will fail.
            docker.client.container.restart("registry")
            log.info("Registry restarted to clean the layers cache")
def check_placeholders_and_passwords(
    compose_services: ComposeServices, active_services: List[str]
) -> None:
    """Verify that no active service has placeholder variables or weak passwords.

    Placeholder variables left in the configuration are reported as a table
    and abort the process; *_PASSWORD variables are scored with zxcvbn and
    weak ones are logged with increasing severity.
    """
    if not active_services:  # pragma: no cover
        print_and_exit(
            """You have no active service \nSuggestion: to activate a top-level service edit your project_configuration and add the variable "ACTIVATE_DESIREDSERVICE: 1" """
        )
    elif Configuration.check:
        log.info("Active services: {}", ", ".join(active_services), log_to_file=True)
    # In swarm mode the registry is implicitly required even if not active
    extra_services: List[str] = []
    if Configuration.swarm_mode and REGISTRY not in active_services:
        extra_services.append(REGISTRY)
    all_services = active_services + extra_services
    # variable name -> services where it is still a placeholder
    missing: Dict[str, Set[str]] = {}
    # variable name -> password value
    passwords: Dict[str, str] = {}
    # variable name -> services using that password
    passwords_services: Dict[str, Set[str]] = {}
    for service_name in all_services:
        # This can happens with `rapydo run swagger` because in case of run
        # the controller_init method is executed without passing the service
        # This is because interfaces are not enabled on the base stack and the
        # controller_init([service]) would fail
        # As side effect, non-existing services are not blocked
        if service_name not in compose_services:
            continue
        service = compose_services[service_name]
        if service:
            for key, value in service.environment.items():
                if str(value) == PLACEHOLDER:
                    key = services.normalize_placeholder_variable(key)
                    missing.setdefault(key, set())
                    missing[key].add(service_name)
                elif key.endswith("_PASSWORD") and value:
                    key = services.normalize_placeholder_variable(key)
                    passwords.setdefault(key, value)
                    passwords_services.setdefault(key, set())
                    passwords_services[key].add(service_name)
    placeholders = []
    for variable, raw_services in missing.items():
        # Prefer the canonical variable->services mapping when available
        serv = services.vars_to_services_mapping.get(variable) or raw_services
        active_serv = [s for s in serv if s in all_services]
        if active_serv:
            placeholders.append([variable, ", ".join(active_serv)])
    MIN_PASSWORD_SCORE = int(
        Application.env.get("MIN_PASSWORD_SCORE", 2)  # type: ignore
    )
    for variable, raw_services in passwords_services.items():
        serv = services.vars_to_services_mapping.get(variable) or raw_services
        active_serv = [s for s in serv if s in all_services]
        if active_serv:
            password = passwords.get(variable)
            # zxcvbn returns a score from 0 (weakest) to 4 (strongest)
            result = zxcvbn(password)
            score = result["score"]
            if score < MIN_PASSWORD_SCORE:
                # Severity grows with the distance from the minimum score
                if score == MIN_PASSWORD_SCORE - 1:
                    log.warning("The password used in {} is weak", variable)
                elif score == MIN_PASSWORD_SCORE - 2:
                    log.error("The password used in {} is very weak", variable)
                else:
                    log.critical(
                        "The password used in {} is extremely weak", variable
                    )
    if placeholders:
        log.critical(
            "The following variables are missing in your configuration:"
        )
        print("")
        print(
            tabulate(
                placeholders,
                tablefmt=TABLE_FORMAT,
                headers=["VARIABLE", "SERVICE(S)"],
            )
        )
        print("")
        log.info("You can fix this error by updating your .projectrc file")
        sys.exit(1)
    return None
def remove(
    services: List[str] = typer.Argument(
        None,
        help="Services to be removed",
        shell_complete=Application.autocomplete_service,
    ),
    rm_all: bool = typer.Option(
        False,
        "--all",
        help="Also remove persistent data stored in docker volumes",
        show_default=False,
    ),
) -> None:
    """Stop and remove services from the stack (compose mode).

    Extra services (registry, adminer, swaggerui) are removed as plain
    containers; the remaining services go through docker compose. With
    --all, named volumes (persistent data) are removed too when the whole
    stack is selected.
    """
    Application.print_command(
        Application.serialize_parameter("--all", rm_all, IF=rm_all),
        Application.serialize_parameter("", services),
    )
    remove_extras: List[str] = []
    for extra in (
        REGISTRY,
        "adminer",
        "swaggerui",
    ):
        if services and extra in services:
            # services is a tuple, even if defined as List[str] ...
            services = list(services)
            services.pop(services.index(extra))
            remove_extras.append(extra)
    Application.get_controller().controller_init(services)
    docker = Docker()
    if remove_extras:
        for extra_service in remove_extras:
            if not docker.client.container.exists(extra_service):
                log.error("Service {} is not running", extra_service)
                continue
            docker.client.container.remove(extra_service, force=True)
            log.info("Service {} removed", extra_service)
    # Nothing more to do
    if not services:
        return
    all_services = Application.data.services == Application.data.active_services
    if all_services and rm_all:
        # Networks are not removed, but based on docker compose down --help they should
        # Also docker-compose down removes network from what I remember
        # Should be reported as bug? If corrected a specific check in test_remove.py
        # will start to fail
        docker.client.compose.down(
            remove_orphans=False,
            remove_images="local",
            # Remove named volumes declared in the volumes section of the
            # Compose file and anonymous volumes attached to containers.
            volumes=rm_all,
        )
    else:
        # Important note: volumes=True only destroy anonymous volumes,
        # not named volumes like down should do
        docker.client.compose.rm(Application.data.services, stop=True, volumes=rm_all)
    log.info("Stack removed")