def register_auto_ts(self, new_name, old_name=None):
    """ Registers auto_ts feature """
    # Decide whether the AUTO_TECHSUPPORT_FEATURE table applies at all and
    # what the default 'state' for this feature should be.
    init_conn = self._sonic_db.get_initial_db_connector()
    (add_table, state) = self.infer_auto_ts_capability(init_conn)
    if not add_table:
        log.debug(
            "Skip adding AUTO_TECHSUPPORT_FEATURE table because no AUTO_TECHSUPPORT|GLOBAL entry is found"
        )
        return False

    base_cfg = DEFAULT_AUTO_TS_FEATURE_CONFIG.copy()
    base_cfg['state'] = state

    # Write the entry into every connected DB, migrating any existing entry
    # registered under the old feature name.
    for conn in self._sonic_db.get_connectors():
        entry = copy.deepcopy(base_cfg)
        if old_name:
            migrated = conn.get_entry(AUTO_TS_FEATURE, old_name)
            conn.set_entry(AUTO_TS_FEATURE, old_name, None)
            entry.update(migrated)
        conn.set_entry(AUTO_TS_FEATURE, new_name, entry)
    return True
def get_token(realm, service, scope) -> str:
    """ Retrieve an authentication token.

    Args:
        realm: Realm: url to request token.
        service: service to request token for.
        scope: scope to requests token for.
    Returns:
        token value as a string.
    """
    log.debug(f'getting authentication token: realm={realm} service={service} scope={scope}')

    token_response = requests.get(f'{realm}?scope={scope}&service={service}')
    if token_response.status_code != requests.codes.ok:
        raise AuthenticationServiceError('Failed to retrieve token')

    payload = json.loads(token_response.content)
    token = payload['token']
    expires_in = payload['expires_in']

    log.debug(f'authentication token for realm={realm} service={service} scope={scope}: '
              f'token={token} expires_in={expires_in}')
    return token
def get_token(bearer: Dict) -> str:
    """ Retrieve an authentication token.

    Args:
        bearer: Bearer token parameters. Must contain a 'realm' key with the
            URL to request the token from; the remaining keys are sent as
            query parameters. The caller's dict is not modified.
    Returns:
        token value as a string.
    Raises:
        AuthenticationServiceError: if 'realm' is missing or the token
            request does not return OK.
    """
    log.debug(f'getting authentication token {bearer}')
    if 'realm' not in bearer:
        # Was an f-string with no placeholders; plain string is equivalent.
        raise AuthenticationServiceError('Realm is required in bearer')

    # BUGFIX: work on a copy so pop() below does not mutate the caller's dict.
    bearer = dict(bearer)
    url = bearer.pop('realm')
    response = requests.get(url, params=bearer)
    if response.status_code != requests.codes.ok:
        raise AuthenticationServiceError('Failed to retrieve token')

    content = json.loads(response.content)
    token = content['token']
    expires_in = content['expires_in']

    log.debug(f'authentication token for bearer={bearer}: '
              f'token={token} expires_in={expires_in}')
    return token
def rmi(self, image: str, **kwargs):
    """ Docker 'rmi -f' command. """
    log.debug(f'removing image {image} kwargs={kwargs}')
    # Delegate removal to the docker SDK images collection.
    images = self.client.images
    images.remove(image, **kwargs)
    log.debug(f'image {image} removed successfully')
def labels(self, image: str):
    """ Returns a list of labels associated with image. """
    log.debug(f'inspecting image labels {image}')
    img = self.client.images.get(image)
    labels = img.labels
    log.debug(f'image {image} labels successfully: {labels}')
    return labels
def tag(self, image: str, repotag: str, **kwargs):
    """ Docker 'tag' command """
    log.debug(f'tagging image {image} {repotag} kwargs={kwargs}')
    # Look up the image and apply the new repo:tag in one chained call.
    self.client.images.get(image).tag(repotag, **kwargs)
    log.debug(f'image {image} tagged {repotag} successfully')
def blobs(self, repository: str, digest: str):
    """ Fetch a blob from the registry for the given repository and digest. """
    log.debug(f'retrieving blob for {repository}:{digest}')
    _, repository = reference.Reference.split_docker_domain(repository)

    blob_url = f'{self._get_base_url(repository)}/blobs/{digest}'
    accept_headers = {'Accept': self.MIME_DOCKER_MANIFEST}
    response = self._execute_get_request(blob_url, accept_headers)
    if response.status_code != requests.codes.ok:
        raise RegistryApiError(f'Failed to retrieve blobs for {repository}:{digest}', response)

    content = json.loads(response.content)
    log.debug(f'retrieved blob for {repository}:{digest}: {content}')
    return content
def tags(self, repository: str) -> List[str]:
    """ Retrieve the list of tags for a repository from the registry.

    Args:
        repository: repository name, optionally prefixed with a registry domain.
    Returns:
        List of tag names.
    Raises:
        RegistryApiError: if the registry returns a non-OK status.
    """
    log.debug(f'getting tags for {repository}')
    _, repository = reference.Reference.split_docker_domain(repository)
    headers = {'Accept': 'application/json'}
    url = f'{self._get_base_url(repository)}/tags/list'
    response = self._execute_get_request(url, headers)
    if response.status_code != requests.codes.ok:
        raise RegistryApiError(f'Failed to retrieve tags from {repository}', response)
    content = json.loads(response.content)
    # BUGFIX: message previously read 'tags list api response: f{content}' —
    # a stray literal 'f' left over from a misplaced f-string prefix.
    log.debug(f'tags list api response: {content}')
    return content['tags']
def _execute_get_request(url, headers):
    """ Execute a GET request, re-authenticating once on 401.

    Args:
        url: URL to request.
        headers: Request headers; not modified by this function.
    Returns:
        The requests Response object (of the retried request if
        authentication was performed).
    """
    response = requests.get(url, headers=headers)
    if response.status_code == requests.codes.unauthorized:
        # Get authentication details from headers
        # Registry should tell how to authenticate
        www_authenticate_details = response.headers['Www-Authenticate']
        log.debug(f'unauthorized: retrieving authentication details '
                  f'from response headers {www_authenticate_details}')
        bearer = www_authenticate.parse(www_authenticate_details)['bearer']
        token = AuthenticationService.get_token(**bearer)
        # BUGFIX: build a new dict instead of mutating the caller's headers.
        headers = {**headers, 'Authorization': f'Bearer {token}'}
        # Repeat request
        response = requests.get(url, headers=headers)
    return response
def get_shutdown_sequence(self, reboot_type: str, packages: Dict[str, Package]):
    """ Returns shutdown sequence file for particular reboot type.

    Args:
        reboot_type: Reboot type to generated service shutdown sequence for.
        packages: Dict of installed packages.
    Returns:
        Ordered list of service names.
    """

    # Dependency graph: service name -> set of services it must wait for
    # before shutting down.
    shutdown_graph = defaultdict(set)

    def service_exists(service):
        # A service participates in ordering only if some installed
        # package's manifest declares it.
        for package in packages.values():
            if package.manifest['service']['name'] == service:
                return True
        log.info(f'Service {service} is not installed, it is skipped...')
        return False

    def filter_not_available(services):
        # Drop references to services that are not installed.
        return set(filter(service_exists, services))

    for package in packages.values():
        service_props = package.manifest['service']
        # Per-reboot-type ordering hints, e.g. 'warm-shutdown'/'fast-shutdown'.
        after = filter_not_available(
            service_props[f'{reboot_type}-shutdown']['after'])
        before = filter_not_available(
            service_props[f'{reboot_type}-shutdown']['before'])

        if not after and not before:
            continue

        name = package.manifest['service']['name']
        # 'after' edges point from this service to its prerequisites;
        # 'before' edges are recorded as reversed 'after' edges on the peer.
        shutdown_graph[name].update(after)

        for service in before:
            shutdown_graph[service].update({name})

    log.debug(f'shutdown graph {pformat(shutdown_graph)}')

    try:
        # Topological sort flattens the graph into a linear shutdown order.
        order = toposort_flatten(shutdown_graph)
    except CircularDependencyError as err:
        raise ServiceCreatorError(
            f'Circular dependency found in {reboot_type} error: {err}')

    log.debug(f'shutdown order {pformat(order)}')
    return order
def manifest(self, repository: str, ref: str) -> Dict:
    """ Fetch the manifest for a repository at the given tag or digest. """
    log.debug(f'getting manifest for {repository}:{ref}')
    _, repository = reference.Reference.split_docker_domain(repository)

    manifest_url = f'{self._get_base_url(repository)}/manifests/{ref}'
    accept_headers = {'Accept': self.MIME_DOCKER_MANIFEST}
    response = self._execute_get_request(manifest_url, accept_headers)
    if response.status_code != requests.codes.ok:
        raise RegistryApiError(f'Failed to retrieve manifest for {repository}:{ref}', response)

    content = json.loads(response.content)
    log.debug(f'manifest content for {repository}:{ref}: {content}')
    return content
def run_command(command: str):
    """ Run arbitrary bash command.

    Args:
        command: String command to execute as bash script
    Raises:
        ServiceCreatorError: Raised when the command return code is not 0.
    """
    log.debug(f'running command: {command}')

    # stdout is piped so the command's output is captured rather than
    # leaking to the caller's terminal; the captured output is discarded.
    proc = subprocess.Popen(command,
                            shell=True,
                            executable='/bin/bash',
                            stdout=subprocess.PIPE)
    (out, _) = proc.communicate()
    if proc.returncode != 0:
        raise ServiceCreatorError(f'Failed to execute "{command}"')
def validate_config(self, config):
    """ Validate configuration through YANG.

    Args:
        config: Config DB data.
    Returns:
        None.
    Raises:
        Exception: if config does not pass YANG validation.
    """
    config = sonic_cfggen.FormatConverter.to_serialized(config)
    log.debug(f'validating configuration {pformat(config)}')
    # loadData() raises if the configuration is invalid.
    # NOTE: loadData() modifies the state of ConfigMgmt instance.
    # This is not desired for configuration validation only purpose.
    # Although the config loaded into ConfigMgmt instance is not
    # interesting in this application so we don't care.
    self.cfg_mgmt.loadData(config)
def uninstall_autogen_cli(self, package: Package, command: str):
    """ Uninstall autogenerated CLI plugins for package for particular command.

    Args:
        package: Package.
        command: Name of command to remove CLI.
    Returns:
        None
    """
    # Nothing to remove when the package ships no YANG module or did not
    # opt in to CLI auto-generation for this command.
    if package.metadata.yang_module_str is None:
        return
    auto_gen_key = f'auto-generate-{command}'
    cli_section = package.manifest['cli']
    if auto_gen_key not in cli_section or not cli_section[auto_gen_key]:
        return

    module_name = self.cfg_mgmt.get_module_name(
        package.metadata.yang_module_str)
    self.cli_gen.remove_cli_plugin(command, module_name)
    log.debug(
        f'{command} command line interface removed for {module_name}')
def render_template(in_template: str,
                    outfile: str,
                    render_ctx: Dict,
                    executable: bool = False):
    """ Template renderer helper routine.

    Args:
        in_template: Input file with template content
        outfile: Output file to render template to
        render_ctx: Dictionary used to generate jinja2 template
        executable: Set executable bit on rendered file
    """
    log.debug(
        f'Rendering {in_template} to {outfile} with {pformat(render_ctx)}')

    # Read and render first so the output file is only created on success.
    with open(in_template, 'r') as instream:
        template = jinja2.Template(instream.read())
    rendered = template.render(**render_ctx)

    with open(outfile, 'w') as outstream:
        outstream.write(rendered)

    if executable:
        set_executable_bit(outfile)
def pull(self, repository: str, reference: Optional[str] = None):
    """ Docker 'pull' command.

    Args:
        repository: repository to pull
        reference: tag or digest
    Returns:
        The pulled image object, looked up by '<repository>@<digest>'.
    """
    log.debug(f'pulling image from {repository} reference={reference}')

    api = self.client.api
    progress_manager = self.progress_manager
    digest = None

    # nullcontext keeps the 'with' statement valid when no progress
    # manager is configured.
    with progress_manager or contextlib.nullcontext():
        for line in api.pull(repository, reference, stream=True, decode=True):
            log.debug(f'pull status: {line}')

            status = get_status(line)

            # Record pulled digest
            digest_match = re.match(r'Digest: (?P<sha>.*)', status)
            if digest_match:
                digest = digest_match.groupdict()['sha']

            if progress_manager:
                process_progress(progress_manager, line)

    # NOTE(review): if the daemon never reports a 'Digest:' status line,
    # digest stays None and the lookup below uses '<repository>@None' —
    # confirm this cannot happen for callers of this method.
    log.debug(f'Digest: {digest}')
    log.debug(
        f'image from {repository} reference={reference} pulled successfully'
    )
    return self.get_image(f'{repository}@{digest}')
def load(self, imgpath: str):
    """ Docker 'load' command.

    Args:
        imgpath: Path to the saved image archive to load.
    Returns:
        The loaded image object, looked up by repo:tag when the daemon
        reported one, otherwise by image ID.
    """
    log.debug(f'loading image from {imgpath}')

    api = self.client.api
    progress_manager = self.progress_manager
    imageid = None
    repotag = None

    # nullcontext keeps the 'with' statement valid when no progress
    # manager is configured.
    with progress_manager or contextlib.nullcontext():
        with open(imgpath, 'rb') as imagefile:
            for line in api.load_image(imagefile, quiet=False):
                log.debug(f'pull status: {line}')

                if progress_manager:
                    process_progress(progress_manager, line)

                if 'stream' not in line:
                    continue

                # The daemon reports either a repo:tag or a raw image ID
                # in the 'stream' field; capture whichever appears.
                stream = line['stream']
                repotag_match = re.match(
                    r'Loaded image: (?P<repotag>.*)\n', stream)
                if repotag_match:
                    repotag = repotag_match.groupdict()['repotag']
                imageid_match = re.match(
                    r'Loaded image ID: sha256:(?P<id>.*)\n', stream)
                if imageid_match:
                    imageid = imageid_match.groupdict()['id']

    # Prefer the human-readable repo:tag when available.
    imagename = repotag if repotag else imageid
    log.debug(f'Loaded image {imagename}')

    return self.get_image(imagename)
def validate_package_tree(packages: Dict[str, Package]):
    """ Verify that all dependencies are met in all packages passed to this function.

    Args:
        packages: list of packages to check
    Raises:
        PackageDependencyError: if dependency is missing
        PackageConflictError: if there is a conflict between packages
    """
    for name, package in packages.items():
        log.debug(f'checking dependencies for {name}')
        for dependency in package.manifest['package']['depends']:
            dependency_package = packages.get(dependency.name)
            if dependency_package is None:
                raise PackageDependencyError(package.name, dependency)

            installed_version = dependency_package.version
            log.debug(
                f'dependency package is installed {dependency.name}: {installed_version}'
            )
            if not dependency.constraint.allows(installed_version):
                raise PackageDependencyError(package.name, dependency, installed_version)

            dependency_components = dependency.components
            if not dependency_components:
                # No explicit component constraints declared: implicitly
                # require the dependency's components to be compatible
                # (same major.minor) with this package's own components.
                dependency_components = {}
                for component, version in package.components.items():
                    implicit_constraint = VersionConstraint.parse(
                        f'^{version.major}.{version.minor}.0')
                    dependency_components[component] = implicit_constraint

            for component, constraint in dependency_components.items():
                if component not in dependency_package.components:
                    raise PackageComponentDependencyError(
                        package.name, dependency, component, constraint)

                component_version = dependency_package.components[component]
                log.debug(
                    f'dependency package {dependency.name}: '
                    f'component {component} version is {component_version}')
                if not constraint.allows(component_version):
                    raise PackageComponentDependencyError(
                        package.name, dependency, component, constraint,
                        component_version)

        log.debug(f'checking conflicts for {name}')
        for conflict in package.manifest['package']['breaks']:
            conflicting_package = packages.get(conflict.name)
            if conflicting_package is None:
                continue

            installed_version = conflicting_package.version
            log.debug(
                f'conflicting package is installed {conflict.name}: {installed_version}'
            )
            if conflict.constraint.allows(installed_version):
                raise PackageConflictError(package.name, conflict, installed_version)

            # BUGFIX: iterate the conflict's declared component constraints
            # (conflict.components), not the installed package's component
            # versions — the old loop yielded (component, Version) pairs, so
            # 'constraint' was a Version and the membership guard was
            # vacuously true. Mirrors the dependency-checking loop above.
            for component, constraint in conflict.components.items():
                if component not in conflicting_package.components:
                    continue

                component_version = conflicting_package.components[component]
                log.debug(
                    f'conflicting package {conflict.name}: '
                    f'component {component} version is {component_version}')
                if constraint.allows(component_version):
                    # BUGFIX: report the conflict object, not the leftover
                    # 'dependency' variable from the previous loop.
                    raise PackageComponentConflictError(
                        package.name, conflict, component, constraint,
                        component_version)
def rm(self, container: str, **kwargs):
    """ Docker 'rm' command. """
    # Look the container up first, then remove it with any extra options.
    target = self.client.containers.get(container)
    target.remove(**kwargs)
    log.debug(f'removed container {container}')