def install_by_version(cls, gadgets, context=None, verbose=False):
    """Install Linux kernel with specified version.

    Args:
        gadgets: Kernel gadgets (e.g. kernel).
        context: Currently not used.
        verbose: Verbose or not.

    Returns:
        Boolean indicating whether kernel is successfully installed or not.
    """
    # refer to
    # https://blog.csdn.net/u013431916/article/details/82530523
    # https://wiki.ubuntu.com/KernelTeam/KernelMaintenance
    # and
    # https://en.wikipedia.org/wiki/Linux_kernel#Version_numbering
    version = gadgets[0]['version']
    color_print.debug('switching kernel by version')
    for repo in config.kernel_apt_repo_entries:
        cls._add_apt_repository(repo_entry=repo, verbose=verbose)
    if cls._is_version_available_in_apt(version, verbose=verbose):
        return cls._install_by_version_with_apt(version, verbose=verbose)
    else:
        color_print.warning(
            'no apt package for kernel %s' % version)
        if version.endswith('.0'):
            # BUG FIX: str.rstrip('.0') strips any run of trailing '.'
            # and '0' characters (e.g. '5.10.0' -> '5.1'), not the
            # literal '.0' suffix; slice the suffix off explicitly.
            version = version[:-len('.0')] + '-'
        return cls._install_by_version_with_download(
            version, verbose=verbose)
def allocate_ports(entries):
    """Allocate new node ports for services.

    Args:
        entries: List of services names or paths.

    Returns:
        List of ports allocated.
    """
    with open(config.runtime_host_ports_usage_file, 'r') as usage_file:
        loaded = yaml.load(usage_file, Loader=yaml.SafeLoader)
    ports_usage = loaded if loaded else []
    occupied = [record['port'] for record in ports_usage]
    allocated = []
    for svc_entry in entries:
        new_port = _get_next_available_port(
            config.runtime_host_port_lower_bound, occupied)
        color_print.debug(
            'node port {port} is allocated for service in {svc_file}'.
            format(port=new_port, svc_file=svc_entry))
        allocated.append(new_port)  # will be returned
        # will be write back into host_ports_usage file
        ports_usage.append({
            'name': svc_entry,
            'port': new_port,
        })
        # will not be allocated in the next iteration
        occupied.append(new_port)
    with open(config.runtime_host_ports_usage_file, 'w') as usage_file:
        yaml.dump(ports_usage, usage_file)
    return allocated
def _add_apt_repository(cls, repo_entry, gpg_url=None, verbose=False):
    """Add an apt repository, optionally importing its GPG key first.

    Returns True on success, False if any command fails.
    """
    stdout, stderr = verbose_func.verbose_output(verbose)
    color_print.debug('adding apt repository %s' % repo_entry)
    try:
        if gpg_url:
            # fetch the repository key and feed it to apt-key via stdin
            curl_cmd = 'curl -fsSL {gpg_url}'.format(
                gpg_url=gpg_url).split()
            curl_res = subprocess.run(curl_cmd,
                                      stdout=subprocess.PIPE,
                                      stderr=stderr,
                                      check=True)
            subprocess.run(cls.cmd_apt_add_key,
                           input=curl_res.stdout,
                           stdout=stdout,
                           stderr=stderr,
                           check=True)
        # add apt repository
        add_repo_cmd = 'add-apt-repository\n' \
            '{repo_entry}'.format(repo_entry=repo_entry).split('\n')
        subprocess.run(add_repo_cmd, stdout=stdout,
                       stderr=stderr, check=True)
        return True
    except subprocess.CalledProcessError:
        return False
def _install_flannel(cls, k8s_version, context, mappings=None, verbose=False):
    """Install the flannel CNI plugin for the given Kubernetes version."""
    # refer to
    # https://github.com/coreos/flannel/blob/master/Documentation/kubernetes.md#older-versions-of-kubernetes
    # and
    # https://github.com/coreos/flannel#getting-started-on-kubernetes
    color_print.debug('installing flannel')
    parsed = version.parse(k8s_version)
    domestic = context.get('domestic', False)
    if version.parse('1.6') <= parsed <= version.parse('1.15'):
        cls._pull_quay_image(config.flannel_image_k8s_1_6_to_1_15,
                             domestic=domestic,
                             mappings=mappings,
                             verbose=verbose)
        for manifest in (config.flannel_yaml_k8s_1_6_to_1_15_rbac,
                         config.flannel_yaml_k8s_1_6_to_1_15):
            cls._create_k8s_resources(manifest, verbose=verbose)
        return
    cls._pull_quay_image(config.flannel_image_k8s_from_1_16,
                         domestic=domestic,
                         mappings=mappings,
                         verbose=verbose)
    one_sixteen = version.parse('1.16')
    if parsed.major == one_sixteen.major and parsed.minor == one_sixteen.minor:
        # any 1.16.x release uses the dedicated 1.16 manifest
        cls._create_k8s_resources(config.flannel_yaml_k8s_16,
                                  verbose=verbose)
    elif parsed > one_sixteen:
        cls._create_k8s_resources(config.flannel_yaml_k8s_over_16,
                                  verbose=verbose)
def download_file(url, save_path, proxies=None):
    """Download file from URL.

    Download file from URL and save it locally.

    Args:
        url: File's URL.
        save_path: Path where file will be saved.
        proxies: HTTP proxy if necessary.

    Returns:
        None.
    """
    # refer to
    # https://zhuanlan.zhihu.com/p/106309634
    color_print.debug('downloading {url} to {dst}'.format(url=url,
                                                          dst=save_path))
    res = requests.get(url, stream=True, proxies=proxies)
    # ROBUSTNESS FIX: servers may omit Content-Length (e.g. chunked
    # transfer encoding); the original int(None) raised TypeError.
    # tqdm accepts total=None and simply shows no percentage.
    content_length = res.headers.get('content-length')
    total_length = int(int(content_length) / 1024) + \
        1 if content_length else None
    with open(save_path, 'wb') as f:
        bar = tqdm(iterable=res.iter_content(chunk_size=1024),
                   total=total_length,
                   unit='k',
                   desc='downloading process',
                   ncols=80)
        for chunk in bar:
            if chunk:
                f.write(chunk)
def reload_and_restart_docker(verbose=False):
    """Reload configurations and restart Docker.

    systemctl daemon-reload && systemctl restart docker

    Args:
        verbose: Verbose or not.

    Returns:
        Boolean indicating whether configurations is successfully reload
        and Docker is successfully restarted or not.
    """
    # reload docker daemon configurations
    if not system_func.reload_daemon_config(verbose=verbose):
        return False
    stdout, stderr = verbose_func.verbose_output(verbose)
    color_print.debug('restarting docker')
    restart_cmd = 'systemctl restart docker'.split()
    try:
        subprocess.run(restart_cmd, stdout=stdout,
                       stderr=stderr, check=True)
    except subprocess.CalledProcessError:
        color_print.error('failed to restart docker')
        return False
    return True
def install_cni_plugin(cls, k8s_version, context, mappings=None, verbose=False):
    """Install CNI plugin.

    Install a CNI plugin specified in context for the current
    Kubernetes cluster.

    Args:
        k8s_version: Version of the current Kubernetes cluster.
        context: Context of installation process.
        mappings: Dict used to store info which will be used to
            generate worker script later.
        verbose: Verbose or not.

    Returns:
        None.
    """
    color_print.debug('installing cni plugin')
    # dispatch on the plugin name declared in the context
    installers = {
        'flannel': CNIPluginInstaller._install_flannel,
        'calico': CNIPluginInstaller._install_calico,
        'cilium': CNIPluginInstaller._install_cilium,
    }
    installer = installers.get(context.get('cni_plugin', None))
    if installer is not None:
        installer(k8s_version=k8s_version,
                  context=context,
                  mappings=mappings,
                  verbose=verbose)
def _install_one_gadget_by_version(cls, name, version, mappings=None, verbose=False):
    """Install one apt package pinned to the resolved candidate version.

    Returns True on success, False if no candidate exists or apt fails.
    """
    stdout, stderr = verbose_func.verbose_output(verbose)
    # get complete version, e.g. 18.03.1~ce-0~ubuntu
    complete_version = cls._get_apt_complete_version(name, version,
                                                     verbose=verbose)
    if not complete_version:
        color_print.warning('no candidate version for %s' % name)
        return False
    color_print.debug(
        'installing {gadget} with {version} version'.format(
            gadget=name, version=complete_version))
    # install with the specified version
    install_cmd = copy.copy(cls.cmd_apt_install)
    install_cmd.append('{name}={version}'.format(
        name=name, version=complete_version))
    try:
        subprocess.run(install_cmd, stderr=stderr,
                       stdout=stdout, check=True)
    except subprocess.CalledProcessError:
        return False
    if mappings:
        mappings[name] = complete_version
    return True
def delete_vuln_resources_in_k8s(vuln, verbose=False):
    """Delete resources related to one vulnerability.

    Delete resources related to one vulnerability specified by args.vuln
    from the current Kubernetes cluster.

    Args:
        vuln: Information dict about one vulnerability and its
            resources' locations.
        verbose: Verbose or not.

    Returns:
        None.
    """
    color_print.debug(
        '{vuln} is going to be removed'.format(vuln=vuln['name']))
    manifests = [os.path.join(vuln['path'], dep)
                 for dep in vuln['dependencies']['yamls']]
    if not KubernetesResourceDeployer.delete(manifests, verbose=verbose):
        color_print.error('failed to remove {v}'.format(v=vuln['name']))
        return
    # remove port record if applicable
    svc_manifests = [path for path in manifests
                     if path.endswith('-service.yaml')]
    if svc_manifests:
        # release ports not used any more
        port_manager.release_ports(svc_manifests)
    color_print.debug('{v} successfully removed'.format(v=vuln['name']))
def _configure_docker_with_kata(cls, base_dir, recover=False):
    """Add or remove kata runtime entries in /etc/docker/daemon.json.

    With recover=True the kata entries are stripped; otherwise the four
    kata runtimes are registered and made the default. Returns True.
    """
    # configure /etc/docker/daemon.json
    color_print.debug('modifying /etc/docker/daemon.json')
    system_func.create_file_if_not_exist('/etc/docker/daemon.json')
    try:
        with open('/etc/docker/daemon.json', 'r') as f:
            daemon_config = json.loads(f.read())
    except json.decoder.JSONDecodeError:
        # empty or malformed file: start from scratch
        daemon_config = dict()
    if recover:
        # used when removing kata-containers
        daemon_config.pop('runtimes', None)
        daemon_config.pop('default-runtime', None)
    else:
        # used when installing kata-containers; the four runtime
        # binaries all live under {base_dir}/bin/ and share a naming
        # pattern, so build the entries from the flavor list
        daemon_config['runtimes'] = {
            'kata-{flavor}'.format(flavor=flavor): {
                'path': '{base_dir}/bin/kata-{flavor}'.format(
                    base_dir=base_dir, flavor=flavor)
            }
            for flavor in ('runtime', 'clh', 'qemu', 'fc')
        }
        daemon_config['default-runtime'] = 'kata-runtime'
    with open('/etc/docker/daemon.json', 'w') as f:
        f.write(json.dumps(daemon_config))
    return True
def _modify_grub(cls, version=None, recover=False, verbose=False):
    """Point GRUB_DEFAULT at the given kernel entry and run update-grub.

    With recover=True, GRUB_DEFAULT is reset to '0'.
    """
    stdout, stderr = verbose_func.verbose_output(verbose)
    # edit grub
    color_print.debug('modifying grub config file')
    if recover:
        # recover grub
        grub_option = '0'
    else:
        grub_option = ('\"Advanced options for Ubuntu>Ubuntu, '
                       'with Linux {version}\"').format(version=version)
    # newline-delimited template keeps the sed expression intact
    # even though it contains spaces
    sed_cmd = ('sed\n-i\ns/^GRUB_DEFAULT=.*$/'
               'GRUB_DEFAULT={grub_option}/\n/etc/default/grub').format(
        grub_option=grub_option).split('\n')
    subprocess.run(sed_cmd, stdout=stdout, stderr=stderr, check=True)
    # update grub
    color_print.debug('updating grub')
    subprocess.run(cls.cmd_update_grub, stdout=stdout,
                   stderr=stderr, check=True)
def uninstall(cls, verbose=False): """Uninstall Kata-containers. Args: verbose: Verbose or not. Returns: None. """ # currently, metarget only supports docker # in the future more CRIs will be supported # 1. configure /etc/docker/daemon.json if not cls._configure_docker_with_kata( base_dir=config.kata_tar_decompress_dest, recover=True): color_print.error( 'failed to remove kata-containers configurations') return False # 2. reload daemon configurations and restart docker if not cls.reload_and_restart_docker(verbose=verbose): return False # 3. remove /etc/kata-containers/ color_print.debug( 'removing {kata_config_dir}'.format( kata_config_dir=config.kata_config_dir)) rmtree(path=config.kata_config_dir, ignore_errors=True) # 4. remove /opt/kata/ color_print.debug('removing {kata_dst}'.format( kata_dst=config.kata_tar_decompress_dest)) rmtree(path=config.kata_tar_decompress_dest, ignore_errors=True) return True
def reload_daemon_config(verbose=False):
    """Run `systemctl daemon-reload`; return True on success, else False."""
    color_print.debug('reloading daemon configurations')
    stdout, stderr = verbose_func.verbose_output(verbose)
    reload_cmd = 'systemctl daemon-reload'.split()
    try:
        subprocess.run(reload_cmd, stdout=stdout,
                       stderr=stderr, check=True)
    except subprocess.CalledProcessError:
        color_print.error('failed to reload daemon configurations')
        return False
    return True
def deploy_vuln_resources_in_k8s(vuln, external=False, verbose=False):
    """Deploy resources related to one vulnerability.

    Deploy resources related to one vulnerability specified by args.vuln
    in the current Kubernetes cluster.

    Args:
        vuln: Information dict about one vulnerability and its
            resources' locations.
        external: Expose service through NodePort or not
            (ClusterIP by default).
        verbose: Verbose or not.

    Returns:
        None.
    """
    color_print.debug(
        '{vuln} is going to be installed'.format(vuln=vuln['name']))
    resource_files = [os.path.join(vuln['path'], dep)
                      for dep in vuln['dependencies']['yamls']]
    # if services need to be exposed externally, modify yaml
    # and change type from ClusterIP to NodePort
    if external:
        svc_files = [path for path in resource_files
                     if path.endswith('-service.yaml')]
        if svc_files:
            # remove services from yamls
            resource_files = [path for path in resource_files
                              if not path.endswith('-service.yaml')]
            # allocate ports on host
            host_ports = port_manager.allocate_ports(entries=svc_files)
            # generate new yamls using nodeport in svc yamls
            nodeport_svc_files = \
                resource_modifier.generate_svcs_with_clusterip_to_nodeport(
                    yamls=svc_files, ports=host_ports)
            # add updated services into original yamls
            resource_files.extend(nodeport_svc_files)
    # create namespace metarget in k8s if it is not created yet
    if not KubernetesResourceDeployer.apply(
            resources_list=[config.k8s_metarget_namespace_file],
            verbose=verbose):
        color_print.error_and_exit(
            'failed to create namespace {nm}'.format(
                nm=config.k8s_metarget_namespace))
    if KubernetesResourceDeployer.apply(
            resources_list=resource_files, verbose=verbose):
        color_print.debug(
            '{v} successfully installed'.format(v=vuln['name']))
    else:
        color_print.error(
            'failed to install {v}'.format(v=vuln['name']))
def _pull_domestic_image(cls, image, ori_prefix, new_prefix, mappings=None, verbose=False):
    """Pull an image from a domestic mirror, then re-tag it as the original.

    Records mirror-name -> original-name in mappings when provided.
    """
    mirror_image = image.replace(ori_prefix, new_prefix)
    if mappings:
        mappings[mirror_image] = image
    if cls._image_exist(image):
        color_print.debug('%s already pulled' % image)
        return
    cls._pull_image(mirror_image, verbose=verbose)
    cls._tag_image(mirror_image, image)
def download_package_list():
    """Download Ubuntu kernels packages list.

    This function will download package list of Ubuntu kernel
    from kernel repository
    (e.g. https://kernel.ubuntu.com/~kernel-ppa/mainline/)
    and store package names and URLs locally for further usage.

    Returns:
        None.
    """
    color_print.debug('downloading kernel package list')
    index_page = requests.get(config.ubuntu_kernel_repo)
    index_soup = BeautifulSoup(index_page.text, 'html.parser')
    rows = index_soup.table.find_all("tr")
    # rows [4:-1] hold the kernel entries; keep only version links
    kernels = [row.a.get_text() for row in rows[4:-1]
               if row.a.get_text().startswith('v')]
    packages = dict()
    for kernel in kernels:
        color_print.debug('downloading info for kernel %s' % kernel)
        kernel_page = requests.get(config.ubuntu_kernel_repo + kernel)
        kernel_soup = BeautifulSoup(kernel_page.text, 'html.parser')
        link_names = set(anchor.get_text()
                         for anchor in kernel_soup.find_all('a'))
        package_links = [(config.ubuntu_kernel_repo + kernel + name)
                         for name in link_names
                         if filter_name_by_regex(name)]
        packages[kernel] = package_links
        color_print.debug('kernel info downloaded:')
        color_print.debug(json.dumps(package_links))
    with open(config.kernel_packages_list, 'w') as f:
        yaml.dump(packages, f)
def _pre_install(cls, mappings=None, verbose=False):
    """Install Kubernetes prerequisites and register its apt repository.

    Args:
        mappings: Optional dict collecting repo info which will be used
            to generate the worker script later.
        verbose: Verbose or not.
    """
    color_print.debug('pre-installing')
    stdout, stderr = verbose_func.verbose_output(verbose)
    # install requirements
    cls._apt_update(verbose=verbose)
    subprocess.run(cls.cmd_apt_install + cls._kubernetes_requirements,
                   stdout=stdout, stderr=stderr, check=True)
    cls._add_apt_repository(gpg_url=config.k8s_apt_repo_gpg,
                            repo_entry=config.k8s_apt_repo_entry,
                            verbose=verbose)
    # incompatible with ustc repo because it has no gpg currently
    # ROBUSTNESS FIX: mappings defaults to None; the original
    # subscripted it unconditionally and crashed when omitted.
    if mappings is not None:
        mappings['gpg_url'] = config.k8s_apt_repo_gpg
        mappings['repo_entry'] = config.k8s_apt_repo_entry
def _install_by_version_with_apt(cls, version, verbose=False):
    """Install a kernel image of the given version via apt, then update grub.

    Returns True on success, False if any apt command fails.
    """
    color_print.debug('switching kernel version with apt')
    stdout, stderr = verbose_func.verbose_output(verbose)
    try:
        # install image package
        package_name = cls._get_apt_complete_package(
            'linux-image',
            ['linux-image-extra-{version}'.format(version=version),
             'generic'],
            verbose=verbose)
        color_print.debug('installing kernel package %s' % package_name)
        # BUG FIX: str.lstrip('linux-image-extra-') strips any leading
        # run of characters from that set (it is NOT prefix removal)
        # and can mangle names such as 'linux-image-generic' -> 'c';
        # remove the literal prefix instead.
        for prefix in ('linux-image-extra-', 'linux-image-'):
            if package_name.startswith(prefix):
                version_suffix = package_name[len(prefix):]
                break
        else:
            version_suffix = package_name
        temp_cmd = copy.copy(cls.cmd_apt_install)
        temp_cmd.append(package_name)
        subprocess.run(temp_cmd, stdout=stdout, stderr=stderr, check=True)
        cls._modify_grub(version=version_suffix, verbose=verbose)
        return True
    except subprocess.CalledProcessError:
        return False
def _pull_image(cls, image, mappings=None, verbose=False):
    """Pull a docker image unless it is already present locally.

    Returns True/False for a pull attempt; returns None (implicitly)
    when the image is already pulled, matching the original behavior.
    """
    stdout, stderr = verbose_func.verbose_output(verbose)
    if mappings:
        mappings[image] = None
    if cls._image_exist(image):
        color_print.debug('%s already pulled' % image)
        return
    color_print.debug('pulling %s' % image)
    pull_cmd = 'docker pull {image}'.format(image=image).split()
    try:
        subprocess.run(pull_cmd, stdout=stdout,
                       stderr=stderr, check=True)
        return True
    except subprocess.CalledProcessError:
        return False
def _install_cilium(cls, k8s_version, context, mappings=None, verbose=False):
    """Install the cilium CNI plugin."""
    # refer to
    # https://docs.cilium.io/en/stable/concepts/kubernetes/requirements/#k8s-requirements
    # https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/
    # requirements:
    # Linux kernel >= 4.9
    # Kubernetes >= 1.12
    color_print.debug('installing cilium')
    domestic = context.get('domestic', False)
    for cilium_image in config.cilium_images:
        cls._pull_quay_image(cilium_image,
                             domestic=domestic,
                             mappings=mappings,
                             verbose=verbose)
    cls._create_k8s_resources(config.cilium_yaml, verbose=verbose)
def _pre_configure(cls, verbose=False):
    """Load br_netfilter, enable bridge-nf sysctls, and disable swap."""
    color_print.debug('pre-configuring')
    stdout, stderr = verbose_func.verbose_output(verbose)
    # make sure br_netfilter is loaded.
    subprocess.run(cls._cmd_modprobe, stdout=stdout,
                   stderr=stderr, check=True)
    # ensure net.bridge.bridge-nf-call-iptables
    sysctl_entries = ('net.bridge.bridge-nf-call-ip6tables = 1\n'
                      'net.bridge.bridge-nf-call-iptables = 1\n')
    with open('/etc/sysctl.d/k8s.conf', 'a') as f:
        f.write(sysctl_entries)
    # temporarily turn off swap
    subprocess.run(cls._cmd_swapoff, stdout=stdout,
                   stderr=stderr, check=True)
def remove(args):
    """Remove an installed cloud native vulnerability.

    Remove the vulnerable cloud native gadget with the cloud native
    vulnerability specified by args.cnv.

    Args:
        args.cnv: Name of the specified cloud native vulnerability.
        args.verbose: Verbose or not.

    Returns:
        None.
    """
    vulns = vuln_loader.load_vulns_by_dir(config.vuln_cn_dir_wildcard)
    vuln = filters.filter_vuln_by_name(vulns=vulns, name=args.cnv)
    if not vuln:
        color_print.error_and_exit(
            'no cloud native vulnerability named {cnv}'.format(cnv=args.cnv))
    if vuln['class'] in ('config', 'mount', 'no-vuln'):
        # CLEANUP: the original reloaded and re-filtered the identical
        # vulnerability list here (same wildcard, same name) right after
        # the lookup above; the redundant second pass has been removed.
        internal_cmds.delete_vuln_resources_in_k8s(vuln, verbose=args.verbose)
        return
    color_print.debug(
        '{vuln} is going to be removed'.format(vuln=vuln['name']))
    if vuln['class'].startswith('docker'):
        DockerInstaller.uninstall(verbose=args.verbose)
        color_print.debug('{v} successfully removed'.format(v=vuln['name']))
    if vuln['class'] == 'kubernetes':
        KubernetesInstaller.uninstall(verbose=args.verbose)
        color_print.debug('{v} successfully removed'.format(v=vuln['name']))
    if vuln['class'] == 'kata-containers':
        if KataContainersInstaller.uninstall(verbose=args.verbose):
            color_print.debug(
                '{v} successfully removed'.format(v=vuln['name']))
        else:
            color_print.error('failed to remove {v}'.format(v=vuln['name']))
    if vuln['class'] == 'kernel':
        color_print.warning(
            'removal of vulnerabilities in class {vuln_class} is unsupported'.
            format(vuln_class=vuln['class']))
    return
def _pre_install(cls, verbose=False):
    """Install docker prerequisites and register the apt repositories.

    Returns True on success, False if apt update/install fails.
    """
    stdout, stderr = verbose_func.verbose_output(verbose)
    # install requirements
    color_print.debug('installing prerequisites')
    try:
        if not cls._apt_update(verbose=verbose):
            return False
        subprocess.run(cls.cmd_apt_install + cls._docker_requirements,
                       stdout=stdout, stderr=stderr, check=True)
    except subprocess.CalledProcessError:
        return False
    cls._add_apt_repository(gpg_url=config.docker_apt_repo_gpg,
                            repo_entry=config.docker_apt_repo_entry,
                            verbose=verbose)
    for containerd_repo in config.containerd_apt_repo_entries:
        cls._add_apt_repository(repo_entry=containerd_repo,
                                verbose=verbose)
    return True
def _act(cls, resources_list, action=None, verbose=False):
    """Run `kubectl <action> -f <resource>` for every listed resource.

    Returns True if all resources succeed, False on the first failure.
    """
    stdout, stderr = verbose_func.verbose_output(verbose)
    base_cmd = 'kubectl {action} -f'.format(action=action).split()
    for resource in resources_list:
        # strip trailing 'e' so 'create'/'delete' read as
        # 'creating'/'deleting' in the log line
        color_print.debug('{action}ing {res}'.format(
            action=action.strip('e'), res=resource))
        try:
            subprocess.run(base_cmd + [resource],
                           stdout=stdout, stderr=stderr, check=True)
        except subprocess.CalledProcessError:
            color_print.error(
                'failed to {action} resources in {res}'.format(
                    action=action, res=resource))
            return False
    return True
def docker_installed(verbose=False):
    """Check whether Docker has been installed.

    Args:
        verbose: Verbose or not.

    Returns:
        If Docker has been installed, return True, else False.
    """
    _, stderr = verbose_func.verbose_output(verbose)
    version_cmd = 'docker version'.split()
    try:
        subprocess.run(version_cmd,
                       stdout=subprocess.PIPE,
                       stderr=stderr,
                       check=True)
        color_print.debug('docker already installed')
        return True
    except (FileNotFoundError, AttributeError, IndexError,
            subprocess.CalledProcessError):
        # FileNotFoundError: docker binary absent;
        # CalledProcessError: daemon not responding
        return False
def remove(args):
    """Remove an installed cloud native gadget.

    Args:
        args.gadget: Name of the specified cloud native gadget.
        args.verbose: Verbose or not.

    Returns:
        None.
    """
    gadget = args.gadget
    if gadget == 'docker':
        DockerInstaller.uninstall(verbose=args.verbose)
        color_print.debug(
            '{gadget} successfully removed'.format(gadget=gadget))
    if gadget == 'k8s':
        KubernetesInstaller.uninstall(verbose=args.verbose)
        color_print.debug(
            '{gadget} successfully removed'.format(gadget=gadget))
    if gadget == 'kata':
        # kata uninstall reports success/failure, so branch on it
        if KataContainersInstaller.uninstall(verbose=args.verbose):
            color_print.debug(
                '{gadget} successfully removed'.format(gadget=gadget))
        else:
            color_print.error(
                'failed to remove {gadget}'.format(gadget=gadget))
    if gadget == 'kernel':
        color_print.warning(
            'removal of {gadget} is unsupported'.format(gadget=gadget))
def _install_by_version_with_download(cls, version, verbose=False):
    """Download kernel *.deb packages for the version and install via dpkg.

    Returns True on success, None when no package list is found, and
    False when a subprocess fails.
    """
    color_print.debug('switching kernel version with downloading packages')
    stdout, stderr = verbose_func.verbose_output(verbose)
    try:
        debs = cls._fetch_package_list_by_version(version, verbose=verbose)
        if not debs:
            return
        # download necessary *.deb and install
        temp_cmd = copy.copy(cls.cmd_dpkg_install)
        version_suffix = None
        for deb in debs:
            cls.download_file(deb, config.kernel_packages_dir)
            filename = deb.split('/')[-1]
            # BUG FIX: the path template ignored the downloaded file's
            # name (a literal '(unknown)' placeholder) although the
            # filename kwarg was passed; join prefix and filename so
            # dpkg receives the real package paths.
            temp_cmd.append(
                '{prefix}/{filename}'.format(
                    prefix=config.kernel_packages_dir,
                    filename=filename))
            if 'linux-image-' in filename:
                # get full version for further modification in grub
                try:
                    version_suffix = re.search(
                        r'linux-image-[a-z]*-?([\d].*?)_',
                        filename).group(1)
                except AttributeError:
                    # failed to derive complete kernel version
                    pass
        color_print.debug('installing kernel packages')
        # installation of kernel may return nonzero, currently ignore them
        subprocess.run(temp_cmd, stdout=stdout, stderr=stderr, check=False)
        if version_suffix:
            color_print.debug('kernel version: %s' % version_suffix)
            cls._modify_grub(version=version_suffix)
        else:
            color_print.warning('failed to derive complete kernel version')
            color_print.warning('please update grub manually')
        return True
    except subprocess.CalledProcessError:
        return False
def _update_k8s_worker_script(cls, mappings, context, verbose=False):
    """Generate the shell script used to join worker nodes to the cluster.

    Args:
        mappings: Dict holding repo info, component versions and image
            name mappings; the bookkeeping keys are consumed
            destructively via pop() so that afterwards only image
            mappings (plus 'kubeadm_options') remain.
        context: Context of installation process ('cni_plugin' is used
            to decide which images the worker must pull).
        verbose: Verbose or not.

    Returns:
        None. The script is written to config.k8s_worker_script.
    """
    color_print.debug('generating kubernetes worker script')
    # pop() removes these keys so the loops below only iterate over
    # image-name entries
    final_mappings = {
        'gpg_url': mappings.pop('gpg_url'),
        'repo_entry': mappings.pop('repo_entry'),
        'kubernetes_cni_version': mappings.pop('kubernetes-cni'),
        'kubelet_version': mappings.pop('kubelet'),
        'kubeadm_version': mappings.pop('kubeadm'),
    }
    domestic = mappings.pop('domestic')
    cmds_pull_images = ''
    if domestic:
        # domestic mirror: pull mirror image (key) then tag it with the
        # official name (value); only pause/proxy/CNI images are needed
        # on workers
        for key, value in mappings.items():
            if 'pause' in key or 'proxy' in key or context.get(
                    'cni_plugin') in key:
                cmds_pull_images += '\ndocker pull {image}\n'.format(
                    image=key)
                cmds_pull_images += '\ndocker tag {old_name} {new_name}\n'.format(
                    old_name=key, new_name=value)
    else:
        for key, value in mappings.items():
            if value is None:  # official images
                if 'pause' in key or 'proxy' in key or context.get(
                        'cni_plugin') in key:
                    cmds_pull_images += '\ndocker pull {image}\n'.format(
                        image=key)
    final_mappings['cmds_pull_images'] = cmds_pull_images
    final_mappings['master_ip'] = cls.get_host_ip()
    token, ca_cert_hash = cls._get_kubeadm_token_and_hash(verbose=verbose)
    final_mappings['token'], final_mappings[
        'ca_cert_hash'] = token, ca_cert_hash
    final_mappings['kubeadm_options'] = mappings['kubeadm_options']
    # substitute the collected values into the worker template and
    # write the resulting join script
    with open(config.k8s_worker_template, 'r') as fr:
        with open(config.k8s_worker_script, 'w') as fw:
            worker_template = fr.read()
            data = Template(worker_template)
            res = data.safe_substitute(final_mappings)
            fw.write(res)
    color_print.debug('kubernetes worker script generated at %s' %
                      config.k8s_worker_script)
def _run_kubeadm(cls, k8s_version, context, mappings=None, verbose=False):
    """Run `kubeadm init` for the given Kubernetes version.

    Returns True on success, False if kubeadm fails.
    """
    color_print.debug('running kubeadm')
    stdout, stderr = verbose_func.verbose_output(verbose)
    init_cmd = 'kubeadm init'.split()
    init_cmd.append('--kubernetes-version={k8s_version}'.format(
        k8s_version=k8s_version))
    init_cmd.append(cls._kubeadm_common_options)
    if mappings:
        # remember the options so the worker script can reuse them
        mappings['kubeadm_options'] = cls._kubeadm_common_options
    cidr = context.get('pod_network_cidr', None)
    if cidr:
        init_cmd.append('--pod-network-cidr={cidr}'.format(cidr=cidr))
    try:
        subprocess.run(init_cmd,
                       stdout=stdout,
                       stderr=stderr,
                       check=True,
                       env=context.get('envs', None))
        return True
    except subprocess.CalledProcessError:
        color_print.error('failed to run kubeadm')
        return False
def _install_calico(cls, k8s_version, context, mappings=None, verbose=False):
    """Install the calico CNI plugin."""
    # refer to
    # https://github.com/operator-framework/operator-lifecycle-manager/issues/1818
    # calico only work for k8s 1.16+? (it is true in my cluster)
    # refer to
    # https://docs.projectcalico.org/getting-started/kubernetes/self-managed-onprem/onpremises
    color_print.debug('installing calico')
    domestic = context.get('domestic', False)
    for calico_image in config.calico_images:
        cls._pull_docker_image(calico_image,
                               domestic=domestic,
                               mappings=mappings,
                               verbose=verbose)
    parsed = version.parse(k8s_version)
    # seems not to work below 1.14
    if parsed < version.parse('1.14'):
        cls._create_k8s_resources(config.calico_yaml_below_1_14,
                                  verbose=verbose)
    else:
        cls._create_k8s_resources(config.calico_yaml_from_1_14,
                                  verbose=verbose)