def download_from_bootstrap_vm(self, remote_path, local_path):
    """Sync a path from the bootstrap VM down to the local machine.

    Uses rsync over a non-interactive ssh transport (host key checks
    disabled) authenticated with the key from the SSH_KEY env variable.
    """
    ssh_cmd = ("ssh -q -i {} "
               "-o StrictHostKeyChecking=no "
               "-o UserKnownHostsFile=/dev/null".format(os.environ["SSH_KEY"]))
    remote_spec = "capi@{}:{}".format(self.bootstrap_vm_public_ip, remote_path)
    # --delete mirrors the remote directory exactly into local_path.
    utils.run_shell_cmd([
        "rsync", "-a",
        "-e", '"{}"'.format(ssh_cmd),
        "--delete",
        remote_spec,
        local_path,
    ])
def _prepare_tests(self):
    """Prepare the cluster and build the artifacts needed for e2e tests.

    Taints and labels the Linux nodes so no test pods get scheduled there,
    sets KUBE_TEST_REPO_LIST from the downloaded repo-list file, builds the
    e2e test binary and ginkgo, then installs kubetest.

    NOTE(review): a second, shorter ``_prepare_tests`` is defined later in
    this file; since the later definition wins at class-creation time, this
    version appears to be dead code — confirm which one is intended.
    """
    # Sets KUBE_TEST_REPO_LIST
    # Builds tests
    # Taints linux nodes so that no pods will be scheduled there.
    kubectl = utils.get_kubectl_bin()
    out, _ = utils.run_shell_cmd([
        kubectl, "get", "nodes", "--selector", "beta.kubernetes.io/os=linux",
        "--no-headers", "-o", "custom-columns=NAME:.metadata.name"
    ])
    linux_nodes = out.decode().strip().split("\n")
    for node in linux_nodes:
        # NoSchedule taint keeps e2e test pods off every Linux node.
        utils.run_shell_cmd([
            kubectl, "taint", "nodes", "--overwrite", node,
            "node-role.kubernetes.io/master=:NoSchedule"
        ])
        utils.run_shell_cmd([
            kubectl, "label", "nodes", "--overwrite", node,
            "node-role.kubernetes.io/master=NoSchedule"
        ])
    self.logging.info("Downloading repo-list")
    utils.download_file(self.opts.repo_list, "/tmp/repo-list")
    # The test framework reads the image repository overrides from this var.
    os.environ["KUBE_TEST_REPO_LIST"] = "/tmp/repo-list"
    self.logging.info("Building tests")
    utils.run_shell_cmd(
        cmd=["make", 'WHAT="test/e2e/e2e.test"'],
        cwd=utils.get_k8s_folder())
    self.logging.info("Building ginkgo")
    utils.run_shell_cmd(
        cmd=["make", 'WHAT="vendor/github.com/onsi/ginkgo/ginkgo"'],
        cwd=utils.get_k8s_folder())
    self._setup_kubetest()
def _setup_kubetest(self):
    """Install kubetest, either from a prebuilt link or built from source."""
    self.logging.info("Setup Kubetest")
    if self.opts.kubetest_link:
        # A prebuilt binary was provided; download it and make it executable.
        bin_path = "/usr/bin/kubetest"
        utils.download_file(self.opts.kubetest_link, bin_path)
        os.chmod(bin_path, stat.S_IRWXU | stat.S_IRWXG)
        return
    # Clone repository using git and then install. Workaround for:
    # https://github.com/kubernetes/test-infra/issues/14712
    repo_dir = "/tmp/test-infra"
    utils.clone_git_repo(
        "https://github.com/kubernetes/test-infra", "master", repo_dir)
    utils.run_shell_cmd(
        cmd=["go", "install", "./kubetest"],
        cwd=repo_dir,
        env={"GO111MODULE": "on"})
def _setup_capz_components(self):
    """Initialize the Cluster API / CAPZ components on the mgmt cluster."""
    self.logging.info("Setup the Azure Cluster API components")
    kubeadm_provider = "kubeadm:%s" % constants.CAPI_VERSION
    utils.run_shell_cmd([
        "clusterctl", "init",
        "--kubeconfig", self.mgmt_kubeconfig_path,
        "--core", "cluster-api:%s" % constants.CAPI_VERSION,
        "--bootstrap", kubeadm_provider,
        "--control-plane", kubeadm_provider,
        "--infrastructure", "azure:%s" % constants.CAPZ_PROVIDER_VERSION,
    ])
    self.logging.info("Wait for the deployments to be available")
    # Block until every deployment in every namespace reports Available.
    utils.run_shell_cmd([
        self.kubectl, "wait",
        "--kubeconfig", self.mgmt_kubeconfig_path,
        "--for=condition=Available",
        "--timeout", "5m",
        "deployments", "--all", "--all-namespaces",
    ])
def _get_mgmt_capz_machines_names(self):
    """Return the names of the CAPI machines on the management cluster."""
    output, _ = utils.run_shell_cmd(
        [
            self.kubectl, "get", "machine",
            "--kubeconfig", self.mgmt_kubeconfig_path,
            "--output=custom-columns=NAME:.metadata.name",
            "--no-headers",
        ],
        sensitive=True)
    # One machine name per output line.
    return output.decode().strip().split('\n')
def _get_mgmt_capz_machine_node(self, machine_name):
    """Return the node name referenced by the given CAPI machine."""
    output, _ = utils.run_shell_cmd(
        [
            self.kubectl, "get", "machine",
            "--kubeconfig", self.mgmt_kubeconfig_path,
            "--output=custom-columns=NODE_NAME:.status.nodeRef.name",
            "--no-headers",
            machine_name,
        ],
        sensitive=True)
    return output.decode().strip()
def _get_capz_nodes(self):
    """Return the names of the workload cluster's non-control-plane nodes."""
    output, _ = utils.run_shell_cmd(
        [
            self.kubectl, "get", "nodes",
            "--kubeconfig", self.capz_kubeconfig_path,
            # Label selector excludes the control-plane nodes.
            "-l", "!node-role.kubernetes.io/control-plane",
            "--output=custom-columns=NAME:.metadata.name",
            "--no-headers",
        ],
        sensitive=True)
    return output.decode().strip().split('\n')
def _get_capz_node_status(self, node_name):
    """Return the 'status' mapping of a node, or None if absent."""
    output, _ = utils.run_shell_cmd(
        [
            self.kubectl, "get", "node",
            "--output", "yaml",
            "--kubeconfig", self.capz_kubeconfig_path,
            node_name,
        ],
        sensitive=True)
    node = yaml.safe_load(output.decode())
    # get() yields None when the node has no status yet, matching callers
    # that treat a missing status as "not ready".
    return node.get("status")
def _setup_capz_kubeconfig(self):
    """Fetch the workload cluster kubeconfig secret and write it to disk."""
    self.logging.info("Setting up CAPZ kubeconfig")
    output, _ = utils.run_shell_cmd([
        self.kubectl, "get",
        "--kubeconfig", self.mgmt_kubeconfig_path,
        "secret/%s-kubeconfig" % self.cluster_name,
        "--output=custom-columns=KUBECONFIG_B64:.data.value",
        "--no-headers",
    ])
    # The secret's data.value is base64-encoded; decode before writing.
    with open(self.capz_kubeconfig_path, 'w') as f:
        f.write(base64.b64decode(output).decode())
def _prepare_tests(self):
    """Prepare the cluster for running the e2e tests.

    Taints and labels the Linux nodes so no test pods get scheduled there,
    and sets KUBE_TEST_REPO_LIST from the downloaded repo-list file.

    NOTE(review): this duplicates (and, being defined later, shadows) the
    longer ``_prepare_tests`` earlier in this file that also builds the
    test binaries — confirm the duplication is intentional.
    """
    kubectl = utils.get_kubectl_bin()
    out, _ = utils.run_shell_cmd([
        kubectl, "get", "nodes", "--selector", "beta.kubernetes.io/os=linux",
        "--no-headers", "-o", "custom-columns=NAME:.metadata.name"
    ])
    linux_nodes = out.decode().strip().split("\n")
    for node in linux_nodes:
        # NoSchedule taint keeps e2e test pods off every Linux node.
        utils.run_shell_cmd([
            kubectl, "taint", "nodes", "--overwrite", node,
            "node-role.kubernetes.io/master=:NoSchedule"
        ])
        utils.run_shell_cmd([
            kubectl, "label", "nodes", "--overwrite", node,
            "node-role.kubernetes.io/master=NoSchedule"
        ])
    self.logging.info("Downloading repo-list")
    utils.download_file(self.opts.repo_list, "/tmp/repo-list")
    # The test framework reads the image repository overrides from this var.
    os.environ["KUBE_TEST_REPO_LIST"] = "/tmp/repo-list"
def _prepull_images(self, timeout=3600):
    """Pre-pull the Windows container images via a DaemonSet.

    Applies the pre-pull DaemonSet manifest, polls until the DaemonSet
    reports all desired pods ready (or *timeout* seconds elapse), then
    deletes the DaemonSet again.

    :param timeout: maximum number of seconds to wait for the pre-pull.
    """
    prepull_yaml_path = "/tmp/prepull-windows-images.yaml"
    utils.download_file(self.opts.prepull_yaml, prepull_yaml_path)
    self.logging.info("Starting Windows images pre-pull")
    utils.retry_on_error()(utils.run_shell_cmd)(
        [self.kubectl, "apply", "-f", prepull_yaml_path])
    self.logging.info(
        "Waiting up to %.2f minutes to pre-pull Windows container images",
        timeout / 60.0)
    cmd = [self.kubectl, "get", "-o", "yaml", "-f", prepull_yaml_path]
    # Poll with exponential backoff (capped at 30s). The assert below raises
    # AssertionError while pods are not yet ready, which tenacity's Retrying
    # treats as "try again"; reraise=True surfaces it once timeout expires.
    for attempt in Retrying(stop=stop_after_delay(timeout),
                            wait=wait_exponential(max=30),
                            retry=retry_if_exception_type(AssertionError),
                            reraise=True):
        with attempt:
            output, _ = utils.run_shell_cmd(cmd, sensitive=True)
            ds = yaml.safe_load(output.decode())
            ready_nr = ds["status"]["numberReady"]
            desired_ready_nr = ds["status"]["desiredNumberScheduled"]
            assert ready_nr == desired_ready_nr
    self.logging.info("Windows images successfully pre-pulled")
    self.logging.info("Cleaning up")
    utils.retry_on_error()(utils.run_shell_cmd)(
        [self.kubectl, "delete", "--wait", "-f", prepull_yaml_path])
def upload_to_k8s_node(self, local_path, remote_path, node_address):
    """Recursively copy a local path onto a K8s node via scp."""
    destination = "{}:{}".format(node_address, remote_path)
    utils.run_shell_cmd(["scp", "-r", local_path, destination], timeout=600)
def download_from_k8s_node(self, remote_path, local_path, node_address):
    """Recursively copy a path from a K8s node to the local machine via scp."""
    source = "{}:{}".format(node_address, remote_path)
    utils.run_shell_cmd(["scp", "-r", source, local_path], timeout=600)
def run_cmd_on_k8s_node(self, cmd, node_address):
    """Execute a command on a K8s node over ssh and return its output."""
    ssh_cmd = ["ssh", node_address, "'{}'".format(cmd)]
    return utils.run_shell_cmd(ssh_cmd, timeout=600)
def _build_k8s_artifacts(self):
    """Build K8s binaries and images on the bootstrap VM and stage them.

    Clones the requested K8s repo/branch remotely, builds the Linux and
    Windows binaries and the Linux DaemonSet container images, downloads
    the build tree locally, derives the CI version from the freshly built
    ``kubeadm`` binary, and finally copies binaries and image tarballs
    into the versioned remote artifacts directory.
    """
    local_k8s_path = utils.get_k8s_folder()
    remote_k8s_path = self.deployer.remote_k8s_path
    self.deployer.remote_clone_git_repo(
        self.opts.k8s_repo, self.opts.k8s_branch, remote_k8s_path)

    self.logging.info("Building K8s Linux binaries")
    cmd = ('make '
           'WHAT="cmd/kubectl cmd/kubelet cmd/kubeadm" '
           'KUBE_BUILD_PLATFORMS="linux/amd64"')
    self.deployer.run_cmd_on_bootstrap_vm([cmd], cwd=remote_k8s_path)
    # Drop any KUBECTL_PATH override so subsequent tooling uses the freshly
    # built kubectl. pop() (instead of del) avoids a KeyError when the
    # variable was never set.
    os.environ.pop("KUBECTL_PATH", None)

    self.logging.info("Building K8s Windows binaries")
    cmd = ('make '
           'WHAT="cmd/kubectl cmd/kubelet cmd/kubeadm cmd/kube-proxy" '
           'KUBE_BUILD_PLATFORMS="windows/amd64"')
    self.deployer.run_cmd_on_bootstrap_vm([cmd], cwd=remote_k8s_path)

    os.makedirs(local_k8s_path, exist_ok=True)
    self.deployer.download_from_bootstrap_vm(
        "{}/".format(remote_k8s_path), local_k8s_path)

    self.logging.info("Building K8s Linux DaemonSet container images")
    cmd = ("KUBE_FASTBUILD=true KUBE_BUILD_CONFORMANCE=n make "
           "quick-release-images")
    self.deployer.run_cmd_on_bootstrap_vm([cmd], cwd=remote_k8s_path)

    # The CI version is reported by the kubeadm binary we just downloaded.
    kubeadm_bin = os.path.join(
        constants.KUBERNETES_LINUX_BINS_LOCATION, 'kubeadm')
    out, _ = utils.run_shell_cmd(
        [kubeadm_bin, "version", "-o=short"], local_k8s_path)
    self.ci_version = out.decode().strip()
    self.deployer.ci_version = self.ci_version

    self.logging.info("Copying binaries to remote artifacts directory")
    linux_bin_dir = "%s/%s/bin/linux/amd64" % (
        self.deployer.remote_artifacts_dir, self.ci_version)
    windows_bin_dir = "%s/%s/bin/windows/amd64" % (
        self.deployer.remote_artifacts_dir, self.ci_version)
    images_dir = "%s/%s/images" % (
        self.deployer.remote_artifacts_dir, self.ci_version)
    # Build a single shell script (list of commands) to run remotely in one
    # ssh round-trip instead of one call per file.
    script = [
        "mkdir -p {0} {1} {2}".format(
            linux_bin_dir, windows_bin_dir, images_dir)
    ]
    for bin_name in ["kubectl", "kubelet", "kubeadm"]:
        linux_bin_path = "%s/%s/%s" % (
            remote_k8s_path, constants.KUBERNETES_LINUX_BINS_LOCATION,
            bin_name)
        script.append("cp {0} {1}".format(linux_bin_path, linux_bin_dir))
    for bin_name in ["kubectl", "kubelet", "kubeadm", "kube-proxy"]:
        win_bin_path = "%s/%s/%s.exe" % (
            remote_k8s_path, constants.KUBERNETES_WINDOWS_BINS_LOCATION,
            bin_name)
        script.append("cp {0} {1}".format(win_bin_path, windows_bin_dir))
    images_names = [
        "kube-apiserver.tar", "kube-controller-manager.tar",
        "kube-proxy.tar", "kube-scheduler.tar"
    ]
    for image_name in images_names:
        image_path = "%s/%s/%s" % (
            remote_k8s_path, constants.KUBERNETES_IMAGES_LOCATION,
            image_name)
        script.append("cp {0} {1}".format(image_path, images_dir))
    # Make the copied image tarballs readable by non-owner consumers.
    script.append("chmod 644 {0}/*".format(images_dir))
    self.deployer.run_cmd_on_bootstrap_vm(script)

    self._setup_e2e_tests()
    self._setup_kubetest()