def _prepare_tests(self):
    """Prepare the Kubernetes e2e test prerequisites.

    - Taints and labels every Linux node so that no test pods are
      scheduled on them (the e2e run targets the Windows nodes).
    - Downloads the image repo list and exports KUBE_TEST_REPO_LIST.
    - Builds the e2e test binary and the ginkgo runner.
    - Sets up kubetest via self._setup_kubetest().
    """
    kubectl = utils.get_kubectl_bin()
    out, _ = utils.run_shell_cmd([
        kubectl, "get", "nodes", "--selector", "beta.kubernetes.io/os=linux",
        "--no-headers", "-o", "custom-columns=NAME:.metadata.name"
    ])
    # Filter out empty entries: splitting an empty string yields [''],
    # which would otherwise make kubectl taint/label an empty node name.
    linux_nodes = [
        node for node in out.decode("ascii").strip().split("\n") if node
    ]
    for node in linux_nodes:
        utils.run_shell_cmd([
            kubectl, "taint", "nodes", "--overwrite", node,
            "node-role.kubernetes.io/master=:NoSchedule"
        ])
        utils.run_shell_cmd([
            kubectl, "label", "nodes", "--overwrite", node,
            "node-role.kubernetes.io/master=NoSchedule"
        ])

    self.logging.info("Downloading repo-list")
    utils.download_file(self.opts.repo_list, "/tmp/repo-list")
    os.environ["KUBE_TEST_REPO_LIST"] = "/tmp/repo-list"

    self.logging.info("Building tests")
    utils.run_shell_cmd(
        cmd=["make", 'WHAT="test/e2e/e2e.test"'],
        cwd=utils.get_k8s_folder())

    self.logging.info("Building ginkgo")
    utils.run_shell_cmd(
        cmd=["make", 'WHAT="vendor/github.com/onsi/ginkgo/ginkgo"'],
        cwd=utils.get_k8s_folder())

    self._setup_kubetest()
def _prepare_ansible(self):
    """Prepare the ansible inventory, host vars and deployment artifacts.

    Clones the ansible repo, renders the inventory hosts file from the
    template, writes per-Windows-host variable files (with passwords read
    from OpenStack), appends cluster entries to the system hosts file,
    configures ansible logging/ssh options, and stages the prebuilt
    Kubernetes binaries into the playbook tmp directory.
    """
    utils.clone_repo(self.opts.ansibleRepo, self.opts.ansibleBranch,
                     self.default_ansible_path)

    # Creating ansible hosts file
    linux_master = self._get_linux_vms()[0].get("name")
    linux_minions = [vm.get("name") for vm in self._get_linux_vms()[1:]]
    windows_minions = [vm.get("name") for vm in self._get_windows_vms()]
    hosts_file_content = self.ansible_hosts_template.replace(
        "KUBE_MASTER_PLACEHOLDER", linux_master)
    hosts_file_content = hosts_file_content.replace(
        "KUBE_MINIONS_LINUX_PLACEHOLDER", "\n".join(linux_minions))
    hosts_file_content = hosts_file_content.replace(
        "KUBE_MINIONS_WINDOWS_PLACEHOLDER", "\n".join(windows_minions))

    self.logging.info("Writing hosts file for ansible inventory.")
    with open(self.ansible_hosts_path, "w") as f:
        f.write(hosts_file_content)

    # Creating hosts_vars for hosts
    for vm in self._get_windows_vms():
        vm_name = vm.get("name")
        vm_username = self.ansible_windows_admin
        # TO DO: Have this configurable trough opts
        vm_pass = openstack.server_get_password(vm_name, self.opts.keyFile)
        hosts_var_content = self.ansible_host_var_windows_template.replace(
            "USERNAME_PLACEHOLDER", vm_username).replace(
            "PASS_PLACEHOLDER", vm_pass)
        filepath = os.path.join(self.ansible_host_var_dir, vm_name)
        with open(filepath, "w") as f:
            f.write(hosts_var_content)

    # Populate hosts file
    with open(OVN_OVS_CI.HOSTS_FILE, "a") as f:
        for vm in self._get_all_vms():
            vm_name = vm.get("name")
            # Bug fix: str.find() returns 0 for a match at the start of the
            # string, so 'find("master") > 0' missed VMs whose name begins
            # with "master". Use a membership test instead.
            if "master" in vm_name:
                vm_name = vm_name + " kubernetes"
            hosts_entry = ("%s %s\n" % (self._get_vm_fip(vm), vm_name))
            self.logging.info("Adding entry %s to hosts file." % hosts_entry)
            f.write(hosts_entry)

    # Enable ansible log and set ssh options
    with open(self.ansible_config_file, "a") as f:
        log_file = os.path.join(self.opts.log_path, "ansible-deploy.log")
        log_config = "log_path=%s\n" % log_file
        # This probably goes better in /etc/ansible.cfg (set in dockerfile )
        ansible_config = ("\n\n[ssh_connection]\nssh_args="
                          "-o StrictHostKeyChecking=no "
                          "-o UserKnownHostsFile=/dev/null\n")
        f.write(log_config)
        f.write(ansible_config)

    full_ansible_tmp_path = os.path.join(self.ansible_playbook_root, "tmp")
    utils.mkdir_p(full_ansible_tmp_path)
    # Copy kubernetes prebuilt binaries.
    # Note: loop variable renamed from "file" to avoid shadowing the builtin.
    for bin_name in ["kubelet", "kubectl", "kube-apiserver",
                     "kube-controller-manager", "kube-scheduler",
                     "kube-proxy"]:
        full_file_path = os.path.join(
            utils.get_k8s_folder(),
            constants.KUBERNETES_LINUX_BINS_LOCATION, bin_name)
        self.logging.info("Copying %s to %s." %
                          (full_file_path, full_ansible_tmp_path))
        shutil.copy(full_file_path, full_ansible_tmp_path)
    for bin_name in ["kubelet.exe", "kubectl.exe", "kube-proxy.exe"]:
        full_file_path = os.path.join(
            utils.get_k8s_folder(),
            constants.KUBERNETES_WINDOWS_BINS_LOCATION, bin_name)
        self.logging.info("Copying %s to %s." %
                          (full_file_path, full_ansible_tmp_path))
        shutil.copy(full_file_path, full_ansible_tmp_path)
def _prepareTests(self):
    """Prepare the Kubernetes e2e test prerequisites.

    Taints and labels the Linux nodes so that no test pods are scheduled
    on them, downloads the repo list (exported as KUBE_TEST_REPO_LIST),
    builds the e2e test and ginkgo binaries, and fetches kubetest.

    Raises:
        Exception: if any kubectl / make / go command fails.
    """
    # Prefer an explicitly configured kubectl; fall back to the in-tree
    # wrapper script from the cloned kubernetes repo.
    kubectl = (os.environ.get("KUBECTL_PATH") or
               os.path.join(utils.get_k8s_folder(), "cluster/kubectl.sh"))
    cmd = [kubectl, "get", "nodes", "--selector",
           "beta.kubernetes.io/os=linux", "--no-headers",
           "-o", "custom-columns=NAME:.metadata.name"]
    out, err, ret = utils.run_cmd(cmd, stdout=True, stderr=True)
    if ret != 0:
        self.logging.info("Failed to get kubernetes nodes: %s." % err)
        raise Exception("Failed to get kubernetes nodes: %s." % err)

    # Filter out empty entries: splitting empty output yields [''].
    linux_nodes = [node for node in out.strip().split("\n") if node]
    for node in linux_nodes:
        taint_cmd = [kubectl, "taint", "nodes", node, "key=value:NoSchedule"]
        label_cmd = [kubectl, "label", "nodes", node,
                     "node-role.kubernetes.io/master=NoSchedule"]
        _, err, ret = utils.run_cmd(taint_cmd, stderr=True)
        if ret != 0:
            self.logging.info("Failed to taint node %s with error %s."
                              % (node, err))
            raise Exception("Failed to taint node %s with error %s."
                            % (node, err))
        _, err, ret = utils.run_cmd(label_cmd, stderr=True)
        if ret != 0:
            self.logging.info("Failed to label node %s with error %s."
                              % (node, err))
            raise Exception("Failed to label node %s with error %s."
                            % (node, err))

    self.logging.info("Downloading repo-list.")
    utils.download_file(self.opts.repo_list, "/tmp/repo-list")
    os.environ["KUBE_TEST_REPO_LIST"] = "/tmp/repo-list"

    self.logging.info("Building tests.")
    cmd = ["make", 'WHAT="test/e2e/e2e.test"']
    _, err, ret = utils.run_cmd(cmd, stderr=True, cwd=utils.get_k8s_folder())
    if ret != 0:
        self.logging.error(
            "Failed to build k8s test binaries with error: %s" % err)
        raise Exception(
            "Failed to build k8s test binaries with error: %s" % err)

    self.logging.info("Building ginkgo")
    cmd = ["make", 'WHAT="vendor/github.com/onsi/ginkgo/ginkgo"']
    _, err, ret = utils.run_cmd(cmd, stderr=True, cwd=utils.get_k8s_folder())
    if ret != 0:
        self.logging.error(
            "Failed to build k8s ginkgo binaries with error: %s" % err)
        raise Exception(
            "Failed to build k8s ginkgo binaries with error: %s" % err)

    self.logging.info("Get Kubetest")
    cmd = ["go", "get", "-u", "k8s.io/test-infra/kubetest"]
    _, err, ret = utils.run_cmd(cmd, stderr=True)
    if ret != 0:
        self.logging.error(
            "Failed to get kubetest binary with error: %s" % err)
        # Bug fix: the raised message previously read "with errorr:".
        raise Exception(
            "Failed to get kubetest binary with error: %s" % err)
def _run_tests(self):
    """Invoke kubetest against the deployed cluster.

    Returns:
        int: the kubetest process exit code (0 on success).
    """
    self.logging.info("Running tests on env.")
    cmd = ["kubetest"]
    cmd.append("--check-version-skew=false")
    cmd.append("--ginkgo-parallel=%s" % self.opts.parallel_test_nodes)
    cmd.append("--verbose-commands=true")
    cmd.append("--provider=skeleton")
    cmd.append("--test")
    cmd.append("--dump=%s" % self.opts.log_path)
    cmd.append(
        ('--test_args=--ginkgo.flakeAttempts=1 '
         '--num-nodes=2 --ginkgo.noColor '
         '--ginkgo.dryRun=%(dryRun)s '
         '--node-os-distro=windows '
         '--ginkgo.focus=%(focus)s '
         '--ginkgo.skip=%(skip)s') % {
            "dryRun": self.opts.test_dry_run,
            "focus": self.opts.test_focus_regex,
            "skip": self.opts.test_skip_regex
        })
    docker_config_file = os.environ.get("DOCKER_CONFIG_FILE")
    if docker_config_file:
        # Bug fix: the argument had a leading space. subprocess passes list
        # items verbatim (no shell word-splitting), so kubetest received
        # " --docker-config-file=..." and did not recognize the flag.
        cmd.append('--docker-config-file=%s' % docker_config_file)
    return subprocess.call(cmd, cwd=utils.get_k8s_folder())
def _prepare_test_env(self):
    """Set up the environment required by the e2e test run."""
    self.logging.info("Preparing test env")

    utils.clone_git_repo(
        self.opts.k8s_repo, self.opts.k8s_branch, utils.get_k8s_folder())

    master_address = self.deployer.master_public_address
    os.environ["KUBE_MASTER"] = "local"
    os.environ["KUBE_MASTER_IP"] = master_address
    os.environ["KUBE_MASTER_URL"] = "https://%s:%s" % (
        master_address, self.deployer.master_public_port)

    self._setup_kubeconfig()
    # Image prepulling only applies to the Docker runtime.
    if self.opts.container_runtime == "docker":
        self._prepull_images()
def _runTests(self):
    """Invoke kubetest and return its process exit code."""
    self.logging.info("Running tests on env.")
    test_args = (
        '--test_args=--ginkgo.flakeAttempts=1 --num-nodes=2 '
        '--ginkgo.noColor --ginkgo.dryRun=%(dryRun)s '
        '--node-os-distro=windows --ginkgo.focus=%(focus)s '
        '--ginkgo.skip=%(skip)s'
    ) % {
        "dryRun": self.opts.test_dry_run,
        "focus": self.opts.test_focus_regex,
        "skip": self.opts.test_skip_regex
    }
    cmd = [
        "kubetest",
        "--ginkgo-parallel=%s" % self.opts.parallel_test_nodes,
        "--verbose-commands=true",
        "--provider=skeleton",
        "--test",
        "--dump=%s" % self.opts.log_path,
        test_args,
    ]
    return subprocess.call(cmd, cwd=utils.get_k8s_folder())
def _build_k8s_artifacts(self):
    """Build the Kubernetes binaries and container images for the CI run.

    Clones the configured kubernetes repo/branch, builds the Linux and
    Windows binaries plus the Linux DaemonSet images, determines the CI
    version via `kubeadm version`, and stages binaries and image tarballs
    into the versioned CI artifacts directory layout.
    """
    k8s_path = utils.get_k8s_folder()
    utils.clone_git_repo(
        self.opts.k8s_repo, self.opts.k8s_branch, k8s_path)

    self.logging.info("Building K8s Linux binaries")
    cmd = [
        'make',
        'WHAT="cmd/kubectl cmd/kubelet cmd/kubeadm"',
        'KUBE_BUILD_PLATFORMS="linux/amd64"'
    ]
    utils.run_shell_cmd(cmd, k8s_path)
    # Bug fix: `del os.environ[...]` raises KeyError when the variable is
    # not set; pop with a default removes it only if present.
    os.environ.pop("KUBECTL_PATH", None)

    self.logging.info("Building K8s Windows binaries")
    cmd = [
        'make',
        'WHAT="cmd/kubectl cmd/kubelet cmd/kubeadm cmd/kube-proxy"',
        'KUBE_BUILD_PLATFORMS="windows/amd64"'
    ]
    utils.run_shell_cmd(cmd, k8s_path)

    self.logging.info("Building K8s Linux DaemonSet container images")
    cmd = ['make', 'quick-release-images']
    env = {"KUBE_FASTBUILD": "true", "KUBE_BUILD_CONFORMANCE": "n"}
    utils.retry_on_error()(utils.run_shell_cmd)(cmd, k8s_path, env)

    # The CI version string drives the artifacts directory layout below.
    kubeadm_bin = os.path.join(
        constants.KUBERNETES_LINUX_BINS_LOCATION, 'kubeadm')
    out, _ = utils.run_shell_cmd(
        [kubeadm_bin, "version", "-o=short"], k8s_path)
    self.ci_version = out.decode().strip()
    self.deployer.ci_version = self.ci_version

    ci_artifacts_linux_bin_dir = "%s/%s/bin/linux/amd64" % (
        self.ci_artifacts_dir, self.ci_version)
    ci_artifacts_windows_bin_dir = "%s/%s/bin/windows/amd64" % (
        self.ci_artifacts_dir, self.ci_version)
    ci_artifacts_images_dir = "%s/%s/images" % (
        self.ci_artifacts_dir, self.ci_version)
    os.makedirs(ci_artifacts_linux_bin_dir, exist_ok=True)
    os.makedirs(ci_artifacts_windows_bin_dir, exist_ok=True)
    os.makedirs(ci_artifacts_images_dir, exist_ok=True)

    for bin_name in ["kubectl", "kubelet", "kubeadm"]:
        linux_bin_path = "%s/%s/%s" % (
            k8s_path, constants.KUBERNETES_LINUX_BINS_LOCATION, bin_name)
        shutil.copy(linux_bin_path, ci_artifacts_linux_bin_dir)
    for bin_name in ["kubectl", "kubelet", "kubeadm", "kube-proxy"]:
        win_bin_path = "%s/%s/%s.exe" % (
            k8s_path, constants.KUBERNETES_WINDOWS_BINS_LOCATION, bin_name)
        shutil.copy(win_bin_path, ci_artifacts_windows_bin_dir)

    images_names = [
        "kube-apiserver.tar", "kube-controller-manager.tar",
        "kube-proxy.tar", "kube-scheduler.tar"
    ]
    for image_name in images_names:
        image_path = "%s/%s/%s" % (
            k8s_path, constants.KUBERNETES_IMAGES_LOCATION, image_name)
        shutil.copy(image_path, ci_artifacts_images_dir)
def _build_k8s_binaries(self):
    """Clone the configured Kubernetes repo and build its binaries."""
    kubernetes_dir = utils.get_k8s_folder()
    utils.clone_repo(
        self.opts.k8s_repo, self.opts.k8s_branch, kubernetes_dir)
    utils.build_k8s_binaries()
def _prepare_ansible(self):
    """Prepare the ansible inventory, host vars and deployment artifacts.

    Clones the ansible repo, renders the inventory from the template,
    writes per-Windows-host variable files (credentials come from the
    deployer), configures ansible logging/ssh options, stages the prebuilt
    Kubernetes binaries, and writes the flannel/Azure CCM group vars.
    """
    utils.clone_repo(self.opts.ansibleRepo, self.opts.ansibleBranch,
                     self.default_ansible_path)

    # Creating ansible hosts file
    linux_master_hostname = self.deployer.get_cluster_master_vm_name()
    windows_minions_hostnames = \
        self.deployer.get_cluster_win_minion_vms_names()
    hosts_file_content = self.ansible_hosts_template.replace(
        "KUBE_MASTER_PLACEHOLDER", linux_master_hostname)
    hosts_file_content = hosts_file_content.replace(
        "KUBE_MINIONS_WINDOWS_PLACEHOLDER",
        "\n".join(windows_minions_hostnames))

    self.logging.info("Writing hosts file for ansible inventory.")
    with open(self.ansible_hosts_path, "w") as f:
        f.write(hosts_file_content)

    # This proliferation of args should be set to cli to ansible when called
    win_hosts_extra_vars = "\nCONTAINER_RUNTIME: \"%s\"" \
        % self.opts.containerRuntime
    if self.opts.containerRuntime == "containerd":
        win_hosts_extra_vars += "\nCNIBINS: \"sdnms\""

    # Creating hosts_vars for hosts
    for vm_name in windows_minions_hostnames:
        vm_username = self.deployer.get_win_vm_username(vm_name)
        # TO DO: Have this configurable trough opts
        vm_pass = self.deployer.get_win_vm_password(vm_name)
        hosts_var_content = self.ansible_host_var_windows_template.replace(
            "USERNAME_PLACEHOLDER", vm_username).replace(
            "PASS_PLACEHOLDER", vm_pass)
        filepath = os.path.join(self.ansible_host_var_dir, vm_name)
        with open(filepath, "w") as f:
            f.write(hosts_var_content)
            f.write(win_hosts_extra_vars)

    # Enable ansible log and set ssh options
    with open(self.ansible_config_file, "a") as f:
        log_file = os.path.join(self.opts.log_path, "ansible-deploy.log")
        log_config = "log_path=%s\n" % log_file
        # This probably goes better in /etc/ansible.cfg (set in dockerfile )
        ansible_config = ("\n\n[ssh_connection]\nssh_args="
                          "-o StrictHostKeyChecking=no "
                          "-o UserKnownHostsFile=/dev/null\n")
        f.write(log_config)
        f.write(ansible_config)

    full_ansible_tmp_path = os.path.join(self.ansible_playbook_root, "tmp")
    utils.mkdir_p(full_ansible_tmp_path)
    # Copy kubernetes prebuilt binaries.
    # Note: loop variable renamed from "file" to avoid shadowing the builtin.
    for bin_name in ["kubelet", "kubectl", "kube-apiserver",
                     "kube-controller-manager", "kube-scheduler",
                     "kube-proxy"]:
        full_file_path = os.path.join(
            utils.get_k8s_folder(),
            constants.KUBERNETES_LINUX_BINS_LOCATION, bin_name)
        self.logging.info("Copying %s to %s." %
                          (full_file_path, full_ansible_tmp_path))
        shutil.copy(full_file_path, full_ansible_tmp_path)
    for bin_name in ["kubelet.exe", "kubectl.exe", "kube-proxy.exe"]:
        full_file_path = os.path.join(
            utils.get_k8s_folder(),
            constants.KUBERNETES_WINDOWS_BINS_LOCATION, bin_name)
        self.logging.info("Copying %s to %s." %
                          (full_file_path, full_ansible_tmp_path))
        shutil.copy(full_file_path, full_ansible_tmp_path)

    azure_ccm = "false"
    # Generate azure.json if needed and populate group vars with
    # necessary paths.
    if self.opts.flannelMode == Terraform_Flannel.FLANNEL_MODE_L2BRIDGE:
        self._generate_azure_config()
        azure_ccm = "true"

    # Set flannel mode in group vars
    with open(self.ansible_group_vars_file, "a") as f:
        f.write("FLANNEL_MODE: %s\n" % self.opts.flannelMode)
        f.write("AZURE_CCM: %s\n" % azure_ccm)
        f.write("AZURE_CCM_LOCAL_PATH: %s\n"
                % Terraform_Flannel.AZURE_CCM_LOCAL_PATH)