Пример #1
0
    def _prepare_tests(self):
        """Prepare the e2e test environment.

        Exports KUBE_TEST_REPO_LIST, builds the e2e test binaries and
        ginkgo, and taints/labels the Linux nodes so that no test pods
        get scheduled onto them.
        """
        kubectl = utils.get_kubectl_bin()
        output, _ = utils.run_shell_cmd([
            kubectl, "get", "nodes", "--selector",
            "beta.kubernetes.io/os=linux", "--no-headers", "-o",
            "custom-columns=NAME:.metadata.name"
        ])
        for node_name in output.decode("ascii").strip().split("\n"):
            # Taint first, then label each Linux node.
            utils.run_shell_cmd([
                kubectl, "taint", "nodes", "--overwrite", node_name,
                "node-role.kubernetes.io/master=:NoSchedule"
            ])
            utils.run_shell_cmd([
                kubectl, "label", "nodes", "--overwrite", node_name,
                "node-role.kubernetes.io/master=NoSchedule"
            ])

        self.logging.info("Downloading repo-list")
        utils.download_file(self.opts.repo_list, "/tmp/repo-list")
        os.environ["KUBE_TEST_REPO_LIST"] = "/tmp/repo-list"

        self.logging.info("Building tests")
        utils.run_shell_cmd(cmd=["make", 'WHAT="test/e2e/e2e.test"'],
                            cwd=utils.get_k8s_folder())

        self.logging.info("Building ginkgo")
        utils.run_shell_cmd(
            cmd=["make", 'WHAT="vendor/github.com/onsi/ginkgo/ginkgo"'],
            cwd=utils.get_k8s_folder())

        self._setup_kubetest()
Пример #2
0
 def upload_to_k8s_node(self,
                        local_path,
                        remote_path,
                        node_address,
                        timeout="10m"):
     """Copy a local file or directory to a K8s node via scp -r."""
     self.logging.info("Uploading %s to %s on node %s", local_path,
                       remote_path, node_address)
     scp_target = "%s:%s" % (node_address, remote_path)
     utils.run_shell_cmd(
         ["timeout", timeout, "scp", "-r", local_path, scp_target])
Пример #3
0
 def download_from_k8s_node(self,
                            remote_path,
                            local_path,
                            node_address,
                            timeout="10m"):
     """Copy a remote file or directory from a K8s node via scp -r."""
     self.logging.info("Downloading %s to %s from node %s", remote_path,
                       local_path, node_address)
     scp_source = "%s:%s" % (node_address, remote_path)
     utils.run_shell_cmd(
         ["timeout", timeout, "scp", "-r", scp_source, local_path])
Пример #4
0
    def _setup_kubetest(self):
        """Install the kubetest binary.

        If a direct download link was configured, fetch the prebuilt
        binary. Otherwise build kubetest from a fresh clone of the
        test-infra repository.
        """
        self.logging.info("Setup Kubetest")

        if self.opts.kubetest_link != "":
            kubetest_bin = "/usr/bin/kubetest"
            utils.download_file(self.opts.kubetest_link, kubetest_bin)
            # Owner and group get rwx on the downloaded binary.
            os.chmod(kubetest_bin, stat.S_IRWXU | stat.S_IRWXG)
            return

        # Clone repository using git and then install. Workaround for:
        # https://github.com/kubernetes/test-infra/issues/14712
        utils.clone_repo("https://github.com/kubernetes/test-infra", "master",
                         "/tmp/test-infra")
        utils.run_shell_cmd(cmd=["go", "install", "./kubetest"],
                            cwd="/tmp/test-infra",
                            env={"GO111MODULE": "on"})
Пример #5
0
    def _build_sdn_cni_binaries(self):
        """Build the Windows SDN CNI binaries and stage them as artifacts."""
        sdn_cni_dir = utils.get_sdn_folder()
        utils.clone_git_repo(
            self.opts.sdn_repo, self.opts.sdn_branch, sdn_cni_dir)

        self.logging.info("Building the SDN CNI binaries")
        utils.run_shell_cmd(["GOOS=windows", "make", "all"], sdn_cni_dir)

        self.logging.info("Copying binaries to local artifacts directory")
        cni_artifacts_dir = os.path.join(self.ci_artifacts_dir, "cni")
        os.makedirs(cni_artifacts_dir, exist_ok=True)

        for binary_name in ("nat.exe", "sdnbridge.exe", "sdnoverlay.exe"):
            shutil.copy(os.path.join(sdn_cni_dir, "out", binary_name),
                        cni_artifacts_dir)
Пример #6
0
def main():
    """Search GitHub for repositories matching ``_query``.

    Downloads the first 100 results (sorted by stars, descending) to
    ``top_repos_filename`` unless that file already exists.

    :return: path to the JSON file holding the search results
    """
    if os.path.exists(top_repos_filename):
        # Fixed typo in the user-facing message ("seach" -> "search").
        print("Reusing existing github search.")
        return top_repos_filename

    # exist_ok avoids the check-then-create race of the original
    # os.path.exists() guard.
    os.makedirs(DATA_DIR, exist_ok=True)

    url = "https://api.github.com/search/repositories?" \
          "q={}&" \
          "sort=stars&" \
          "order=desc&" \
          "per_page=100".format(_query)

    cmd = "curl \"{}\" -o {}".format(url, top_repos_filename)
    utils.run_shell_cmd(cmd)
    return top_repos_filename
Пример #7
0
def run_pipeline():
    """Run a full smCounter pipeline on the test readset.

    :raises AssertionError: if the pipeline exits with a non-zero code
    """
    cmd = (
        """ python /srv/qgen/code/qiaseq-dna/run_qiaseq_dna.py run_sm_counter_v1.params.txt v1 """
        """ single NEB_S2 > run.log 2>&1 """)
    return_code = utils.run_shell_cmd(cmd)
    # The assert message was an unterminated triple-quoted string in the
    # original source (a syntax error); it is closed here so the module
    # can be parsed.
    assert return_code == 0, "smCounter pipeline failed!"
Пример #8
0
    def down(self):
        """Tear down the kind management cluster and its Azure resources."""
        self.logging.info("Deleting kind cluster")
        utils.run_shell_cmd(
            ["kind", "delete", "cluster", "--name", self.kind_cluster_name])

        self.logging.info("Deleting Azure resource group")
        try:
            operation = self.resource_mgmt_client.resource_groups.delete(
                self.cluster_name)
            operation.wait()
        except msrestazure.azure_exceptions.CloudError as e:
            # A missing resource group is fine; anything else bubbles up.
            if e.error.error == "ResourceGroupNotFound":
                self.logging.warning("Resource group %s does not exist",
                                     self.cluster_name)
            else:
                raise e
Пример #9
0
    def run_shell_cmd(cmd):
        """
        Execute a shell command by delegating to ``utils.run_shell_cmd``.

        :param cmd: the command to execute, in the form of a list
        :return: whatever ``utils.run_shell_cmd`` returns — NOTE(review):
            the sibling callers unpack it as an ``(output, err)`` pair,
            not the status code the original docstring claimed; verify
            against the ``utils`` implementation
        """

        return utils.run_shell_cmd(cmd)
 def teardown(cmd):
     '''
     Clean-up after tests.

     :param cmd: the shell command performing the clean-up
     :return: None
     '''
     output, err = run_shell_cmd(cmd)
     if err:
         # Log the failure before exiting, consistently with setup();
         # the original exited silently.
         logging.error("Shell command returns error, exiting... :" + str(err))
         exit(1)
     return
    def setup(cmd):
        '''
        Environment setup before running any tests.

        :param cmd: the shell command performing the setup
        :return: None
        '''
        output, err = run_shell_cmd(cmd)
        if err:
            # The original log line was indented with a tab among
            # spaces (a syntax error); str() guards against a bytes
            # `err` breaking the concatenation.
            logging.error("Shell command returns error, exiting... :" + str(err))
            exit(1)
Пример #12
0
    def _create_kind_cluster(self):
        """Create the kind management cluster and install Cluster API.

        Brings up a kind cluster, initializes the Azure Cluster API
        components on it, then waits for all deployments to become
        available.
        """
        self.logging.info("Create Kind management cluster")
        kind_config_file = os.path.join(os.getcwd(),
                                        "cluster-api/kind-config.yaml")
        # Allow overriding the node image via the environment.
        kind_node_image = (os.environ.get("KIND_NODE_IMAGE")
                           or "e2eteam/kind-node:v1.18.4")
        utils.run_shell_cmd([
            "kind", "create", "cluster", "--config", kind_config_file,
            "--kubeconfig", self.kind_kubeconfig_path, "--image",
            kind_node_image, "--wait", "15m", "--name", self.kind_cluster_name
        ])

        self.logging.info("Add the Azure cluster api components")
        capi_config = os.path.join(os.getcwd(),
                                   "cluster-api/azure/config.yaml")
        capi_version = "v0.3.8"
        capz_provider_version = "v0.4.6"
        utils.run_shell_cmd([
            "clusterctl", "init", "--kubeconfig", self.kind_kubeconfig_path,
            "--core", "cluster-api:%s" % capi_version,
            "--bootstrap", "kubeadm:%s" % capi_version,
            "--control-plane", "kubeadm:%s" % capi_version,
            "--infrastructure", "azure:%s" % capz_provider_version,
            "--config", capi_config
        ])

        self.logging.info("Wait for the deployments to be available")
        utils.run_shell_cmd([
            self.kubectl, "wait", "--kubeconfig", self.kind_kubeconfig_path,
            "--for=condition=Available", "--timeout", "5m", "deployments",
            "--all", "--all-namespaces"
        ])
Пример #13
0
    def _build_containerd_shim(self):
        """Build the Windows containerd shim and stage it as an artifact.

        When no shim repo was given, the shim is built from the vendored
        sources inside the containerd tree; otherwise the given repo and
        branch are cloned and built.
        """
        from_vendor = self.opts.containerd_shim_repo is None
        containerd_shim_path = utils.get_containerd_shim_folder(from_vendor)

        if from_vendor:
            # Vendor hcsshim into the containerd tree via vndr.
            utils.run_shell_cmd(["go", "get", "github.com/LK4D4/vndr"])
            utils.run_shell_cmd(
                ["vndr", "-whitelist", "hcsshim",
                 "github.com/Microsoft/hcsshim"],
                utils.get_containerd_folder())
        else:
            utils.clone_git_repo(self.opts.containerd_shim_repo,
                                 self.opts.containerd_shim_branch,
                                 containerd_shim_path)

        self.logging.info("Building containerd shim")
        utils.run_shell_cmd(
            ["GOOS=windows", "go", "build", "-o",
             constants.CONTAINERD_SHIM_BIN, constants.CONTAINERD_SHIM_DIR],
            containerd_shim_path)

        self.logging.info("Copying binaries to local artifacts directory")
        bin_dir = os.path.join(self.ci_artifacts_dir, "containerd/bin")
        os.makedirs(bin_dir, exist_ok=True)
        shutil.copy(
            os.path.join(containerd_shim_path, constants.CONTAINERD_SHIM_BIN),
            bin_dir)
Пример #14
0
def filter_commit_single_scala_file(repo, commit):
    """Return True iff the commit changes exactly one .scala file."""
    cmd = "git --git-dir={} show {} --name-only".format(repo, commit)
    out = utils.run_shell_cmd(cmd)
    # The first output line is a header; blank lines are noise.
    changed = [line.strip() for line in out.split('\n')[1:] if line.strip()]
    scala_files = [name for name in changed
                   if os.path.splitext(name)[1].lower() == '.scala']
    return len(scala_files) == 1
Пример #15
0
    def __del__(self):
        """
        Destructor is responsible for cleaning up all artifacts. This
        covers unmounting and deleting the mountpoints. The mount_stub
        remains unchanged.
        """
        if not self.is_mounted:
            return

        for mount_dir in self.volume_mount_paths:
            logger.info(f"Cleaning up {mount_dir}")
            # Unmount first, then remove the (now empty) mountpoint.
            utils.run_shell_cmd(["sudo", "umount", mount_dir])
            utils.run_shell_cmd(["sudo", "rm", "-r", mount_dir])

        logger.info(f"Cleaning up {self.mount_path}")
        utils.run_shell_cmd(["sudo", "umount", self.mount_path])
        utils.run_shell_cmd(["sudo", "rm", "-r", self.mount_path])
Пример #16
0
    def _prepull_images(self, timeout=3600):
        # Pre-pull the Windows container images on the agents by applying
        # a DaemonSet, then poll its status until every scheduled node
        # reports ready or `timeout` (seconds) expires.
        prepull_yaml_path = "/tmp/prepull-windows-images.yaml"
        utils.download_file(self.opts.prepull_yaml, prepull_yaml_path)

        self.logging.info("Starting Windows images pre-pull")
        # retry_on_error() wraps run_shell_cmd so transient kubectl
        # failures are retried.
        utils.retry_on_error()(utils.run_shell_cmd)(
            [self.kubectl, "apply", "-f", prepull_yaml_path])

        self.logging.info(
            "Waiting up to %.2f minutes to pre-pull Windows container images",
            timeout / 60.0)

        sleep_time = 5
        start = time.time()
        cmd = [self.kubectl, "get", "-o", "yaml", "-f", prepull_yaml_path]
        while True:
            elapsed = time.time() - start
            if elapsed > timeout:
                raise Exception("Couldn't pre-pull Windows images within "
                                "%.2f minutes." % (timeout / 60.0))

            # sensitive=True presumably suppresses logging of the command
            # output -- TODO confirm against the utils implementation.
            output, _ = utils.retry_on_error()(
                utils.run_shell_cmd)(cmd, sensitive=True)
            prepull_daemonset = yaml.safe_load(output.decode("ascii"))

            # Done once every desired node runs a ready pre-pull pod.
            if (prepull_daemonset["status"]["numberReady"] ==
                    prepull_daemonset["status"]["desiredNumberScheduled"]):
                break

            time.sleep(sleep_time)

        self.logging.info("Windows images pre-pulled in %.2f minutes",
                          (time.time() - start) / 60.0)

        self.logging.info("Cleaning up")
        # --wait blocks until the DaemonSet resources are actually gone.
        utils.run_shell_cmd(
            [self.kubectl, "delete", "--wait", "-f", prepull_yaml_path])
Пример #17
0
    def _collect_logs(self, daemonset_yaml, script_url, operating_system):
        """Collect node logs for one operating system via a DaemonSet.

        Creates a log-collection DaemonSet from `daemonset_yaml` (with
        the {{SCRIPT_URL}} placeholder replaced by `script_url`), copies
        the archive produced by each pod into ``self.opts.log_path``,
        then tears the DaemonSet down.

        :raises Exception: if the DaemonSet cleanup times out.
        """
        if "KUBECONFIG" not in os.environ:
            self.logging.info(("Skipping collection of %s logs, because "
                               "KUBECONFIG is not set."), operating_system)
            return

        self.logging.info("Collecting %s logs.", operating_system)
        daemonset_name = "collect-logs-%s" % operating_system

        utils.mkdir_p("/tmp/collect-logs")
        daemonset_yaml_file = "/tmp/collect-logs/collect-logs-%s.yaml" % (
            operating_system)
        utils.download_file(daemonset_yaml, daemonset_yaml_file)
        utils.sed_inplace(daemonset_yaml_file, "{{SCRIPT_URL}}", script_url)

        kubectl = utils.get_kubectl_bin()

        utils.run_shell_cmd([kubectl, "create", "-f", daemonset_yaml_file])
        # List the pods the DaemonSet created, one name per line.
        out, _ = utils.run_shell_cmd([
            kubectl, "get", "pods",
            "--selector=name=%s" % daemonset_name,
            "--output=custom-columns=NAME:.metadata.name", "--no-headers"
        ])

        log_pods = out.decode('ascii').strip().splitlines()
        for pod in log_pods:
            # Skip pods that never become ready; keep collecting from
            # the rest.
            if not utils.wait_for_ready_pod(pod):
                self.logging.warning(
                    "Timed out waiting for pod to be ready: %s", pod)
                continue

            # Resolve which node this pod landed on, for the file name.
            out, _ = utils.run_shell_cmd([
                kubectl, "get", "pod", pod,
                "--output=custom-columns=NODE:.spec.nodeName", "--no-headers"
            ])
            vm_name = out.decode('ascii').strip()

            self.logging.info("Copying logs from: %s", vm_name)

            logs_vm_path = os.path.join(self.opts.log_path, "%s.zip" % vm_name)

            # The archive path inside the pod differs per OS.
            if operating_system == "linux":
                src_path = "%s:/tmp/k8s-logs.tar.gz" % pod
            else:
                src_path = "%s:k/logs.zip" % pod

            utils.run_shell_cmd([kubectl, "cp", src_path, logs_vm_path])

        if not utils.daemonset_cleanup(daemonset_yaml_file, daemonset_name):
            self.logging.error("Timed out waiting for daemonset cleanup: %s",
                               daemonset_name)
            raise Exception("Timed out waiting for daemonset cleanup: %s" %
                            daemonset_name)

        self.logging.info("Finished collecting %s logs.", operating_system)
Пример #18
0
def filter_commit_continuous_single_edit(repo, commit):
    """Return True iff the commit's diff is one contiguous edit region.

    Counts transitions between context (' '), added ('+') and removed
    ('-') diff lines; exactly three transitions means a single
    continuous block of changes.
    """
    cmd = "git --git-dir={} show {}".format(repo, commit)
    diff_lines = utils.run_shell_cmd(cmd).split('\n')[1:]  # ignore header
    previous = ' '
    switches = 0
    for line in diff_lines:
        if not line:
            continue
        # File-name markers are not part of the hunk content.
        if line.startswith(("---", "+++")):
            continue
        head = line[0]
        if head not in {' ', '+', '-'}:
            continue
        if head != previous:
            switches += 1
        previous = head
    return switches == 3  # (\s -> + -> - -> \s) or (\s -> - -> + -> \s)
Пример #19
0
    def _wait_for_ready_cni(self, timeout=900):
        """Poll the Windows agents until the CNI reports ready.

        Uploads a PowerShell check script to every Windows node, then
        repeatedly runs it over ssh until every node returns a truthy
        value or `timeout` seconds pass.

        :param timeout: overall deadline in seconds (default 15 min)
        :raises Exception: if the CNI is not ready within the deadline
        """
        self.logging.info(
            "Waiting up to %.2f minutes for ready CNI on the Windows agents",
            timeout / 60.0)

        win_node_addresses = self.deployer.windows_private_addresses
        local_script_path = os.path.join(
            os.getcwd(), "cluster-api/scripts/confirm-ready-cni.ps1")
        remote_script_path = "/tmp/confirm-ready-cni.ps1"

        self._upload_to(
            local_script_path, remote_script_path, win_node_addresses)

        sleep_time = 10
        start = time.time()
        while True:
            elapsed = time.time() - start
            if elapsed > timeout:
                err_msg = "The CNI was not ready within %s minutes." % (
                    timeout / 60.0)
                self.logging.error(err_msg)
                raise Exception(err_msg)

            all_ready = True
            for node_address in win_node_addresses:
                # Each per-node check is itself bounded to one minute.
                cmd = ["timeout", "1m", "ssh", node_address,
                       remote_script_path]
                try:
                    stdout, _ = utils.run_shell_cmd(cmd, sensitive=True)
                except Exception:
                    # A failed ssh/check just means "not ready yet";
                    # stop probing the rest and sleep.
                    all_ready = False
                    break

                # The script prints a boolean-ish string ("True"/"False").
                cni_ready = strtobool(stdout.decode('ascii').strip())
                if not cni_ready:
                    all_ready = False
                    break

            if all_ready:
                self.logging.info(
                    "The CNI is ready on all the Windows agents")
                break

            time.sleep(sleep_time)
    def perf_run(self, num_of_time, average_results=True):
        """Run every query file in the generator's query dir repeatedly.

        :param num_of_time: how many times to run each query file. The
            original ``range(1, num_of_time)`` ran each query only
            ``num_of_time - 1`` times; fixed to run it ``num_of_time``
            times.
        :param average_results: when True, also append the average
            elapsed time over the successful runs.
        :return: None (results are accumulated in ``self.query_output``)
        """
        self.query_output.append('Performance Query Results:')
        query_dir = self.query_generator.query_dir
        for f in os.listdir(query_dir):
            query_path = os.path.join(query_dir, f)
            if not os.path.isfile(query_path):
                continue
            iterations = 0
            elapse = 0
            self.query_output.append('Running ' + f)
            for _ in range(num_of_time):
                run = self.perf_cmd + self.cmd + query_path + self.cmd_output
                output, err = run_shell_cmd(run)
                if not err:
                    iterations += 1
                    self.query_output.append(output)
                    # Elapsed time is the 4th-from-last comma-separated
                    # field of the output -- TODO confirm output format.
                    elapse += float(output.split(',')[-4])

            if average_results:
                # Guard against division by zero when every run failed.
                elapse = 0 if iterations == 0 else elapse / iterations
                self.query_output.append(
                    ' '.join(['average elapse:', str(elapse)]))
        return
Пример #21
0
    def _build_containerd_binaries(self):
        """Build containerd (with the CRI plugin), ctr and crictl for
        Windows and copy the resulting binaries into the local CI
        artifacts directory.
        """
        containerd_path = utils.get_containerd_folder()
        utils.clone_git_repo(self.opts.containerd_repo,
                             self.opts.containerd_branch, containerd_path)

        ctr_path = utils.get_ctr_folder()
        utils.clone_git_repo(self.opts.ctr_repo,
                             self.opts.ctr_branch, ctr_path)

        # cri-tools must live at its canonical GOPATH location.
        cri_tools_path = os.path.join(utils.get_go_path(), "src",
                                      "github.com", "kubernetes-sigs",
                                      "cri-tools")
        utils.clone_git_repo(self.opts.cri_tools_repo,
                             self.opts.cri_tools_branch, cri_tools_path)

        self.logging.info("Building containerd with cri plugin")
        utils.run_shell_cmd(["GOOS=windows", "make"], containerd_path)

        self.logging.info("Building ctr")
        utils.run_shell_cmd(["GOOS=windows", "make", "bin/ctr.exe"], ctr_path)

        self.logging.info("Building crictl")
        utils.run_shell_cmd(["GOOS=windows", "make", "crictl"], cri_tools_path)

        self.logging.info("Copying binaries to local artifacts directory")
        bin_dir = os.path.join(self.ci_artifacts_dir, "containerd/bin")
        os.makedirs(bin_dir, exist_ok=True)

        bins_glob = "%s/*" % os.path.join(containerd_path,
                                          constants.CONTAINERD_BINS_LOCATION)
        for bin_path in glob.glob(bins_glob):
            shutil.copy(bin_path, bin_dir)

        shutil.copy(os.path.join(ctr_path, constants.CONTAINERD_CTR_LOCATION),
                    bin_dir)
        shutil.copy(os.path.join(cri_tools_path, "_output/crictl.exe"),
                    bin_dir)
Пример #22
0
def get_repo_commit_count(repo):
    """Return the total number of commits reachable in *repo*."""
    out = utils.run_shell_cmd(
        "git --git-dir={} rev-list --all --count".format(repo))
    return int(out)
Пример #23
0
 def run_cmd_on_k8s_node(self, cmd, node_address, timeout="10m"):
     """Run *cmd* on a K8s node over ssh, bounded by *timeout*."""
     ssh_cmd = ["timeout", timeout, "ssh", node_address, "'%s'" % cmd]
     return utils.run_shell_cmd(ssh_cmd)
Пример #24
0
    def _build_k8s_artifacts(self):
        """Build the K8s binaries and DaemonSet images, then stage them
        in the local CI artifacts directory.

        Sets ``self.ci_version`` (mirrored onto the deployer) from the
        freshly built kubeadm binary.
        """
        k8s_path = utils.get_k8s_folder()
        utils.clone_git_repo(
            self.opts.k8s_repo, self.opts.k8s_branch, k8s_path)

        self.logging.info("Building K8s Linux binaries")
        cmd = [
            'make', 'WHAT="cmd/kubectl cmd/kubelet cmd/kubeadm"',
            'KUBE_BUILD_PLATFORMS="linux/amd64"'
        ]
        utils.run_shell_cmd(cmd, k8s_path)
        # pop() instead of del: the original raised KeyError when
        # KUBECTL_PATH was not set in the environment.
        os.environ.pop("KUBECTL_PATH", None)

        self.logging.info("Building K8s Windows binaries")
        cmd = [
            'make',
            'WHAT="cmd/kubectl cmd/kubelet cmd/kubeadm cmd/kube-proxy"',
            'KUBE_BUILD_PLATFORMS="windows/amd64"'
        ]
        utils.run_shell_cmd(cmd, k8s_path)

        self.logging.info("Building K8s Linux DaemonSet container images")
        cmd = ['make', 'quick-release-images']
        env = {"KUBE_FASTBUILD": "true",
               "KUBE_BUILD_CONFORMANCE": "n"}
        utils.retry_on_error()(utils.run_shell_cmd)(cmd, k8s_path, env)

        # Derive the CI version string from the built kubeadm binary.
        kubeadm_bin = os.path.join(constants.KUBERNETES_LINUX_BINS_LOCATION,
                                   'kubeadm')
        out, _ = utils.run_shell_cmd(
            [kubeadm_bin, "version", "-o=short"], k8s_path)
        self.ci_version = out.decode().strip()
        self.deployer.ci_version = self.ci_version

        ci_artifacts_linux_bin_dir = "%s/%s/bin/linux/amd64" % (
            self.ci_artifacts_dir, self.ci_version)
        ci_artifacts_windows_bin_dir = "%s/%s/bin/windows/amd64" % (
            self.ci_artifacts_dir, self.ci_version)
        ci_artifacts_images_dir = "%s/%s/images" % (
            self.ci_artifacts_dir, self.ci_version)

        os.makedirs(ci_artifacts_linux_bin_dir, exist_ok=True)
        os.makedirs(ci_artifacts_windows_bin_dir, exist_ok=True)
        os.makedirs(ci_artifacts_images_dir, exist_ok=True)

        for bin_name in ["kubectl", "kubelet", "kubeadm"]:
            linux_bin_path = "%s/%s/%s" % (
                k8s_path, constants.KUBERNETES_LINUX_BINS_LOCATION, bin_name)
            shutil.copy(linux_bin_path, ci_artifacts_linux_bin_dir)

        for bin_name in ["kubectl", "kubelet", "kubeadm", "kube-proxy"]:
            win_bin_path = "%s/%s/%s.exe" % (
                k8s_path, constants.KUBERNETES_WINDOWS_BINS_LOCATION, bin_name)
            shutil.copy(win_bin_path, ci_artifacts_windows_bin_dir)

        images_names = [
            "kube-apiserver.tar", "kube-controller-manager.tar",
            "kube-proxy.tar", "kube-scheduler.tar"
        ]
        for image_name in images_names:
            image_path = "%s/%s/%s" % (
                k8s_path, constants.KUBERNETES_IMAGES_LOCATION,
                image_name)
            shutil.copy(image_path, ci_artifacts_images_dir)
Пример #25
0
 def _wait_for_ready_pods(self):
     """Block until every pod in every namespace is Ready (30m cap)."""
     self.logging.info("Waiting for all the pods to be ready")
     wait_cmd = [
         self.kubectl, "wait", "--for=condition=Ready", "--timeout", "30m",
         "pods", "--all", "--all-namespaces"
     ]
     utils.run_shell_cmd(wait_cmd)
Пример #26
0
def list_commits(repo):
    """Return the abbreviated hashes of all commits in *repo*."""
    log_cmd = "git --git-dir={} log --pretty=format:\"%h\"".format(repo)
    return utils.run_shell_cmd(log_cmd).split('\n')