def gen_workflow_params(self, phase):
        """Build the ordered parameter mapping for a caffe2 workflow job."""
        params = OrderedDict()

        # Some language tokens use dashes in the build-environment name.
        lang = miniutils.override(self.language, {
            "onnx_py3": "onnx-py3",
            "onnx_main_py3.6": "onnx-main-py3.6",
            "onnx_ort1_py3.6": "onnx-ort1-py3.6",
            "onnx_ort2_py3.6": "onnx-ort2-py3.6",
        })

        name_parts = ["caffe2", lang] + self.get_build_name_middle_parts() + [phase]
        params["build_environment"] = miniutils.quote("-".join(name_parts))

        if "ios" in self.compiler_names:
            params["build_ios"] = miniutils.quote("1")

        # TODO cuda should not be considered a compiler
        is_cuda = "cuda" in self.compiler_names
        if phase == "test" and is_cuda:
            params["use_cuda_docker_runtime"] = miniutils.quote("1")

        if self.distro.name != "macos":
            params["docker_image"] = self.gen_docker_image()
            if self.build_only:
                params["build_only"] = miniutils.quote("1")

        if phase == "test":
            params["resource_class"] = "gpu.medium" if is_cuda else "large"

        return params
    def gen_yaml_tree(self, build_or_test):
        """Render the YAML job node for this binary build/test configuration."""
        env_tuples = [
            ("BUILD_ENVIRONMENT",
             miniutils.quote(" ".join(self.gen_build_env_parms()))),
        ]
        if self.libtorch_variant:
            env_tuples.append(
                ("LIBTORCH_VARIANT", miniutils.quote(self.libtorch_variant)))

        os_name = miniutils.override(self.os, {"macos": "mac"})
        anchor = "_".join([self.get_name_prefix(), os_name, build_or_test])
        tree = {"<<": "*" + anchor}

        if build_or_test == "test":
            # macOS smoke tests run without a docker image.
            if not (self.smoke and self.os == "macos"):
                env_tuples.append(("DOCKER_IMAGE", self.gen_docker_image()))
            if self.cuda_version:
                env_tuples.append(
                    ("USE_CUDA_DOCKER_RUNTIME", miniutils.quote("1")))
        elif self.os == "linux" and build_or_test != "upload":
            tree["docker"] = [{"image": self.gen_docker_image()}]

        tree["environment"] = OrderedDict(env_tuples)

        if build_or_test == "test" and self.cuda_version:
            tree["resource_class"] = "gpu.medium"

        return tree
    def gen_docker_image(self):
        """Return the quoted docker image name for this binary configuration.

        cxx11-abi and conda builds use dedicated builder images; every other
        pydistro maps onto the pytorch/manylinux-* family, suffixed by the
        gpu version (cuda tag or rocm tag).
        """
        if self.gcc_config_variant == 'gcc5.4_cxx11-abi':
            if self.gpu_version is None:
                return miniutils.quote("pytorch/libtorch-cxx11-builder:cpu")
            return miniutils.quote(
                f"pytorch/libtorch-cxx11-builder:{self.gpu_version}")

        if self.pydistro == "conda":
            if self.gpu_version is None:
                return miniutils.quote("pytorch/conda-builder:cpu")
            return miniutils.quote(
                f"pytorch/conda-builder:{self.gpu_version}")

        # manywheel and libtorch builds share the manylinux images.
        docker_distro_prefix = miniutils.override(self.pydistro, {
            "manywheel": "manylinux",
            "libtorch": "manylinux",
        })

        # The cpu nightlies are built on the pytorch/manylinux-cuda102 docker image
        # TODO cuda images should consolidate into tag-base images similar to rocm
        if not self.gpu_version:
            docker_distro_suffix = "cuda102"
        elif self.gpu_version.startswith("rocm"):
            docker_distro_suffix = "rocm:" + self.gpu_version.strip("rocm")
        else:
            docker_distro_suffix = self.gpu_version

        # NOTE: the old conda-specific suffix handling here was unreachable —
        # the pydistro == "conda" case always early-returns above — so it has
        # been removed without changing behavior.
        return miniutils.quote(
            "pytorch/" + docker_distro_prefix + "-" + docker_distro_suffix)
# Beispiel #4
    def gen_workflow_job(self,
                         phase,
                         upload_phase_dependency=None,
                         nightly=False):
        """Assemble one CircleCI workflow job entry for this configuration."""
        job = OrderedDict()
        job["name"] = self.gen_build_name(phase, nightly)
        job["build_environment"] = miniutils.quote(
            " ".join(self.gen_build_env_parms()))
        job["requires"] = ["setup"]

        if self.smoke:
            # Smoke jobs wait on the s3 html refreshes and run post-nightly only.
            job["requires"] += [
                "update_s3_htmls_for_nightlies",
                "update_s3_htmls_for_nightlies_devtoolset7",
            ]
            job["filters"] = {"branches": {"only": "postnightly"}}
        else:
            branches = ["nightly"]
            # we only want to add the release branch filter if we aren't
            # uploading
            if phase != "upload":
                branches.append(r"/release\/.*/")
            job["filters"] = {
                "branches": {"only": branches},
                # Will run on tags like v1.5.0-rc1, etc.
                "tags": {
                    # Using a raw string here to avoid having to escape
                    # anything
                    "only": r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"
                },
            }

        if self.libtorch_variant:
            job["libtorch_variant"] = miniutils.quote(self.libtorch_variant)

        if phase == "test":
            if not self.smoke:
                job["requires"].append(self.gen_build_name("build", nightly))
            # macOS smoke tests run without docker.
            if not (self.smoke and self.os == "macos"):
                job["docker_image"] = self.gen_docker_image()
            if self.cuda_version:
                job["use_cuda_docker_runtime"] = miniutils.quote("1")
                job["resource_class"] = "gpu.medium"
        elif self.os == "linux" and phase != "upload":
            job["docker_image"] = self.gen_docker_image()

        if phase == "upload":
            job["context"] = "org-member"
            job["requires"] = [
                "setup",
                self.gen_build_name(upload_phase_dependency, nightly),
            ]

        os_name = miniutils.override(self.os, {"macos": "mac"})
        return {"_".join([self.get_name_prefix(), os_name, phase]): job}
    def gen_yaml_tree(self, build_or_test):
        """Produce the YAML mapping describing this job for build_or_test."""
        environment = [("BUILD_ENVIRONMENT",
                        miniutils.quote(" ".join(self.gen_build_env_parms())))]
        if self.libtorch_variant:
            environment.append(
                ("LIBTORCH_VARIANT", miniutils.quote(self.libtorch_variant)))

        platform = miniutils.override(self.os, {"macos": "mac"})
        node = {
            "<<": "*{}".format(
                "_".join([self.get_name_prefix(), platform, build_or_test])),
        }

        is_test = build_or_test == "test"
        if is_test:
            # macOS smoke tests do not run inside docker.
            if not (self.smoke and self.os == "macos"):
                environment.append(("DOCKER_IMAGE", self.gen_docker_image()))
            if self.cuda_version:
                environment.append(
                    ("USE_CUDA_DOCKER_RUNTIME", miniutils.quote("1")))
        else:
            if self.os == "linux" and build_or_test != "upload":
                node["docker"] = [{"image": self.gen_docker_image()}]

        node["environment"] = OrderedDict(environment)

        if is_test and self.cuda_version:
            node["resource_class"] = "gpu.medium"

        return node
    def gen_workflow_job(self, phase, upload_phase_dependency=None):
        """Build the nightly-only workflow job dict for the given phase."""
        job = OrderedDict()
        job["name"] = self.gen_build_name(phase)
        job["build_environment"] = miniutils.quote(
            " ".join(self.gen_build_env_parms()))
        job["requires"] = ["setup"]
        job["filters"] = {"branches": {"only": "nightly"}}

        if self.libtorch_variant:
            job["libtorch_variant"] = miniutils.quote(self.libtorch_variant)

        if phase == "test":
            if not self.smoke:
                job["requires"].append(self.gen_build_name("build"))
            # macOS smoke tests run without docker.
            if not (self.smoke and self.os == "macos"):
                job["docker_image"] = self.gen_docker_image()
            if self.cuda_version:
                job["use_cuda_docker_runtime"] = miniutils.quote("1")
                job["resource_class"] = "gpu.medium"
        elif self.os == "linux" and phase != "upload":
            job["docker_image"] = self.gen_docker_image()

        if phase == "upload":
            job["context"] = "org-member"
            job["requires"] = [
                "setup", self.gen_build_name(upload_phase_dependency)
            ]

        platform = miniutils.override(self.os, {"macos": "mac"})
        return {"_".join([self.get_name_prefix(), platform, phase]): job}
# Beispiel #7
    def gen_workflow_job(self,
                         phase,
                         upload_phase_dependency=None,
                         nightly=False):
        """Construct the workflow job mapping for this binary configuration."""
        job = OrderedDict()
        job["name"] = self.gen_build_name(phase, nightly)
        job["build_environment"] = miniutils.quote(
            " ".join(self.gen_build_env_parms()))

        if self.smoke:
            # Smoke jobs depend on the s3 html refresh and run on nightly only.
            job["requires"] = [
                "update_s3_htmls",
            ]
            job["filters"] = branch_filters.gen_filter_dict(
                branches_list=["nightly"],
                tags_list=[branch_filters.RC_PATTERN],
            )
        else:
            # Uploads run only from nightly; everything else runs on any branch.
            branch = "nightly" if phase == "upload" else r"/.*/"
            job["filters"] = branch_filters.gen_filter_dict(
                branches_list=[branch],
                tags_list=[branch_filters.RC_PATTERN],
            )

        if self.libtorch_variant:
            job["libtorch_variant"] = miniutils.quote(self.libtorch_variant)

        if phase == "test":
            if not self.smoke:
                job["requires"] = [self.gen_build_name("build", nightly)]
            if self.os != "windows" and not (self.smoke and self.os == "macos"):
                job["docker_image"] = self.gen_docker_image()
            if self.os != "windows" and self.cuda_version:
                job["use_cuda_docker_runtime"] = miniutils.quote("1")
            if self.cuda_version:
                # Windows GPU tests use a dedicated executor rather than a
                # resource class.
                if self.os == "windows":
                    job["executor"] = "windows-with-nvidia-gpu"
                else:
                    job["resource_class"] = "gpu.medium"
        elif self.os == "linux" and phase != "upload":
            job["docker_image"] = self.gen_docker_image()

        if phase == "upload":
            job["context"] = "org-member"
            job["requires"] = [
                self.gen_build_name(upload_phase_dependency, nightly)
            ]

        os_name = miniutils.override(self.os, {"macos": "mac"})
        return {"_".join([self.get_name_prefix(), os_name, phase]): job}
    def gen_docker_image(self):
        """Return the quoted caffe2 docker image path for this build."""
        # onnx_py2 and cmake builds reuse the plain py2 image.
        lang = miniutils.override(self.language, {
            "onnx_py2": "py2",
            "cmake": "py2",
        })
        image_tag = "-".join([lang] + self.get_build_name_middle_parts())
        return miniutils.quote(
            DOCKER_IMAGE_PATH_BASE + image_tag + ":" + str(DOCKER_IMAGE_VERSION))
    def construct_phase_name(self, phase):
        """Derive the job name for *phase*, normalizing dots to underscores."""
        parts = self.get_build_name_root_parts()
        if phase == "build":
            # Both ort variants are remapped onto the onnx_main build name
            # (presumably they share one build job — confirm with callers).
            shared_build = {
                "onnx_ort1_py3.6": "onnx_main_py3.6",
                "onnx_ort2_py3.6": "onnx_main_py3.6",
            }
            parts = [miniutils.override(part, shared_build) for part in parts]
        return "_".join(parts + [phase]).replace(".", "_")
    def gen_docker_image(self):
        """Quoted caffe2 docker image reference (base path + parts + version)."""
        # cmake and onnx_py2 builds share the py2 image.
        substitutions = {"onnx_py2": "py2", "cmake": "py2"}
        lang = miniutils.override(self.language, substitutions)
        name_parts = [lang] + self.get_build_name_middle_parts()
        image = DOCKER_IMAGE_PATH_BASE + "-".join(name_parts)
        return miniutils.quote(image + ":" + str(DOCKER_IMAGE_VERSION))
# Beispiel #11
    def gen_docker_image(self):
        """Return the quoted soumith/* docker image for this nightly build."""
        # manywheel and libtorch both build on the manylinux images.
        prefix = miniutils.override(self.pydistro, {
            "manywheel": "manylinux",
            "libtorch": "manylinux",
        })
        # conda images use the bare "-cuda" tag; cpu builds default to 80.
        suffix = "" if self.pydistro == "conda" else (self.cuda_version or "80")
        return miniutils.quote("soumith/{}-cuda{}".format(prefix, suffix))
    def gen_docker_image(self):
        """Select the soumith docker image matching pydistro and cuda version."""
        if self.pydistro == "conda":
            cuda_suffix = ""
        else:
            # The cpu nightlies are built on the soumith/manylinux-cuda80 docker image
            cuda_suffix = self.cuda_version or "80"
        distro_prefix = miniutils.override(
            self.pydistro,
            {"manywheel": "manylinux", "libtorch": "manylinux"},
        )
        return miniutils.quote(
            "soumith/" + distro_prefix + "-cuda" + cuda_suffix)
# Beispiel #13
    def gen_yaml_tree(self, phase):
        """Emit the YAML node for a caffe2 job in the given phase."""
        env = []

        lang = miniutils.override(self.language, {
            "onnx_py2": "onnx-py2",
            "onnx_py3.6": "onnx-py3.6",
        })

        name_parts = ["caffe2", lang] + self.get_build_name_middle_parts() + [phase]
        build_env = "-".join(name_parts)
        # macOS builds take the build-environment string unquoted.
        if self.distro.name != "macos":
            build_env = miniutils.quote(build_env)
        env.append(("BUILD_ENVIRONMENT", build_env))

        if self.compiler.name == "ios":
            env.append(("BUILD_IOS", miniutils.quote("1")))

        is_test = phase == "test"
        # TODO cuda should not be considered a compiler
        is_cuda = self.compiler.name == "cuda"
        if is_test and is_cuda:
            env.append(("USE_CUDA_DOCKER_RUNTIME", miniutils.quote("1")))

        if self.distro.name == "macos":
            env.append(("PYTHON_VERSION", miniutils.quote("2")))
        else:
            env.append(("DOCKER_IMAGE", self.gen_docker_image()))
            if self.build_only:
                env.append(("BUILD_ONLY", miniutils.quote("1")))

        node = OrderedDict({"environment": OrderedDict(env)})

        if is_test:
            node["resource_class"] = "gpu.medium" if is_cuda else "large"

        node["<<"] = "*" + "_".join(
            ["caffe2", self.get_platform(), phase, "defaults"])

        return node
    def gen_docker_image(self):
        """Choose the binary-build docker image for this configuration."""
        # cxx11-abi builds have a dedicated ubuntu-based image.
        if self.gcc_config_variant == 'gcc5.4_cxx11-abi':
            return miniutils.quote(
                "pytorch/pytorch-binary-docker-image-ubuntu16.04:latest")

        prefix = miniutils.override(self.pydistro, {
            "manywheel": "manylinux",
            "libtorch": "manylinux",
        })
        # The cpu nightlies are built on the pytorch/manylinux-cuda102 docker image
        suffix = "" if self.pydistro == "conda" else (self.cuda_version or "102")
        return miniutils.quote("pytorch/" + prefix + "-cuda" + suffix)
    def gen_docker_image(self):
        """Return the quoted soumith docker image name for this build."""
        word_map = {"manywheel": "manylinux", "libtorch": "manylinux"}
        prefix = miniutils.override(self.pydistro, word_map)
        # The cpu nightlies are built on the soumith/manylinux-cuda100 docker image
        if self.pydistro == "conda":
            suffix = ""
        else:
            suffix = self.cuda_version or "100"
        return miniutils.quote("soumith/%s-cuda%s" % (prefix, suffix))
    def gen_docker_image(self):
        """Resolve the docker image for this binary configuration."""
        if self.gcc_config_variant == 'gcc5.4_cxx11-abi':
            return miniutils.quote("pytorch/conda-cuda-cxx11-ubuntu1604:latest")

        prefix = miniutils.override(self.pydistro, {
            "manywheel": "manylinux",
            "libtorch": "manylinux",
        })

        # The cpu nightlies are built on the pytorch/manylinux-cuda100 docker image
        if self.pydistro == "conda":
            suffix = ""
        else:
            suffix = self.cuda_version or "100"

        if self.cuda_version == "101":
            # NOTE(review): sha-pinned image, returned UNquoted unlike every
            # other branch — looks like a deliberate pin, but confirm the
            # missing miniutils.quote is intentional.
            return "soumith/manylinux-cuda101@sha256:5d62be90d5b7777121180e6137c7eed73d37aaf9f669c51b783611e37e0b4916"

        return miniutils.quote("pytorch/" + prefix + "-cuda" + suffix)
    def gen_yaml_tree(self):
        """Build the YAML mapping for this caffe2 job (phase from self.phase)."""
        env_pairs = []

        lang = miniutils.override(self.language, {"onnx_py2": "onnx-py2"})

        build_env = "-".join(
            ["caffe2", lang]
            + self.get_build_name_middle_parts()
            + [self.phase])
        on_macos = self.distro.name == "macos"
        # macOS keeps the build-environment string unquoted.
        if not on_macos:
            build_env = miniutils.quote(build_env)
        env_pairs.append(("BUILD_ENVIRONMENT", build_env))

        if self.compiler.name == "ios":
            env_pairs.append(("BUILD_IOS", miniutils.quote("1")))

        # TODO cuda should not be considered a compiler
        uses_cuda = self.compiler.name == "cuda"
        if self.phase == "test" and uses_cuda:
            env_pairs.append(("USE_CUDA_DOCKER_RUNTIME", miniutils.quote("1")))

        if on_macos:
            env_pairs.append(("PYTHON_VERSION", miniutils.quote("2")))
        else:
            env_pairs.append(("DOCKER_IMAGE", self.gen_docker_image()))
            if self.build_only:
                env_pairs.append(("BUILD_ONLY", miniutils.quote("1")))

        result = OrderedDict({"environment": OrderedDict(env_pairs)})

        if self.phase == "test":
            result["resource_class"] = "gpu.medium" if uses_cuda else "large"

        result["<<"] = "*" + "_".join(
            ["caffe2", self.get_platform(), self.phase, "defaults"])

        return result