def gen_yaml_tree(self, build_or_test):
    """Build the YAML job dict for one binary build/test phase.

    Returns a dict carrying a "<<" merge-anchor reference, the job's
    environment variables, and (for cuda tests) a GPU resource class.
    """
    is_test = build_or_test == "test"
    env_entries = [
        ("BUILD_ENVIRONMENT", miniutils.quote(" ".join(self.gen_build_env_parms()))),
    ]
    if self.libtorch_variant:
        env_entries.append(("LIBTORCH_VARIANT", miniutils.quote(self.libtorch_variant)))
    platform = miniutils.override(self.os, {"macos": "mac"})
    merge_anchor = "*" + "_".join([self.get_name_prefix(), platform, build_or_test])
    tree = {"<<": merge_anchor}
    if is_test:
        # macOS smoke tests run outside docker; everything else gets an image.
        if not (self.smoke and self.os == "macos"):
            env_entries.append(("DOCKER_IMAGE", self.gen_docker_image()))
        if self.cuda_version:
            env_entries.append(("USE_CUDA_DOCKER_RUNTIME", miniutils.quote("1")))
    elif self.os == "linux" and build_or_test != "upload":
        # Linux build jobs declare their docker executor image directly.
        tree["docker"] = [{"image": self.gen_docker_image()}]
    tree["environment"] = OrderedDict(env_entries)
    if is_test and self.cuda_version:
        tree["resource_class"] = "gpu.medium"
    return tree
def gen_workflow_job(self, phase, upload_phase_dependency=None, nightly=False):
    """Build one CircleCI workflow-job entry for a binary build phase.

    Returns a single-entry dict mapping the job template name (prefix_os_phase)
    to an OrderedDict of job parameters.

    :param phase: "build", "test", or "upload".
    :param upload_phase_dependency: for "upload" jobs, the phase whose build
        name this upload requires (presumably "build" or "test" — confirm with callers).
    :param nightly: forwarded to gen_build_name to pick the nightly job name.
    """
    job_def = OrderedDict()
    job_def["name"] = self.gen_build_name(phase, nightly)
    job_def["build_environment"] = miniutils.quote(" ".join(
        self.gen_build_env_parms()))
    # Every job waits on the common "setup" job.
    job_def["requires"] = ["setup"]
    if self.smoke:
        # Smoke tests additionally wait for the s3 html update jobs and run
        # only on the "postnightly" branch.
        job_def["requires"].append("update_s3_htmls_for_nightlies")
        job_def["requires"].append(
            "update_s3_htmls_for_nightlies_devtoolset7")
        job_def["filters"] = {"branches": {"only": "postnightly"}}
    else:
        filter_branches = ["nightly"]
        # we only want to add the release branch filter if we aren't
        # uploading
        if phase not in ["upload"]:
            filter_branches.append(r"/release\/.*/")
        job_def["filters"] = {
            "branches": {
                "only": filter_branches
            },
            # Will run on tags like v1.5.0-rc1, etc.
            "tags": {
                # Using a raw string here to avoid having to escape
                # anything
                "only": r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"
            }
        }
    if self.libtorch_variant:
        job_def["libtorch_variant"] = miniutils.quote(
            self.libtorch_variant)
    if phase == "test":
        if not self.smoke:
            # Non-smoke tests also require the matching build job.
            job_def["requires"].append(
                self.gen_build_name("build", nightly))
        # macOS smoke tests run outside docker; all other tests get an image.
        if not (self.smoke and self.os == "macos"):
            job_def["docker_image"] = self.gen_docker_image()
        if self.cuda_version:
            job_def["use_cuda_docker_runtime"] = miniutils.quote("1")
    else:
        # Linux build jobs run inside a docker image; uploads do not.
        if self.os == "linux" and phase != "upload":
            job_def["docker_image"] = self.gen_docker_image()
    if phase == "test":
        if self.cuda_version:
            job_def["resource_class"] = "gpu.medium"
    if phase == "upload":
        # Uploads need org credentials and replace "requires" with setup plus
        # the job whose artifacts they upload.
        job_def["context"] = "org-member"
        job_def["requires"] = [
            "setup",
            self.gen_build_name(upload_phase_dependency, nightly)
        ]
    os_name = miniutils.override(self.os, {"macos": "mac"})
    job_name = "_".join([self.get_name_prefix(), os_name, phase])
    return {job_name: job_def}
def gen_yaml_tree(self, build_or_test):
    """Produce the per-phase YAML job dict for a binary config.

    The result references a merge anchor ("<<"), lists the environment, and
    for cuda test phases requests a GPU resource class.
    """
    env = [("BUILD_ENVIRONMENT", miniutils.quote(" ".join(self.gen_build_env_parms())))]
    if self.libtorch_variant:
        env.append(("LIBTORCH_VARIANT", miniutils.quote(self.libtorch_variant)))
    os_label = miniutils.override(self.os, {"macos": "mac"})
    result = {
        "<<": "*" + "_".join([self.get_name_prefix(), os_label, build_or_test]),
    }
    if build_or_test != "test":
        # Non-test linux phases (except upload) run inside a docker executor.
        if self.os == "linux" and build_or_test != "upload":
            result["docker"] = [{"image": self.gen_docker_image()}]
    else:
        # macOS smoke tests do not use docker.
        if not (self.smoke and self.os == "macos"):
            env.append(("DOCKER_IMAGE", self.gen_docker_image()))
        if self.cuda_version:
            env.append(("USE_CUDA_DOCKER_RUNTIME", miniutils.quote("1")))
    result["environment"] = OrderedDict(env)
    if build_or_test == "test" and self.cuda_version:
        result["resource_class"] = "gpu.medium"
    return result
def gen_tree(self):
    """Emit the CircleCI job entry for one nightly iOS binary config."""
    if self.is_upload:
        # Upload jobs wait on every build config's job.
        requires = [conf.gen_job_name() for conf in BUILD_CONFIGS]
    else:
        requires = []
    props = {
        "build_environment": "-".join(["libtorch"] + self.get_common_name_pieces(True)),
        "requires": requires,
        "context": "org-member",
        "filters": {"branches": {"only": "nightly"}},
    }
    if not self.is_upload:
        # Build jobs carry the arch/platform details and always enable
        # metal and coreml ("1").
        props["ios_arch"] = self.variant
        props["ios_platform"] = ios_definitions.get_platform(self.variant)
        props["name"] = self.gen_job_name()
        props["use_metal"] = miniutils.quote(str(int(True)))
        props["use_coreml"] = miniutils.quote(str(int(True)))
    template = "_".join([
        "binary",
        "ios",
        self.get_phase_name(),
    ])
    return [{template: props}]
def gen_docker_image(self):
    """Return the (quoted) docker image name for this binary build config.

    Special-cased images (cxx11-abi libtorch, conda) are returned directly;
    otherwise the image is assembled from the pydistro and gpu version.
    """
    if self.gcc_config_variant == 'gcc5.4_cxx11-abi':
        if self.gpu_version is None:
            return miniutils.quote("pytorch/libtorch-cxx11-builder:cpu")
        else:
            return miniutils.quote(
                f"pytorch/libtorch-cxx11-builder:{self.gpu_version}")
    if self.pydistro == "conda":
        if self.gpu_version is None:
            return miniutils.quote("pytorch/conda-builder:cpu")
        else:
            return miniutils.quote(
                f"pytorch/conda-builder:{self.gpu_version}")
    docker_word_substitution = {
        "manywheel": "manylinux",
        "libtorch": "manylinux",
    }
    docker_distro_prefix = miniutils.override(self.pydistro,
                                              docker_word_substitution)
    # The cpu nightlies are built on the pytorch/manylinux-cuda102 docker image
    # TODO cuda images should consolidate into tag-base images similar to rocm
    if not self.gpu_version:
        alt_docker_suffix = "cuda102"
    elif self.gpu_version.startswith("rocm"):
        # BUG FIX: the original used self.gpu_version.strip("rocm"), which
        # strips any leading/trailing characters from the SET {r,o,c,m}
        # rather than removing the literal "rocm" prefix (e.g. a version
        # ending in one of those letters would be mangled). Slice off the
        # exact prefix instead.
        alt_docker_suffix = "rocm:" + self.gpu_version[len("rocm"):]
    else:
        alt_docker_suffix = self.gpu_version
    # NOTE: the conda case already returned above, so the original's
    # conda-specific suffix fallback here was unreachable; the suffix is
    # always alt_docker_suffix at this point.
    return miniutils.quote("pytorch/" + docker_distro_prefix + "-" +
                           alt_docker_suffix)
def gen_workflow_params(self, phase):
    """Assemble the workflow parameter dict for one caffe2 build/test phase."""
    substitutions = {
        "onnx_py3": "onnx-py3",
        "onnx_main_py3.6": "onnx-main-py3.6",
        "onnx_ort1_py3.6": "onnx-ort1-py3.6",
        "onnx_ort2_py3.6": "onnx-ort2-py3.6",
    }
    lang = miniutils.override(self.language, substitutions)
    name_parts = ["caffe2", lang] + self.get_build_name_middle_parts() + [phase]
    params = OrderedDict()
    params["build_environment"] = miniutils.quote("-".join(name_parts))
    if "ios" in self.compiler_names:
        params["build_ios"] = miniutils.quote("1")
    is_test = phase == "test"
    if is_test:
        # TODO cuda should not be considered a compiler
        if "cuda" in self.compiler_names:
            params["use_cuda_docker_runtime"] = miniutils.quote("1")
    if self.distro.name != "macos":
        params["docker_image"] = self.gen_docker_image()
    if self.build_only:
        params["build_only"] = miniutils.quote("1")
    if is_test:
        params["resource_class"] = (
            "gpu.medium" if "cuda" in self.compiler_names else "large")
    return params
def gen_yaml_tree(self, build_or_test):
    """Render the YAML job dict for a pytorch linux build/test phase."""
    name_pieces = self.get_build_job_name_pieces(build_or_test)
    env = OrderedDict()
    env["BUILD_ENVIRONMENT"] = "-".join(map(str, name_pieces))
    env["DOCKER_IMAGE"] = self.gen_docker_image_path()
    if self.pyver:
        env["PYTHON_VERSION"] = miniutils.quote(self.pyver)
    if build_or_test == "test" and self.gpu_resource:
        env["USE_CUDA_DOCKER_RUNTIME"] = miniutils.quote("1")
    tree = {
        "environment": env,
        "<<": "*" + "_".join(["pytorch", "linux", build_or_test, "defaults"]),
    }
    if build_or_test == "test":
        if self.gpu_resource:
            resource_class = "gpu." + self.gpu_resource
            # "large" GPU tests also flag multi-GPU in the environment.
            if self.gpu_resource == "large":
                env["MULTI_GPU"] = miniutils.quote("1")
        else:
            resource_class = "large"
        tree["resource_class"] = resource_class
    return tree
def gen_yaml_tree(self, build_or_test):
    """Build the YAML job dict (environment + merge anchor) for this config."""
    is_test_phase = build_or_test == "test"
    env_name = "-".join(str(piece) for piece in
                        self.get_build_job_name_pieces(build_or_test))
    pairs = [
        ("BUILD_ENVIRONMENT", env_name),
        ("DOCKER_IMAGE", self.gen_docker_image_path()),
    ]
    if self.pyver:
        pairs.append(("PYTHON_VERSION", miniutils.quote(self.pyver)))
    if is_test_phase and self.gpu_resource:
        pairs.append(("USE_CUDA_DOCKER_RUNTIME", miniutils.quote("1")))
    env = OrderedDict(pairs)
    yaml_tree = {
        "environment": env,
        "<<": "*" + "_".join(["pytorch", "linux", build_or_test, "defaults"]),
    }
    if is_test_phase:
        if not self.gpu_resource:
            yaml_tree["resource_class"] = "large"
        else:
            # "large" GPU tests additionally export MULTI_GPU.
            if self.gpu_resource == "large":
                env["MULTI_GPU"] = miniutils.quote("1")
            yaml_tree["resource_class"] = "gpu." + self.gpu_resource
    return yaml_tree
def gen_workflow_job(self, phase, upload_phase_dependency=None):
    """Build one nightly binary workflow-job entry for the given phase."""
    is_test = phase == "test"
    job = OrderedDict()
    job["name"] = self.gen_build_name(phase)
    job["build_environment"] = miniutils.quote(" ".join(self.gen_build_env_parms()))
    # All jobs wait on setup and run only on the nightly branch.
    job["requires"] = ["setup"]
    job["filters"] = {"branches": {"only": "nightly"}}
    if self.libtorch_variant:
        job["libtorch_variant"] = miniutils.quote(self.libtorch_variant)
    if is_test:
        if not self.smoke:
            # Non-smoke tests also depend on the matching build job.
            job["requires"].append(self.gen_build_name("build"))
        # macOS smoke tests run outside docker.
        if not (self.smoke and self.os == "macos"):
            job["docker_image"] = self.gen_docker_image()
        if self.cuda_version:
            job["use_cuda_docker_runtime"] = miniutils.quote("1")
    elif self.os == "linux" and phase != "upload":
        job["docker_image"] = self.gen_docker_image()
    if is_test and self.cuda_version:
        job["resource_class"] = "gpu.medium"
    if phase == "upload":
        # Uploads need org credentials and depend on the named phase's job.
        job["context"] = "org-member"
        job["requires"] = ["setup", self.gen_build_name(upload_phase_dependency)]
    platform = miniutils.override(self.os, {"macos": "mac"})
    return {"_".join([self.get_name_prefix(), platform, phase]): job}
def gen_workflow_job(self, phase, upload_phase_dependency=None, nightly=False):
    """Build one CircleCI workflow-job entry (windows-aware variant).

    Returns a single-entry dict mapping the template job name to its
    parameters.

    :param phase: "build", "test", or "upload".
    :param upload_phase_dependency: for "upload", the phase whose job this
        upload requires.
    :param nightly: forwarded to gen_build_name for nightly naming.
    """
    job_def = OrderedDict()
    job_def["name"] = self.gen_build_name(phase, nightly)
    job_def["build_environment"] = miniutils.quote(" ".join(
        self.gen_build_env_parms()))
    if self.smoke:
        # Smoke jobs wait on the s3 html update and only run on nightly
        # (plus release-candidate tags).
        job_def["requires"] = [
            "update_s3_htmls",
        ]
        job_def["filters"] = branch_filters.gen_filter_dict(
            branches_list=["nightly"],
            tags_list=[branch_filters.RC_PATTERN],
        )
    else:
        # Uploads are restricted to nightly; everything else runs on all
        # branches (plus RC tags).
        if phase in ["upload"]:
            filter_branch = "nightly"
        else:
            filter_branch = r"/.*/"
        job_def["filters"] = branch_filters.gen_filter_dict(
            branches_list=[filter_branch],
            tags_list=[branch_filters.RC_PATTERN],
        )
    if self.libtorch_variant:
        job_def["libtorch_variant"] = miniutils.quote(
            self.libtorch_variant)
    if phase == "test":
        if not self.smoke:
            # Non-smoke tests depend (only) on the matching build job.
            job_def["requires"] = [self.gen_build_name("build", nightly)]
        # Windows and macOS smoke tests do not run in docker.
        if not (self.smoke and self.os == "macos") and self.os != "windows":
            job_def["docker_image"] = self.gen_docker_image()
        if self.os != "windows" and self.cuda_version:
            job_def["use_cuda_docker_runtime"] = miniutils.quote("1")
    else:
        if self.os == "linux" and phase != "upload":
            job_def["docker_image"] = self.gen_docker_image()
    if phase == "test":
        if self.cuda_version:
            # Windows uses a dedicated GPU executor; linux uses a resource class.
            if self.os == "windows":
                job_def["executor"] = "windows-with-nvidia-gpu"
            else:
                job_def["resource_class"] = "gpu.medium"
    if phase == "upload":
        job_def["context"] = "org-member"
        job_def["requires"] = [
            self.gen_build_name(upload_phase_dependency, nightly)
        ]
    os_name = miniutils.override(self.os, {"macos": "mac"})
    job_name = "_".join([self.get_name_prefix(), os_name, phase])
    return {job_name: job_def}
def gen_workflow_job(channel: str):
    """Build the anaconda-prune job entry for one conda channel."""
    job = OrderedDict()
    job["name"] = f"anaconda-prune-{channel}"
    job["context"] = quote("org-member")
    job["packages"] = quote(PACKAGES_TO_PRUNE)
    job["channel"] = channel
    # Pruning runs only on the postnightly branch.
    job["filters"] = gen_filter_dict(branches_list=["postnightly"])
    return OrderedDict({"anaconda_prune": job})
def get_workflow_jobs():
    """Generates a list of docker image build definitions"""
    jobs = []
    for image_name in IMAGE_NAMES:
        params = OrderedDict()
        params["name"] = quote(image_name)
        params["image_name"] = quote(image_name)
        jobs.append(OrderedDict({"docker_build_job": params}))
    return jobs
def gen_yaml_tree(self, phase):
    """Build the YAML job dict for one caffe2 build/test phase."""
    substitutions = {
        "onnx_py2": "onnx-py2",
        "onnx_py3.6": "onnx-py3.6",
    }
    lang = miniutils.override(self.language, substitutions)
    name_parts = ["caffe2", lang] + self.get_build_name_middle_parts() + [phase]
    build_env = "-".join(name_parts)
    on_macos = self.distro.name == "macos"
    # macOS keeps the raw value; everything else is quoted for YAML.
    if not on_macos:
        build_env = miniutils.quote(build_env)
    env = OrderedDict()
    env["BUILD_ENVIRONMENT"] = build_env
    if self.compiler.name == "ios":
        env["BUILD_IOS"] = miniutils.quote("1")
    if phase == "test":
        # TODO cuda should not be considered a compiler
        if self.compiler.name == "cuda":
            env["USE_CUDA_DOCKER_RUNTIME"] = miniutils.quote("1")
    if on_macos:
        env["PYTHON_VERSION"] = miniutils.quote("2")
    else:
        env["DOCKER_IMAGE"] = self.gen_docker_image()
        if self.build_only:
            env["BUILD_ONLY"] = miniutils.quote("1")
    tree = OrderedDict({"environment": env})
    if phase == "test":
        tree["resource_class"] = (
            "large" if self.compiler.name != "cuda" else "gpu.medium")
    tree["<<"] = "*" + "_".join(["caffe2", self.get_platform(), phase, "defaults"])
    return tree
def gen_docker_image(self):
    """Return the (quoted) docker image for this binary build config."""
    # The cxx11-abi libtorch builds use a dedicated prebuilt image.
    if self.gcc_config_variant == 'gcc5.4_cxx11-abi':
        return miniutils.quote("pytorch/pytorch-binary-docker-image-ubuntu16.04:latest")
    prefix = miniutils.override(self.pydistro, {
        "manywheel": "manylinux",
        "libtorch": "manylinux",
    })
    # The cpu nightlies are built on the pytorch/manylinux-cuda102 docker image
    version_suffix = self.cuda_version or "102"
    suffix = "" if self.pydistro == "conda" else version_suffix
    return miniutils.quote("pytorch/" + prefix + "-cuda" + suffix)
def gen_workflow_params(self, phase):
    """Assemble workflow parameters (env name, image, resources) for a phase."""
    pieces = self.get_build_job_name_pieces(phase)
    params = OrderedDict()
    params["build_environment"] = miniutils.quote("-".join(str(p) for p in pieces))
    params["docker_image"] = self.gen_docker_image_path()
    if phase == "test":
        if self.gpu_resource:
            params["use_cuda_docker_runtime"] = miniutils.quote("1")
            params["resource_class"] = "gpu." + self.gpu_resource
        else:
            params["resource_class"] = "large"
    return params
def get_workflow_jobs():
    """Generates a list of docker image build definitions"""
    jobs = []
    for image_name in IMAGE_NAMES:
        build_params = OrderedDict([
            ("name", quote(f"docker-{image_name}")),
            ("image_name", quote(image_name)),
        ])
        if image_name == "pytorch-linux-xenial-py3.6-gcc5.4":
            # pushing documentation on tags requires CircleCI to also
            # build all the dependencies on tags, including this docker image
            build_params['filters'] = gen_filter_dict(branches_list=r"/.*/",
                                                      tags_list=RC_PATTERN)
        jobs.append(OrderedDict({"docker_build_job": build_params}))
    return jobs
def gen_workflow_job(self, phase):
    """Build one pytorch linux build/test workflow-job entry."""
    job = OrderedDict()
    job["name"] = self.gen_build_name(phase)
    # All jobs require the setup job
    job["requires"] = ["setup"]
    if phase == "test":
        # TODO When merging the caffe2 and pytorch jobs, it might be convenient for a while to make a
        # caffe2 test job dependent on a pytorch build job. This way we could quickly dedup the repeated
        # build of pytorch in the caffe2 build job, and just run the caffe2 tests off of a completed
        # pytorch build job (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259452641)
        dependency_build = self.parent_build or self
        job["requires"].append(dependency_build.gen_build_name("build"))
        job_name = "pytorch_linux_test"
    else:
        if phase == "build":
            # Builds also wait for their base docker image to be built.
            job["requires"].append(
                miniutils.quote("docker-" + self.gen_docker_image_name()))
        job_name = "pytorch_linux_build"
    if not self.is_important:
        # If you update this, update
        # caffe2_build_definitions.py too
        job["filters"] = {
            "branches": {
                "only": ["master", r"/ci-all\/.*/", r"/release\/.*/"]
            }
        }
    job.update(self.gen_workflow_params(phase))
    return {job_name: job}
def gen_docker_image_path(self):
    """Return the (quoted) docker image path for this config's environment."""
    # Use the parent build's parameters when one exists.
    config_source = self.parent_build or self
    env_name = "-".join(config_source.get_parms(True))
    return miniutils.quote(gen_docker_image_path(env_name))
def gen_tree(self):
    """Emit the pytorch_linux_build job entry for one mobile build variant."""
    base_parts = [
        "pytorch",
        "linux",
        "xenial",
        "py3",
        "clang5",
        "mobile",
    ] + self.variant_parts
    props = {
        "build_environment": "-".join(base_parts),
        # build-only job: "1"
        "build_only": miniutils.quote(str(int(True))),
        "docker_image": self.docker_image,
        "name": "_".join(base_parts),
    }
    if self.is_master_only:
        props["filters"] = cimodel.data.simple.util.branch_filters.gen_branches_only_filter_dict()
    return [{"pytorch_linux_build": props}]
def render(fh, data, depth, is_list_member=False):
    """
    PyYaml does not allow precise control over the quoting behavior,
    especially for merge references. Therefore, we use this custom YAML renderer.

    :param fh: writable file-like object the YAML text is emitted to
    :param data: dict / list / scalar node to render
    :param depth: current indentation depth (multiplied by INDENTATION_WIDTH)
    :param is_list_member: True when this node is an element of a YAML list
    """
    indentation = " " * INDENTATION_WIDTH * depth
    if is_dict(data):
        tuples = list(data.items())
        # Plain dicts render with sorted keys; OrderedDicts keep insertion order.
        if type(data) is not OrderedDict:
            tuples.sort()
        for i, (k, v) in enumerate(tuples):
            # If this dict is itself a list member, the first key gets prefixed with a list marker
            list_marker_prefix = LIST_MARKER if is_list_member and not i else ""
            # Nested collections continue on the next line; scalars share the line.
            trailing_whitespace = "\n" if is_collection(v) else " "
            fh.write(indentation + list_marker_prefix + k + ":" + trailing_whitespace)
            # List members get one extra indent level so values align under the marker.
            render(fh, v, depth + 1 + int(is_list_member))
    elif type(data) is list:
        for v in data:
            render(fh, v, depth, True)
    else:
        # use empty quotes to denote an empty string value instead of blank space
        modified_data = miniutils.quote(data) if data == '' else data
        # Non-list scalars follow "key: " on the same line, so no indentation.
        list_member_prefix = indentation + LIST_MARKER if is_list_member else ""
        fh.write(list_member_prefix + str(modified_data) + "\n")
def gen_docker_image_path(self):
    """Return the (quoted) full docker image path, including version tag."""
    source = self.parent_build or self
    base = "-".join(source.get_parms(True))
    path = DOCKER_IMAGE_PATH_BASE + base + ":" + str(DOCKER_IMAGE_VERSION)
    return miniutils.quote(path)
def gen_yaml_tree(self):
    """Build the YAML job dict for this caffe2 config's phase."""
    env_pairs = []
    lang = miniutils.override(self.language, {"onnx_py2": "onnx-py2"})
    build_env = "-".join(
        ["caffe2", lang] + self.get_build_name_middle_parts() + [self.phase])
    # Everything except macOS quotes the value for YAML.
    if self.distro.name != "macos":
        build_env = miniutils.quote(build_env)
    env_pairs.append(("BUILD_ENVIRONMENT", build_env))
    if self.compiler.name == "ios":
        env_pairs.append(("BUILD_IOS", miniutils.quote("1")))
    if self.phase == "test" and self.compiler.name == "cuda":
        # TODO cuda should not be considered a compiler
        env_pairs.append(("USE_CUDA_DOCKER_RUNTIME", miniutils.quote("1")))
    if self.distro.name == "macos":
        env_pairs.append(("PYTHON_VERSION", miniutils.quote("2")))
    else:
        env_pairs.append(("DOCKER_IMAGE", self.gen_docker_image()))
        if self.build_only:
            env_pairs.append(("BUILD_ONLY", miniutils.quote("1")))
    tree = OrderedDict({"environment": OrderedDict(env_pairs)})
    if self.phase == "test":
        tree["resource_class"] = (
            "gpu.medium" if self.compiler.name == "cuda" else "large")
    tree["<<"] = "*" + "_".join(
        ["caffe2", self.get_platform(), self.phase, "defaults"])
    return tree
def gen_docker_image(self):
    """Return the docker image for this binary config (cuda100-era layout)."""
    if self.gcc_config_variant == 'gcc5.4_cxx11-abi':
        return miniutils.quote("pytorch/conda-cuda-cxx11-ubuntu1604:latest")
    prefix = miniutils.override(self.pydistro, {
        "manywheel": "manylinux",
        "libtorch": "manylinux",
    })
    # The cpu nightlies are built on the pytorch/manylinux-cuda100 docker image
    version = self.cuda_version or "100"
    suffix = "" if self.pydistro == "conda" else version
    if self.cuda_version == "101":
        # NOTE(review): this branch returns the digest-pinned image UNQUOTED,
        # unlike every other return here — presumably intentional, but confirm.
        return "soumith/manylinux-cuda101@sha256:5d62be90d5b7777121180e6137c7eed73d37aaf9f669c51b783611e37e0b4916"
    return miniutils.quote("pytorch/" + prefix + "-cuda" + suffix)
def gen_docker_image_path(self):
    """Return the (quoted) docker image path, picking the rocm tag when needed."""
    source = self.parent_build or self
    base = "-".join(source.get_parms(True))
    tag = DOCKER_IMAGE_TAG_ROCM if self.rocm_version is not None else DOCKER_IMAGE_TAG
    return miniutils.quote(gen_docker_image_path(base, tag))
def gen_docker_image(self):
    """Return the (quoted) caffe2 docker image path for this config."""
    lang = miniutils.override(self.language, {
        "onnx_py2": "py2",
        "cmake": "py2",
    })
    name = "-".join([lang] + self.get_build_name_middle_parts())
    return miniutils.quote(
        DOCKER_IMAGE_PATH_BASE + name + ":" + str(DOCKER_IMAGE_VERSION))
def gen_workflow_params(self, phase):
    """Assemble workflow parameters for a phase, with rocm-specific resources."""
    params = OrderedDict()
    pieces = self.get_build_job_name_pieces(phase)
    params["build_environment"] = miniutils.quote("-".join(map(str, pieces)))
    params["docker_image"] = self.gen_docker_image_path()
    is_test = Conf.is_test_phase(phase)
    if is_test and self.gpu_resource:
        params["use_cuda_docker_runtime"] = miniutils.quote("1")
    if is_test:
        # rocm overrides any gpu resource class.
        if self.rocm_version is not None:
            resource_class = "pytorch/amd-gpu"
        elif self.gpu_resource:
            resource_class = "gpu." + self.gpu_resource
        else:
            resource_class = "large"
        params["resource_class"] = resource_class
    if phase == "build" and self.rocm_version is not None:
        params["resource_class"] = "xlarge"
    return params
def gen_docker_image(self):
    """Return the (quoted) caffe2 docker image path for this build."""
    substitutions = {
        "onnx_py2": "py2",
        "cmake": "py2",
    }
    pieces = [miniutils.override(self.language, substitutions)]
    pieces.extend(self.get_build_name_middle_parts())
    image = "{}{}:{}".format(
        DOCKER_IMAGE_PATH_BASE, "-".join(pieces), DOCKER_IMAGE_VERSION)
    return miniutils.quote(image)
def gen_schedule_tree(cron_timing):
    """Build a CircleCI cron-trigger list restricted to the master branch."""
    trigger = {
        "cron": miniutils.quote(cron_timing),
        "filters": {
            "branches": {
                "only": ["master"],
            },
        },
    }
    return [{"schedule": trigger}]
def get_workflow_jobs(images=IMAGE_NAMES, only_slow_gradcheck=False):
    """Generates a list of docker image build definitions.

    :param images: iterable of image names, optionally carrying a "docker-"
        job prefix that is stripped before use.
    :param only_slow_gradcheck: when True, emit only the slow-gradcheck image.
    """
    ret = []
    for image_name in images:
        if image_name.startswith('docker-'):
            # BUG FIX: the original used image_name.lstrip('docker-'), which
            # strips any leading characters from the SET {d,o,c,k,e,r,-}
            # rather than the literal prefix — e.g. "docker-centos..." would
            # lose extra leading letters. Slice off the exact prefix instead.
            image_name = image_name[len('docker-'):]
        # BUG FIX: the original compared with "is not", i.e. object identity,
        # which is unreliable for strings; use value inequality.
        if only_slow_gradcheck and image_name != SLOW_GRADCHECK_IMAGE_NAME:
            continue
        parameters = OrderedDict({
            "name": quote(f"docker-{image_name}"),
            "image_name": quote(image_name),
        })
        if image_name == "pytorch-linux-xenial-py3.7-gcc5.4":
            # pushing documentation on tags requires CircleCI to also
            # build all the dependencies on tags, including this docker image
            parameters['filters'] = gen_filter_dict(branches_list=r"/.*/",
                                                    tags_list=RC_PATTERN)
        ret.append(OrderedDict({"docker_build_job": parameters}))
    return ret
def gen_schedule_tree(cron_timing):
    """Return a schedule trigger entry that fires on master per cron_timing."""
    branch_filter = {"branches": {"only": ["master"]}}
    schedule = {
        "cron": miniutils.quote(cron_timing),
        "filters": branch_filter,
    }
    return [{"schedule": schedule}]
def gen_docker_image(self):
    """Return the (quoted) docker image for this nightly binary config."""
    prefix = miniutils.override(self.pydistro, {
        "manywheel": "manylinux",
        "libtorch": "manylinux",
    })
    # conda builds carry no cuda version suffix; others default to "80".
    suffix = "" if self.pydistro == "conda" else (self.cuda_version or "80")
    return miniutils.quote("soumith/" + prefix + "-cuda" + suffix)
def gen_docker_image(self):
    """Pick the docker image name for this nightly binary config."""
    substitutions = {
        "manywheel": "manylinux",
        "libtorch": "manylinux",
    }
    distro = miniutils.override(self.pydistro, substitutions)
    # The cpu nightlies are built on the soumith/manylinux-cuda80 docker image
    cuda = self.cuda_version if self.cuda_version else "80"
    suffix = "" if self.pydistro == "conda" else cuda
    return miniutils.quote("soumith/{}-cuda{}".format(distro, suffix))
def gen_build_workflows_tree():
    """Assemble the top-level "workflows" tree of the generated CircleCI config.

    Three workflows are produced:
      * "binary_builds" - binary build/test/upload jobs, gated on the
        run_binary_tests pipeline parameter,
      * "docker_build"  - cron-triggered (weekly, master-only) docker image builds,
      * "build"         - the main build workflow.
    """
    # Job-list generators for the main "build" workflow, in emission order.
    build_workflows_functions = [
        pytorch_build_definitions.get_workflow_jobs,
        cimodel.data.simple.macos_definitions.get_workflow_jobs,
        cimodel.data.simple.android_definitions.get_workflow_jobs,
        cimodel.data.simple.ios_definitions.get_workflow_jobs,
        cimodel.data.simple.mobile_definitions.get_workflow_jobs,
        cimodel.data.simple.ge_config_tests.get_workflow_jobs,
        cimodel.data.simple.bazel_definitions.get_workflow_jobs,
        caffe2_build_definitions.get_workflow_jobs,
        cimodel.data.simple.binary_smoketest.get_workflow_jobs,
        cimodel.data.simple.nightly_ios.get_workflow_jobs,
        cimodel.data.simple.nightly_android.get_workflow_jobs,
        windows_build_definitions.get_windows_workflows,
        binary_build_definitions.get_post_upload_jobs,
        binary_build_definitions.get_binary_smoke_test_jobs,
    ]
    # Generators for the parameter-gated binary-builds workflow.
    binary_build_functions = [
        binary_build_definitions.get_binary_build_jobs,
        binary_build_definitions.get_nightly_tests,
        binary_build_definitions.get_nightly_uploads,
    ]
    # Generators for the scheduled docker-image-build workflow.
    docker_builder_functions = [
        cimodel.data.simple.docker_definitions.get_workflow_jobs
    ]
    return {
        "workflows": {
            "binary_builds": {
                # Gated on a pipeline parameter evaluated by CircleCI.
                "when": r"<< pipeline.parameters.run_binary_tests >>",
                "jobs": [f() for f in binary_build_functions],
            },
            "docker_build": OrderedDict(
                {
                    "triggers": [
                        {
                            # Weekly: 15:00 on Sundays, master only.
                            "schedule": {
                                "cron": miniutils.quote("0 15 * * 0"),
                                "filters": {"branches": {"only": ["master"]}},
                            }
                        }
                    ],
                    "jobs": [f() for f in docker_builder_functions],
                }
            ),
            "build": {"jobs": [f() for f in build_workflows_functions]},
        }
    }
def gen_docker_image(self):
    """Return the (quoted) docker image for this nightly binary build."""
    prefix = miniutils.override(self.pydistro, {
        "manywheel": "manylinux",
        "libtorch": "manylinux",
    })
    # The cpu nightlies are built on the soumith/manylinux-cuda100 docker image
    if self.pydistro == "conda":
        suffix = ""
    else:
        suffix = self.cuda_version or "100"
    return miniutils.quote("soumith/" + prefix + "-cuda" + suffix)
def gen_tree(self):
    """Emit the pytorch_linux_test job entry for one binary smoketest config."""
    if self.use_cuda_docker:
        resource_class = "gpu.medium"
        docker_image = DOCKER_IMAGE_CUDA_10_2
    else:
        resource_class = "large"
        docker_image = DOCKER_IMAGE_BASIC
    full_name = "_".join(self.get_all_parts(False))
    build_env = self.build_env_override or "-".join(self.get_all_parts(True))
    props = {
        "name": full_name,
        "build_environment": build_env,
        "requires": ["setup"] + self.extra_requires,
        "resource_class": resource_class,
        "docker_image": docker_image,
    }
    if self.use_cuda_docker:
        props["use_cuda_docker_runtime"] = miniutils.quote(str(1))
    return [{"pytorch_linux_test": props}]
def gen_docker_image_requires(self):
    """Return the (quoted) "requires" entry from gen_docker_image for this build."""
    config_source = self.parent_build if self.parent_build else self
    env_name = "-".join(config_source.get_parms(True))
    # gen_docker_image returns (image, requires); only requires is needed here.
    requires = gen_docker_image(env_name)[1]
    return miniutils.quote(requires)
def gen_docker_image_path(self):
    """Return the (quoted) versioned docker image path for this config."""
    parms_source = self.parent_build if self.parent_build else self
    name = "-".join(parms_source.get_parms(True))
    return miniutils.quote(f"{DOCKER_IMAGE_PATH_BASE}{name}:{DOCKER_IMAGE_VERSION}")