def gen_docs_configs(xenial_parent_config):
    """Return the four doc jobs (python/cpp build + push) that hang off
    the given xenial parent build config.

    Build jobs run on master/main/nightly branches and RC tags; each push
    job depends on its build job by name.
    """
    python_doc_build = HiddenConf(
        "pytorch_python_doc_build",
        parent_build=xenial_parent_config,
        filters=gen_filter_dict(
            branches_list=["master", "main", "nightly"],
            tags_list=RC_PATTERN),
    )
    python_doc_push = DocPushConf(
        "pytorch_python_doc_push",
        parent_build="pytorch_python_doc_build",
        branch="site",
    )
    cpp_doc_build = HiddenConf(
        "pytorch_cpp_doc_build",
        parent_build=xenial_parent_config,
        filters=gen_filter_dict(
            branches_list=["master", "main", "nightly"],
            tags_list=RC_PATTERN),
    )
    cpp_doc_push = DocPushConf(
        "pytorch_cpp_doc_push",
        parent_build="pytorch_cpp_doc_build",
        branch="master",
    )
    return [python_doc_build, python_doc_push, cpp_doc_build, cpp_doc_push]
def gen_workflow_job(self, phase, upload_phase_dependency=None, nightly=False):
    """Build one workflow job dict for this binary config.

    Args:
        phase: one of "build", "test", "upload" (and smoke variants).
        upload_phase_dependency: phase name whose build-name the upload
            job must wait on (only read when phase == "upload").
        nightly: forwarded to gen_build_name to pick the nightly name.

    Returns:
        A single-entry dict {job_name: job_def} keyed by
        "<prefix>_<os>_<phase>".
    """
    job_def = OrderedDict()
    job_def["name"] = self.gen_build_name(phase, nightly)
    job_def["build_environment"] = miniutils.quote(" ".join(
        self.gen_build_env_parms()))
    if self.smoke:
        # Smoke jobs wait for the S3 HTML index refresh and only run on
        # nightly branches / RC tags.
        job_def["requires"] = [
            "update_s3_htmls",
        ]
        job_def["filters"] = branch_filters.gen_filter_dict(
            branches_list=["nightly"],
            tags_list=[branch_filters.RC_PATTERN],
        )
    else:
        # Non-smoke: uploads are restricted to nightly; everything else
        # runs on all branches (regex /.*/) plus RC tags.
        if phase in ["upload"]:
            filter_branch = "nightly"
        else:
            filter_branch = r"/.*/"
        job_def["filters"] = branch_filters.gen_filter_dict(
            branches_list=[filter_branch],
            tags_list=[branch_filters.RC_PATTERN],
        )
    if self.libtorch_variant:
        job_def["libtorch_variant"] = miniutils.quote(
            self.libtorch_variant)
    if phase == "test":
        if not self.smoke:
            # Regular test jobs depend on the matching build job.
            job_def["requires"] = [self.gen_build_name("build", nightly)]
        # Docker image is skipped for macos smoke jobs and for windows.
        if not (self.smoke and self.os == "macos") and self.os != "windows":
            job_def["docker_image"] = self.gen_docker_image()
        if self.os != "windows" and self.cuda_version:
            job_def["use_cuda_docker_runtime"] = miniutils.quote("1")
    else:
        # Build phases on linux run inside the docker image too.
        if self.os == "linux" and phase != "upload":
            job_def["docker_image"] = self.gen_docker_image()
    if phase == "test":
        if self.cuda_version:
            # Windows GPU tests need the special executor; linux uses a
            # GPU resource class instead.
            if self.os == "windows":
                job_def["executor"] = "windows-with-nvidia-gpu"
            else:
                job_def["resource_class"] = "gpu.medium"
    if phase == "upload":
        # Uploads need org credentials and wait on the given dependency.
        job_def["context"] = "org-member"
        job_def["requires"] = [
            self.gen_build_name(upload_phase_dependency, nightly)
        ]
    # "macos" is shortened to "mac" in the workflow job name.
    os_name = miniutils.override(self.os, {"macos": "mac"})
    job_name = "_".join([self.get_name_prefix(), os_name, phase])
    return {job_name: job_def}
def gen_tree(self):
    """Render this job as a one-entry workflow tree node keyed by its
    template name, attaching branch filters for master-only or PR-only
    jobs."""
    props = {"name": self.job_name, "requires": self.dependencies}
    if self.is_master_only:
        props["filters"] = branch_filters.gen_filter_dict(branch_filters.NON_PR_BRANCH_LIST)
    elif self.is_pr_only:
        props["filters"] = branch_filters.gen_filter_dict(branch_filters.PR_BRANCH_LIST)
    return [{self.template_name: props}]
def get_post_upload_jobs():
    """Generate jobs to update HTML indices and report binary sizes"""
    # Both post-upload jobs wait on every nightly upload job.
    upload_job_names = [
        conf.gen_build_name(build_or_test="upload", nightly=True)
        for conf in gen_build_env_list(False)
    ]
    common_job_def = {
        "context": "org-member",
        "filters": branch_filters.gen_filter_dict(
            branches_list=["nightly"],
            tags_list=[branch_filters.RC_PATTERN],
        ),
        "requires": upload_job_names,
    }
    return [
        {
            "update_s3_htmls": {
                "name": "update_s3_htmls",
                **common_job_def,
            },
        },
        {
            "upload_binary_sizes": {
                "name": "upload_binary_sizes",
                **common_job_def,
            },
        },
    ]
def gen_tree(self):
    """Render the android NDK build job node for this variant."""
    name_parts = [
        "pytorch", "linux", "xenial", "py3", "clang5",
        "android", "ndk", "r19c",
    ] + self.variant + ["build"]
    # Job name uses underscores; build environment uses dashes.
    props_dict = {
        "name": "_".join(name_parts),
        "build_environment": "\"{}\"".format("-".join(name_parts)),
        "docker_image": "\"{}\"".format(DOCKER_IMAGE_NDK),
        "requires": [DOCKER_REQUIREMENT_NDK],
    }
    if self.is_master_only:
        props_dict["filters"] = branch_filters.gen_filter_dict(branch_filters.NON_PR_BRANCH_LIST)
    return [{self.template_name: props_dict}]
def gen_workflow_job(self, phase):
    """Emit the doc-push workflow job; it depends on the parent build and
    runs only on the nightly branch with org credentials."""
    job = {
        "name": self.name,
        "requires": [self.parent_build],
        "context": "org-member",
        "filters": gen_filter_dict(branches_list=["nightly"]),
    }
    return {"pytorch_doc_push": job}
def gen_workflow_job(channel: str):
    """Emit the anaconda-prune job definition for one conda channel."""
    job = OrderedDict()
    job["name"] = f"anaconda-prune-{channel}"
    job["context"] = quote("org-member")
    job["packages"] = quote(PACKAGES_TO_PRUNE)
    job["channel"] = channel
    job["filters"] = gen_filter_dict(branches_list=["postnightly"])
    return OrderedDict({"anaconda_prune": job})
def get_post_upload_jobs():
    """Return the job that refreshes the S3 HTML indices; it runs only on
    the postnightly branch with org credentials."""
    job_def = {
        "name": "update_s3_htmls",
        "context": "org-member",
        "filters": branch_filters.gen_filter_dict(
            branches_list=["postnightly"],
        ),
    }
    return [{"update_s3_htmls": job_def}]
def gen_workflow_job(self, phase):
    """Build the caffe2 workflow job dict for the given phase.

    Test-phase jobs depend on their build-phase sibling; non-important
    jobs get the default branch filters.
    """
    job_def = OrderedDict()
    job_def["name"] = self.construct_phase_name(phase)
    platform = self.get_platform()
    if phase == "test":
        job_def["requires"] = [self.construct_phase_name("build")]
        job_name = "caffe2_" + platform + "_test"
    else:
        job_name = "caffe2_" + platform + "_build"
    if not self.is_important:
        job_def["filters"] = gen_filter_dict()
    job_def.update(self.gen_workflow_params(phase))
    return {job_name: job_def}
def get_workflow_jobs():
    """Generates a list of docker image build definitions"""
    jobs = []
    for image_name in IMAGE_NAMES:
        parameters = OrderedDict()
        parameters["name"] = quote(f"docker-{image_name}")
        parameters["image_name"] = quote(image_name)
        if image_name == "pytorch-linux-xenial-py3.6-gcc5.4":
            # pushing documentation on tags requires CircleCI to also
            # build all the dependencies on tags, including this docker image
            parameters['filters'] = gen_filter_dict(branches_list=r"/.*/",
                                                    tags_list=RC_PATTERN)
        jobs.append(OrderedDict({"docker_build_job": parameters}))
    return jobs
def get_workflow_jobs(images=IMAGE_NAMES, only_slow_gradcheck=False):
    """Generates a list of docker image build definitions.

    Args:
        images: iterable of image names; entries may carry a "docker-"
            job-name prefix, which is stripped before use.
        only_slow_gradcheck: when True, keep only the slow-gradcheck image.

    Returns:
        List of single-entry OrderedDicts keyed by "docker_build_job".
    """
    ret = []
    for image_name in images:
        if image_name.startswith('docker-'):
            # BUGFIX: str.lstrip('docker-') strips any of the characters
            # {d, o, c, k, e, r, -} from the left, so e.g. a "rocm..."
            # image would be mangled to "ocm...". Slice off the literal
            # prefix instead.
            image_name = image_name[len('docker-'):]
        # BUGFIX: compare string values with "!=", not identity ("is not"),
        # which only happens to work when both strings are interned.
        if only_slow_gradcheck and image_name != SLOW_GRADCHECK_IMAGE_NAME:
            continue
        parameters = OrderedDict({
            "name": quote(f"docker-{image_name}"),
            "image_name": quote(image_name),
        })
        if image_name == "pytorch-linux-xenial-py3.7-gcc5.4":
            # pushing documentation on tags requires CircleCI to also
            # build all the dependencies on tags, including this docker image
            parameters['filters'] = gen_filter_dict(branches_list=r"/.*/",
                                                    tags_list=RC_PATTERN)
        ret.append(OrderedDict({"docker_build_job": parameters}))
    return ret
def gen_workflow_job(self, phase):
    """Build the pytorch linux workflow job dict for ``phase``.

    Test jobs depend on the build job of ``parent_build`` (or of this
    config itself when there is no parent); non-important jobs get the
    default branch filters.
    """
    job_def = OrderedDict()
    job_def["name"] = self.gen_build_name(phase)
    is_test_phase = phase == "test"
    if is_test_phase:
        # TODO When merging the caffe2 and pytorch jobs, it might be convenient for a while to make a
        # caffe2 test job dependent on a pytorch build job. This way we could quickly dedup the repeated
        # build of pytorch in the caffe2 build job, and just run the caffe2 tests off of a completed
        # pytorch build job (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259452641)
        dependency_build = self.parent_build or self
        job_def["requires"] = [dependency_build.gen_build_name("build")]
    job_name = "pytorch_linux_test" if is_test_phase else "pytorch_linux_build"
    if not self.is_important:
        job_def["filters"] = gen_filter_dict()
    job_def.update(self.gen_workflow_params(phase))
    return {job_name: job_def}
def gen_upload_job(self, phase, requires_dependency):
    """Generate the binary_upload job for this configuration.

    The job runs with org-member credentials on the nightly branch and RC
    tags, depends on the nightly job for ``requires_dependency``, and
    uploads into the processor-arch subfolder for this gpu version.

    Output looks similar to:

    - binary_upload:
        name: binary_linux_manywheel_3_7m_cu92_devtoolset7_nightly_upload
        context: org-member
        requires: binary_linux_manywheel_3_7m_cu92_devtoolset7_nightly_test
        filters:
          branches:
            only:
              - nightly
          tags:
            only: /v[0-9]+(\\.[0-9]+)*-rc[0-9]+/
        package_type: manywheel
        upload_subfolder: cu92
    """
    upload_job = OrderedDict()
    upload_job["name"] = self.gen_build_name(phase, nightly=True)
    upload_job["context"] = "org-member"
    upload_job["requires"] = [self.gen_build_name(requires_dependency, nightly=True)]
    upload_job["filters"] = branch_filters.gen_filter_dict(
        branches_list=["nightly"],
        tags_list=[branch_filters.RC_PATTERN],
    )
    upload_job["package_type"] = self.pydistro
    upload_job["upload_subfolder"] = binary_build_data.get_processor_arch_name(
        self.gpu_version,
    )
    return {"binary_upload": upload_job}
def instantiate_configs():
    """Walk the config tree and instantiate a Conf per discovered leaf.

    For each found config this derives the parameter list, python/cuda/rocm
    versions and phase restrictions from the node's properties, then attaches
    dependent jobs (docs builds, dependent test configs, BC-breaking check)
    to qualifying configs.

    Returns:
        list of Conf objects, one per config found in the tree.
    """
    config_list = []

    root = get_root()
    found_configs = conf_tree.dfs(root)
    for fc in found_configs:

        restrict_phases = None
        distro_name = fc.find_prop("distro_name")
        compiler_name = fc.find_prop("compiler_name")
        compiler_version = fc.find_prop("compiler_version")
        # Feature flags default to False when the property is absent.
        is_xla = fc.find_prop("is_xla") or False
        is_asan = fc.find_prop("is_asan") or False
        is_onnx = fc.find_prop("is_onnx") or False
        is_pure_torch = fc.find_prop("is_pure_torch") or False
        is_vulkan = fc.find_prop("is_vulkan") or False
        parms_list_ignored_for_docker_image = []

        # cuda/android configs lead with the abbreviated python version;
        # others lead with "py<version>".
        python_version = None
        if compiler_name == "cuda" or compiler_name == "android":
            python_version = fc.find_prop("pyver")
            parms_list = [fc.find_prop("abbreviated_pyver")]
        else:
            parms_list = ["py" + fc.find_prop("pyver")]

        cuda_version = None
        rocm_version = None
        if compiler_name == "cuda":
            cuda_version = fc.find_prop("compiler_version")

        elif compiler_name == "rocm":
            rocm_version = fc.find_prop("compiler_version")
            restrict_phases = ["build", "test1", "test2", "caffe2_test"]

        elif compiler_name == "android":
            android_ndk_version = fc.find_prop("compiler_version")
            # TODO: do we need clang to compile host binaries like protoc?
            parms_list.append("clang5")
            parms_list.append("android-ndk-" + android_ndk_version)
            android_abi = fc.find_prop("android_abi")
            # The ABI is part of the job name but not the docker image name.
            parms_list_ignored_for_docker_image.append(android_abi)
            restrict_phases = ["build"]

        elif compiler_name:
            gcc_version = compiler_name + (fc.find_prop("compiler_version") or "")
            parms_list.append(gcc_version)

        # asan/onnx override the leading python parm with the abbreviated form.
        if is_asan:
            parms_list.append("asan")
            python_version = fc.find_prop("pyver")
            parms_list[0] = fc.find_prop("abbreviated_pyver")

        if is_onnx:
            parms_list.append("onnx")
            python_version = fc.find_prop("pyver")
            parms_list[0] = fc.find_prop("abbreviated_pyver")
            restrict_phases = ["build", "ort_test1", "ort_test2"]

        if cuda_version:
            cuda_gcc_version = fc.find_prop("cuda_gcc_override") or "gcc7"
            parms_list.append(cuda_gcc_version)

        is_libtorch = fc.find_prop("is_libtorch") or False
        is_important = fc.find_prop("is_important") or False
        parallel_backend = fc.find_prop("parallel_backend") or None
        build_only = fc.find_prop("build_only") or False
        is_coverage = fc.find_prop("is_coverage") or False
        shard_test = fc.find_prop("shard_test") or False
        # TODO: fix pure_torch python test packaging issue.
        if shard_test:
            restrict_phases = ["build"] if restrict_phases is None else restrict_phases
            restrict_phases.extend(["test1", "test2"])
        if build_only or is_pure_torch:
            restrict_phases = ["build"]
        if is_coverage and restrict_phases is None:
            restrict_phases = ["build", "coverage_test"]

        gpu_resource = None
        if cuda_version and cuda_version != "10":
            gpu_resource = "medium"

        c = Conf(
            distro_name,
            parms_list,
            parms_list_ignored_for_docker_image,
            python_version,
            cuda_version,
            rocm_version,
            is_xla,
            is_vulkan,
            is_pure_torch,
            restrict_phases,
            gpu_resource,
            is_libtorch=is_libtorch,
            is_important=is_important,
            parallel_backend=parallel_backend,
        )

        # run docs builds on "pytorch-linux-xenial-py3.6-gcc5.4". Docs builds
        # should run on a CPU-only build that runs on all PRs.
        # XXX should this be updated to a more modern build? Projects are
        # beginning to drop python3.6
        if (
            distro_name == "xenial"
            and fc.find_prop("pyver") == "3.6"
            and cuda_version is None
            and parallel_backend is None
            and not is_vulkan
            and not is_pure_torch
            and compiler_name == "gcc"
            and fc.find_prop("compiler_version") == "5.4"
        ):
            c.filters = gen_filter_dict(branches_list=r"/.*/", tags_list=RC_PATTERN)
            c.dependent_tests = gen_docs_configs(c)

        # NOTE(review): this overwrites any dependent_tests set above when
        # both conditions match — presumably the two conditions are disjoint
        # in practice (cuda vs cpu-only); confirm.
        if cuda_version == "10.2" and python_version == "3.6" and not is_libtorch:
            c.dependent_tests = gen_dependent_configs(c)

        # gcc 5.4 CPU configs additionally get a backward-compatibility check
        # test job parented on this build.
        if (
            compiler_name == "gcc"
            and compiler_version == "5.4"
            and not is_libtorch
            and not is_vulkan
            and not is_pure_torch
            and parallel_backend is None
        ):
            bc_breaking_check = Conf(
                "backward-compatibility-check",
                [],
                is_xla=False,
                restrict_phases=["test"],
                is_libtorch=False,
                is_important=True,
                parent_build=c,
            )
            c.dependent_tests.append(bc_breaking_check)

        config_list.append(c)

    return config_list
def gen_tree(self):
    """Render this windows build/test job as a one-entry workflow node.

    Returns:
        [{key_name: props_dict}] where key_name is
        "pytorch_windows_<phase>[_multigpu]".
    """
    # test_index distinguishes numbered test shards ("test1", "test2", ...);
    # None means this is a build job.
    base_phase = "build" if self.test_index is None else "test"
    numbered_phase = (
        base_phase if self.test_index is None else base_phase + str(self.test_index)
    )

    key_parts = ["pytorch", "windows", base_phase]
    if self.multi_gpu:
        key_parts.append('multigpu')
    key_name = "_".join(key_parts)

    # "on_cpu" suffix marks GPU builds forced to run their tests on CPU.
    cpu_forcing_name_parts = ["on", "cpu"] if self.force_on_cpu else []
    target_arch = self.cuda_version.render_dots() if self.cuda_version else "cpu"

    python_version = "3.8"
    base_name_parts = [
        "pytorch",
        "windows",
        self.vscode_spec.render(),
        "py" + python_version.replace(".", ""),
        target_arch,
    ]

    prerequisite_jobs = []
    if base_phase == "test":
        # Test jobs wait on the matching build job.
        prerequisite_jobs.append("_".join(base_name_parts + ["build"]))

    # NOTE(review): mutates self.cudnn_version as a side effect of
    # rendering; cudnn 8 pairs with cuda 11, otherwise cudnn 7.
    if self.cuda_version:
        self.cudnn_version = 8 if self.cuda_version.major == 11 else 7

    arch_env_elements = (
        ["cuda" + str(self.cuda_version.major), "cudnn" + str(self.cudnn_version)]
        if self.cuda_version
        else ["cpu"]
    )

    build_environment_string = "-".join(
        ["pytorch", "win"]
        + self.vscode_spec.get_elements()
        + arch_env_elements
        + ["py" + python_version.split(".")[0]]
    )

    # CUDA execution is suppressed when the job is forced onto CPU.
    is_running_on_cuda = bool(self.cuda_version) and not self.force_on_cpu

    if self.multi_gpu:
        # Multi-GPU jobs carry only their dependency list.
        props_dict = {"requires": prerequisite_jobs}
    else:
        props_dict = {
            "build_environment": build_environment_string,
            "python_version": miniutils.quote(python_version),
            "vc_version": miniutils.quote(self.vscode_spec.dotted_version()),
            "vc_year": miniutils.quote(str(self.vscode_spec.year)),
            "vc_product": self.vscode_spec.get_product(),
            "use_cuda": miniutils.quote(str(int(is_running_on_cuda))),
            "requires": prerequisite_jobs,
        }

    # Branch filters: master-only, nightly-only, or master+nightly.
    if self.master_only:
        props_dict[
            "filters"
        ] = gen_filter_dict()
    elif self.nightly_only:
        props_dict[
            "filters"
        ] = gen_filter_dict(branches_list=["nightly"], tags_list=RC_PATTERN)
    elif self.master_and_nightly:
        props_dict[
            "filters"
        ] = gen_filter_dict(branches_list=NON_PR_BRANCH_LIST + ["nightly"], tags_list=RC_PATTERN)

    name_parts = base_name_parts + cpu_forcing_name_parts + [numbered_phase]

    if not self.multi_gpu:
        if base_phase == "test":
            test_name = "-".join(["pytorch", "windows", numbered_phase])
            props_dict["test_name"] = test_name

            if is_running_on_cuda:
                props_dict["executor"] = "windows-with-nvidia-gpu"

        props_dict["cuda_version"] = (
            miniutils.quote(str(self.cuda_version))
            if self.cuda_version
            else "cpu"
        )

    props_dict["name"] = "_".join(name_parts)

    return [{key_name: props_dict}]