def instantiate_configs():
    """Build a Conf object for every configuration found in the config tree."""
    configs = []
    for node in conf_tree.dfs(get_root()):
        distro = node.find_prop("distro_name")

        # Xenial configs carry the interpreter version explicitly; other
        # distros encode it as a "py<ver>" name component instead.
        if distro == "xenial":
            py_ver = node.find_prop("pyver")
            name_parts = [node.find_prop("abbreviated_pyver")]
        else:
            py_ver = None
            name_parts = ["py" + node.find_prop("pyver")]

        compiler = node.find_prop("compiler_name")
        cuda_ver = node.find_prop("compiler_version") if compiler == "cuda" else None
        if compiler and compiler != "cuda":
            name_parts.append(compiler + (node.find_prop("compiler_version") or ""))
            if compiler == "clang":
                name_parts.append("asan")

        if cuda_ver in ["9.2", "10"]:
            # TODO The gcc version is orthogonal to CUDA version?
            name_parts.append("gcc7")

        conf = Conf(
            distro,
            name_parts,
            py_ver,
            cuda_ver,
            node.find_prop("is_xla") or False,
            None,
            # CUDA builds (other than 10) run tests on a GPU machine.
            "medium" if cuda_ver and cuda_ver != "10" else None,
        )
        if cuda_ver == "8":
            conf.dependent_tests = gen_dependent_configs(conf)
        configs.append(conf)
    return configs
def gen_build_env_list(smoke):
    """Flatten the binary-build config tree into a list of Conf objects."""
    tree_nodes = conf_tree.dfs(get_root(smoke, "N/A"))
    return [
        Conf(
            node.find_prop("os_name"),
            node.find_prop("cu"),
            node.find_prop("package_format"),
            [node.find_prop("pyver")],
            node.find_prop("smoke"),
            node.find_prop("libtorch_variant"),
        )
        for node in tree_nodes
    ]
def generate_graph(toplevel_config_node):
    """
    Traverses the graph once first just to find the max depth
    """
    config_list = conf_tree.dfs(toplevel_config_node)
    max_depth = 0
    for config in config_list:
        max_depth = max(max_depth, config.get_depth())

    # color the nodes using the max depth
    from pygraphviz import AGraph
    dot = AGraph()

    # Invoked once per node discovered by the DFS; adds a filled node whose
    # hue encodes tree depth and whose saturation encodes position among
    # siblings.
    def node_discovery_callback(node, sibling_index, sibling_count):
        depth = node.get_depth()

        sat_min, sat_max = 0.1, 0.6
        sat_range = sat_max - sat_min
        # Spread siblings across the saturation range; a lone child gets the
        # maximum (fraction 1) to avoid dividing by zero.
        saturation_fraction = sibling_index / float(
            sibling_count - 1) if sibling_count > 1 else 1
        saturation = sat_min + sat_range * saturation_fraction

        # TODO Use a hash of the node label to determine the color
        hue = depth / float(max_depth + 1)

        rgb_tuple = colorsys.hsv_to_rgb(hue, saturation, 1)

        this_node_key = node.get_node_key()

        dot.add_node(
            this_node_key,
            label=node.get_label(),
            style="filled",
            # fillcolor=hex_color + ":orange",
            fillcolor=rgb2hex(rgb_tuple),
            penwidth=3,
            # Border is a slightly darker shade of the fill color.
            color=rgb2hex(colorsys.hsv_to_rgb(hue, saturation, 0.9)))

    # Invoked for each parent/child pair; records the tree edge.
    def child_callback(node, child):
        this_node_key = node.get_node_key()
        child_node_key = child.get_node_key()
        dot.add_edge((this_node_key, child_node_key))

    conf_tree.dfs_recurse(toplevel_config_node, lambda x: None,
                          node_discovery_callback, child_callback)

    return dot
def instantiate_configs():
    """Materialize one Conf per node discovered in the config tree."""
    return [
        Conf(
            language=node.find_prop("language_version"),
            distro=node.find_prop("distro_version"),
            compilers=node.find_prop("compiler_version"),
            build_only=node.find_prop("build_only"),
            is_important=node.find_prop("important"),
        )
        for node in conf_tree.dfs(get_root())
    ]
def instantiate_configs():
    """Materialize one Conf per node discovered in the config tree."""
    return [
        Conf(
            node.find_prop("language_version"),
            node.find_prop("distro_version"),
            node.find_prop("compiler_version"),
            node.find_prop("phase_name"),
            node.find_prop("build_only"),
        )
        for node in conf_tree.dfs(get_root())
    ]
def gen_build_env_list(smoke):
    """Flatten the binary-build config tree into a list of Conf objects."""
    tree_nodes = conf_tree.dfs(get_root(smoke, "N/A"))
    return [
        Conf(
            node.find_prop("os_name"),
            node.find_prop("cu"),
            node.find_prop("package_format"),
            [node.find_prop("pyver")],
            node.find_prop("smoke"),
            node.find_prop("libtorch_variant"),
            node.find_prop("devtoolset_version"),
        )
        for node in tree_nodes
    ]
def gen_build_env_list(smoke):
    """Expand the binary-build config tree into a flat list of Conf objects."""
    result = []
    for node in conf_tree.dfs(get_root(smoke, "N/A")):
        os_name = node.find_prop("os_name")
        result.append(Conf(
            os_name,
            node.find_prop("gpu"),
            node.find_prop("package_format"),
            [node.find_prop("pyver")],
            # Smoke tests are disabled on macos_arm64 (don't test arm64).
            node.find_prop("smoke") and os_name != "macos_arm64",
            node.find_prop("libtorch_variant"),
            node.find_prop("gcc_config_variant"),
            node.find_prop("libtorch_config_variant"),
        ))
    return result
def instantiate_configs():
    """Instantiate a Conf for every configuration discovered in the config
    tree, attaching dependent test/docs configs where applicable.

    Returns a list of Conf objects.
    """
    config_list = []

    root = get_root()
    found_configs = conf_tree.dfs(root)
    for fc in found_configs:

        # Phases this build is limited to; None means no restriction.
        # Reset per config so one config's restriction cannot leak into
        # the next.
        restrict_phases = None
        distro_name = fc.find_prop("distro_name")
        compiler_name = fc.find_prop("compiler_name")
        compiler_version = fc.find_prop("compiler_version")
        is_xla = fc.find_prop("is_xla") or False
        is_asan = fc.find_prop("is_asan") or False
        is_onnx = fc.find_prop("is_onnx") or False
        is_pure_torch = fc.find_prop("is_pure_torch") or False
        is_vulkan = fc.find_prop("is_vulkan") or False
        # Name components that differentiate the build but are not baked
        # into the docker image name.
        parms_list_ignored_for_docker_image = []

        # cuda/android configs name the interpreter explicitly; others fold
        # it into the first name component as "py<ver>".
        python_version = None
        if compiler_name == "cuda" or compiler_name == "android":
            python_version = fc.find_prop("pyver")
            parms_list = [fc.find_prop("abbreviated_pyver")]
        else:
            parms_list = ["py" + fc.find_prop("pyver")]

        cuda_version = None
        rocm_version = None
        if compiler_name == "cuda":
            cuda_version = fc.find_prop("compiler_version")
        elif compiler_name == "rocm":
            rocm_version = fc.find_prop("compiler_version")
            restrict_phases = ["build", "test1", "test2", "caffe2_test"]
        elif compiler_name == "android":
            android_ndk_version = fc.find_prop("compiler_version")
            # TODO: do we need clang to compile host binaries like protoc?
            parms_list.append("clang5")
            parms_list.append("android-ndk-" + android_ndk_version)
            android_abi = fc.find_prop("android_abi")
            parms_list_ignored_for_docker_image.append(android_abi)
            restrict_phases = ["build"]
        elif compiler_name:
            gcc_version = compiler_name + (fc.find_prop("compiler_version") or "")
            parms_list.append(gcc_version)

        # ASAN/ONNX variants override the interpreter naming chosen above.
        if is_asan:
            parms_list.append("asan")
            python_version = fc.find_prop("pyver")
            parms_list[0] = fc.find_prop("abbreviated_pyver")

        if is_onnx:
            parms_list.append("onnx")
            python_version = fc.find_prop("pyver")
            parms_list[0] = fc.find_prop("abbreviated_pyver")
            restrict_phases = ["build", "ort_test1", "ort_test2"]

        if cuda_version:
            # gcc used for the CUDA build can be overridden per config.
            cuda_gcc_version = fc.find_prop("cuda_gcc_override") or "gcc7"
            parms_list.append(cuda_gcc_version)

        is_libtorch = fc.find_prop("is_libtorch") or False
        is_important = fc.find_prop("is_important") or False
        parallel_backend = fc.find_prop("parallel_backend") or None
        build_only = fc.find_prop("build_only") or False
        is_coverage = fc.find_prop("is_coverage") or False
        shard_test = fc.find_prop("shard_test") or False

        # TODO: fix pure_torch python test packaging issue.
        if shard_test:
            restrict_phases = ["build"] if restrict_phases is None else restrict_phases
            restrict_phases.extend(["test1", "test2"])
        if build_only or is_pure_torch:
            restrict_phases = ["build"]
        if is_coverage and restrict_phases is None:
            restrict_phases = ["build", "coverage_test"]

        # CUDA builds (other than 10) test on a GPU machine.
        gpu_resource = None
        if cuda_version and cuda_version != "10":
            gpu_resource = "medium"

        c = Conf(
            distro_name,
            parms_list,
            parms_list_ignored_for_docker_image,
            python_version,
            cuda_version,
            rocm_version,
            is_xla,
            is_vulkan,
            is_pure_torch,
            restrict_phases,
            gpu_resource,
            is_libtorch=is_libtorch,
            is_important=is_important,
            parallel_backend=parallel_backend,
        )

        # run docs builds on "pytorch-linux-xenial-py3.6-gcc5.4". Docs builds
        # should run on a CPU-only build that runs on all PRs.
        # XXX should this be updated to a more modern build? Projects are
        # beginning to drop python3.6
        if (
            distro_name == "xenial"
            and fc.find_prop("pyver") == "3.6"
            and cuda_version is None
            and parallel_backend is None
            and not is_vulkan
            and not is_pure_torch
            and compiler_name == "gcc"
            and fc.find_prop("compiler_version") == "5.4"
        ):
            c.filters = gen_filter_dict(branches_list=r"/.*/", tags_list=RC_PATTERN)
            c.dependent_tests = gen_docs_configs(c)

        if cuda_version == "10.2" and python_version == "3.6" and not is_libtorch:
            c.dependent_tests = gen_dependent_configs(c)

        # The BC check piggybacks on the plain gcc 5.4 build.
        if (
            compiler_name == "gcc"
            and compiler_version == "5.4"
            and not is_libtorch
            and not is_vulkan
            and not is_pure_torch
            and parallel_backend is None
        ):
            bc_breaking_check = Conf(
                "backward-compatibility-check",
                [],
                is_xla=False,
                restrict_phases=["test"],
                is_libtorch=False,
                is_important=True,
                parent_build=c,
            )
            c.dependent_tests.append(bc_breaking_check)

        config_list.append(c)

    return config_list
def gen_tree():
    """Return every node of the config tree in depth-first order."""
    return conf_tree.dfs(get_root())
def instantiate_configs():
    """Instantiate a Conf for every configuration found in the config tree.

    Returns:
        list: Conf objects, one per DFS-discovered configuration; CUDA 9 /
        Python 3.6 builds additionally carry dependent test configs.
    """
    config_list = []

    root = get_root()
    found_configs = conf_tree.dfs(root)
    for fc in found_configs:

        # BUG FIX: restrict_phases used to be initialized once *outside* the
        # loop, so after the first android config set it to ["build"], every
        # subsequent config silently inherited that restriction. Reset it
        # per configuration instead.
        restrict_phases = None

        distro_name = fc.find_prop("distro_name")

        # Xenial configs carry the interpreter version explicitly; other
        # distros encode it as a "py<ver>" name component.
        python_version = None
        if distro_name == "xenial":
            python_version = fc.find_prop("pyver")
            parms_list = [fc.find_prop("abbreviated_pyver")]
        else:
            parms_list = ["py" + fc.find_prop("pyver")]

        compiler_name = fc.find_prop("compiler_name")

        cuda_version = None
        if compiler_name == "cuda":
            cuda_version = fc.find_prop("compiler_version")
        elif compiler_name == "android":
            android_ndk_version = fc.find_prop("compiler_version")
            # TODO: do we need clang to compile host binaries like protoc?
            parms_list.append("clang5")
            parms_list.append("android-ndk-" + android_ndk_version)
            # Android builds have no test phase.
            restrict_phases = ["build"]
        elif compiler_name:
            gcc_version = compiler_name + (fc.find_prop("compiler_version") or "")
            parms_list.append(gcc_version)
            if compiler_name == "clang":
                parms_list.append("asan")

        if cuda_version in ["9.2", "10"]:
            # TODO The gcc version is orthogonal to CUDA version?
            parms_list.append("gcc7")

        is_xla = fc.find_prop("is_xla") or False
        is_namedtensor = fc.find_prop("is_namedtensor") or False

        # CUDA builds (other than 10) run tests on a GPU machine.
        gpu_resource = None
        if cuda_version and cuda_version != "10":
            gpu_resource = "medium"

        c = Conf(
            distro_name,
            parms_list,
            python_version,
            cuda_version,
            is_xla,
            restrict_phases,
            gpu_resource,
            is_namedtensor=is_namedtensor,
        )

        if cuda_version == "9" and python_version == "3.6":
            c.dependent_tests = gen_dependent_configs(c)

        config_list.append(c)

    return config_list
def instantiate_configs():
    """Instantiate a Conf for every configuration discovered in the config
    tree, attaching docs/dependent-test/BC-check configs where applicable.

    Returns a list of Conf objects.
    """
    config_list = []

    root = get_root()
    found_configs = conf_tree.dfs(root)
    for fc in found_configs:

        # Phases this build is limited to; None means no restriction.
        # Reset per config so one config's restriction cannot leak into
        # the next.
        restrict_phases = None
        distro_name = fc.find_prop("distro_name")
        compiler_name = fc.find_prop("compiler_name")
        compiler_version = fc.find_prop("compiler_version")
        is_xla = fc.find_prop("is_xla") or False
        # Name components that differentiate the build but are not baked
        # into the docker image name.
        parms_list_ignored_for_docker_image = []

        vulkan = fc.find_prop("vulkan") or False
        if vulkan:
            parms_list_ignored_for_docker_image.append("vulkan")

        # cuda/android configs name the interpreter explicitly; others fold
        # it into the first name component as "py<ver>".
        python_version = None
        if compiler_name == "cuda" or compiler_name == "android":
            python_version = fc.find_prop("pyver")
            parms_list = [fc.find_prop("abbreviated_pyver")]
        else:
            parms_list = ["py" + fc.find_prop("pyver")]

        cuda_version = None
        if compiler_name == "cuda":
            cuda_version = fc.find_prop("compiler_version")
        elif compiler_name == "android":
            android_ndk_version = fc.find_prop("compiler_version")
            # TODO: do we need clang to compile host binaries like protoc?
            parms_list.append("clang5")
            parms_list.append("android-ndk-" + android_ndk_version)
            android_abi = fc.find_prop("android_abi")
            parms_list_ignored_for_docker_image.append(android_abi)
            restrict_phases = ["build"]
        elif compiler_name:
            gcc_version = compiler_name + (fc.find_prop("compiler_version") or "")
            parms_list.append(gcc_version)

            # TODO: This is a nasty special case
            if gcc_version == 'clang5' and not is_xla:
                parms_list.append("asan")
                python_version = fc.find_prop("pyver")
                parms_list[0] = fc.find_prop("abbreviated_pyver")

        if cuda_version:
            # gcc used for the CUDA build can be overridden per config.
            cuda_gcc_version = fc.find_prop("cuda_gcc_override") or "gcc7"
            parms_list.append(cuda_gcc_version)

        is_libtorch = fc.find_prop("is_libtorch") or False
        is_important = fc.find_prop("is_important") or False
        parallel_backend = fc.find_prop("parallel_backend") or None
        build_only = fc.find_prop("build_only") or False
        if build_only and restrict_phases is None:
            restrict_phases = ["build"]

        # CUDA builds (other than 10) run tests on a GPU machine.
        gpu_resource = None
        if cuda_version and cuda_version != "10":
            gpu_resource = "medium"

        c = Conf(
            distro_name,
            parms_list,
            parms_list_ignored_for_docker_image,
            python_version,
            cuda_version,
            is_xla,
            vulkan,
            restrict_phases,
            gpu_resource,
            is_libtorch=is_libtorch,
            is_important=is_important,
            parallel_backend=parallel_backend,
        )

        # run docs builds on "pytorch-linux-xenial-py3.6-gcc5.4". Docs builds
        # should run on a CPU-only build that runs on all PRs.
        if distro_name == 'xenial' and fc.find_prop("pyver") == '3.6' \
                and cuda_version is None \
                and parallel_backend is None \
                and compiler_name == 'gcc' \
                and fc.find_prop('compiler_version') == '5.4':
            c.dependent_tests = gen_docs_configs(c)

        if cuda_version == "10.1" and python_version == "3.6" and not is_libtorch:
            c.dependent_tests = gen_dependent_configs(c)

        # The BC check piggybacks on the plain gcc 5.4 build.
        if (compiler_name == "gcc"
                and compiler_version == "5.4"
                and not is_libtorch
                and parallel_backend is None):
            bc_breaking_check = Conf(
                "backward-compatibility-check",
                [],
                is_xla=False,
                restrict_phases=["test"],
                is_libtorch=False,
                is_important=True,
                parent_build=c,
            )
            c.dependent_tests.append(bc_breaking_check)

        config_list.append(c)

    return config_list
def instantiate_configs():
    """Instantiate a Conf for every configuration discovered in the config
    tree, attaching dependent-test and BC-check configs where applicable.

    Returns:
        list: Conf objects, one per DFS-discovered configuration.
    """
    config_list = []

    root = get_root()
    found_configs = conf_tree.dfs(root)
    for fc in found_configs:

        # BUG FIX: restrict_phases used to be initialized once *outside* the
        # loop, so after the first android config set it to ["build"], every
        # subsequent config silently inherited that restriction. Reset it
        # per configuration instead (as the android/asan logic below assumes).
        restrict_phases = None

        distro_name = fc.find_prop("distro_name")
        compiler_name = fc.find_prop("compiler_name")
        compiler_version = fc.find_prop("compiler_version")
        is_xla = fc.find_prop("is_xla") or False
        # Name components that differentiate the build but are not baked
        # into the docker image name.
        parms_list_ignored_for_docker_image = []

        # cuda/android configs name the interpreter explicitly; others fold
        # it into the first name component as "py<ver>".
        python_version = None
        if compiler_name == "cuda" or compiler_name == "android":
            python_version = fc.find_prop("pyver")
            parms_list = [fc.find_prop("abbreviated_pyver")]
        else:
            parms_list = ["py" + fc.find_prop("pyver")]

        cuda_version = None
        if compiler_name == "cuda":
            cuda_version = fc.find_prop("compiler_version")
        elif compiler_name == "android":
            android_ndk_version = fc.find_prop("compiler_version")
            # TODO: do we need clang to compile host binaries like protoc?
            parms_list.append("clang5")
            parms_list.append("android-ndk-" + android_ndk_version)
            android_abi = fc.find_prop("android_abi")
            parms_list_ignored_for_docker_image.append(android_abi)
            # Android builds have no test phase.
            restrict_phases = ["build"]
        elif compiler_name:
            gcc_version = compiler_name + (fc.find_prop("compiler_version") or "")
            parms_list.append(gcc_version)

            # TODO: This is a nasty special case
            if compiler_name == "clang" and not is_xla:
                parms_list.append("asan")
                python_version = fc.find_prop("pyver")
                parms_list[0] = fc.find_prop("abbreviated_pyver")

        if cuda_version in ["9.2", "10", "10.1"]:
            # TODO The gcc version is orthogonal to CUDA version?
            parms_list.append("gcc7")

        is_libtorch = fc.find_prop("is_libtorch") or False
        is_important = fc.find_prop("is_important") or False
        parallel_backend = fc.find_prop("parallel_backend") or None

        # CUDA builds (other than 10) run tests on a GPU machine.
        gpu_resource = None
        if cuda_version and cuda_version != "10":
            gpu_resource = "medium"

        c = Conf(
            distro_name,
            parms_list,
            parms_list_ignored_for_docker_image,
            python_version,
            cuda_version,
            is_xla,
            restrict_phases,
            gpu_resource,
            is_libtorch=is_libtorch,
            is_important=is_important,
            parallel_backend=parallel_backend,
        )

        if cuda_version == "9" and python_version == "3.6" and not is_libtorch:
            c.dependent_tests = gen_dependent_configs(c)

        # The BC check piggybacks on the plain gcc 5.4 build.
        if (compiler_name == "gcc"
                and compiler_version == "5.4"
                and not is_libtorch
                and parallel_backend is None):
            bc_breaking_check = Conf(
                "backward-compatibility-check",
                [],
                is_xla=False,
                restrict_phases=["test"],
                is_libtorch=False,
                is_important=True,
                parent_build=c,
            )
            c.dependent_tests.append(bc_breaking_check)

        config_list.append(c)

    return config_list
def instantiate_configs():
    """Instantiate a Conf for every configuration found in the config tree.

    Returns:
        list: Conf objects, one per DFS-discovered configuration; CUDA 9 /
        Python 3.6 builds additionally carry dependent test configs.
    """
    config_list = []

    root = get_root()
    found_configs = conf_tree.dfs(root)
    for fc in found_configs:

        # BUG FIX: restrict_phases used to be initialized once *outside* the
        # loop, so after the first android config set it to ["build"], every
        # subsequent config silently inherited that restriction. Reset it
        # per configuration instead.
        restrict_phases = None

        distro_name = fc.find_prop("distro_name")

        # Xenial configs carry the interpreter version explicitly; other
        # distros encode it as a "py<ver>" name component.
        python_version = None
        if distro_name == "xenial":
            python_version = fc.find_prop("pyver")
            parms_list = [fc.find_prop("abbreviated_pyver")]
        else:
            parms_list = ["py" + fc.find_prop("pyver")]

        compiler_name = fc.find_prop("compiler_name")

        cuda_version = None
        if compiler_name == "cuda":
            cuda_version = fc.find_prop("compiler_version")
        elif compiler_name == "android":
            android_ndk_version = fc.find_prop("compiler_version")
            # TODO: do we need clang to compile host binaries like protoc?
            parms_list.append("clang5")
            parms_list.append("android-ndk-" + android_ndk_version)
            # Android builds have no test phase.
            restrict_phases = ["build"]
        elif compiler_name:
            gcc_version = compiler_name + (fc.find_prop("compiler_version") or "")
            parms_list.append(gcc_version)
            if compiler_name == "clang":
                parms_list.append("asan")

        if cuda_version in ["9.2", "10"]:
            # TODO The gcc version is orthogonal to CUDA version?
            parms_list.append("gcc7")

        is_xla = fc.find_prop("is_xla") or False
        is_namedtensor = fc.find_prop("is_namedtensor") or False
        is_important = fc.find_prop("is_important") or False

        # CUDA builds (other than 10) run tests on a GPU machine.
        gpu_resource = None
        if cuda_version and cuda_version != "10":
            gpu_resource = "medium"

        c = Conf(
            distro_name,
            parms_list,
            python_version,
            cuda_version,
            is_xla,
            restrict_phases,
            gpu_resource,
            is_namedtensor=is_namedtensor,
            is_important=is_important,
        )

        if cuda_version == "9" and python_version == "3.6":
            c.dependent_tests = gen_dependent_configs(c)

        config_list.append(c)

    return config_list