def copy_dasm_files(spmi_location, upload_directory, tag_name):
    """Copies .dasm files to a temp directory, zips them, and copies the compressed
    file to the upload directory.

    Args:
        spmi_location (string): Location where .dasm files are present
        upload_directory (string): Upload directory
        tag_name (string): tag_name used in zip file name.
    """
    print("Copy .dasm files")

    # Create upload_directory
    if not os.path.isdir(upload_directory):
        os.makedirs(upload_directory)

    dasm_file_present = False
    # Create temp directory to copy all issues to upload. We don't want to create a sub-folder
    # inside issues_directory because that will also get included twice.
    with TempDir() as prep_directory:
        for file_path, dirs, files in walk(spmi_location, topdown=True):
            # Credit: https://stackoverflow.com/a/19859907
            # (a no-op here; kept so the walk can be pruned in place if needed)
            dirs[:] = [d for d in dirs]
            for name in files:
                if not name.lower().endswith(".dasm"):
                    continue

                dasm_src_file = path.join(file_path, name)
                dasm_dst_file = dasm_src_file.replace(spmi_location, prep_directory)
                dst_directory = path.dirname(dasm_dst_file)
                if not os.path.exists(dst_directory):
                    os.makedirs(dst_directory)
                try:
                    shutil.copy2(dasm_src_file, dasm_dst_file)
                    dasm_file_present = True
                except PermissionError as pe_error:
                    print('Ignoring PermissionError: {0}'.format(pe_error))

        # If there are no diffs, create a zip file with a single file in it.
        # Otherwise, AzDO considers it a failed job.
        # See https://github.com/dotnet/arcade/issues/8200
        if not dasm_file_present:
            no_diff = os.path.join(prep_directory, "nodiff.txt")
            with open(no_diff, "w") as no_diff_file:
                no_diff_file.write("No diffs found!")

        # Zip compress the files we will upload
        zip_path = os.path.join(prep_directory, "Asmdiffs_" + tag_name)
        print("Creating archive: " + zip_path)
        shutil.make_archive(zip_path, 'zip', prep_directory)

        zip_path += ".zip"
        dst_zip_path = os.path.join(upload_directory, "Asmdiffs_" + tag_name + ".zip")
        print("Copying {} to {}".format(zip_path, dst_zip_path))
        try:
            shutil.copy2(zip_path, dst_zip_path)
        except PermissionError as pe_error:
            print('Ignoring PermissionError: {0}'.format(pe_error))
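
# For reference, a standalone sketch of the stage-zip-copy pattern used above,
# with hypothetical paths; it is not part of the pipeline scripts. Note that
# shutil.make_archive appends ".zip" to the base name it is given and returns
# the full path of the archive it created.
def _archive_pattern_example():
    import os
    import shutil
    import tempfile

    with tempfile.TemporaryDirectory() as staging, tempfile.TemporaryDirectory() as out_dir:
        # Write a placeholder so the archive is never empty.
        with open(os.path.join(staging, "nodiff.txt"), "w") as placeholder:
            placeholder.write("No diffs found!")
        archive = shutil.make_archive(os.path.join(out_dir, "Asmdiffs_example"), "zip", staging)
        print("created", archive)  # .../Asmdiffs_example.zip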
def main(main_args):
    """Main entrypoint

    Args:
        main_args ([type]): Arguments to the script
    """
    coreclr_args = setup_args(main_args)

    antigen_directory = coreclr_args.antigen_directory
    core_root = coreclr_args.core_root
    tag_name = "{}-{}".format(coreclr_args.run_configuration, coreclr_args.partition)
    output_directory = coreclr_args.output_directory
    run_duration = coreclr_args.run_duration
    if not run_duration:
        run_duration = 60

    path_to_corerun = os.path.join(core_root, "corerun")
    path_to_tool = os.path.join(antigen_directory, "Antigen")
    if is_windows:
        path_to_corerun += ".exe"
        path_to_tool += ".exe"

    if not is_windows:
        # Disable core dumps. The fuzzers have their own graceful handling for
        # runtime crashes. Especially on macOS we can quickly fill up the drive
        # with dumps if we find lots of crashes since dumps there are very big.
        import resource
        resource.setrlimit(resource.RLIMIT_CORE, (0, 0))

    try:
        # Run tool such that issues are placed in a temp folder
        with TempDir() as temp_location:
            antigen_log = path.join(temp_location, get_antigen_filename(tag_name))
            run_command([path_to_tool, "-c", path_to_corerun, "-o", temp_location, "-d", str(run_duration)],
                        _exit_on_fail=True, _output_file=antigen_log)

            # Copy issues for upload
            print("Copying issues to " + output_directory)
            copy_issues(temp_location, output_directory, tag_name)
    except PermissionError as pe:
        print("Got error: {}".format(pe))
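
# The core-dump suppression above uses the Unix-only `resource` module; in
# isolation the pattern looks like this (a sketch, meaningful only on
# Linux/macOS).
def _disable_core_dumps_example():
    import resource

    # Query the current soft/hard limits for core files, then set both to zero
    # so crashing child processes cannot write core dumps.
    soft, hard = resource.getrlimit(resource.RLIMIT_CORE)
    print("RLIMIT_CORE before:", soft, hard)
    resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
    print("RLIMIT_CORE after:", resource.getrlimit(resource.RLIMIT_CORE))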
def main(main_args):
    """Main entrypoint

    Args:
        main_args ([type]): Arguments to the script
    """
    coreclr_args = setup_args(main_args)

    fuzzlyn_directory = coreclr_args.fuzzlyn_directory
    core_root = coreclr_args.core_root
    tag_name = "{}-{}".format(coreclr_args.run_configuration, coreclr_args.partition)
    output_directory = coreclr_args.output_directory
    if not coreclr_args.run_duration:
        run_duration = 60 * 60  # 60 minutes by default
    else:
        run_duration = int(coreclr_args.run_duration) * 60  # convert minutes to seconds

    path_to_corerun = os.path.join(core_root, "corerun")
    path_to_tool = os.path.join(fuzzlyn_directory, "Fuzzlyn")
    if is_windows:
        path_to_corerun += ".exe"
        path_to_tool += ".exe"

    os.makedirs(output_directory, exist_ok=True)

    if not is_windows:
        # Disable core dumps. The fuzzers have their own graceful handling for
        # runtime crashes. Especially on macOS we can quickly fill up the drive
        # with dumps if we find lots of crashes since dumps there are very big.
        import resource
        resource.setrlimit(resource.RLIMIT_CORE, (0, 0))

    with TempDir() as temp_location:
        summary_file_name = "issues-summary-{}.txt".format(tag_name)
        summary_file_path = path.join(temp_location, summary_file_name)
        with open(summary_file_path, 'w'):
            pass  # create an empty summary file for Fuzzlyn to append events to

        upload_fuzzer_output_path = path.join(output_directory, "Fuzzlyn-{}.log".format(tag_name))

        with open(summary_file_path, 'r') as fp:
            exit_evt = threading.Event()
            reduce_examples = ReduceExamples(fp, temp_location, path_to_tool, path_to_corerun, exit_evt)
            reduce_examples.start()

            run_command([
                path_to_tool,
                "--seconds-to-run", str(run_duration),
                "--output-events-to", summary_file_path,
                "--host", path_to_corerun,
                "--parallelism", "-1",
                "--known-errors", "dotnet/runtime"],
                _exit_on_fail=True, _output_file=upload_fuzzer_output_path)

            exit_evt.set()
            reduce_examples.join()

        upload_summary_file_path = path.join(output_directory, summary_file_name)
        print("Copying summary: {} -> {}".format(summary_file_path, upload_summary_file_path))
        shutil.copy2(summary_file_path, upload_summary_file_path)

        upload_issues_zip_path = path.join(output_directory, "AllIssues-{}".format(tag_name))
        print("Creating zip {}.zip".format(upload_issues_zip_path))
        shutil.make_archive(upload_issues_zip_path, 'zip', temp_location)
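
# ReduceExamples (defined elsewhere in this script) tails the summary file on a
# worker thread while Fuzzlyn appends events to it. The coordination pattern,
# stripped of the Fuzzlyn specifics, is sketched below; the names here are
# illustrative only.
def _tail_file_example():
    import threading
    import time

    def tail(fp, stop_evt):
        # Poll for new lines until the producer signals completion, then drain
        # whatever is left in the file.
        while not stop_evt.is_set():
            line = fp.readline()
            if line:
                print("event:", line.rstrip())
            else:
                time.sleep(0.5)
        for line in fp:
            print("event:", line.rstrip())

    open("events.txt", "w").close()  # create the file before the reader opens it
    with open("events.txt", "r") as fp:
        stop_evt = threading.Event()
        reader = threading.Thread(target=tail, args=(fp, stop_evt))
        reader.start()
        with open("events.txt", "a") as writer:
            writer.write("example event\n")
        stop_evt.set()
        reader.join()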
def main(main_args):
    r""" Prepare the Helix data for SuperPMI asmdiffs Azure DevOps pipeline.

    The Helix correlation payload directory is created and populated as follows:

    <source_directory>\payload -- the correlation payload directory
        -- contains the *.py scripts from <source_directory>\src\coreclr\scripts
        -- contains superpmi.exe, mcs.exe from the target-specific build

    <source_directory>\payload\base
        -- contains the baseline JITs

    <source_directory>\payload\diff
        -- contains the diff JITs

    <source_directory>\payload\jit-analyze
        -- contains the self-contained jit-analyze build (from dotnet/jitutils)

    <source_directory>\payload\git
        -- contains a Portable ("xcopy installable") `git` tool, downloaded from:
           https://netcorenativeassets.blob.core.windows.net/resource-packages/external/windows/git/Git-2.32.0-64-bit.zip
           This is needed by jit-analyze to do `git diff` on the generated asm. The
           `<source_directory>\payload\git\cmd` directory is added to the PATH.
           NOTE: this only runs on Windows.

    Then, AzDO pipeline variables are set.

    Args:
        main_args ([type]): Arguments to the script

    Returns:
        0 on success, otherwise a failure code
    """

    # Set up logging.
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(logging.INFO)
    logger.addHandler(stream_handler)

    coreclr_args = setup_args(main_args)

    arch = coreclr_args.arch
    source_directory = coreclr_args.source_directory
    product_directory = coreclr_args.product_directory

    python_path = sys.executable

    # CorrelationPayload directories
    correlation_payload_directory = os.path.join(source_directory, "payload")
    superpmi_scripts_directory = os.path.join(source_directory, 'src', 'coreclr', 'scripts')
    base_jit_directory = os.path.join(correlation_payload_directory, "base")
    diff_jit_directory = os.path.join(correlation_payload_directory, "diff")
    jit_analyze_build_directory = os.path.join(correlation_payload_directory, "jit-analyze")
    git_directory = os.path.join(correlation_payload_directory, "git")

    ######## Get the portable `git` package

    git_url = "https://netcorenativeassets.blob.core.windows.net/resource-packages/external/windows/git/Git-2.32.0-64-bit.zip"

    print('Downloading {} -> {}'.format(git_url, git_directory))

    urls = [ git_url ]
    # There are too many files to be verbose in the download and copy.
    download_files(urls, git_directory, verbose=False, display_progress=False)
    git_exe_tool = os.path.join(git_directory, "cmd", "git.exe")
    if not os.path.isfile(git_exe_tool):
        print('Error: `git` not found at {}'.format(git_exe_tool))
        return 1

    ######## Get SuperPMI python scripts

    # Copy *.py to CorrelationPayload
    print('Copying {} -> {}'.format(superpmi_scripts_directory, correlation_payload_directory))
    copy_directory(superpmi_scripts_directory, correlation_payload_directory, verbose_copy=True,
                   match_func=lambda path: any(path.endswith(extension) for extension in [".py"]))

    ######## Get baseline JIT

    # Figure out which baseline JIT to use, and download it.
    if not os.path.exists(base_jit_directory):
        os.makedirs(base_jit_directory)

    print("Fetching history of `main` branch so we can find the baseline JIT")
    run_command(["git", "fetch", "--depth=500", "origin", "main"], source_directory, _exit_on_fail=True)

    # Note: we only support downloading Windows versions of the JIT currently. To support downloading
    # non-Windows JITs on a Windows machine, pass `-host_os <os>` to jitrollingbuild.py.
print("Running jitrollingbuild.py download to get baseline JIT") jit_rolling_build_script = os.path.join(superpmi_scripts_directory, "jitrollingbuild.py") _, _, return_code = run_command([ python_path, jit_rolling_build_script, "download", "-arch", arch, "-target_dir", base_jit_directory], source_directory) if return_code != 0: print('{} failed with {}'.format(jit_rolling_build_script, return_code)) return return_code ######## Get diff JIT print('Copying diff binaries {} -> {}'.format(product_directory, diff_jit_directory)) copy_directory(product_directory, diff_jit_directory, verbose_copy=True, match_func=match_jit_files) ######## Get SuperPMI tools # Put the SuperPMI tools directly in the root of the correlation payload directory. print('Copying SuperPMI tools {} -> {}'.format(product_directory, correlation_payload_directory)) copy_directory(product_directory, correlation_payload_directory, verbose_copy=True, match_func=match_superpmi_tool_files) ######## Clone and build jitutils: we only need jit-analyze try: with TempDir() as jitutils_directory: run_command( ["git", "clone", "--quiet", "--depth", "1", "https://github.com/dotnet/jitutils", jitutils_directory]) # Make sure ".dotnet" directory exists, by running the script at least once dotnet_script_name = "dotnet.cmd" if is_windows else "dotnet.sh" dotnet_script_path = os.path.join(source_directory, dotnet_script_name) run_command([dotnet_script_path, "--info"], jitutils_directory) # Build jit-analyze only, and build it as a self-contained app (not framework-dependent). # What target RID are we building? It depends on where we're going to run this code. # The RID catalog is here: https://docs.microsoft.com/en-us/dotnet/core/rid-catalog. # Windows x64 => win-x64 # Windows x86 => win-x86 # Windows arm32 => win-arm # Windows arm64 => win-arm64 # Linux x64 => linux-x64 # Linux arm32 => linux-arm # Linux arm64 => linux-arm64 # macOS x64 => osx-x64 # NOTE: we currently only support running on Windows x86/x64 (we don't pass the target OS) RID = None if arch == "x86": RID = "win-x86" if arch == "x64": RID = "win-x64" # Set dotnet path to run build os.environ["PATH"] = os.path.join(source_directory, ".dotnet") + os.pathsep + os.environ["PATH"] run_command([ "dotnet", "publish", "-c", "Release", "--runtime", RID, "--self-contained", "--output", jit_analyze_build_directory, os.path.join(jitutils_directory, "src", "jit-analyze", "jit-analyze.csproj")], jitutils_directory) except PermissionError as pe_error: # Details: https://bugs.python.org/issue26660 print('Ignoring PermissionError: {0}'.format(pe_error)) jit_analyze_tool = os.path.join(jit_analyze_build_directory, "jit-analyze.exe") if not os.path.isfile(jit_analyze_tool): print('Error: {} not found'.format(jit_analyze_tool)) return 1 ######## Set pipeline variables helix_source_prefix = "official" creator = "" print('Setting pipeline variables:') set_pipeline_variable("CorrelationPayloadDirectory", correlation_payload_directory) set_pipeline_variable("Architecture", arch) set_pipeline_variable("Creator", creator) set_pipeline_variable("HelixSourcePrefix", helix_source_prefix) return 0
def main(main_args):
    """Main entrypoint

    Args:
        main_args ([type]): Arguments to the script
    """
    coreclr_args = setup_args(main_args)

    arch_name = coreclr_args.arch
    os_name = "win" if coreclr_args.platform.lower() == "windows" else "linux"
    run_configuration = "{}-{}".format(os_name, arch_name)
    source_directory = coreclr_args.source_directory

    # CorrelationPayload directories
    correlation_payload_directory = path.join(coreclr_args.source_directory, "payload")
    scripts_src_directory = path.join(source_directory, "src", "coreclr", 'scripts')
    coreroot_directory = path.join(correlation_payload_directory, "CoreRoot")
    dst_directory = path.join(correlation_payload_directory, "exploratory")

    helix_source_prefix = "official"
    creator = ""

    repo_urls = {
        "Antigen": "https://github.com/kunalspathak/Antigen.git",
        "Fuzzlyn": "https://github.com/jakobbotsch/Fuzzlyn.git",
    }

    # tool_name is verified in setup_args
    assert coreclr_args.tool_name in repo_urls
    repo_url = repo_urls[coreclr_args.tool_name]

    # create exploratory directory
    print('Copying {} -> {}'.format(scripts_src_directory, coreroot_directory))
    copy_directory(scripts_src_directory, coreroot_directory, verbose_output=True,
                   match_func=lambda path: any(path.endswith(extension) for extension in [".py"]))

    if is_windows:
        acceptable_copy = lambda path: any(path.endswith(extension) for extension in [".py", ".dll", ".exe", ".json"])
    else:
        # Need to accept files without any extension, which is how executable files' names look.
        acceptable_copy = lambda path: (os.path.basename(path).find(".") == -1) or any(
            path.endswith(extension) for extension in [".py", ".dll", ".so", ".json", ".a"])

    # copy CORE_ROOT
    print('Copying {} -> {}'.format(coreclr_args.core_root_directory, coreroot_directory))
    copy_directory(coreclr_args.core_root_directory, coreroot_directory, verbose_output=True,
                   match_func=acceptable_copy)

    try:
        with TempDir() as tool_code_directory:
            # clone the tool
            run_command([
                "git", "clone", "--quiet", "--depth", "1", repo_url, tool_code_directory])

            publish_dir = path.join(tool_code_directory, "publish")

            # build the tool
            with ChangeDir(tool_code_directory):
                dotnet_cmd = os.path.join(source_directory, "dotnet.cmd")
                if not is_windows:
                    dotnet_cmd = os.path.join(source_directory, "dotnet.sh")
                run_command([dotnet_cmd, "publish", "-c", "Release", "--self-contained",
                             "-r", run_configuration, "-o", publish_dir], _exit_on_fail=True)

            dll_name = coreclr_args.tool_name + ".dll"
            if not os.path.exists(path.join(publish_dir, dll_name)):
                raise FileNotFoundError("{} not present at {}".format(dll_name, publish_dir))

            # copy tool
            print('Copying {} -> {}'.format(publish_dir, dst_directory))
            copy_directory(publish_dir, dst_directory, verbose_output=True, match_func=acceptable_copy)
    except PermissionError as pe:
        print("Skipping file. Got error: {}".format(pe))

    # create foo.txt in work_item directories
    workitem_directory = path.join(source_directory, "workitem")
    os.mkdir(workitem_directory)
    foo_txt = os.path.join(workitem_directory, "foo.txt")
    with open(foo_txt, "w") as foo_txt_file:
        foo_txt_file.write("hello world!")

    # Set variables
    print('Setting pipeline variables:')
    set_pipeline_variable("CorrelationPayloadDirectory", correlation_payload_directory)
    set_pipeline_variable("WorkItemDirectory", workitem_directory)
    set_pipeline_variable("RunConfiguration", run_configuration)
    set_pipeline_variable("Creator", creator)
    set_pipeline_variable("HelixSourcePrefix", helix_source_prefix)
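
# copy_directory (from these scripts) takes a match_func predicate that decides,
# per file path, whether a file is copied. Conceptually it behaves like this
# simplified sketch (an illustration, not the repo's actual helper).
def _copy_tree_filtered(src, dst, match_func):
    import os
    import shutil

    # Recreate src's layout under dst, copying only the files for which
    # match_func(full_path) returns True.
    for root, _, files in os.walk(src):
        for name in files:
            src_file = os.path.join(root, name)
            if not match_func(src_file):
                continue
            dst_file = os.path.join(dst, os.path.relpath(src_file, src))
            os.makedirs(os.path.dirname(dst_file), exist_ok=True)
            shutil.copy2(src_file, dst_file)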
def build_and_run(coreclr_args, output_mch_name):
    """Build the microbenchmarks and run them under "superpmi collect"

    Args:
        coreclr_args (CoreClrArguments): Arguments used to drive the collection
        output_mch_name (string): Name of the output mch file
    """
    arch = coreclr_args.arch
    python_path = sys.executable
    core_root = coreclr_args.core_root
    superpmi_directory = coreclr_args.superpmi_directory
    performance_directory = coreclr_args.performance_directory
    log_file = coreclr_args.log_file
    partition_count = coreclr_args.partition_count
    partition_index = coreclr_args.partition_index
    dotnet_directory = os.path.join(performance_directory, "tools", "dotnet", arch)
    dotnet_exe = os.path.join(dotnet_directory, "dotnet")

    artifacts_directory = os.path.join(performance_directory, "artifacts")
    artifacts_packages_directory = os.path.join(artifacts_directory, "packages")
    project_file = os.path.join(performance_directory, "src", "benchmarks", "micro", "MicroBenchmarks.csproj")
    benchmarks_dll = os.path.join(artifacts_directory, "MicroBenchmarks.dll")

    # Workaround https://github.com/dotnet/sdk/issues/23430
    project_file = os.path.realpath(project_file)

    if is_windows:
        shim_name = "%JitName%"
        corerun_exe = "CoreRun.exe"
        script_name = "run_microbenchmarks.bat"
    else:
        shim_name = "$JitName"
        corerun_exe = "corerun"
        script_name = "run_microbenchmarks.sh"

    make_executable(dotnet_exe)

    # Start with a "dotnet --info" to see what we've got.
    run_command([dotnet_exe, "--info"])

    env_copy = os.environ.copy()
    if is_windows:
        # Try to work around problem with random NuGet failures in "dotnet restore":
        #   error NU3037: Package 'System.Runtime 4.1.0' from source 'https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-public/nuget/v3/index.json':
        #   The repository primary signature validity period has expired. [C:\h\w\A3B008C0\w\B581097F\u\performance\src\benchmarks\micro\MicroBenchmarks.csproj]
        # Using environment variable specified in https://github.com/NuGet/NuGet.Client/pull/4259.
        env_copy["NUGET_EXPERIMENTAL_CHAIN_BUILD_RETRY_POLICY"] = "9,2000"

    # If `dotnet restore` fails, retry.
    num_tries = 3
    for try_num in range(num_tries):
        # On the last try, exit on fail
        exit_on_fail = try_num + 1 == num_tries
        (_, _, return_code) = run_command(
            [dotnet_exe, "restore", project_file, "--packages", artifacts_packages_directory],
            _exit_on_fail=exit_on_fail, _env=env_copy)
        if return_code == 0:
            # It succeeded!
            break
        print("Try {} of {} failed with error code {}: trying again".format(try_num + 1, num_tries, return_code))
        # Sleep 5 seconds before trying again
        time.sleep(5)

    run_command([
        dotnet_exe, "build", project_file, "--configuration", "Release",
        "--framework", "net7.0", "--no-restore",
        "/p:NuGetPackageRoot=" + artifacts_packages_directory,
        "-o", artifacts_directory], _exit_on_fail=True)

    # Disable ReadyToRun so we always JIT R2R methods and collect them
    collection_command = f"{dotnet_exe} {benchmarks_dll} --filter \"*\" --corerun {os.path.join(core_root, corerun_exe)} --partition-count {partition_count} " \
                         f"--partition-index {partition_index} --envVars COMPlus_JitName:{shim_name} " \
                         " COMPlus_ZapDisable:1 COMPlus_ReadyToRun:0 " \
                         "--iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart"

    # Generate the execution script in Temp location
    with TempDir() as temp_location:
        script_name = os.path.join(temp_location, script_name)

        contents = []
        # Unset the JitName so the dotnet process will not fail
        if is_windows:
            contents.append("set JitName=%COMPlus_JitName%")
            contents.append("set COMPlus_JitName=")
        else:
            contents.append("#!/bin/bash")
            contents.append("export JitName=$COMPlus_JitName")
            contents.append("unset COMPlus_JitName")
        contents.append(f"pushd {performance_directory}")
        contents.append(collection_command)

        with open(script_name, "w") as collection_script:
            collection_script.write(os.linesep.join(contents))

        print()
        print(f"{script_name} contents:")
        print("******************************************")
        print(os.linesep.join(contents))
        print("******************************************")

        make_executable(script_name)

        run_command([
            python_path, os.path.join(superpmi_directory, "superpmi.py"), "collect",
            "-core_root", core_root,
            "-output_mch_path", output_mch_name,
            "-log_file", log_file,
            "-log_level", "debug",
            script_name], _exit_on_fail=True)
def copy_issues(issues_directory, upload_directory, tag_name):
    """Copies issue files (only the 5 smallest files from each folder) into the upload_directory

    Args:
        issues_directory (string): Issues directory
        upload_directory (string): Upload directory
        tag_name (string): Tag name for zip file
    """
    # Create upload_directory
    if not os.path.isdir(upload_directory):
        os.makedirs(upload_directory)

    # Create temp directory to copy all issues to upload. We don't want to create a sub-folder
    # inside issues_directory because that will also get included twice.
    with TempDir() as prep_directory:

        def sorter_by_size(pair):
            """Sorts the list of (file_name, file_size) tuples in ascending order of file_size

            Args:
                pair ([(string, int)]): List of (file_name, file_size) tuples
            """
            pair.sort(key=lambda x: x[1], reverse=False)
            return pair

        summary_of_summary = []
        for file_path, dirs, files in walk(issues_directory, topdown=True):
            filename_with_size = []
            # Credit: https://stackoverflow.com/a/19859907
            # (a no-op here; kept so the walk can be pruned in place if needed)
            dirs[:] = [d for d in dirs]
            for name in files:
                if not name.lower().endswith(".g.cs"):
                    continue

                curr_file_path = path.join(file_path, name)
                size = getsize(curr_file_path)
                filename_with_size.append((curr_file_path, size))

            if len(filename_with_size) == 0:
                continue

            summary_file = path.join(file_path, "summary.txt")
            summary_of_summary.append("**** " + file_path)
            with open(summary_file, 'r') as sf:
                summary_of_summary.append(sf.read())
            filename_with_size.append((summary_file, 0))  # always copy summary.txt

            # Copy at most 5 issue files from each bucket, plus summary.txt
            # (which sorts first because of its recorded size of 0).
            sorted_files = [f[0] for f in sorter_by_size(filename_with_size)[:6]]

            print('### Copying below files from {0} to {1}:'.format(issues_directory, prep_directory))
            print('')
            print(os.linesep.join(sorted_files))
            for src_file in sorted_files:
                dst_file = src_file.replace(issues_directory, prep_directory)
                dst_directory = path.dirname(dst_file)
                if not os.path.exists(dst_directory):
                    os.makedirs(dst_directory)
                try:
                    shutil.copy2(src_file, dst_file)
                except PermissionError as pe_error:
                    print('Ignoring PermissionError: {0}'.format(pe_error))

        issues_summary_file_name = "issues-summary-{}.txt".format(tag_name)
        print("Creating {} in {}".format(issues_summary_file_name, upload_directory))
        issues_summary_file = os.path.join(upload_directory, issues_summary_file_name)
        with open(issues_summary_file, 'w') as sf:
            sf.write(os.linesep.join(summary_of_summary))

        # Also copy the issues-summary inside the zip folder
        dst_issue_summary_file = os.path.join(prep_directory, issues_summary_file_name)
        try:
            shutil.copy2(issues_summary_file, dst_issue_summary_file)
        except PermissionError as pe_error:
            print('Ignoring PermissionError: {0}'.format(pe_error))

        # Zip compress the files we will upload
        zip_path = os.path.join(prep_directory, "AllIssues-" + tag_name)
        print("Creating archive: " + zip_path)
        shutil.make_archive(zip_path, 'zip', prep_directory)

        zip_path += ".zip"
        dst_zip_path = os.path.join(upload_directory, "AllIssues-" + tag_name + ".zip")
        print("Copying {} to {}".format(zip_path, dst_zip_path))
        try:
            shutil.copy2(zip_path, dst_zip_path)
        except PermissionError as pe_error:
            print('Ignoring PermissionError: {0}'.format(pe_error))

    src_antigen_log = os.path.join(issues_directory, get_antigen_filename(tag_name))
    dst_antigen_log = os.path.join(upload_directory, get_antigen_filename(tag_name))
    print("Copying {} to {}".format(src_antigen_log, dst_antigen_log))
    try:
        shutil.copy2(src_antigen_log, dst_antigen_log)
    except PermissionError as pe_error:
        print('Ignoring PermissionError: {0}'.format(pe_error))
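
# The size-based selection in copy_issues keeps uploads small by preferring the
# smallest reproducers. In isolation the idea reduces to this sketch.
def _smallest_files(paths, count=5):
    import os

    # Pair each path with its size, sort ascending, and keep the `count`
    # smallest files.
    with_sizes = [(p, os.path.getsize(p)) for p in paths]
    with_sizes.sort(key=lambda pair: pair[1])
    return [p for p, _ in with_sizes[:count]]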
def main(main_args):
    """ Main entrypoint

    Args:
        main_args ([type]): Arguments to the script
    """

    coreclr_args = setup_args(main_args)
    source_directory = coreclr_args.source_directory

    # CorrelationPayload directories
    correlation_payload_directory = os.path.join(coreclr_args.source_directory, "payload")
    superpmi_src_directory = os.path.join(source_directory, 'src', 'coreclr', 'scripts')
    superpmi_dst_directory = os.path.join(correlation_payload_directory, "superpmi")
    arch = coreclr_args.arch
    helix_source_prefix = "official"
    creator = ""
    ci = True
    if is_windows:
        helix_queue = "Windows.10.Arm64" if arch == "arm64" else "Windows.10.Amd64.X86.Rt"
    else:
        if arch == "arm":
            helix_queue = "(Ubuntu.1804.Arm32)[email protected]/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm32v7-bfcd90a-20200121150440"
        elif arch == "arm64":
            helix_queue = "(Ubuntu.1804.Arm64)[email protected]/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm64v8-20210531091519-97d8652"
        else:
            helix_queue = "Ubuntu.1804.Amd64"

    # create superpmi directory
    print('Copying {} -> {}'.format(superpmi_src_directory, superpmi_dst_directory))
    copy_directory(superpmi_src_directory, superpmi_dst_directory, verbose_output=True,
                   match_func=lambda path: any(path.endswith(extension) for extension in [".py"]))

    if is_windows:
        acceptable_copy = lambda path: any(path.endswith(extension) for extension in [".py", ".dll", ".exe", ".json"])
    else:
        # Need to accept files without any extension, which is how executable files' names look.
        acceptable_copy = lambda path: (os.path.basename(path).find(".") == -1) or any(
            path.endswith(extension) for extension in [".py", ".dll", ".so", ".json"])

    print('Copying {} -> {}'.format(coreclr_args.core_root_directory, superpmi_dst_directory))
    copy_directory(coreclr_args.core_root_directory, superpmi_dst_directory, verbose_output=True,
                   match_func=acceptable_copy)

    # Copy all the test files to CORE_ROOT.
    # The reason is that there are lots of dependencies on *.Tests.dll and, to ensure we do not get
    # reflection errors, we just copy everything to CORE_ROOT so that, for all individual partitions,
    # the references will be present in CORE_ROOT.
    if coreclr_args.collection_name == "libraries_tests":
        print('Copying {} -> {}'.format(coreclr_args.input_directory, superpmi_dst_directory))

        def make_readable(folder_name):
            """Make everything under folder_name readable (mode 744: read+write+execute
            for the owner, read-only for group and others)

            Args:
                folder_name (string): folder to mark with 744
            """
            if is_windows:
                return

            print("Inside make_readable")
            run_command(["ls", "-l", folder_name])
            for file_path, dirs, files in os.walk(folder_name, topdown=True):
                for d in dirs:
                    os.chmod(os.path.join(file_path, d),
                             # read+write+execute for owner
                             (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) |
                             # read for group
                             (stat.S_IRGRP) |
                             # read for other
                             (stat.S_IROTH))

                for f in files:
                    os.chmod(os.path.join(file_path, f),
                             # read+write+execute for owner
                             (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) |
                             # read for group
                             (stat.S_IRGRP) |
                             # read for other
                             (stat.S_IROTH))
            run_command(["ls", "-l", folder_name])

        make_readable(coreclr_args.input_directory)
        copy_directory(coreclr_args.input_directory, superpmi_dst_directory, verbose_output=True,
                       match_func=acceptable_copy)

    # Workitem directories
    workitem_directory = os.path.join(source_directory, "workitem")
    input_artifacts = ""

    if coreclr_args.collection_name == "benchmarks":
        # Setup microbenchmarks
        setup_microbenchmark(workitem_directory, arch)
    else:
        # Setup for pmi/crossgen runs

        # Clone and build jitutils
        try:
            with TempDir() as jitutils_directory:
                run_command([
                    "git", "clone", "--quiet", "--depth", "1",
                    "https://github.com/dotnet/jitutils", jitutils_directory])

                # Make sure ".dotnet" directory exists, by running the script at least once
                dotnet_script_name = "dotnet.cmd" if is_windows else "dotnet.sh"
                dotnet_script_path = os.path.join(source_directory, dotnet_script_name)
                run_command([dotnet_script_path, "--info"], jitutils_directory)

                # Set dotnet path to run build
                os.environ["PATH"] = os.path.join(source_directory, ".dotnet") + os.pathsep + os.environ["PATH"]
                build_file = "build.cmd" if is_windows else "build.sh"
                run_command([os.path.join(jitutils_directory, build_file), "-p"], jitutils_directory)

                copy_files(os.path.join(jitutils_directory, "bin"), superpmi_dst_directory,
                           [os.path.join(jitutils_directory, "bin", "pmi.dll")])
        except PermissionError as pe_error:
            # Details: https://bugs.python.org/issue26660
            print('Ignoring PermissionError: {0}'.format(pe_error))

    # NOTE: we can't use the build machine ".dotnet" to run on all platforms. E.g., the Windows x86 build uses a
    # Windows x64 .dotnet\dotnet.exe that can't load a 32-bit shim. Thus, we always use corerun from Core_Root to
    # invoke crossgen2. The following will copy .dotnet to the correlation payload in case we change our mind and
    # need or want to use it for some scenarios.
    # # Copy ".dotnet" to correlation_payload_directory for crossgen2 job; it is needed to invoke crossgen2.dll
    # if coreclr_args.collection_type == "crossgen2":
    #     dotnet_src_directory = os.path.join(source_directory, ".dotnet")
    #     dotnet_dst_directory = os.path.join(correlation_payload_directory, ".dotnet")
    #     print('Copying {} -> {}'.format(dotnet_src_directory, dotnet_dst_directory))
    #     copy_directory(dotnet_src_directory, dotnet_dst_directory, verbose_output=False)

    # payload
    pmiassemblies_directory = os.path.join(workitem_directory, "pmiAssembliesDirectory")
    input_artifacts = os.path.join(pmiassemblies_directory, coreclr_args.collection_name)
    exclude_directory = ['Core_Root'] if coreclr_args.collection_name == "coreclr_tests" else []
    exclude_files = native_binaries_to_ignore
    if coreclr_args.collection_type == "crossgen2":
        print('Adding exclusions for crossgen2')
        # Currently, trying to crossgen2 R2RTest\Microsoft.Build.dll causes a pop-up failure, so exclude it.
        exclude_files += ["Microsoft.Build.dll"]

    if coreclr_args.collection_name == "libraries_tests":
        # libraries_tests artifacts contain files from the core_root folder. Exclude them.
        core_root_dir = coreclr_args.core_root_directory
        exclude_files += [item for item in os.listdir(core_root_dir)
                          if os.path.isfile(os.path.join(core_root_dir, item)) and
                          (item.endswith(".dll") or item.endswith(".exe"))]

    partition_files(coreclr_args.input_directory, input_artifacts, coreclr_args.max_size,
                    exclude_directory, exclude_files)

    # Set variables
    print('Setting pipeline variables:')
    set_pipeline_variable("CorrelationPayloadDirectory", correlation_payload_directory)
    set_pipeline_variable("WorkItemDirectory", workitem_directory)
    set_pipeline_variable("InputArtifacts", input_artifacts)
    set_pipeline_variable("Python", ' '.join(get_python_name()))
    set_pipeline_variable("Architecture", arch)
    set_pipeline_variable("Creator", creator)
    set_pipeline_variable("Queue", helix_queue)
    set_pipeline_variable("HelixSourcePrefix", helix_source_prefix)
    set_pipeline_variable("MchFileTag", coreclr_args.mch_file_tag)
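
# partition_files (defined elsewhere in these scripts) splits the assembly tree
# into size-capped work-item payloads. Independent of the real helper, the core
# idea is a greedy bin fill like this sketch (illustrative only).
def _partition_by_size(file_sizes, max_size):
    # file_sizes: list of (path, size_in_bytes) tuples. Greedily fill buckets so
    # that no bucket exceeds max_size bytes; an oversized single file ends up in
    # a bucket of its own.
    buckets = []
    current, current_size = [], 0
    for file_path, size in sorted(file_sizes, key=lambda pair: pair[1], reverse=True):
        if current and current_size + size > max_size:
            buckets.append(current)
            current, current_size = [], 0
        current.append(file_path)
        current_size += size
    if current:
        buckets.append(current)
    return buckets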