def make_readable(folder_name):
    """Recursively set mode 744 (rwxr--r--) on everything under `folder_name`.

    Despite the 'readable' name, the owner also keeps write+execute so
    directories remain traversable; group/other get read-only access.
    No-op on Windows, where POSIX permission bits do not apply.

    Args:
        folder_name (string): folder to mark with 744
    """
    if is_windows:
        return

    print("Inside make_readable")
    run_command(["ls", "-l", folder_name])

    # read+write+execute for owner, read for group, read for other == 744.
    # Hoisted once instead of duplicating the bitmask in both loops below.
    mode_744 = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) | stat.S_IRGRP | stat.S_IROTH

    for file_path, dirs, files in os.walk(folder_name, topdown=True):
        for d in dirs:
            os.chmod(os.path.join(file_path, d), mode_744)
        for f in files:
            os.chmod(os.path.join(file_path, f), mode_744)

    run_command(["ls", "-l", folder_name])
def setup_microbenchmark(workitem_directory, arch):
    """Clone dotnet/performance and install the dotnet SDK it needs.

    Args:
        workitem_directory (string): Path to work
        arch (string): Architecture for which dotnet will be installed
    """
    perf_dir = os.path.join(workitem_directory, "performance")

    clone_args = [
        "git", "clone", "--quiet", "--depth", "1",
        "https://github.com/dotnet/performance", perf_dir
    ]
    run_command(clone_args)

    with ChangeDir(perf_dir):
        install_dir = os.path.join(perf_dir, "tools", "dotnet", arch)
        install_script = os.path.join(perf_dir, "scripts", "dotnet.py")

        # The repo's own install script drives the SDK installation; bail out
        # early if the clone did not produce it.
        if not os.path.isfile(install_script):
            print("Missing " + install_script)
            return

        install_args = get_python_name() + [
            install_script, "install",
            "--architecture", arch,
            "--install-dir", install_dir,
            "--verbose"
        ]
        run_command(install_args)
def run(self):
    """Poll the shared examples file and reduce interesting examples.

    Loops until the exit event is set, reading newly appended JSON event
    lines. Only reduce BadResult examples since crashes take very long to
    reduce — crashes are still reported, just without a reduced example.
    Stops after five successful reductions.
    """
    reduced_count = 0
    while not self.exit_evt.wait(0.5):
        try:
            line = self.examples_file.readline()
        except ValueError:
            # File closed, means other thread exited (probably ctrl-C)
            return

        if not line:
            continue

        evt = json.loads(line)
        if evt["Kind"] != "ExampleFound":
            continue

        ex = evt["Example"]
        if ex["Kind"] != "BadResult":
            continue

        print("Reducing {}".format(ex['Seed']))
        output_path = path.join(self.examples_dir, str(ex["Seed"]) + ".cs")
        run_command([
            self.fuzzlyn_path,
            "--host", self.host_path,
            "--reduce",
            "--seed", str(ex['Seed']),
            "--output", output_path
        ])

        # Reduction succeeded only if the output file actually appeared.
        if path.exists(output_path):
            reduced_count += 1
            if reduced_count >= 5:
                print(
                    "Skipping reduction of remaining examples (reached limit of 5)"
                )
                return
def strip_unrelated_mc(coreclr_args, old_mch_filename, new_mch_filename):
    """Post-process a .mch file, stripping the method contexts that belong to
    BenchmarkDotNet/Perfolizer boilerplate rather than the benchmarks themselves.

    Args:
        coreclr_args (CoreclrArguments): Arguments
        old_mch_filename (string): Name of source .mch file
        new_mch_filename (string): Name of new .mch file to produce post-processing.
    """
    performance_directory = coreclr_args.performance_directory
    core_root = coreclr_args.core_root
    methods_to_strip_list = path.join(performance_directory, "methods_to_strip.mcl")
    mcs_exe = path.join(core_root, "mcs")

    def fall_back_to_copy():
        # mcs failed: keep the unstripped collection (and its index) rather
        # than losing the data entirely.
        copyfile(old_mch_filename, new_mch_filename)
        copyfile(old_mch_filename + ".mct", new_mch_filename + ".mct")

    # Gather method list to strip
    (mcs_out, _, return_code) = run_command([mcs_exe, "-dumpMap", old_mch_filename])
    if return_code != 0:
        # If strip command fails, then just copy the old_mch to new_mch
        print(
            f"-dumpMap failed. Copying {old_mch_filename} to {new_mch_filename}."
        )
        fall_back_to_copy()
        return

    match_pattern = re.compile('^(\\d+),(BenchmarkDotNet|Perfolizer)')
    filtered_context_list = []
    print("Method indices to strip:")
    for mc_entry in mcs_out.decode("utf-8").split(os.linesep):
        matched = match_pattern.match(mc_entry)
        if matched:
            print(matched.group(1))
            filtered_context_list.append(matched.group(1))
    print(f"Total {len(filtered_context_list)} methods.")

    with open(methods_to_strip_list, "w") as f:
        f.write('\n'.join(filtered_context_list))

    # Strip and produce new .mcs file
    strip_command = [
        mcs_exe, "-strip", methods_to_strip_list, old_mch_filename,
        new_mch_filename
    ]
    if run_command(strip_command)[2] != 0:
        # If strip command fails, then just copy the old_mch to new_mch
        print(
            f"-strip failed. Copying {old_mch_filename} to {new_mch_filename}."
        )
        fall_back_to_copy()
        return

    # Create toc file
    run_command([mcs_exe, "-toc", new_mch_filename])
def make_executable(file_name):
    """Grant read+execute permission on `file_name` for owner, group and other.

    No-op on Windows, where POSIX permission bits do not apply.

    Args:
        file_name (string): file to execute
    """
    if is_windows:
        return

    print("Inside make_executable")
    run_command(["ls", "-l", file_name])

    # read+execute for owner, group and other (555-style).
    permissions = stat.S_IRUSR | stat.S_IXUSR
    permissions |= stat.S_IRGRP | stat.S_IXGRP
    permissions |= stat.S_IROTH | stat.S_IXOTH
    os.chmod(file_name, permissions)

    run_command(["ls", "-l", file_name])
def main(main_args):
    """Main entrypoint: run the Antigen fuzzer and upload any issues found.

    Args:
        main_args ([type]): Arguments to the script
    """
    coreclr_args = setup_args(main_args)

    antigen_directory = coreclr_args.antigen_directory
    core_root = coreclr_args.core_root
    tag_name = "{}-{}".format(coreclr_args.run_configuration, coreclr_args.partition)
    output_directory = coreclr_args.output_directory
    run_duration = coreclr_args.run_duration
    if not run_duration:
        # Default duration passed to Antigen's -d switch — units are whatever
        # the tool expects; TODO confirm against Antigen's CLI docs.
        run_duration = 60

    path_to_corerun = os.path.join(core_root, "corerun")
    path_to_tool = os.path.join(antigen_directory, "Antigen")
    if is_windows:
        path_to_corerun += ".exe"
        path_to_tool += ".exe"

    try:
        # Run tool such that issues are placed in a temp folder
        with TempDir() as temp_location:
            antigen_log = path.join(temp_location, get_antigen_filename(tag_name))
            run_command([
                path_to_tool, "-c", path_to_corerun, "-o", temp_location, "-d",
                str(run_duration)
            ], _exit_on_fail=True, _output_file=antigen_log)

            # Copy issues for upload
            print("Copying issues to " + output_directory)
            copy_issues(temp_location, output_directory, tag_name)
    except PermissionError as pe:
        # Bug fix: print() does not apply %-style formatting, so the original
        # `print("Got error: %s", pe)` printed a literal "%s". Format explicitly.
        print("Got error: {}".format(pe))
def build_and_run(coreclr_args):
    """Run perf scenarios under crank and collect JIT data with SuperPMI.

    Installs dotnet and the crank controller into a temp directory, clones
    aspnet/benchmarks, runs each (config, scenario) pair once per runtime
    option set while the superpmi-shim-collector produces .mc files, then
    merges, cleans and indexes them into one .mch under
    coreclr_args.output_mch_path.

    Args:
        coreclr_args (CoreClrArguments): Arguments used to drive the run
            (source_directory, arch, host_os, output_mch_path, ...).
    """
    source_directory = coreclr_args.source_directory
    target_arch = coreclr_args.arch
    target_os = coreclr_args.host_os

    # Checked build supplies the JIT/shim under test; Release supplies the
    # runtime bits the benchmark apps actually execute on.
    checked_root = path.join(source_directory, "artifacts", "bin", "coreclr",
                             target_os + "." + coreclr_args.arch + ".Checked")
    release_root = path.join(source_directory, "artifacts", "bin", "coreclr",
                             target_os + "." + coreclr_args.arch + ".Release")

    # We'll use repo script to install dotnet
    dotnet_install_script_name = "dotnet-install.cmd" if is_windows else "dotnet-install.sh"
    dotnet_install_script_path = path.join(source_directory, "eng", "common",
                                           dotnet_install_script_name)

    # skip_cleanup=True: keep the temp tree around for post-mortem inspection.
    with TempDir(skip_cleanup=True) as temp_location:
        print("Executing in " + temp_location)

        # install dotnet 5.0
        run_command([dotnet_install_script_path, "-Version", "5.0.3"],
                    temp_location, _exit_on_fail=True)
        os.environ['DOTNET_MULTILEVEL_LOOKUP'] = '0'
        os.environ['DOTNET_SKIP_FIRST_TIME_EXPERIENCE'] = '1'
        dotnet_path = path.join(source_directory, ".dotnet")
        dotnet_exe = path.join(dotnet_path, "dotnet.exe") if is_windows else path.join(
            dotnet_path, "dotnet")
        run_command([dotnet_exe, "--info"], temp_location, _exit_on_fail=True)
        os.environ['DOTNET_ROOT'] = dotnet_path

        ## install crank as local tool
        run_command([
            dotnet_exe, "tool", "install", "Microsoft.Crank.Controller",
            "--version", "0.2.0-*", "--tool-path", temp_location
        ], _exit_on_fail=True)

        ## ideally just do sparse clone, but this doesn't work locally
        ## git clone --filter=blob:none --no-checkout https://github.com/aspnet/benchmarks
        ## cd benchmarks
        ## git sparse-checkout init --cone
        ## git sparse-checkout set scenarios

        ## could probably just pass a URL and avoid this
        run_command([
            "git", "clone", "--quiet", "--depth", "1",
            "https://github.com/aspnet/benchmarks"
        ], temp_location, _exit_on_fail=True)

        crank_app = path.join(temp_location, "crank")
        mcs_path = determine_mcs_tool_path(coreclr_args)
        superpmi_path = determine_superpmi_tool_path(coreclr_args)

        # todo: add grpc/signalr, perhaps
        configname_scenario_list = [("platform", "plaintext"),
                                    ("json", "json"),
                                    ("plaintext", "mvc"),
                                    ("database", "fortunes_dapper"),
                                    ("database", "fortunes_ef_mvc_https"),
                                    ("proxy", "proxy-yarp"),
                                    ("staticfiles", "static")]

        # configname_scenario_list = [("platform", "plaintext")]

        # note tricks to get one element tuples
        runtime_options_list = [("Dummy=0", ), ("TieredCompilation=0", ),
                                ("TieredPGO=1", "TC_QuickJitForLoops=1"),
                                ("TieredPGO=1", "TC_QuickJitForLoops=1",
                                 "ReadyToRun=0")]

        # runtime_options_list = [("TieredCompilation=0", )]

        mch_file = path.join(
            coreclr_args.output_mch_path,
            "aspnet.run." + target_os + "." + target_arch + ".checked.mch")
        benchmark_machine = determine_benchmark_machine(coreclr_args)

        jitname = determine_native_name(coreclr_args, "clrjit", target_os)
        coreclrname = determine_native_name(coreclr_args, "coreclr", target_os)
        spminame = determine_native_name(coreclr_args, "superpmi-shim-collector",
                                         target_os)
        corelibname = "System.Private.CoreLib.dll"

        # jitpath is relative (".") because the shim resolves it on the remote
        # benchmark machine's working directory, not this one.
        jitpath = path.join(".", jitname)
        jitlib = path.join(checked_root, jitname)
        coreclr = path.join(release_root, coreclrname)
        corelib = path.join(release_root, corelibname)
        spmilib = path.join(checked_root, spminame)

        for (configName, scenario) in configname_scenario_list:
            configYml = configName + ".benchmarks.yml"
            configFile = path.join(temp_location, "benchmarks", "scenarios",
                                   configYml)

            crank_arguments = [
                "--config", configFile,
                "--profile", benchmark_machine,
                "--scenario", scenario,
                "--application.framework", "net7.0",
                "--application.channel", "edge",
                "--application.sdkVersion", "latest",
                "--application.environmentVariables", "COMPlus_JitName=" + spminame,
                "--application.environmentVariables", "SuperPMIShimLogPath=.",
                "--application.environmentVariables", "SuperPMIShimPath=" + jitpath,
                "--application.environmentVariables", "COMPlus_EnableExtraSuperPmiQueries=1",
                "--application.options.downloadFiles", "*.mc",
                "--application.options.displayOutput", "true",
                # "--application.options.dumpType", "full",
                # "--application.options.fetch", "true",
                "--application.options.outputFiles", spmilib,
                "--application.options.outputFiles", jitlib,
                "--application.options.outputFiles", coreclr,
                "--application.options.outputFiles", corelib
            ]

            for runtime_options in runtime_options_list:
                # Each option becomes its own COMPlus_ environment variable.
                runtime_arguments = []
                for runtime_option in runtime_options:
                    runtime_arguments.append("--application.environmentVariables")
                    runtime_arguments.append("COMPlus_" + runtime_option)

                print("")
                print("================================")
                print("Config: " + configName + " scenario: " + scenario +
                      " options: " + " ".join(runtime_options))
                print("================================")
                print("")

                description = [
                    "--description",
                    configName + "-" + scenario + "-" + "-".join(runtime_options)
                ]
                subprocess.run([crank_app] + crank_arguments + description +
                               runtime_arguments, cwd=temp_location)

        # merge
        command = [mcs_path, "-merge", "temp.mch", "*.mc", "-dedup", "-thin"]
        run_command(command, temp_location)

        # clean: replay everything once and record failing contexts in fail.mcl
        command = [
            superpmi_path, "-v", "ewmi", "-f", "fail.mcl", jitlib, "temp.mch"
        ]
        run_command(command, temp_location)

        # strip
        if is_nonzero_length_file("fail.mcl"):
            print("Replay had failures, cleaning...")
            # NOTE(review): fail_file is assigned but never used — confirm
            # whether fail.mcl was meant to be copied to output_mch_path.
            fail_file = path.join(coreclr_args.output_mch_path, "fail.mcl")
            command = [mcs_path, "-strip", "fail.mcl", "temp.mch", mch_file]
            run_command(command, temp_location)
        else:
            print("Replay was clean...")
            shutil.copy2("temp.mch", mch_file)

        # index
        command = [mcs_path, "-toc", mch_file]
        run_command(command, temp_location)

        # overall summary
        print("Merged summary for " + mch_file)
        command = [mcs_path, "-jitflags", mch_file]
        run_command(command, temp_location)
def main(main_args):
    """Main entrypoint: build the exploratory-fuzzing correlation payload.

    Copies the SuperPMI scripts and CORE_ROOT into the payload directory,
    clones and publishes the requested tool (Antigen or Fuzzlyn), and sets
    the pipeline variables Helix needs.

    Args:
        main_args ([type]): Arguments to the script
    """
    coreclr_args = setup_args(main_args)

    arch_name = coreclr_args.arch
    os_name = "win" if coreclr_args.platform.lower() == "windows" else "linux"
    run_configuration = "{}-{}".format(os_name, arch_name)
    source_directory = coreclr_args.source_directory

    # CorrelationPayload directories
    correlation_payload_directory = path.join(coreclr_args.source_directory, "payload")
    scripts_src_directory = path.join(source_directory, "src", "coreclr", 'scripts')
    coreroot_directory = path.join(correlation_payload_directory, "CoreRoot")
    dst_directory = path.join(correlation_payload_directory, "exploratory")

    helix_source_prefix = "official"
    creator = ""

    repo_urls = {
        "Antigen": "https://github.com/kunalspathak/Antigen.git",
        "Fuzzlyn": "https://github.com/jakobbotsch/Fuzzlyn.git",
    }

    # tool_name is verifed in setup_args
    assert coreclr_args.tool_name in repo_urls
    repo_url = repo_urls[coreclr_args.tool_name]

    # create exploratory directory
    print('Copying {} -> {}'.format(scripts_src_directory, coreroot_directory))
    # Lambda parameters renamed from `path` to avoid shadowing the os.path alias.
    copy_directory(scripts_src_directory, coreroot_directory,
                   match_func=lambda file_name: any(
                       file_name.endswith(extension) for extension in [".py"]))

    if is_windows:
        acceptable_copy = lambda file_name: any(
            file_name.endswith(extension)
            for extension in [".py", ".dll", ".exe", ".json"])
    else:
        # Need to accept files without any extension, which is how executable file's names look.
        acceptable_copy = lambda file_name: (os.path.basename(file_name).find(
            ".") == -1) or any(
                file_name.endswith(extension)
                for extension in [".py", ".dll", ".so", ".json", ".a"])

    # copy CORE_ROOT
    print('Copying {} -> {}'.format(coreclr_args.core_root_directory, coreroot_directory))
    copy_directory(coreclr_args.core_root_directory, coreroot_directory,
                   match_func=acceptable_copy)

    try:
        with TempDir() as tool_code_directory:
            # clone the tool
            run_command([
                "git", "clone", "--quiet", "--depth", "1", repo_url,
                tool_code_directory
            ])

            publish_dir = path.join(tool_code_directory, "publish")

            # build the tool
            with ChangeDir(tool_code_directory):
                dotnet_cmd = os.path.join(source_directory, "dotnet.cmd")
                if not is_windows:
                    dotnet_cmd = os.path.join(source_directory, "dotnet.sh")
                run_command([
                    dotnet_cmd, "publish", "-c", "Release", "--self-contained",
                    "-r", run_configuration, "-o", publish_dir
                ], _exit_on_fail=True)

            dll_name = coreclr_args.tool_name + ".dll"
            if not os.path.exists(path.join(publish_dir, dll_name)):
                raise FileNotFoundError("{} not present at {}".format(
                    dll_name, publish_dir))

            # copy tool
            print('Copying {} -> {}'.format(publish_dir, dst_directory))
            copy_directory(publish_dir, dst_directory, match_func=acceptable_copy)
    except PermissionError as pe:
        # Bug fix: print() does not apply %-style formatting, so the original
        # `print("Skipping file. Got error: %s", pe)` printed a literal "%s".
        print("Skipping file. Got error: {}".format(pe))

    # create foo.txt in work_item directories
    workitem_directory = path.join(source_directory, "workitem")
    os.mkdir(workitem_directory)
    foo_txt = os.path.join(workitem_directory, "foo.txt")
    with open(foo_txt, "w") as foo_txt_file:
        foo_txt_file.write("hello world!")

    # Set variables
    print('Setting pipeline variables:')
    set_pipeline_variable("CorrelationPayloadDirectory", correlation_payload_directory)
    set_pipeline_variable("WorkItemDirectory", workitem_directory)
    set_pipeline_variable("RunConfiguration", run_configuration)
    set_pipeline_variable("Creator", creator)
    set_pipeline_variable("HelixSourcePrefix", helix_source_prefix)
def main(main_args):
    """Main entrypoint: replay SuperPMI collections once per JIT stress flag.

    Downloads the collections, runs superpmi.py replay for every flag in
    `jit_flags`, consolidates all per-flag logs into one file, and returns a
    non-zero exit code when any replay failed.

    Args:
        main_args ([type]): Arguments to the script
    """
    python_path = sys.executable
    cwd = os.path.dirname(os.path.realpath(__file__))
    coreclr_args = setup_args(main_args)
    spmi_location = os.path.join(cwd, "artifacts", "spmi")
    log_directory = coreclr_args.log_directory
    platform_name = coreclr_args.platform
    os_name = "win" if platform_name.lower() == "windows" else "unix"
    arch_name = coreclr_args.arch
    host_arch_name = "x64" if arch_name.endswith("64") else "x86"
    # arm/arm64 JITs ship as "universal" cross-targeting binaries.
    os_name = "universal" if arch_name.startswith("arm") else os_name
    jit_path = os.path.join(
        coreclr_args.jit_directory,
        'clrjit_{}_{}_{}.dll'.format(os_name, arch_name, host_arch_name))

    print("Running superpmi.py download")
    run_command([
        python_path, os.path.join(cwd, "superpmi.py"), "download",
        "--no_progress", "-target_os", platform_name, "-target_arch",
        arch_name, "-core_root", cwd, "-spmi_location", spmi_location
    ], _exit_on_fail=True)

    failed_runs = []
    for jit_flag in jit_flags:
        log_file = os.path.join(
            log_directory, 'superpmi_{}.log'.format(jit_flag.replace("=", "_")))
        print("Running superpmi.py replay for {}".format(jit_flag))

        _, _, return_code = run_command([
            python_path, os.path.join(cwd, "superpmi.py"), "replay",
            "-core_root", cwd, "-jitoption", jit_flag, "-jitoption",
            "TieredCompilation=0", "-target_os", platform_name, "-target_arch",
            arch_name, "-arch", host_arch_name, "-jit_path", jit_path,
            "-spmi_location", spmi_location, "-log_level", "debug",
            "-log_file", log_file
        ])

        if return_code != 0:
            failed_runs.append("Failure in {}".format(log_file))

    # Consolidate all superpmi_*.logs in superpmi_platform_architecture.log
    final_log_name = os.path.join(
        log_directory, "superpmi_{}_{}.log".format(platform_name, arch_name))
    print("Consolidating final {}".format(final_log_name))
    with open(final_log_name, "a") as final_superpmi_log:
        for superpmi_log in os.listdir(log_directory):
            if not superpmi_log.startswith(
                    "superpmi_Jit") or not superpmi_log.endswith(".log"):
                continue

            print("Appending {}".format(superpmi_log))
            final_superpmi_log.write(
                "======================================================={}".
                format(os.linesep))
            final_superpmi_log.write("Contents from {}{}".format(
                superpmi_log, os.linesep))
            final_superpmi_log.write(
                "======================================================={}".
                format(os.linesep))
            with open(os.path.join(log_directory, superpmi_log),
                      "r") as current_superpmi_log:
                contents = current_superpmi_log.read()
                final_superpmi_log.write(contents)

        # Log failures summary
        if len(failed_runs) > 0:
            final_superpmi_log.write(os.linesep)
            final_superpmi_log.write(os.linesep)
            # Bug fix: the original string had no "{}" placeholder, so
            # .format(os.linesep) was a no-op and the header ran into the
            # first failure line. Add the placeholder as clearly intended.
            final_superpmi_log.write(
                "========Failed runs summary========{}".format(os.linesep))
            final_superpmi_log.write(os.linesep.join(failed_runs))

    return 0 if len(failed_runs) == 0 else 1
def main(main_args):
    """ Main entrypoint: prepare the SuperPMI collection correlation payload.

    Copies the SuperPMI scripts and CORE_ROOT to the payload directory,
    picks the Helix queue for the target arch/OS, optionally clones/builds
    jitutils (for pmi/crossgen collections) or sets up microbenchmarks, then
    partitions the input assemblies and publishes pipeline variables.

    Args:
        main_args ([type]): Arguments to the script
    """
    coreclr_args = setup_args(main_args)
    source_directory = coreclr_args.source_directory

    # CorrelationPayload directories
    correlation_payload_directory = os.path.join(coreclr_args.source_directory, "payload")
    superpmi_src_directory = os.path.join(source_directory, 'src', 'coreclr', 'scripts')
    superpmi_dst_directory = os.path.join(correlation_payload_directory, "superpmi")
    arch = coreclr_args.arch
    helix_source_prefix = "official"
    creator = ""
    # NOTE(review): `ci` is assigned but never used in this function.
    ci = True
    if is_windows:
        helix_queue = "Windows.10.Arm64" if arch == "arm64" else "Windows.10.Amd64.X86.Rt"
    else:
        if arch == "arm":
            helix_queue = "(Ubuntu.1804.Arm32)[email protected]/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm32v7-bfcd90a-20200121150440"
        elif arch == "arm64":
            helix_queue = "(Ubuntu.1804.Arm64)[email protected]/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm64v8-20210531091519-97d8652"
        else:
            helix_queue = "Ubuntu.1804.Amd64"

    # create superpmi directory
    print('Copying {} -> {}'.format(superpmi_src_directory, superpmi_dst_directory))
    copy_directory(superpmi_src_directory, superpmi_dst_directory,
                   match_func=lambda path: any(
                       path.endswith(extension) for extension in [".py"]))

    if is_windows:
        acceptable_copy = lambda path: any(
            path.endswith(extension)
            for extension in [".py", ".dll", ".exe", ".json"])
    else:
        # Need to accept files without any extension, which is how executable file's names look.
        acceptable_copy = lambda path: (os.path.basename(path).find(
            ".") == -1) or any(
                path.endswith(extension)
                for extension in [".py", ".dll", ".so", ".json"])

    print('Copying {} -> {}'.format(coreclr_args.core_root_directory,
                                    superpmi_dst_directory))
    copy_directory(coreclr_args.core_root_directory, superpmi_dst_directory,
                   match_func=acceptable_copy)

    # Copy all the test files to CORE_ROOT
    # The reason is there are lot of dependencies with *.Tests.dll and to ensure we do not get
    # Reflection errors, just copy everything to CORE_ROOT so for all individual partitions, the
    # references will be present in CORE_ROOT.
    if coreclr_args.collection_name == "libraries_tests":
        print('Copying {} -> {}'.format(coreclr_args.input_directory,
                                        superpmi_dst_directory))

        def make_readable(folder_name):
            """Recursively set mode 744 (rwxr--r--) on everything under `folder_name`.

            Args:
                folder_name (string): folder to mark with 744
            """
            if is_windows:
                return
            print("Inside make_readable")
            run_command(["ls", "-l", folder_name])
            for file_path, dirs, files in os.walk(folder_name, topdown=True):
                for d in dirs:
                    os.chmod(
                        os.path.join(file_path, d),
                        # read+write+execute for owner
                        (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) |
                        # read for group
                        (stat.S_IRGRP) |
                        # read for other
                        (stat.S_IROTH))
                for f in files:
                    os.chmod(
                        os.path.join(file_path, f),
                        # read+write+execute for owner
                        (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) |
                        # read for group
                        (stat.S_IRGRP) |
                        # read for other
                        (stat.S_IROTH))
            run_command(["ls", "-l", folder_name])

        make_readable(coreclr_args.input_directory)
        copy_directory(coreclr_args.input_directory, superpmi_dst_directory,
                       match_func=acceptable_copy)

    # Workitem directories
    workitem_directory = os.path.join(source_directory, "workitem")
    input_artifacts = ""

    if coreclr_args.collection_name == "benchmarks":
        # Setup microbenchmarks
        setup_microbenchmark(workitem_directory, arch)
    else:
        # Setup for pmi/crossgen runs

        # Clone and build jitutils
        try:
            with TempDir() as jitutils_directory:
                run_command([
                    "git", "clone", "--quiet", "--depth", "1",
                    "https://github.com/dotnet/jitutils", jitutils_directory
                ])

                # Make sure ".dotnet" directory exists, by running the script at least once
                dotnet_script_name = "dotnet.cmd" if is_windows else "dotnet.sh"
                dotnet_script_path = os.path.join(source_directory, dotnet_script_name)
                run_command([dotnet_script_path, "--info"], jitutils_directory)

                # Set dotnet path to run build
                os.environ["PATH"] = os.path.join(
                    source_directory, ".dotnet") + os.pathsep + os.environ["PATH"]
                build_file = "build.cmd" if is_windows else "build.sh"
                run_command(
                    [os.path.join(jitutils_directory, build_file), "-p"],
                    jitutils_directory)

                copy_files(
                    os.path.join(jitutils_directory, "bin"),
                    superpmi_dst_directory,
                    [os.path.join(jitutils_directory, "bin", "pmi.dll")])
        except PermissionError as pe_error:
            # Details: https://bugs.python.org/issue26660
            print('Ignoring PermissionError: {0}'.format(pe_error))

        # NOTE: we can't use the build machine ".dotnet" to run on all platforms. E.g., the Windows x86 build uses a
        # Windows x64 .dotnet\dotnet.exe that can't load a 32-bit shim. Thus, we always use corerun from Core_Root to invoke crossgen2.
        # The following will copy .dotnet to the correlation payload in case we change our mind, and need or want to use it for some scenarios.

        # # Copy ".dotnet" to correlation_payload_directory for crossgen2 job; it is needed to invoke crossgen2.dll
        # if coreclr_args.collection_type == "crossgen2":
        #     dotnet_src_directory = os.path.join(source_directory, ".dotnet")
        #     dotnet_dst_directory = os.path.join(correlation_payload_directory, ".dotnet")
        #     print('Copying {} -> {}'.format(dotnet_src_directory, dotnet_dst_directory))
        #     copy_directory(dotnet_src_directory, dotnet_dst_directory, verbose_output=False)

        # payload
        pmiassemblies_directory = os.path.join(workitem_directory, "pmiAssembliesDirectory")
        input_artifacts = os.path.join(pmiassemblies_directory, coreclr_args.collection_name)
        exclude_directory = [
            'Core_Root'
        ] if coreclr_args.collection_name == "coreclr_tests" else []
        # NOTE(review): if native_binaries_to_ignore is a list, the `+=` below
        # extends that module-level list in place — harmless for a single run,
        # but confirm if main() can ever be invoked twice in one process.
        exclude_files = native_binaries_to_ignore
        if coreclr_args.collection_type == "crossgen2":
            print('Adding exclusions for crossgen2')
            # Currently, trying to crossgen2 R2RTest\Microsoft.Build.dll causes a pop-up failure, so exclude it.
            exclude_files += ["Microsoft.Build.dll"]

        if coreclr_args.collection_name == "libraries_tests":
            # libraries_tests artifacts contains files from core_root folder. Exclude them.
            core_root_dir = coreclr_args.core_root_directory
            exclude_files += [
                item for item in os.listdir(core_root_dir)
                if os.path.isfile(os.path.join(core_root_dir, item)) and (
                    item.endswith(".dll") or item.endswith(".exe"))
            ]

        partition_files(coreclr_args.input_directory, input_artifacts,
                        coreclr_args.max_size, exclude_directory, exclude_files)

    # Set variables
    print('Setting pipeline variables:')
    set_pipeline_variable("CorrelationPayloadDirectory", correlation_payload_directory)
    set_pipeline_variable("WorkItemDirectory", workitem_directory)
    set_pipeline_variable("InputArtifacts", input_artifacts)
    set_pipeline_variable("Python", ' '.join(get_python_name()))
    set_pipeline_variable("Architecture", arch)
    set_pipeline_variable("Creator", creator)
    set_pipeline_variable("Queue", helix_queue)
    set_pipeline_variable("HelixSourcePrefix", helix_source_prefix)
    set_pipeline_variable("MchFileTag", coreclr_args.mch_file_tag)
def build_and_run(coreclr_args, output_mch_name):
    """Build the microbenchmarks and run them under "superpmi collect".

    Restores and builds MicroBenchmarks.csproj with the repo-local dotnet,
    generates a platform-specific wrapper script that runs the benchmark
    partition, then hands that script to superpmi.py collect.

    Args:
        coreclr_args (CoreClrArguments): Arguments used to drive the collection
            (arch, core_root, superpmi/performance directories, partitioning).
        output_mch_name (string): Name of output mch file to produce.
    """
    arch = coreclr_args.arch
    python_path = sys.executable
    core_root = coreclr_args.core_root
    superpmi_directory = coreclr_args.superpmi_directory
    performance_directory = coreclr_args.performance_directory
    log_file = coreclr_args.log_file
    partition_count = coreclr_args.partition_count
    partition_index = coreclr_args.partition_index
    dotnet_directory = os.path.join(performance_directory, "tools", "dotnet", arch)
    dotnet_exe = os.path.join(dotnet_directory, "dotnet")

    artifacts_directory = os.path.join(performance_directory, "artifacts")
    artifacts_packages_directory = os.path.join(artifacts_directory, "packages")
    project_file = path.join(performance_directory, "src", "benchmarks",
                             "micro", "MicroBenchmarks.csproj")
    benchmarks_dll = path.join(artifacts_directory, "MicroBenchmarks.dll")

    # The generated script expands JitName with the shell's own syntax, so the
    # reference differs per platform.
    if is_windows:
        shim_name = "%JitName%"
        corerun_exe = "CoreRun.exe"
        script_name = "run_microbenchmarks.bat"
    else:
        shim_name = "$JitName"
        corerun_exe = "corerun"
        script_name = "run_microbenchmarks.sh"

    make_executable(dotnet_exe)

    run_command([
        dotnet_exe, "restore", project_file, "--packages",
        artifacts_packages_directory
    ], _exit_on_fail=True)
    run_command([
        dotnet_exe, "build", project_file, "--configuration", "Release",
        "--framework", "net7.0", "--no-restore",
        "/p:NuGetPackageRoot=" + artifacts_packages_directory, "-o",
        artifacts_directory
    ], _exit_on_fail=True)

    # Disable ReadyToRun so we always JIT R2R methods and collect them
    collection_command = f"{dotnet_exe} {benchmarks_dll} --filter \"*\" --corerun {path.join(core_root, corerun_exe)} --partition-count {partition_count} " \
                         f"--partition-index {partition_index} --envVars COMPlus_JitName:{shim_name} " \
                         " COMPlus_ZapDisable:1  COMPlus_ReadyToRun:0 " \
                         "--iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart"

    # Generate the execution script in Temp location
    with TempDir() as temp_location:
        script_name = path.join(temp_location, script_name)

        contents = []
        # Unset the JitName so dotnet process will not fail
        if is_windows:
            contents.append("set JitName=%COMPlus_JitName%")
            contents.append("set COMPlus_JitName=")
        else:
            contents.append("#!/bin/bash")
            contents.append("export JitName=$COMPlus_JitName")
            contents.append("unset COMPlus_JitName")
        contents.append(f"pushd {performance_directory}")
        contents.append(collection_command)

        with open(script_name, "w") as collection_script:
            collection_script.write(os.linesep.join(contents))

        print()
        print(f"{script_name} contents:")
        print("******************************************")
        print(os.linesep.join(contents))
        print("******************************************")

        make_executable(script_name)

        run_command([
            python_path, path.join(superpmi_directory, "superpmi.py"),
            "collect", "-core_root", core_root, "-output_mch_path",
            output_mch_name, "-log_file", log_file, "-log_level", "debug",
            script_name
        ], _exit_on_fail=True)
def main(main_args):
    """Main entrypoint: run the Fuzzlyn fuzzer and upload results.

    Starts Fuzzlyn writing JSON events into a summary file while a
    ReduceExamples background thread tails the same file and reduces
    interesting examples, then uploads the summary and a zip of all issues.

    Args:
        main_args ([type]): Arguments to the script
    """
    coreclr_args = setup_args(main_args)

    fuzzlyn_directory = coreclr_args.fuzzlyn_directory
    core_root = coreclr_args.core_root
    tag_name = "{}-{}".format(coreclr_args.run_configuration, coreclr_args.partition)
    output_directory = coreclr_args.output_directory
    if not coreclr_args.run_duration:
        run_duration = 60 * 60  # 60 minutes by default
    else:
        run_duration = int(
            coreclr_args.run_duration) * 60  # Run for duration in seconds

    path_to_corerun = os.path.join(core_root, "corerun")
    path_to_tool = os.path.join(fuzzlyn_directory, "Fuzzlyn")
    if is_windows:
        path_to_corerun += ".exe"
        path_to_tool += ".exe"

    os.makedirs(output_directory, exist_ok=True)

    with TempDir() as temp_location:
        summary_file_name = "issues-summary-{}.txt".format(tag_name)
        summary_file_path = path.join(temp_location, summary_file_name)
        # Pre-create the summary file so the reducer thread can open it for
        # reading before Fuzzlyn starts appending events to it.
        with open(summary_file_path, 'w'):
            pass

        upload_fuzzer_output_path = path.join(
            output_directory, "Fuzzlyn-{}.log".format(tag_name))
        with open(summary_file_path, 'r') as fp:
            # The reducer tails `fp` concurrently while Fuzzlyn writes to the
            # same path; exit_evt tells it to stop once the fuzzer is done.
            exit_evt = threading.Event()
            reduce_examples = ReduceExamples(fp, temp_location, path_to_tool,
                                             path_to_corerun, exit_evt)
            reduce_examples.start()

            run_command([
                path_to_tool,
                "--seconds-to-run", str(run_duration),
                "--output-events-to", summary_file_path,
                "--host", path_to_corerun,
                "--parallelism", "-1"
            ], _exit_on_fail=True, _output_file=upload_fuzzer_output_path)

            exit_evt.set()
            reduce_examples.join()

        upload_summary_file_path = path.join(output_directory, summary_file_name)
        print("Copying summary: {} -> {}".format(summary_file_path,
                                                 upload_summary_file_path))
        shutil.copy2(summary_file_path, upload_summary_file_path)

        upload_issues_zip_path = path.join(output_directory,
                                           "AllIssues-{}".format(tag_name))
        print("Creating zip {}.zip".format(upload_issues_zip_path))
        shutil.make_archive(upload_issues_zip_path, 'zip', temp_location)