Example #1
def setup_microbenchmark(workitem_directory, arch):
    """ Perform setup of microbenchmarks

    Args:
        workitem_directory (string): Path to the work item directory
        arch (string): Architecture for which dotnet will be installed
    """
    performance_directory = os.path.join(workitem_directory, "performance")

    run_command([
        "git", "clone", "--quiet", "--depth", "1",
        "https://github.com/dotnet/performance", performance_directory
    ])

    with ChangeDir(performance_directory):
        dotnet_directory = os.path.join(performance_directory, "tools",
                                        "dotnet", arch)
        dotnet_install_script = os.path.join(performance_directory, "scripts",
                                             "dotnet.py")

        if not os.path.isfile(dotnet_install_script):
            print("Missing " + dotnet_install_script)
            return

        run_command(get_python_name() + [
            dotnet_install_script, "install", "--architecture", arch,
            "--install-dir", dotnet_directory, "--verbose"
        ])
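
Every example on this page funnels subprocess execution through the same run_command helper, and Example #1 also calls get_python_name. Their definitions are not part of the snippets, but the call sites pin down the shape: run_command returns a (stdout, stderr, return_code) tuple with stdout as bytes (see mcs_out.decode in Example #3), takes an optional working-directory positional, and accepts _exit_on_fail, _output_file and _env keywords; get_python_name returns an argv prefix list. A minimal sketch under those assumptions, not the repo's actual implementation:

import os
import subprocess
import sys

def get_python_name():
    """Return the python launcher as an argv prefix list, so callers can
    write get_python_name() + [script, ...]."""
    return ["py", "-3"] if os.name == "nt" else ["python3"]

def run_command(command_to_run, _cwd=None, _exit_on_fail=False,
                _output_file=None, _env=None):
    """Run a command, echo it, and return (stdout, stderr, return_code)."""
    print("Running: " + " ".join(command_to_run))
    proc = subprocess.Popen(command_to_run, cwd=_cwd, env=_env,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()  # bytes in both cases
    if _output_file is not None:
        with open(_output_file, "ab") as output:
            output.write(stdout)
    if _exit_on_fail and proc.returncode != 0:
        print("Command failed with exit code {}".format(proc.returncode))
        sys.exit(1)
    return stdout, stderr, proc.returncode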
Example #2
        def make_readable(folder_name):
            """Recursively mark a folder's contents readable (mode 744)

            Args:
                folder_name (string): folder to mark with 744
            """
            if is_windows:
                return

            print("Inside make_readable")
            run_command(["ls", "-l", folder_name])
            for file_path, dirs, files in os.walk(folder_name, topdown=True):
                for d in dirs:
                    os.chmod(
                        os.path.join(file_path, d),
                        # read+write+execute for owner
                        (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) |
                        # read for group
                        (stat.S_IRGRP) |
                        # read for other
                        (stat.S_IROTH))

                for f in files:
                    os.chmod(
                        os.path.join(file_path, f),
                        # read+write+execute for owner
                        (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) |
                        # read for group
                        (stat.S_IRGRP) |
                        # read for other
                        (stat.S_IROTH))
            run_command(["ls", "-l", folder_name])
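
For reference, the flag combination used above composes to octal mode 744 (rwxr--r--); a quick sanity check:

import stat
assert (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
        | stat.S_IRGRP | stat.S_IROTH) == 0o744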
Example #3
def strip_unrelated_mc(coreclr_args, old_mch_filename, new_mch_filename):
    """Perform the post processing of produced .mch file by stripping the method contexts
    that are specific to BenchmarkDotnet boilerplate code and hard

    Args:
        coreclr_args (CoreclrArguments): Arguments
        old_mch_filename (string): Name of source .mch file
        new_mch_filename (string): Name of the new .mch file produced by post-processing.
    """
    performance_directory = coreclr_args.performance_directory
    core_root = coreclr_args.core_root
    methods_to_strip_list = os.path.join(performance_directory,
                                         "methods_to_strip.mcl")

    mcs_exe = os.path.join(core_root, "mcs")
    mcs_command = [mcs_exe, "-dumpMap", old_mch_filename]

    # Gather method list to strip
    (mcs_out, _, return_code) = run_command(mcs_command)
    if return_code != 0:
        # If strip command fails, then just copy the old_mch to new_mch
        print(
            f"-dumpMap failed. Copying {old_mch_filename} to {new_mch_filename}."
        )
        copyfile(old_mch_filename, new_mch_filename)
        copyfile(old_mch_filename + ".mct", new_mch_filename + ".mct")
        return

    method_context_list = mcs_out.decode("utf-8").split(os.linesep)
    filtered_context_list = []

    match_pattern = re.compile(r'^(\d+),(BenchmarkDotNet|Perfolizer)')
    print("Method indices to strip:")
    for mc_entry in method_context_list:
        matched = match_pattern.match(mc_entry)
        if matched:
            print(matched.group(1))
            filtered_context_list.append(matched.group(1))
    print(f"Total {len(filtered_context_list)} methods.")

    with open(methods_to_strip_list, "w") as f:
        f.write('\n'.join(filtered_context_list))

    # Strip and produce the new .mch file
    if run_command([
            mcs_exe, "-strip", methods_to_strip_list, old_mch_filename,
            new_mch_filename
    ])[2] != 0:
        # If strip command fails, then just copy the old_mch to new_mch
        print(
            f"-strip failed. Copying {old_mch_filename} to {new_mch_filename}."
        )
        copyfile(old_mch_filename, new_mch_filename)
        copyfile(old_mch_filename + ".mct", new_mch_filename + ".mct")
        return

    # Create toc file
    run_command([mcs_exe, "-toc", new_mch_filename])
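
The -dumpMap output itself is not shown, but the regex implies one "index,qualified-name" row per method context. A tiny self-contained illustration with made-up rows:

import re

match_pattern = re.compile(r'^(\d+),(BenchmarkDotNet|Perfolizer)')
sample = [
    "12,BenchmarkDotNet.Autogenerated.Runnable_0:Run",  # hypothetical row
    "13,MyBenchmarks.Md5:Compute",                      # hypothetical row
]
to_strip = [m.group(1) for m in map(match_pattern.match, sample) if m]
assert to_strip == ["12"]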
Example #4
def main(main_args):
    """Main entrypoint

    Args:
        main_args (list): Arguments to the script
    """

    coreclr_args = setup_args(main_args)

    antigen_directory = coreclr_args.antigen_directory
    core_root = coreclr_args.core_root
    tag_name = "{}-{}".format(coreclr_args.run_configuration,
                              coreclr_args.partition)
    output_directory = coreclr_args.output_directory
    run_duration = coreclr_args.run_duration
    if not run_duration:
        run_duration = 60

    path_to_corerun = os.path.join(core_root, "corerun")
    path_to_tool = os.path.join(antigen_directory, "Antigen")
    if is_windows:
        path_to_corerun += ".exe"
        path_to_tool += ".exe"

    if not is_windows:
        # Disable core dumps. The fuzzers have their own graceful handling for
        # runtime crashes. Especially on macOS we can quickly fill up the drive
        # with dumps if we find lots of crashes since dumps there are very big.
        import resource
        resource.setrlimit(resource.RLIMIT_CORE, (0, 0))

    try:
        # Run tool such that issues are placed in a temp folder
        with TempDir() as temp_location:
            antigen_log = path.join(temp_location,
                                    get_antigen_filename(tag_name))
            run_command([
                path_to_tool, "-c", path_to_corerun, "-o", temp_location, "-d",
                str(run_duration)
            ],
                        _exit_on_fail=True,
                        _output_file=antigen_log)

            # Copy issues for upload
            print("Copying issues to " + output_directory)
            copy_issues(temp_location, output_directory, tag_name)
    except PermissionError as pe:
        print("Got error: %s", pe)
Example #5
    def run(self):
        num_reduced = 0
        while not self.exit_evt.wait(0.5):
            try:
                new_line = self.examples_file.readline()
            except ValueError:
                # File closed, means other thread exited (probably ctrl-C)
                return

            if new_line:
                evt = json.loads(new_line)
                # Only reduce BadResult examples since crashes take very long to reduce.
                # We will still report crashes, just not with a reduced example.
                if evt["Kind"] == "ExampleFound":
                    ex = evt["Example"]
                    ex_assert_err = None

                    reduce_this = False
                    if ex["Kind"] == "BadResult":
                        reduce_this = True
                    elif ex["Kind"] == "HitsJitAssert":
                        ex_assert_err = extract_jit_assertion_error(
                            ex["Message"])
                        reduce_this = ex_assert_err is not None and ex_assert_err not in self.reduced_jit_asserts

                    if reduce_this:
                        print("Reducing {}".format(ex['Seed']))
                        output_path = path.join(self.examples_dir,
                                                str(ex["Seed"]) + ".cs")
                        cmd = [
                            self.fuzzlyn_path, "--host", self.host_path,
                            "--reduce", "--seed",
                            str(ex['Seed']), "--output", output_path
                        ]
                        run_command(cmd)
                        if path.exists(output_path):
                            num_reduced += 1
                            if num_reduced >= 5:
                                print(
                                    "Skipping reduction of remaining examples (reached limit of 5)"
                                )
                                return

                            if ex_assert_err is not None:
                                self.reduced_jit_asserts.add(ex_assert_err)
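
extract_jit_assertion_error is not included in the snippet; from its use it takes the example's crash message and returns the assertion text (or None), which self.reduced_jit_asserts then deduplicates so each distinct JIT assert is reduced only once. A hypothetical sketch; the exact message format is an assumption:

import re

def extract_jit_assertion_error(message):
    """Pull the condition out of a JIT 'Assertion failed' message, if any."""
    match = re.search(r"Assertion failed '(.*?)'", message)  # assumed format
    return match.group(1) if match else None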
Example #6
def make_executable(file_name):
    """Make file executable by changing the permission

    Args:
        file_name (string): file to execute
    """
    if is_windows:
        return

    print("Inside make_executable")
    run_command(["ls", "-l", file_name])
    os.chmod(file_name,
             # read+execute for owner
             (stat.S_IRUSR | stat.S_IXUSR) |
             # read+execute for group
             (stat.S_IRGRP | stat.S_IXGRP) |
             # read+execute for other
             (stat.S_IROTH | stat.S_IXOTH))
    run_command(["ls", "-l", file_name])
Example #7
def main(main_args):
    """Main entrypoint

    Args:
        main_args (list): Arguments to the script
    """

    coreclr_args = setup_args(main_args)

    antigen_directory = coreclr_args.antigen_directory
    core_root = coreclr_args.core_root
    tag_name = "{}-{}".format(coreclr_args.run_configuration,
                              coreclr_args.partition)
    output_directory = coreclr_args.output_directory
    run_duration = coreclr_args.run_duration
    if not run_duration:
        run_duration = 60

    path_to_corerun = os.path.join(core_root, "corerun")
    path_to_tool = os.path.join(antigen_directory, "Antigen")
    if is_windows:
        path_to_corerun += ".exe"
        path_to_tool += ".exe"

    try:
        # Run tool such that issues are placed in a temp folder
        with TempDir() as temp_location:
            antigen_log = path.join(temp_location,
                                    get_antigen_filename(tag_name))
            run_command([
                path_to_tool, "-c", path_to_corerun, "-o", temp_location, "-d",
                str(run_duration)
            ],
                        _exit_on_fail=True,
                        _output_file=antigen_log)

            # Copy issues for upload
            print("Copying issues to " + output_directory)
            copy_issues(temp_location, output_directory, tag_name)
    except PermissionError as pe:
        print("Got error: %s", pe)
Example #8
def main(main_args):
    """Main entrypoint

    Args:
        main_args (list): Arguments to the script
    """

    python_path = sys.executable
    cwd = os.path.dirname(os.path.realpath(__file__))
    coreclr_args = setup_args(main_args)
    spmi_location = os.path.join(cwd, "artifacts", "spmi")
    log_directory = coreclr_args.log_directory
    platform_name = coreclr_args.platform
    os_name = "win" if platform_name.lower() == "windows" else "unix"
    arch_name = coreclr_args.arch
    host_arch_name = "x64" if arch_name.endswith("64") else "x86"
    os_name = "universal" if arch_name.startswith("arm") else os_name
    jit_path = os.path.join(coreclr_args.jit_directory, 'clrjit_{}_{}_{}.dll'.format(os_name, arch_name, host_arch_name))

    print("Running superpmi.py download")
    run_command([python_path, os.path.join(cwd, "superpmi.py"), "download", "--no_progress", "-target_os", platform_name,
                 "-target_arch", arch_name, "-core_root", cwd, "-spmi_location", spmi_location], _exit_on_fail=True)

    failed_runs = []
    for jit_flag in jit_flags:
        log_file = os.path.join(log_directory, 'superpmi_{}.log'.format(jit_flag.replace("=", "_")))
        print("Running superpmi.py replay for {}".format(jit_flag))

        _, _, return_code = run_command([
            python_path,
            os.path.join(cwd, "superpmi.py"),
            "replay",
            "-core_root", cwd,
            "-jitoption", jit_flag,
            "-jitoption", "TieredCompilation=0",
            "-target_os", platform_name,
            "-target_arch", arch_name,
            "-arch", host_arch_name,
            "-jit_path", jit_path,
            "-spmi_location", spmi_location,
            "-log_level", "debug",
            "-log_file", log_file])

        if return_code != 0:
            failed_runs.append("Failure in {}".format(log_file))

    # Consolidate all superpmi_*.log files into superpmi_<platform>_<arch>.log
    final_log_name = os.path.join(log_directory, "superpmi_{}_{}.log".format(platform_name, arch_name))
    print("Consolidating final {}".format(final_log_name))
    with open(final_log_name, "a") as final_superpmi_log:
        for superpmi_log in os.listdir(log_directory):
            if not superpmi_log.startswith("superpmi_Jit") or not superpmi_log.endswith(".log"):
                continue

            print("Appending {}".format(superpmi_log))
            final_superpmi_log.write("======================================================={}".format(os.linesep))
            final_superpmi_log.write("Contents from {}{}".format(superpmi_log, os.linesep))
            final_superpmi_log.write("======================================================={}".format(os.linesep))
            with open(os.path.join(log_directory, superpmi_log), "r") as current_superpmi_log:
                contents = current_superpmi_log.read()
                final_superpmi_log.write(contents)

        # Log failures summary
        if len(failed_runs) > 0:
            final_superpmi_log.write(os.linesep)
            final_superpmi_log.write(os.linesep)
            final_superpmi_log.write("========Failed runs summary========".format(os.linesep))
            final_superpmi_log.write(os.linesep.join(failed_runs))

    return 0 if len(failed_runs) == 0 else 1
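
jit_flags is a module-level list defined outside this snippet. The log-name filter (startswith("superpmi_Jit")) and the replace("=", "_") call imply entries of the form "JitSomething=value"; an illustrative stand-in:

# Illustrative only: the real list lives at module scope in this script.
jit_flags = [
    "JitStressRegs=1",
    "JitStressRegs=2",
    "JitStressRegs=0x10",
]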
Example #9
def build_and_run(coreclr_args):
    """Run perf scenarios under crank and collect data with SPMI

    Args:
        coreclr_args (CoreClrArguments): Arguments used to drive the collection
    """
    source_directory = coreclr_args.source_directory
    target_arch = coreclr_args.arch
    target_os = coreclr_args.host_os

    checked_root = path.join(source_directory, "artifacts", "bin", "coreclr", target_os + "." + coreclr_args.arch + ".Checked")
    release_root = path.join(source_directory, "artifacts", "bin", "coreclr", target_os + "." + coreclr_args.arch + ".Release")

    # We'll use the repo script to install dotnet
    dotnet_install_script_name = "dotnet-install.cmd" if is_windows else "dotnet-install.sh"
    dotnet_install_script_path = path.join(source_directory, "eng", "common", dotnet_install_script_name)

    with TempDir(skip_cleanup=True) as temp_location:

        print ("Executing in " + temp_location)

        # install dotnet 6.0
        run_command([dotnet_install_script_path, "-Version", "6.0.4"], temp_location, _exit_on_fail=True)
        os.environ['DOTNET_MULTILEVEL_LOOKUP'] = '0'
        os.environ['DOTNET_SKIP_FIRST_TIME_EXPERIENCE'] = '1'
        dotnet_path = path.join(source_directory, ".dotnet")
        dotnet_exe = path.join(dotnet_path, "dotnet.exe") if is_windows else path.join(dotnet_path, "dotnet")
        run_command([dotnet_exe, "--info"], temp_location, _exit_on_fail=True)
        os.environ['DOTNET_ROOT'] = dotnet_path

        ## install crank as a local tool
        run_command(
            [dotnet_exe, "tool", "install", "Microsoft.Crank.Controller", "--version", "0.2.0-*", "--tool-path", temp_location], _exit_on_fail=True)

        ## ideally just do sparse clone, but this doesn't work locally
        ## git clone --filter=blob:none --no-checkout https://github.com/aspnet/benchmarks
        ## cd benchmarks
        ## git sparse-checkout init --cone
        ## git sparse-checkout set scenarios

        ## could probably just pass a URL and avoid this

        run_command(
            ["git", "clone", "--quiet", "--depth", "1", "https://github.com/aspnet/benchmarks"], temp_location, _exit_on_fail=True)

        crank_app = path.join(temp_location, "crank")
        mcs_path = determine_mcs_tool_path(coreclr_args)
        superpmi_path = determine_superpmi_tool_path(coreclr_args)

        # todo: add grpc/signalr, perhaps

        configname_scenario_list = [
                                    ("platform", "plaintext"),
                                    ("json", "json"),
                                    ("plaintext", "mvc"),
                                    ("database", "fortunes_dapper"),
                                    ("database", "fortunes_ef_mvc_https"),
                                    ("database", "updates"),
                                    ("proxy", "proxy-yarp"),
                                    ("staticfiles", "static"),
                                    ("websocket", "websocket"),
                                    ("orchard", "about-sqlite")
                                    ]

        # configname_scenario_list = [("platform", "plaintext")]

        # note the trailing commas that make these one-element tuples (illustrated after this function)

        runtime_options_list = [("Dummy=0",), ("TieredCompilation=0", ), ("TieredPGO=1",), ("TieredPGO=1", "ReadyToRun=0"),
            ("ReadyToRun=0", "OSR_HitLimit=0", "TC_OnStackReplacement_InitialCounter=10"),
            ("TieredPGO=1", "ReadyToRun=0", "OSR_HitLimit=0", "TC_OnStackReplacement_InitialCounter=10")]

        # runtime_options_list = [("TieredCompilation=0", )]

        mch_file = path.join(coreclr_args.output_mch_path, "aspnet.run." + target_os + "." + target_arch + ".checked.mch")
        benchmark_machine = determine_benchmark_machine(coreclr_args)

        jitname = determine_native_name(coreclr_args, "clrjit", target_os)
        coreclrname = determine_native_name(coreclr_args, "coreclr", target_os)
        spminame = determine_native_name(coreclr_args, "superpmi-shim-collector", target_os)
        corelibname = "System.Private.CoreLib.dll"

        jitpath = path.join(".", jitname)
        jitlib  = path.join(checked_root, jitname)
        coreclr = path.join(release_root, coreclrname)
        corelib = path.join(release_root, corelibname)
        spmilib = path.join(checked_root, spminame)

        for (configName, scenario) in configname_scenario_list:
            configYml = configName + ".benchmarks.yml"
            configFile = path.join(temp_location, "benchmarks", "scenarios", configYml)

            crank_arguments = ["--config", configFile,
                               "--profile", benchmark_machine,
                               "--scenario", scenario,
                               "--application.framework", "net7.0",
                               "--application.channel", "edge",
                               "--application.sdkVersion", "latest",
                               "--application.environmentVariables", "COMPlus_JitName=" + spminame,
                               "--application.environmentVariables", "SuperPMIShimLogPath=.",
                               "--application.environmentVariables", "SuperPMIShimPath=" + jitpath,
                               "--application.environmentVariables", "COMPlus_EnableExtraSuperPmiQueries=1",
                               "--application.options.downloadFiles", "*.mc",
                               "--application.options.displayOutput", "true",
#                               "--application.options.dumpType", "full",
#                               "--application.options.fetch", "true",
                               "--application.options.outputFiles", spmilib,
                               "--application.options.outputFiles", jitlib,
                               "--application.options.outputFiles", coreclr,
                               "--application.options.outputFiles", corelib]

            for runtime_options in runtime_options_list:
                runtime_arguments = []
                for runtime_option in runtime_options:
                    runtime_arguments.append("--application.environmentVariables")
                    runtime_arguments.append("COMPlus_" + runtime_option)

                print("")
                print("================================")
                print("Config: " + configName + " scenario: " + scenario + " options: " + " ".join(runtime_options))
                print("================================")
                print("")

                description = ["--description", configName + "-" + scenario + "-" + "-".join(runtime_options)]
                subprocess.run([crank_app] + crank_arguments + description + runtime_arguments, cwd=temp_location)

        # merge
        command = [mcs_path, "-merge", "temp.mch", "*.mc", "-dedup", "-thin"]
        run_command(command, temp_location)

        # clean
        command = [superpmi_path, "-v", "ewmi", "-f", "fail.mcl", jitlib, "temp.mch"]
        run_command(command, temp_location)

        # strip
        if is_nonzero_length_file("fail.mcl"):
            print("Replay had failures, cleaning...");
            fail_file = path.join(coreclr_args.output_mch_path, "fail.mcl");
            command = [mcs_path, "-strip", "fail.mcl", "temp.mch", mch_file]
            run_command(command, temp_location)
        else:
            print("Replay was clean...");
            shutil.copy2("temp.mch", mch_file)

        # index
        command = [mcs_path, "-toc", mch_file]
        run_command(command, temp_location)

        # overall summary
        print("Merged summary for " + mch_file)
        command = [mcs_path, "-jitflags", mch_file]
        run_command(command, temp_location)
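
The trailing commas in runtime_options_list matter because parentheses alone do not make a tuple; a two-line illustration:

assert isinstance(("TieredCompilation=0"), str)     # just a parenthesized string
assert isinstance(("TieredCompilation=0",), tuple)  # the comma makes it a tuple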
Example #10
def main(main_args):
    """ Run superpmi asmdiffs process on the Helix machines.

    See superpmi_asmdiffs_checked_release_setup.py for how the directory structure is set up
    in the correlation payload. This script lives in the root of that directory tree.

    Args:
        main_args (list): Arguments to the script
    """

    python_path = sys.executable
    script_dir = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
    coreclr_args = setup_args(main_args)

    # It doesn't really matter where we put the downloaded SPMI artifacts.
    # Here, they are put in <correlation_payload>/artifacts/spmi.
    spmi_location = os.path.join(script_dir, "artifacts", "spmi")

    log_directory = coreclr_args.log_directory
    platform_name = coreclr_args.platform

    # Figure out which JITs to use
    os_name = "win" if platform_name.lower() == "windows" else "unix"
    arch_name = coreclr_args.arch
    host_arch_name = "x64" if arch_name.endswith("64") else "x86"
    os_name = "universal" if arch_name.startswith("arm") else os_name
    base_jit_path = os.path.join(
        coreclr_args.base_jit_directory,
        'clrjit_{}_{}_{}.dll'.format(os_name, arch_name, host_arch_name))
    diff_jit_path = os.path.join(
        coreclr_args.diff_jit_directory,
        'clrjit_{}_{}_{}.dll'.format(os_name, arch_name, host_arch_name))

    # Core_Root is where the superpmi tools (superpmi.exe, mcs.exe) are expected to be found.
    # We pass the full path of the JITs to use as arguments.
    core_root_dir = script_dir

    print("Running superpmi.py download to get MCH files")

    log_file = os.path.join(
        log_directory,
        "superpmi_download_{}_{}.log".format(platform_name, arch_name))
    run_command([
        python_path,
        os.path.join(script_dir, "superpmi.py"), "download", "--no_progress",
        "-core_root", core_root_dir, "-target_os", platform_name,
        "-target_arch", arch_name, "-spmi_location", spmi_location,
        "-log_level", "debug", "-log_file", log_file
    ],
                _exit_on_fail=True)

    print("Running superpmi.py asmdiffs between checked and release binaries")
    log_file = os.path.join(
        log_directory, "superpmi_{}_{}.log".format(platform_name, arch_name))

    _, _, return_code = run_command([
        python_path,
        os.path.join(script_dir, "superpmi.py"), "asmdiffs",
        "--diff_with_release", "--no_progress", "-core_root", core_root_dir,
        "-target_os", platform_name, "-target_arch", arch_name, "-arch",
        host_arch_name, "-base_jit_path", base_jit_path, "-diff_jit_path",
        diff_jit_path, "-spmi_location", spmi_location, "-error_limit", "100",
        "-log_level", "debug", "-log_file", log_file
    ])

    # TODO: the superpmi.py asmdiffs command returns a failure code if there are MISSING data even if there are
    # no asm diffs. We should probably only fail if there are actual failures (not MISSING or asm diffs).

    if return_code != 0:
        print("Failure in {}".format(log_file))
        return 1

    return 0
Example #11
def main(main_args):
    """ Prepare the Helix data for SuperPMI asmdiffs Azure DevOps pipeline.

    The Helix correlation payload directory is created and populated as follows:

    <source_directory>\payload -- the correlation payload directory
        -- contains the *.py scripts from <source_directory>\src\coreclr\scripts
        -- contains superpmi.exe, mcs.exe from the target-specific build
    <source_directory>\payload\base
        -- contains the baseline JITs
    <source_directory>\payload\diff
        -- contains the diff JITs
    <source_directory>\payload\jit-analyze
        -- contains the self-contained jit-analyze build (from dotnet/jitutils)
    <source_directory>\payload\git
        -- contains a Portable ("xcopy installable") `git` tool, downloaded from:
        https://netcorenativeassets.blob.core.windows.net/resource-packages/external/windows/git/Git-2.32.0-64-bit.zip
        This is needed by jit-analyze to do `git diff` on the generated asm. The `<source_directory>\payload\git\cmd`
        directory is added to the PATH.
        NOTE: this only runs on Windows.

    Then, AzDO pipeline variables are set.

    Args:
        main_args (list): Arguments to the script

    Returns:
        0 on success, otherwise a failure code
    """

    # Set up logging.
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(logging.INFO)
    logger.addHandler(stream_handler)

    coreclr_args = setup_args(main_args)

    arch = coreclr_args.arch
    source_directory = coreclr_args.source_directory
    product_directory = coreclr_args.product_directory

    python_path = sys.executable

    # CorrelationPayload directories
    correlation_payload_directory = os.path.join(source_directory, "payload")
    superpmi_scripts_directory = os.path.join(source_directory, 'src', 'coreclr', 'scripts')
    base_jit_directory = os.path.join(correlation_payload_directory, "base")
    diff_jit_directory = os.path.join(correlation_payload_directory, "diff")
    jit_analyze_build_directory = os.path.join(correlation_payload_directory, "jit-analyze")
    git_directory = os.path.join(correlation_payload_directory, "git")

    ######## Get the portable `git` package

    git_url = "https://netcorenativeassets.blob.core.windows.net/resource-packages/external/windows/git/Git-2.32.0-64-bit.zip"

    print('Downloading {} -> {}'.format(git_url, git_directory))

    urls = [ git_url ]
    # There are too many files to be verbose in the download and copy.
    download_files(urls, git_directory, verbose=False, display_progress=False)
    git_exe_tool = os.path.join(git_directory, "cmd", "git.exe")
    if not os.path.isfile(git_exe_tool):
        print('Error: `git` not found at {}'.format(git_exe_tool))
        return 1

    ######## Get SuperPMI python scripts

    # Copy *.py to CorrelationPayload
    print('Copying {} -> {}'.format(superpmi_scripts_directory, correlation_payload_directory))
    copy_directory(superpmi_scripts_directory, correlation_payload_directory, verbose_copy=True,
                   match_func=lambda path: any(path.endswith(extension) for extension in [".py"]))

    ######## Get baseline JIT

    # Figure out which baseline JIT to use, and download it.
    if not os.path.exists(base_jit_directory):
        os.makedirs(base_jit_directory)

    print("Fetching history of `main` branch so we can find the baseline JIT")
    run_command(["git", "fetch", "--depth=500", "origin", "main"], source_directory, _exit_on_fail=True)

    # Note: we only support downloading Windows versions of the JIT currently. To support downloading
    # non-Windows JITs on a Windows machine, pass `-host_os <os>` to jitrollingbuild.py.
    print("Running jitrollingbuild.py download to get baseline JIT")
    jit_rolling_build_script = os.path.join(superpmi_scripts_directory, "jitrollingbuild.py")
    _, _, return_code = run_command([
        python_path,
        jit_rolling_build_script,
        "download",
        "-arch", arch,
        "-target_dir", base_jit_directory],
        source_directory)
    if return_code != 0:
        print('{} failed with {}'.format(jit_rolling_build_script, return_code))
        return return_code

    ######## Get diff JIT

    print('Copying diff binaries {} -> {}'.format(product_directory, diff_jit_directory))
    copy_directory(product_directory, diff_jit_directory, verbose_copy=True, match_func=match_jit_files)

    ######## Get SuperPMI tools

    # Put the SuperPMI tools directly in the root of the correlation payload directory.
    print('Copying SuperPMI tools {} -> {}'.format(product_directory, correlation_payload_directory))
    copy_directory(product_directory, correlation_payload_directory, verbose_copy=True, match_func=match_superpmi_tool_files)

    ######## Clone and build jitutils: we only need jit-analyze

    try:
        with TempDir() as jitutils_directory:
            run_command(
                ["git", "clone", "--quiet", "--depth", "1", "https://github.com/dotnet/jitutils", jitutils_directory])

            # Make sure ".dotnet" directory exists, by running the script at least once
            dotnet_script_name = "dotnet.cmd" if is_windows else "dotnet.sh"
            dotnet_script_path = os.path.join(source_directory, dotnet_script_name)
            run_command([dotnet_script_path, "--info"], jitutils_directory)

            # Build jit-analyze only, and build it as a self-contained app (not framework-dependent).
            # What target RID are we building? It depends on where we're going to run this code.
            # The RID catalog is here: https://docs.microsoft.com/en-us/dotnet/core/rid-catalog.
            #   Windows x64 => win-x64
            #   Windows x86 => win-x86
            #   Windows arm32 => win-arm
            #   Windows arm64 => win-arm64
            #   Linux x64 => linux-x64
            #   Linux arm32 => linux-arm
            #   Linux arm64 => linux-arm64
            #   macOS x64 => osx-x64

            # NOTE: we currently only support running on Windows x86/x64 (we don't pass the target OS)
            RID = None
            if arch == "x86":
                RID = "win-x86"
            if arch == "x64":
                RID = "win-x64"

            # Set dotnet path to run build
            os.environ["PATH"] = os.path.join(source_directory, ".dotnet") + os.pathsep + os.environ["PATH"]

            run_command([
                "dotnet",
                "publish",
                "-c", "Release",
                "--runtime", RID,
                "--self-contained",
                "--output", jit_analyze_build_directory,
                os.path.join(jitutils_directory, "src", "jit-analyze", "jit-analyze.csproj")],
                jitutils_directory)
    except PermissionError as pe_error:
        # Details: https://bugs.python.org/issue26660
        print('Ignoring PermissionError: {0}'.format(pe_error))

    jit_analyze_tool = os.path.join(jit_analyze_build_directory, "jit-analyze.exe")
    if not os.path.isfile(jit_analyze_tool):
        print('Error: {} not found'.format(jit_analyze_tool))
        return 1

    ######## Set pipeline variables

    helix_source_prefix = "official"
    creator = ""

    print('Setting pipeline variables:')
    set_pipeline_variable("CorrelationPayloadDirectory", correlation_payload_directory)
    set_pipeline_variable("Architecture", arch)
    set_pipeline_variable("Creator", creator)
    set_pipeline_variable("HelixSourcePrefix", helix_source_prefix)

    return 0
Example #12
def main(main_args):
    """Main entrypoint

    Args:
        main_args (list): Arguments to the script
    """

    coreclr_args = setup_args(main_args)

    fuzzlyn_directory = coreclr_args.fuzzlyn_directory
    core_root = coreclr_args.core_root
    tag_name = "{}-{}".format(coreclr_args.run_configuration,
                              coreclr_args.partition)
    output_directory = coreclr_args.output_directory
    if not coreclr_args.run_duration:
        run_duration = 60 * 60  # 60 minutes by default
    else:
        run_duration = int(
            coreclr_args.run_duration) * 60  # run_duration is in minutes; convert to seconds

    path_to_corerun = os.path.join(core_root, "corerun")
    path_to_tool = os.path.join(fuzzlyn_directory, "Fuzzlyn")
    if is_windows:
        path_to_corerun += ".exe"
        path_to_tool += ".exe"

    os.makedirs(output_directory, exist_ok=True)

    with TempDir() as temp_location:
        summary_file_name = "issues-summary-{}.txt".format(tag_name)
        summary_file_path = path.join(temp_location, summary_file_name)
        with open(summary_file_path, 'w'):
            pass

        upload_fuzzer_output_path = path.join(
            output_directory, "Fuzzlyn-{}.log".format(tag_name))

        with open(summary_file_path, 'r') as fp:
            exit_evt = threading.Event()
            reduce_examples = ReduceExamples(fp, temp_location, path_to_tool,
                                             path_to_corerun, exit_evt)
            reduce_examples.start()

            run_command([
                path_to_tool, "--seconds-to-run",
                str(run_duration), "--output-events-to", summary_file_path,
                "--host", path_to_corerun, "--parallelism", "-1"
            ],
                        _exit_on_fail=True,
                        _output_file=upload_fuzzer_output_path)

            exit_evt.set()
            reduce_examples.join()

        upload_summary_file_path = path.join(output_directory,
                                             summary_file_name)
        print("Copying summary: {} -> {}".format(summary_file_path,
                                                 upload_summary_file_path))
        shutil.copy2(summary_file_path, upload_summary_file_path)

        upload_issues_zip_path = path.join(output_directory,
                                           "AllIssues-{}".format(tag_name))
        print("Creating zip {}.zip".format(upload_issues_zip_path))
        shutil.make_archive(upload_issues_zip_path, 'zip', temp_location)
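
ReduceExamples is the reader thread whose run() loop appears in Example #5. From the call here and the attributes run() touches, its constructor presumably just records the handles; a sketch with assumed field names:

import threading

class ReduceExamples(threading.Thread):
    def __init__(self, examples_file, examples_dir, fuzzlyn_path, host_path,
                 exit_evt):
        super().__init__()
        self.examples_file = examples_file  # open handle to the events file
        self.examples_dir = examples_dir    # where reduced .cs repros are written
        self.fuzzlyn_path = fuzzlyn_path    # Fuzzlyn executable
        self.host_path = host_path          # corerun host
        self.exit_evt = exit_evt            # signaled when the fuzzing run ends
        self.reduced_jit_asserts = set()    # dedup of already-reduced JIT asserts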
Example #13
def main(main_args):
    """Main entrypoint

    Args:
        main_args (list): Arguments to the script
    """

    coreclr_args = setup_args(main_args)
    arch_name = coreclr_args.arch
    os_name = "win" if coreclr_args.platform.lower() == "windows" else "linux"
    run_configuration = "{}-{}".format(os_name, arch_name)
    source_directory = coreclr_args.source_directory

    # CorrelationPayload directories
    correlation_payload_directory = path.join(coreclr_args.source_directory,
                                              "payload")
    scripts_src_directory = path.join(source_directory, "src", "coreclr",
                                      'scripts')
    coreroot_directory = path.join(correlation_payload_directory, "CoreRoot")
    dst_directory = path.join(correlation_payload_directory, "exploratory")

    helix_source_prefix = "official"
    creator = ""

    repo_urls = {
        "Antigen": "https://github.com/kunalspathak/Antigen.git",
        "Fuzzlyn": "https://github.com/jakobbotsch/Fuzzlyn.git",
    }

    # tool_name is verified in setup_args
    assert coreclr_args.tool_name in repo_urls
    repo_url = repo_urls[coreclr_args.tool_name]

    # create exploratory directory
    print('Copying {} -> {}'.format(scripts_src_directory, coreroot_directory))
    copy_directory(scripts_src_directory,
                   coreroot_directory,
                   verbose_output=True,
                   match_func=lambda path: any(
                       path.endswith(extension) for extension in [".py"]))

    if is_windows:
        acceptable_copy = lambda path: any(
            path.endswith(extension)
            for extension in [".py", ".dll", ".exe", ".json"])
    else:
        # Need to accept files without any extension, which is how executable files are named on Unix.
        acceptable_copy = lambda path: (os.path.basename(path).find(
            ".") == -1) or any(
                path.endswith(extension)
                for extension in [".py", ".dll", ".so", ".json", ".a"])
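        # e.g. acceptable_copy("corerun") -> True (extensionless Unix executable),
        #      acceptable_copy("a.dll") -> True, acceptable_copy("notes.md") -> False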

    # copy CORE_ROOT
    print('Copying {} -> {}'.format(coreclr_args.core_root_directory,
                                    coreroot_directory))
    copy_directory(coreclr_args.core_root_directory,
                   coreroot_directory,
                   verbose_output=True,
                   match_func=acceptable_copy)

    try:
        with TempDir() as tool_code_directory:
            # clone the tool
            run_command([
                "git", "clone", "--quiet", "--depth", "1", repo_url,
                tool_code_directory
            ])

            publish_dir = path.join(tool_code_directory, "publish")

            # build the tool
            with ChangeDir(tool_code_directory):
                dotnet_cmd = os.path.join(source_directory, "dotnet.cmd")
                if not is_windows:
                    dotnet_cmd = os.path.join(source_directory, "dotnet.sh")
                run_command([
                    dotnet_cmd, "publish", "-c", "Release", "--self-contained",
                    "-r", run_configuration, "-o", publish_dir
                ],
                            _exit_on_fail=True)

            dll_name = coreclr_args.tool_name + ".dll"
            if not os.path.exists(path.join(publish_dir, dll_name)):
                raise FileNotFoundError("{} not present at {}".format(
                    dll_name, publish_dir))

            # copy tool
            print('Copying {} -> {}'.format(publish_dir, dst_directory))
            copy_directory(publish_dir,
                           dst_directory,
                           verbose_output=True,
                           match_func=acceptable_copy)
    except PermissionError as pe:
        print("Skipping file. Got error: %s", pe)

    # create foo.txt in work_item directories
    workitem_directory = path.join(source_directory, "workitem")
    os.mkdir(workitem_directory)
    foo_txt = os.path.join(workitem_directory, "foo.txt")
    with open(foo_txt, "w") as foo_txt_file:
        foo_txt_file.write("hello world!")

    # Set variables
    print('Setting pipeline variables:')
    set_pipeline_variable("CorrelationPayloadDirectory",
                          correlation_payload_directory)
    set_pipeline_variable("WorkItemDirectory", workitem_directory)
    set_pipeline_variable("RunConfiguration", run_configuration)
    set_pipeline_variable("Creator", creator)
    set_pipeline_variable("HelixSourcePrefix", helix_source_prefix)
Example #14
def build_and_run(coreclr_args, output_mch_name):
    """Build the microbenchmarks and run them under "superpmi collect"

    Args:
        coreclr_args (CoreClrArguments): Arguments used to drive the collection
        output_mch_name (string): Name of the output .mch file
    """
    arch = coreclr_args.arch
    python_path = sys.executable
    core_root = coreclr_args.core_root
    superpmi_directory = coreclr_args.superpmi_directory
    performance_directory = coreclr_args.performance_directory
    log_file = coreclr_args.log_file
    partition_count = coreclr_args.partition_count
    partition_index = coreclr_args.partition_index
    dotnet_directory = os.path.join(performance_directory, "tools", "dotnet", arch)
    dotnet_exe = os.path.join(dotnet_directory, "dotnet")

    artifacts_directory = os.path.join(performance_directory, "artifacts")
    artifacts_packages_directory = os.path.join(artifacts_directory, "packages")
    project_file = path.join(performance_directory, "src", "benchmarks", "micro", "MicroBenchmarks.csproj")
    benchmarks_dll = path.join(artifacts_directory, "MicroBenchmarks.dll")

    # Workaround https://github.com/dotnet/sdk/issues/23430
    project_file = realpath(project_file)

    if is_windows:
        shim_name = "%JitName%"
        corerun_exe = "CoreRun.exe"
        script_name = "run_microbenchmarks.bat"
    else:
        shim_name = "$JitName"
        corerun_exe = "corerun"
        script_name = "run_microbenchmarks.sh"

    make_executable(dotnet_exe)

    run_command(
        [dotnet_exe, "restore", project_file, "--packages",
         artifacts_packages_directory], _exit_on_fail=True)

    run_command(
        [dotnet_exe, "build", project_file, "--configuration", "Release",
         "--framework", "net7.0", "--no-restore", "/p:NuGetPackageRoot=" + artifacts_packages_directory,
         "-o", artifacts_directory], _exit_on_fail=True)

    # Disable ReadyToRun so we always JIT R2R methods and collect them
    collection_command = f"{dotnet_exe} {benchmarks_dll}  --filter \"*\" --corerun {path.join(core_root, corerun_exe)} --partition-count {partition_count} " \
                         f"--partition-index {partition_index} --envVars COMPlus_JitName:{shim_name} " \
                         " COMPlus_ZapDisable:1  COMPlus_ReadyToRun:0 " \
                         "--iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart"

    # Generate the execution script in Temp location
    with TempDir() as temp_location:
        script_name = path.join(temp_location, script_name)

        contents = []
        # Unset JitName so the dotnet process will not fail
        if is_windows:
            contents.append("set JitName=%COMPlus_JitName%")
            contents.append("set COMPlus_JitName=")
        else:
            contents.append("#!/bin/bash")
            contents.append("export JitName=$COMPlus_JitName")
            contents.append("unset COMPlus_JitName")
        contents.append(f"pushd {performance_directory}")
        contents.append(collection_command)

        with open(script_name, "w") as collection_script:
            collection_script.write(os.linesep.join(contents))

        print()
        print(f"{script_name} contents:")
        print("******************************************")
        print(os.linesep.join(contents))
        print("******************************************")

        make_executable(script_name)

        run_command([
            python_path, path.join(superpmi_directory, "superpmi.py"), "collect", "-core_root", core_root,
            "-output_mch_path", output_mch_name, "-log_file", log_file, "-log_level", "debug",
            script_name], _exit_on_fail=True)
Example #15
def build_and_run(coreclr_args, output_mch_name):
    """Build the microbenchmarks and run them under "superpmi collect"

    Args:
        coreclr_args (CoreClrArguments): Arguments used to drive the collection
        output_mch_name (string): Name of the output .mch file
    """
    arch = coreclr_args.arch
    python_path = sys.executable
    core_root = coreclr_args.core_root
    superpmi_directory = coreclr_args.superpmi_directory
    performance_directory = coreclr_args.performance_directory
    log_file = coreclr_args.log_file
    partition_count = coreclr_args.partition_count
    partition_index = coreclr_args.partition_index
    dotnet_directory = os.path.join(performance_directory, "tools", "dotnet",
                                    arch)
    dotnet_exe = os.path.join(dotnet_directory, "dotnet")

    artifacts_directory = os.path.join(performance_directory, "artifacts")
    artifacts_packages_directory = os.path.join(artifacts_directory,
                                                "packages")
    project_file = os.path.join(performance_directory, "src", "benchmarks",
                                "micro", "MicroBenchmarks.csproj")
    benchmarks_dll = os.path.join(artifacts_directory, "MicroBenchmarks.dll")

    # Workaround https://github.com/dotnet/sdk/issues/23430
    project_file = os.path.realpath(project_file)

    if is_windows:
        shim_name = "%JitName%"
        corerun_exe = "CoreRun.exe"
        script_name = "run_microbenchmarks.bat"
    else:
        shim_name = "$JitName"
        corerun_exe = "corerun"
        script_name = "run_microbenchmarks.sh"

    make_executable(dotnet_exe)

    # Start with a "dotnet --info" to see what we've got.
    run_command([dotnet_exe, "--info"])

    env_copy = os.environ.copy()
    if is_windows:
        # Try to work around problem with random NuGet failures in "dotnet restore":
        #   error NU3037: Package 'System.Runtime 4.1.0' from source 'https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-public/nuget/v3/index.json':
        #     The repository primary signature validity period has expired. [C:\h\w\A3B008C0\w\B581097F\u\performance\src\benchmarks\micro\MicroBenchmarks.csproj]
        # Using environment variable specified in https://github.com/NuGet/NuGet.Client/pull/4259.
        env_copy["NUGET_EXPERIMENTAL_CHAIN_BUILD_RETRY_POLICY"] = "9,2000"

    # If `dotnet restore` fails, retry.
    num_tries = 3
    for try_num in range(num_tries):
        # On the last try, exit on fail
        exit_on_fail = try_num + 1 == num_tries
        (_, _, return_code) = run_command([
            dotnet_exe, "restore", project_file, "--packages",
            artifacts_packages_directory
        ],
                                          _exit_on_fail=exit_on_fail,
                                          _env=env_copy)
        if return_code == 0:
            # It succeeded!
            break
        print("Try {} of {} failed with error code {}: trying again".format(
            try_num + 1, num_tries, return_code))
        # Sleep 5 seconds before trying again
        time.sleep(5)

    run_command([
        dotnet_exe, "build", project_file, "--configuration", "Release",
        "--framework", "net7.0", "--no-restore", "/p:NuGetPackageRoot=" +
        artifacts_packages_directory, "-o", artifacts_directory
    ],
                _exit_on_fail=True)

    # Disable ReadyToRun so we always JIT R2R methods and collect them
    collection_command = f"{dotnet_exe} {benchmarks_dll}  --filter \"*\" --corerun {os.path.join(core_root, corerun_exe)} --partition-count {partition_count} " \
                         f"--partition-index {partition_index} --envVars COMPlus_JitName:{shim_name} " \
                         " COMPlus_ZapDisable:1  COMPlus_ReadyToRun:0 " \
                         "--iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart"

    # Generate the execution script in Temp location
    with TempDir() as temp_location:
        script_name = os.path.join(temp_location, script_name)

        contents = []
        # Unset JitName so the dotnet process will not fail
        if is_windows:
            contents.append("set JitName=%COMPlus_JitName%")
            contents.append("set COMPlus_JitName=")
        else:
            contents.append("#!/bin/bash")
            contents.append("export JitName=$COMPlus_JitName")
            contents.append("unset COMPlus_JitName")
        contents.append(f"pushd {performance_directory}")
        contents.append(collection_command)

        with open(script_name, "w") as collection_script:
            collection_script.write(os.linesep.join(contents))

        print()
        print(f"{script_name} contents:")
        print("******************************************")
        print(os.linesep.join(contents))
        print("******************************************")

        make_executable(script_name)

        run_command([
            python_path,
            os.path.join(superpmi_directory, "superpmi.py"), "collect",
            "-core_root", core_root, "-output_mch_path", output_mch_name,
            "-log_file", log_file, "-log_level", "debug", script_name
        ],
                    _exit_on_fail=True)
Example #16
def main(main_args):
    """ Main entrypoint

    Args:
        main_args (list): Arguments to the script
    """
    coreclr_args = setup_args(main_args)
    source_directory = coreclr_args.source_directory

    # CorrelationPayload directories
    correlation_payload_directory = os.path.join(coreclr_args.source_directory,
                                                 "payload")
    superpmi_src_directory = os.path.join(source_directory, 'src', 'coreclr',
                                          'scripts')
    superpmi_dst_directory = os.path.join(correlation_payload_directory,
                                          "superpmi")
    arch = coreclr_args.arch
    helix_source_prefix = "official"
    creator = ""
    ci = True
    if is_windows:
        helix_queue = "Windows.10.Arm64" if arch == "arm64" else "Windows.10.Amd64.X86.Rt"
    else:
        if arch == "arm":
            helix_queue = "(Ubuntu.1804.Arm32)[email protected]/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm32v7-bfcd90a-20200121150440"
        elif arch == "arm64":
            helix_queue = "(Ubuntu.1804.Arm64)[email protected]/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm64v8-20210531091519-97d8652"
        else:
            helix_queue = "Ubuntu.1804.Amd64"

    # create superpmi directory
    print('Copying {} -> {}'.format(superpmi_src_directory,
                                    superpmi_dst_directory))
    copy_directory(superpmi_src_directory,
                   superpmi_dst_directory,
                   verbose_output=True,
                   match_func=lambda path: any(
                       path.endswith(extension) for extension in [".py"]))

    if is_windows:
        acceptable_copy = lambda path: any(
            path.endswith(extension)
            for extension in [".py", ".dll", ".exe", ".json"])
    else:
        # Need to accept files without any extension, which is how executable files are named on Unix.
        acceptable_copy = lambda path: (os.path.basename(path).find(
            ".") == -1) or any(
                path.endswith(extension)
                for extension in [".py", ".dll", ".so", ".json"])

    print('Copying {} -> {}'.format(coreclr_args.core_root_directory,
                                    superpmi_dst_directory))
    copy_directory(coreclr_args.core_root_directory,
                   superpmi_dst_directory,
                   verbose_output=True,
                   match_func=acceptable_copy)

    # Copy all the test files to CORE_ROOT
    # The reason is that *.Tests.dll have a lot of dependencies, and to ensure we do not get
    # reflection errors, we just copy everything to CORE_ROOT so that for all individual
    # partitions the references will be present in CORE_ROOT.
    if coreclr_args.collection_name == "libraries_tests":
        print('Copying {} -> {}'.format(coreclr_args.input_directory,
                                        superpmi_dst_directory))

        def make_readable(folder_name):
            """Recursively mark a folder's contents readable (mode 744)

            Args:
                folder_name (string): folder to mark with 744
            """
            if is_windows:
                return

            print("Inside make_readable")
            run_command(["ls", "-l", folder_name])
            for file_path, dirs, files in os.walk(folder_name, topdown=True):
                for d in dirs:
                    os.chmod(
                        os.path.join(file_path, d),
                        # read+write+execute for owner
                        (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) |
                        # read for group
                        (stat.S_IRGRP) |
                        # read for other
                        (stat.S_IROTH))

                for f in files:
                    os.chmod(
                        os.path.join(file_path, f),
                        # read+write+execute for owner
                        (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) |
                        # read for group
                        (stat.S_IRGRP) |
                        # read for other
                        (stat.S_IROTH))
            run_command(["ls", "-l", folder_name])

        make_readable(coreclr_args.input_directory)
        copy_directory(coreclr_args.input_directory,
                       superpmi_dst_directory,
                       verbose_output=True,
                       match_func=acceptable_copy)

    # Workitem directories
    workitem_directory = os.path.join(source_directory, "workitem")
    input_artifacts = ""

    if coreclr_args.collection_name == "benchmarks":
        # Setup microbenchmarks
        setup_microbenchmark(workitem_directory, arch)
    else:
        # Setup for pmi/crossgen runs

        # Clone and build jitutils
        try:
            with TempDir() as jitutils_directory:
                run_command([
                    "git", "clone", "--quiet", "--depth", "1",
                    "https://github.com/dotnet/jitutils", jitutils_directory
                ])

                # Make sure ".dotnet" directory exists, by running the script at least once
                dotnet_script_name = "dotnet.cmd" if is_windows else "dotnet.sh"
                dotnet_script_path = os.path.join(source_directory,
                                                  dotnet_script_name)
                run_command([dotnet_script_path, "--info"], jitutils_directory)

                # Set dotnet path to run build
                os.environ["PATH"] = os.path.join(
                    source_directory,
                    ".dotnet") + os.pathsep + os.environ["PATH"]
                build_file = "build.cmd" if is_windows else "build.sh"
                run_command(
                    [os.path.join(jitutils_directory, build_file), "-p"],
                    jitutils_directory)

                copy_files(
                    os.path.join(jitutils_directory,
                                 "bin"), superpmi_dst_directory,
                    [os.path.join(jitutils_directory, "bin", "pmi.dll")])
        except PermissionError as pe_error:
            # Details: https://bugs.python.org/issue26660
            print('Ignoring PermissionError: {0}'.format(pe_error))

        # NOTE: we can't use the build machine ".dotnet" to run on all platforms. E.g., the Windows x86 build uses a
        # Windows x64 .dotnet\dotnet.exe that can't load a 32-bit shim. Thus, we always use corerun from Core_Root to invoke crossgen2.
        # The following will copy .dotnet to the correlation payload in case we change our mind, and need or want to use it for some scenarios.

        # # Copy ".dotnet" to correlation_payload_directory for crossgen2 job; it is needed to invoke crossgen2.dll
        # if coreclr_args.collection_type == "crossgen2":
        #     dotnet_src_directory = os.path.join(source_directory, ".dotnet")
        #     dotnet_dst_directory = os.path.join(correlation_payload_directory, ".dotnet")
        #     print('Copying {} -> {}'.format(dotnet_src_directory, dotnet_dst_directory))
        #     copy_directory(dotnet_src_directory, dotnet_dst_directory, verbose_output=False)

        # payload
        pmiassemblies_directory = os.path.join(workitem_directory,
                                               "pmiAssembliesDirectory")
        input_artifacts = os.path.join(pmiassemblies_directory,
                                       coreclr_args.collection_name)
        exclude_directory = [
            'Core_Root'
        ] if coreclr_args.collection_name == "coreclr_tests" else []
        exclude_files = native_binaries_to_ignore
        if coreclr_args.collection_type == "crossgen2":
            print('Adding exclusions for crossgen2')
            # Currently, trying to crossgen2 R2RTest\Microsoft.Build.dll causes a pop-up failure, so exclude it.
            exclude_files += ["Microsoft.Build.dll"]

        if coreclr_args.collection_name == "libraries_tests":
            # libraries_tests artifacts contains files from core_root folder. Exclude them.
            core_root_dir = coreclr_args.core_root_directory
            exclude_files += [
                item for item in os.listdir(core_root_dir)
                if os.path.isfile(os.path.join(core_root_dir, item)) and (
                    item.endswith(".dll") or item.endswith(".exe"))
            ]

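        # partition_files (defined elsewhere in this script) copies the input
        # assemblies into size-capped partitions under input_artifacts,
        # honoring the exclusion lists built above.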
        partition_files(coreclr_args.input_directory, input_artifacts,
                        coreclr_args.max_size, exclude_directory,
                        exclude_files)

    # Set variables
    print('Setting pipeline variables:')
    set_pipeline_variable("CorrelationPayloadDirectory",
                          correlation_payload_directory)
    set_pipeline_variable("WorkItemDirectory", workitem_directory)
    set_pipeline_variable("InputArtifacts", input_artifacts)
    set_pipeline_variable("Python", ' '.join(get_python_name()))
    set_pipeline_variable("Architecture", arch)
    set_pipeline_variable("Creator", creator)
    set_pipeline_variable("Queue", helix_queue)
    set_pipeline_variable("HelixSourcePrefix", helix_source_prefix)
    set_pipeline_variable("MchFileTag", coreclr_args.mch_file_tag)
Example No. 17
0
def main(main_args):
    """Main entrypoint

    Args:
        main_args ([type]): Arguments to the script
    """

    coreclr_args = setup_args(main_args)

    fuzzlyn_directory = coreclr_args.fuzzlyn_directory
    core_root = coreclr_args.core_root
    tag_name = "{}-{}".format(coreclr_args.run_configuration,
                              coreclr_args.partition)
    output_directory = coreclr_args.output_directory
    if not coreclr_args.run_duration:
        run_duration = 60 * 60  # Default: 60 minutes, expressed in seconds
    else:
        # run_duration is given in minutes; convert it to seconds.
        run_duration = int(coreclr_args.run_duration) * 60

    path_to_corerun = os.path.join(core_root, "corerun")
    path_to_tool = os.path.join(fuzzlyn_directory, "Fuzzlyn")
    if is_windows:
        path_to_corerun += ".exe"
        path_to_tool += ".exe"

    os.makedirs(output_directory, exist_ok=True)

    if not is_windows:
        # Disable core dumps. The fuzzers have their own graceful handling for
        # runtime crashes. Especially on macOS we can quickly fill up the drive
        # with dumps if we find lots of crashes since dumps there are very big.
        import resource
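        # setrlimit takes a (soft, hard) pair; (0, 0) prevents any core file from being written.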
        resource.setrlimit(resource.RLIMIT_CORE, (0, 0))

    with TempDir() as temp_location:
        summary_file_name = "issues-summary-{}.txt".format(tag_name)
        summary_file_path = path.join(temp_location, summary_file_name)
        with open(summary_file_path, 'w'):
            pass

        upload_fuzzer_output_path = path.join(
            output_directory, "Fuzzlyn-{}.log".format(tag_name))

        with open(summary_file_path, 'r') as fp:
            exit_evt = threading.Event()
            reduce_examples = ReduceExamples(fp, temp_location, path_to_tool,
                                             path_to_corerun, exit_evt)
            reduce_examples.start()

            run_command([
                path_to_tool, "--seconds-to-run",
                str(run_duration), "--output-events-to", summary_file_path,
                "--host", path_to_corerun, "--parallelism", "-1",
                "--known-errors", "dotnet/runtime"
            ],
                        _exit_on_fail=True,
                        _output_file=upload_fuzzer_output_path)

            exit_evt.set()
            reduce_examples.join()

        upload_summary_file_path = path.join(output_directory,
                                             summary_file_name)
        print("Copying summary: {} -> {}".format(summary_file_path,
                                                 upload_summary_file_path))
        shutil.copy2(summary_file_path, upload_summary_file_path)

        upload_issues_zip_path = path.join(output_directory,
                                           "AllIssues-{}".format(tag_name))
        print("Creating zip {}.zip".format(upload_issues_zip_path))
        shutil.make_archive(upload_issues_zip_path, 'zip', temp_location)
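
# ReduceExamples (used above) is a worker thread defined elsewhere in this
# script: it tails the summary file that Fuzzlyn appends events to and reduces
# reported examples as they appear. A minimal sketch of that tail-and-process
# pattern, assuming one event per line; the class name and the reduction logic
# here are illustrative, not the actual implementation.
import threading
import time

class ReduceExamplesSketch(threading.Thread):
    def __init__(self, summary_fp, exit_evt):
        super().__init__()
        self.summary_fp = summary_fp
        self.exit_evt = exit_evt

    def run(self):
        # Poll the open file for new lines until signaled to exit. readline()
        # returns "" at EOF without blocking, so polling with a short sleep works.
        while not self.exit_evt.is_set():
            line = self.summary_fp.readline()
            if not line:
                time.sleep(0.5)
                continue
            self.process_event(line.strip())

    def process_event(self, event_line):
        # Placeholder: the real thread parses the event and invokes Fuzzlyn
        # to reduce the offending example.
        print("Observed event: " + event_line)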
Example No. 18
0
def main(main_args):
    """ Run superpmi asmdiffs process on the Helix machines.

    See superpmi_asmdiffs_setup.py for how the directory structure is set up in the
    correlation payload. This script lives in the root of that directory tree.

    Args:
        main_args ([type]): Arguments to the script
    """

    python_path = sys.executable
    script_dir = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
    coreclr_args = setup_args(main_args)

    # It doesn't really matter where we put the downloaded SPMI artifacts.
    # Here, they are put in <correlation_payload>/artifacts/spmi.
    spmi_location = os.path.join(script_dir, "artifacts", "spmi")

    log_directory = coreclr_args.log_directory
    platform_name = coreclr_args.platform

    # Find the built jit-analyze and put its directory on the PATH
    jit_analyze_dir = os.path.join(script_dir, "jit-analyze")
    if not os.path.isdir(jit_analyze_dir):
        print("Error: jit-analyze not found in {} (continuing)".format(
            jit_analyze_dir))
    else:
        # Put the jit-analyze directory on the PATH so superpmi.py can find it.
        print("Adding {} to PATH".format(jit_analyze_dir))
        os.environ["PATH"] = jit_analyze_dir + os.pathsep + os.environ["PATH"]

    # Find the portable `git` installation, and put `git.exe` on the PATH, for use by `jit-analyze`.
    git_directory = os.path.join(script_dir, "git", "cmd")
    git_exe_tool = os.path.join(git_directory, "git.exe")
    if not os.path.isfile(git_exe_tool):
        print("Error: `git` not found at {} (continuing)".format(git_exe_tool))
    else:
        # Put the git/cmd directory on the PATH so jit-analyze can find it.
        print("Adding {} to PATH".format(git_directory))
        os.environ["PATH"] = git_directory + os.pathsep + os.environ["PATH"]

    # Figure out which JITs to use
    os_name = "win" if platform_name.lower() == "windows" else "unix"
    arch_name = coreclr_args.arch
    host_arch_name = "x64" if arch_name.endswith("64") else "x86"
    os_name = "universal" if arch_name.startswith("arm") else os_name
    base_jit_path = os.path.join(
        coreclr_args.base_jit_directory,
        'clrjit_{}_{}_{}.dll'.format(os_name, arch_name, host_arch_name))
    diff_jit_path = os.path.join(
        coreclr_args.diff_jit_directory,
        'clrjit_{}_{}_{}.dll'.format(os_name, arch_name, host_arch_name))

    # Core_Root is where the superpmi tools (superpmi.exe, mcs.exe) are expected to be found.
    # We pass the full path of the JITs to use as arguments.
    core_root_dir = script_dir

    print("Running superpmi.py download to get MCH files")

    log_file = os.path.join(
        log_directory,
        "superpmi_download_{}_{}.log".format(platform_name, arch_name))
    run_command([
        python_path,
        os.path.join(script_dir, "superpmi.py"), "download", "--no_progress",
        "-core_root", core_root_dir, "-target_os", platform_name,
        "-target_arch", arch_name, "-spmi_location", spmi_location,
        "-log_level", "debug", "-log_file", log_file
    ],
                _exit_on_fail=True)

    print("Running superpmi.py asmdiffs")
    log_file = os.path.join(
        log_directory, "superpmi_{}_{}.log".format(platform_name, arch_name))

    overall_md_summary_file = os.path.join(spmi_location, "diff_summary.md")
    if os.path.isfile(overall_md_summary_file):
        os.remove(overall_md_summary_file)

    _, _, return_code = run_command([
        python_path,
        os.path.join(script_dir, "superpmi.py"), "asmdiffs", "--no_progress",
        "-core_root", core_root_dir, "-target_os", platform_name,
        "-target_arch", arch_name, "-arch", host_arch_name, "-base_jit_path",
        base_jit_path, "-diff_jit_path", diff_jit_path, "-spmi_location",
        spmi_location, "-error_limit", "100", "-log_level", "debug",
        "-log_file", log_file, "-retainOnlyTopFiles"
    ])

    # If there are asm diffs, and jit-analyze ran, we'll get a diff_summary.md file in the spmi_location directory.
    # We make sure the file doesn't exist before we run diffs, so we don't need to worry about superpmi.py creating
    # a unique, numbered file. If there are no diffs, we still want to create this file and indicate there were no diffs.

    overall_md_summary_file_target = os.path.join(
        log_directory,
        "superpmi_diff_summary_{}_{}.md".format(platform_name, arch_name))
    if os.path.isfile(overall_md_summary_file):
        try:
            print("Copying summary file {} -> {}".format(
                overall_md_summary_file, overall_md_summary_file_target))
            shutil.copy2(overall_md_summary_file,
                         overall_md_summary_file_target)
        except PermissionError as pe_error:
            print('Ignoring PermissionError: {0}'.format(pe_error))
    else:
        # Write a basic summary file. Ideally, we would not generate a summary.md file at all in this case. However,
        # the Helix work item currently fails if the file it is asked to upload doesn't exist. We should make the
        # upload conditional, or otherwise avoid the error.
        with open(overall_md_summary_file_target, "a") as f:
            f.write("""\
No diffs found
""")

    # Finally, prepare the files to upload from Helix.
    copy_dasm_files(spmi_location, log_directory,
                    "{}_{}".format(platform_name, arch_name))

    if return_code != 0:
        print("Failure in {}".format(log_file))
        return 1

    return 0
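
# run_command is shared by all of the examples above but defined elsewhere. A
# minimal sketch of its observed contract, inferred only from the call sites:
# it takes a command list and an optional working directory, returns
# (stdout, stderr, return_code), optionally tees output to _output_file, and
# exits on failure when _exit_on_fail=True. The body is an illustrative
# assumption, not the actual implementation.
import subprocess
import sys

def run_command_sketch(command, cwd=None, _exit_on_fail=False, _output_file=None):
    """Run `command`, optionally in `cwd`; return (stdout, stderr, returncode)."""
    print("Running: " + " ".join(command))
    proc = subprocess.Popen(command, cwd=cwd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if _output_file is not None:
        # Tee the raw output to a log file for later upload.
        with open(_output_file, "ab") as log:
            log.write(stdout)
            log.write(stderr)
    if _exit_on_fail and proc.returncode != 0:
        print("Command failed with exit code {}".format(proc.returncode))
        sys.exit(1)
    return stdout, stderr, proc.returncode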