Example no. 1
def __main(args: list) -> int:
    validate_supported_runtime()
    args = __process_arguments(args)
    verbose = not args.quiet
    setup_loggers(verbose=verbose)

    if not args.frameworks:
        raise Exception("Framework version (-f) must be specified.")

    target_framework_monikers = dotnet \
        .FrameworkAction \
        .get_target_framework_monikers(args.frameworks)
    # Acquire necessary tools (dotnet)
    init_tools(architecture=args.architecture,
               dotnet_versions=args.dotnet_versions,
               target_framework_monikers=target_framework_monikers,
               verbose=verbose)

    # WORKAROUND
    # The MicroBenchmarks.csproj targets .NET Core 2.1, 3.0, 3.1 and 5.0.
    # To avoid a build failure when using older frameworks (error NETSDK1045:
    # The current .NET SDK does not support targeting .NET Core $XYZ),
    # we set the TFM to what the user has provided.
    os.environ['PERFLAB_TARGET_FRAMEWORKS'] = ';'.join(
        target_framework_monikers)

    # dotnet --info
    dotnet.info(verbose=verbose)

    BENCHMARKS_CSPROJ = dotnet.CSharpProject(project=args.csprojfile,
                                             bin_directory=args.bin_directory)

    if not args.run_only:
        # .NET micro-benchmarks
        # Restore and build micro-benchmarks
        micro_benchmarks.build(BENCHMARKS_CSPROJ, args.configuration,
                               target_framework_monikers, args.incremental,
                               verbose)

    # Run micro-benchmarks
    if not args.build_only:
        upload_container = UPLOAD_CONTAINER
        try:
            for framework in args.frameworks:
                micro_benchmarks.run(BENCHMARKS_CSPROJ, args.configuration,
                                     framework, verbose, args)
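            # Recursively match the '*perf-lab-report.json' result files under
            # the artifacts directory (or args.bdn_artifacts, when provided).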
            globpath = os.path.join(
                get_artifacts_directory() if not args.bdn_artifacts else
                args.bdn_artifacts, '**', '*perf-lab-report.json')
        except CalledProcessError:
            getLogger().info("Run failure registered")
            # Re-raise the caught CalledProcessError so the exception bubbles up correctly.
            raise

        dotnet.shutdown_server(verbose)

        if args.upload_to_perflab_container:
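            # Deferred import: the upload module is only needed when results
            # are pushed to the perflab container.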
            import upload
            upload.upload(globpath, upload_container, UPLOAD_QUEUE,
                          UPLOAD_TOKEN_VAR, UPLOAD_STORAGE_URI)
Example no. 2
def __main(args: list) -> int:
    validate_supported_runtime()
    args = __process_arguments(args)
    verbose = not args.quiet
    setup_loggers(verbose=verbose)

    # This validation could be cleaner
    if args.generate_benchview_data and not args.benchview_submission_name:
        raise RuntimeError(
            'In order to generate BenchView data, '
            '`--benchview-submission-name` must be provided.')

    target_framework_monikers = micro_benchmarks \
        .FrameworkAction \
        .get_target_framework_monikers(args.frameworks)
    # Acquire necessary tools (dotnet, and BenchView)
    init_tools(architecture=args.architecture,
               dotnet_versions=args.dotnet_versions,
               target_framework_monikers=target_framework_monikers,
               verbose=verbose)

    # WORKAROUND
    # The MicroBenchmarks.csproj targets .NET Core 2.0, 2.1, 2.2 and 3.0.
    # To avoid a build failure when using older frameworks (error NETSDK1045:
    # The current .NET SDK does not support targeting .NET Core $XYZ),
    # we set the TFM to what the user has provided.
    os.environ['PYTHON_SCRIPT_TARGET_FRAMEWORKS'] = ';'.join(
        target_framework_monikers)

    # dotnet --info
    dotnet.info(verbose=verbose)

    BENCHMARKS_CSPROJ = dotnet.CSharpProject(project=args.csprojfile,
                                             bin_directory=args.bin_directory)

    if not args.run_only:
        # .NET micro-benchmarks
        # Restore and build micro-benchmarks
        micro_benchmarks.build(BENCHMARKS_CSPROJ, args.configuration,
                               target_framework_monikers, args.incremental,
                               verbose)

    # Run micro-benchmarks
    if not args.build_only:
        for framework in args.frameworks:
            micro_benchmarks.run(BENCHMARKS_CSPROJ, args.configuration,
                                 framework, verbose, args)

        benchview.run_scripts(args, verbose, BENCHMARKS_CSPROJ)

        if args.upload_to_perflab_container:
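            # arm64 uploads through the Python upload module; every other
            # architecture goes through AzCopy below.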
            if args.architecture == 'arm64':
                globpath = os.path.join(
                    get_artifacts_directory() if not args.bdn_artifacts else
                    args.bdn_artifacts, '**', '*perf-lab-report.json')

                upload.upload(globpath, 'results', 'PERFLAB_UPLOAD_TOKEN',
                              'pvscmdupload.blob.core.windows.net')
            else:
                AzCopy.upload_results('', args.bdn_artifacts, verbose=verbose)
Example no. 3
def __main(args: list) -> int:
    validate_supported_runtime()
    args = __process_arguments(args)
    verbose = not args.quiet
    setup_loggers(verbose=verbose)

    # This validation could be cleaner
    if args.generate_benchview_data and not args.benchview_submission_name:
        raise RuntimeError(
            'In order to generate BenchView data, '
            '`--benchview-submission-name` must be provided.')

    # Acquire necessary tools (dotnet, and BenchView)
    init_tools(architecture=args.architecture,
               frameworks=args.frameworks,
               verbose=verbose)

    # Configure .NET Runtime
    # TODO: Is this still correct across releases?
    #   Does it belong in the script?
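    # COMPlus_JITMinOpts=1 forces the JIT to generate minimally optimized code;
    # COMPlus_TieredCompilation=0 disables tiered compilation so the selected
    # optimization level applies from the first compilation of each method.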
    if args.optimization_level == 'min_opt':
        os.environ['COMPlus_JITMinOpts'] = '1'
        os.environ['COMPlus_TieredCompilation'] = '0'
    elif args.optimization_level == 'full_opt':
        os.environ['COMPlus_TieredCompilation'] = '0'

    # dotnet --info
    dotnet.info(verbose=verbose)

    # .NET micro-benchmarks
    # Restore and build micro-benchmarks
    micro_benchmarks.build(args.configuration, args.frameworks,
                           args.incremental, verbose)

    # Run micro-benchmarks
    for framework in args.frameworks:
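        # Arguments after '--' are forwarded to the BenchmarkDotNet command line.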
        run_args = ['--']
        if args.category:
            run_args += ['--allCategories', args.category]
        if args.corerun:
            run_args += ['--coreRun', args.corerun]
        if args.cli:
            run_args += ['--cli', args.cli]
        if args.enable_pmc:
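            # Hardware performance-monitoring counters to collect during the run.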
            run_args += [
                '--counters',
                'BranchMispredictions+CacheMisses+InstructionRetired',
            ]
        if args.filter:
            run_args += ['--filter'] + args.filter

        # Extra BenchmarkDotNet cli arguments.
        if args.bdn_arguments:
            run_args += args.bdn_arguments

        micro_benchmarks.run(args.configuration, framework, verbose, *run_args)

    __run_benchview_scripts(args, verbose)
Example no. 4
def __main(args: list) -> int:
    validate_supported_runtime()
    args = __process_arguments(args)
    verbose = not args.quiet
    setup_loggers(verbose=verbose)

    target_framework_monikers = micro_benchmarks \
        .FrameworkAction \
        .get_target_framework_monikers(args.frameworks)
    # Acquire necessary tools (dotnet)
    init_tools(architecture=args.architecture,
               dotnet_versions=args.dotnet_versions,
               target_framework_monikers=target_framework_monikers,
               verbose=verbose)

    # WORKAROUND
    # The MicroBenchmarks.csproj targets .NET Core 2.0, 2.1, 2.2 and 3.0.
    # To avoid a build failure when using older frameworks (error NETSDK1045:
    # The current .NET SDK does not support targeting .NET Core $XYZ),
    # we set the TFM to what the user has provided.
    os.environ['PERFLAB_TARGET_FRAMEWORKS'] = ';'.join(
        target_framework_monikers)

    # dotnet --info
    dotnet.info(verbose=verbose)

    BENCHMARKS_CSPROJ = dotnet.CSharpProject(project=args.csprojfile,
                                             bin_directory=args.bin_directory)

    if not args.run_only:
        # .NET micro-benchmarks
        # Restore and build micro-benchmarks
        micro_benchmarks.build(BENCHMARKS_CSPROJ, args.configuration,
                               target_framework_monikers, args.incremental,
                               verbose)

    # Run micro-benchmarks
    if not args.build_only:
        for framework in args.frameworks:
            micro_benchmarks.run(BENCHMARKS_CSPROJ, args.configuration,
                                 framework, verbose, args)

        dotnet.shutdown_server(verbose)

        if args.upload_to_perflab_container:
            import upload
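            # Recursively match the '*perf-lab-report.json' result files under
            # the artifacts directory (or args.bdn_artifacts, when provided).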
            globpath = os.path.join(
                get_artifacts_directory() if not args.bdn_artifacts else
                args.bdn_artifacts, '**', '*perf-lab-report.json')

            # No queue insertion
            upload.upload(globpath, UPLOAD_CONTAINER, None, UPLOAD_TOKEN_VAR,
                          UPLOAD_STORAGE_URI)
            # With queue insertion
            upload.upload(globpath, 'resultsandbox', UPLOAD_QUEUE,
                          UPLOAD_TOKEN_VAR, UPLOAD_STORAGE_URI)
Example no. 5
def __main(args: list) -> int:
    validate_supported_runtime()
    args = __process_arguments(args)
    verbose = not args.quiet
    setup_loggers(verbose=verbose)

    # This validation could be cleaner
    if args.generate_benchview_data and not args.benchview_submission_name:
        raise RuntimeError(
            'In order to generate BenchView data, '
            '`--benchview-submission-name` must be provided.')

    target_framework_monikers = micro_benchmarks \
        .FrameworkAction \
        .get_target_framework_monikers(args.frameworks)
    # Acquire necessary tools (dotnet, and BenchView)
    init_tools(architecture=args.architecture,
               target_framework_monikers=target_framework_monikers,
               verbose=verbose)

    # WORKAROUND
    # The MicroBenchmarks.csproj targets .NET Core 2.0, 2.1, 2.2 and 3.0.
    # To avoid a build failure when using older frameworks (error NETSDK1045:
    # The current .NET SDK does not support targeting .NET Core $XYZ),
    # we set the TFM to what the user has provided.
    os.environ['PYTHON_SCRIPT_TARGET_FRAMEWORKS'] = ';'.join(
        target_framework_monikers)

    # dotnet --info
    dotnet.info(verbose=verbose)

    BENCHMARKS_CSPROJ = dotnet.CSharpProject(project=args.csprojfile,
                                             bin_directory=args.bin_directory)

    if not args.run_only:
        # .NET micro-benchmarks
        # Restore and build micro-benchmarks
        micro_benchmarks.build(BENCHMARKS_CSPROJ, args.configuration,
                               target_framework_monikers, args.incremental,
                               verbose)

    # Run micro-benchmarks
    if not args.build_only:
        for framework in args.frameworks:
            micro_benchmarks.run(BENCHMARKS_CSPROJ, args.configuration,
                                 framework, verbose, args)

        benchview.run_scripts(args, verbose, BENCHMARKS_CSPROJ)