# Module-level imports assumed by the snippets below. The project-local
# modules and helpers (dotnet, micro_benchmarks, benchview, upload, AzCopy,
# init_tools, __process_arguments, the UPLOAD_* constants, etc.) are defined
# elsewhere in the repository and are not reproduced here.
import os
import sys
from logging import getLogger
from os import path
from subprocess import CalledProcessError
from traceback import format_exc


def __main(args: list) -> int:
    validate_supported_runtime()
    args = __process_arguments(args)
    verbose = not args.quiet
    setup_loggers(verbose=verbose)

    if not args.frameworks:
        raise Exception("Framework version (-f) must be specified.")

    target_framework_monikers = dotnet \
        .FrameworkAction \
        .get_target_framework_monikers(args.frameworks)

    # Acquire necessary tools (dotnet).
    init_tools(
        architecture=args.architecture,
        dotnet_versions=args.dotnet_versions,
        target_framework_monikers=target_framework_monikers,
        verbose=verbose)

    # WORKAROUND
    # MicroBenchmarks.csproj targets .NET Core 2.1, 3.0, 3.1, and 5.0. To
    # avoid a build failure when using older frameworks (error NETSDK1045:
    # The current .NET SDK does not support targeting .NET Core $XYZ),
    # we set the TFM to what the user has provided.
    os.environ['PERFLAB_TARGET_FRAMEWORKS'] = ';'.join(
        target_framework_monikers)

    # dotnet --info
    dotnet.info(verbose=verbose)

    BENCHMARKS_CSPROJ = dotnet.CSharpProject(
        project=args.csprojfile,
        bin_directory=args.bin_directory)

    if not args.run_only:
        # Restore and build the .NET micro-benchmarks.
        micro_benchmarks.build(
            BENCHMARKS_CSPROJ, args.configuration,
            target_framework_monikers, args.incremental, verbose)

    # Run the micro-benchmarks.
    if not args.build_only:
        upload_container = UPLOAD_CONTAINER
        try:
            for framework in args.frameworks:
                micro_benchmarks.run(
                    BENCHMARKS_CSPROJ, args.configuration,
                    framework, verbose, args)
            globpath = os.path.join(
                get_artifacts_directory() if not args.bdn_artifacts
                else args.bdn_artifacts,
                '**',
                '*perf-lab-report.json')
        except CalledProcessError:
            getLogger().info("Run failure registered")
            # Re-raise the caught CalledProcessError so the exception
            # bubbles up correctly.
            raise

        dotnet.shutdown_server(verbose)

        if args.upload_to_perflab_container:
            import upload
            upload.upload(globpath, upload_container, UPLOAD_QUEUE,
                          UPLOAD_TOKEN_VAR, UPLOAD_STORAGE_URI)
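
# For reference, the globpath built above is a recursive glob pattern. A
# minimal sketch of how a consumer could expand it, assuming only the
# standard-library glob module (the real upload.upload implementation is not
# shown in this excerpt, and this helper is hypothetical):
import glob

def _find_perf_lab_reports(globpath: str) -> list:
    # recursive=True lets '**' match arbitrarily nested directories, so this
    # collects every BenchmarkDotNet perf-lab report under the artifacts root.
    return glob.glob(globpath, recursive=True)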

def __main(args: list) -> int:
    validate_supported_runtime()
    args = __process_arguments(args)
    verbose = not args.quiet
    setup_loggers(verbose=verbose)

    # This validation could be cleaner.
    if args.generate_benchview_data and not args.benchview_submission_name:
        raise RuntimeError(
            'In order to generate BenchView data, '
            '`--benchview-submission-name` must be provided.')

    target_framework_monikers = micro_benchmarks \
        .FrameworkAction \
        .get_target_framework_monikers(args.frameworks)

    # Acquire necessary tools (dotnet and BenchView).
    init_tools(
        architecture=args.architecture,
        dotnet_versions=args.dotnet_versions,
        target_framework_monikers=target_framework_monikers,
        verbose=verbose)

    # WORKAROUND
    # MicroBenchmarks.csproj targets .NET Core 2.0, 2.1, 2.2, and 3.0. To
    # avoid a build failure when using older frameworks (error NETSDK1045:
    # The current .NET SDK does not support targeting .NET Core $XYZ),
    # we set the TFM to what the user has provided.
    os.environ['PYTHON_SCRIPT_TARGET_FRAMEWORKS'] = ';'.join(
        target_framework_monikers)

    # dotnet --info
    dotnet.info(verbose=verbose)

    BENCHMARKS_CSPROJ = dotnet.CSharpProject(
        project=args.csprojfile,
        bin_directory=args.bin_directory)

    if not args.run_only:
        # Restore and build the .NET micro-benchmarks.
        micro_benchmarks.build(
            BENCHMARKS_CSPROJ, args.configuration,
            target_framework_monikers, args.incremental, verbose)

    # Run the micro-benchmarks.
    if not args.build_only:
        for framework in args.frameworks:
            micro_benchmarks.run(
                BENCHMARKS_CSPROJ, args.configuration,
                framework, verbose, args)
        benchview.run_scripts(args, verbose, BENCHMARKS_CSPROJ)

        if args.upload_to_perflab_container:
            if args.architecture == 'arm64':
                globpath = os.path.join(
                    get_artifacts_directory() if not args.bdn_artifacts
                    else args.bdn_artifacts,
                    '**',
                    '*perf-lab-report.json')
                upload.upload(globpath, 'results', 'PERFLAB_UPLOAD_TOKEN',
                              'pvscmdupload.blob.core.windows.net')
            else:
                AzCopy.upload_results('', args.bdn_artifacts,
                                      verbose=verbose)

def __main(args: list) -> int:
    validate_supported_runtime()
    args = __process_arguments(args)
    verbose = not args.quiet
    setup_loggers(verbose=verbose)

    target_framework_monikers = micro_benchmarks \
        .FrameworkAction \
        .get_target_framework_monikers(args.frameworks)

    # Acquire necessary tools (dotnet).
    init_tools(
        architecture=args.architecture,
        dotnet_versions=args.dotnet_versions,
        target_framework_monikers=target_framework_monikers,
        verbose=verbose)

    # WORKAROUND
    # MicroBenchmarks.csproj targets .NET Core 2.0, 2.1, 2.2, and 3.0. To
    # avoid a build failure when using older frameworks (error NETSDK1045:
    # The current .NET SDK does not support targeting .NET Core $XYZ),
    # we set the TFM to what the user has provided.
    os.environ['PERFLAB_TARGET_FRAMEWORKS'] = ';'.join(
        target_framework_monikers)

    # dotnet --info
    dotnet.info(verbose=verbose)

    BENCHMARKS_CSPROJ = dotnet.CSharpProject(
        project=args.csprojfile,
        bin_directory=args.bin_directory)

    if not args.run_only:
        # Restore and build the .NET micro-benchmarks.
        micro_benchmarks.build(
            BENCHMARKS_CSPROJ, args.configuration,
            target_framework_monikers, args.incremental, verbose)

    # Run the micro-benchmarks.
    if not args.build_only:
        for framework in args.frameworks:
            micro_benchmarks.run(
                BENCHMARKS_CSPROJ, args.configuration,
                framework, verbose, args)
        dotnet.shutdown_server(verbose)

        if args.upload_to_perflab_container:
            import upload
            globpath = os.path.join(
                get_artifacts_directory() if not args.bdn_artifacts
                else args.bdn_artifacts,
                '**',
                '*perf-lab-report.json')
            # No queue insertion:
            upload.upload(globpath, UPLOAD_CONTAINER, None,
                          UPLOAD_TOKEN_VAR, UPLOAD_STORAGE_URI)
            # With queue insertion:
            upload.upload(globpath, 'resultsandbox', UPLOAD_QUEUE,
                          UPLOAD_TOKEN_VAR, UPLOAD_STORAGE_URI)
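
# The two upload.upload calls above differ only in the container and queue
# arguments; per their comments, passing None as the queue skips queue
# insertion. A hypothetical wrapper making that choice explicit (only
# upload.upload and the UPLOAD_* constants come from the excerpt; the helper
# name and flag are illustrative):
def _upload_reports(globpath: str, sandbox: bool) -> None:
    if sandbox:
        # Sandbox container, with queue insertion.
        upload.upload(globpath, 'resultsandbox', UPLOAD_QUEUE,
                      UPLOAD_TOKEN_VAR, UPLOAD_STORAGE_URI)
    else:
        # Main container, no queue insertion.
        upload.upload(globpath, UPLOAD_CONTAINER, None,
                      UPLOAD_TOKEN_VAR, UPLOAD_STORAGE_URI)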

def __main(args: list) -> int:
    try:
        validate_supported_runtime()
        args = __process_arguments(args)
        configuration = args.configuration
        frameworks = args.frameworks
        incremental = args.incremental
        verbose = args.verbose
        target_framework_monikers = dotnet.FrameworkAction \
            .get_target_framework_monikers(frameworks)

        setup_loggers(verbose=verbose)

        # dotnet --info
        dotnet.info(verbose)

        BENCHMARKS_CSPROJ = dotnet.CSharpProject(
            project=args.csprojfile,
            bin_directory=args.bin_directory
        )

        # dotnet build
        build(
            BENCHMARKS_CSPROJ,
            configuration,
            target_framework_monikers,
            incremental,
            verbose
        )

        for framework in frameworks:
            # dotnet run
            run(
                BENCHMARKS_CSPROJ,
                configuration,
                framework,
                verbose,
                args
            )

        return 0
    except CalledProcessError as ex:
        getLogger().error(
            'Command: "%s", exited with status: %s', ex.cmd, ex.returncode)
    except IOError as ex:
        getLogger().error(
            "I/O error (%s): %s: %s", ex.errno, ex.strerror, ex.filename)
    except SystemExit:
        # Argparse throws this exception when it exits.
        pass
    except Exception:
        getLogger().error('Unexpected error: %s', sys.exc_info()[0])
        getLogger().error(format_exc())
    return 1
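
# Since this variant returns an exit code, a typical entry point would
# forward it to sys.exit. The guard below is a sketch and does not appear in
# the excerpt itself.
if __name__ == "__main__":
    # Drop argv[0] (the script name) before argument processing.
    sys.exit(__main(sys.argv[1:]))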

def __main(args: list) -> int:
    validate_supported_runtime()
    args = __process_arguments(args)
    verbose = not args.quiet
    setup_loggers(verbose=verbose)

    # This validation could be cleaner.
    if args.generate_benchview_data and not args.benchview_submission_name:
        raise RuntimeError(
            'In order to generate BenchView data, '
            '`--benchview-submission-name` must be provided.')

    target_framework_monikers = micro_benchmarks \
        .FrameworkAction \
        .get_target_framework_monikers(args.frameworks)

    # Acquire necessary tools (dotnet and BenchView).
    init_tools(
        architecture=args.architecture,
        target_framework_monikers=target_framework_monikers,
        verbose=verbose)

    # WORKAROUND
    # MicroBenchmarks.csproj targets .NET Core 2.0, 2.1, 2.2, and 3.0. To
    # avoid a build failure when using older frameworks (error NETSDK1045:
    # The current .NET SDK does not support targeting .NET Core $XYZ),
    # we set the TFM to what the user has provided.
    os.environ['PYTHON_SCRIPT_TARGET_FRAMEWORKS'] = ';'.join(
        target_framework_monikers)

    # dotnet --info
    dotnet.info(verbose=verbose)

    BENCHMARKS_CSPROJ = dotnet.CSharpProject(
        project=args.csprojfile,
        bin_directory=args.bin_directory)

    if not args.run_only:
        # Restore and build the .NET micro-benchmarks.
        micro_benchmarks.build(
            BENCHMARKS_CSPROJ, args.configuration,
            target_framework_monikers, args.incremental, verbose)

    # Run the micro-benchmarks.
    if not args.build_only:
        for framework in args.frameworks:
            micro_benchmarks.run(
                BENCHMARKS_CSPROJ, args.configuration,
                framework, verbose, args)
        benchview.run_scripts(args, verbose, BENCHMARKS_CSPROJ)
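
# The "This validation could be cleaner" note above suggests moving the
# check into argument parsing. A hypothetical, trimmed-down
# __process_arguments using argparse's parser.error (the flag names mirror
# the excerpt; everything else is illustrative):
import argparse

def __process_arguments(args: list) -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description='Run the .NET micro benchmarks.')
    parser.add_argument('--generate-benchview-data', action='store_true')
    parser.add_argument('--benchview-submission-name')
    parsed = parser.parse_args(args)
    # parser.error prints usage and exits with status 2, so __main never
    # sees the inconsistent combination.
    if parsed.generate_benchview_data and not parsed.benchview_submission_name:
        parser.error('--benchview-submission-name is required when '
                     '--generate-benchview-data is set.')
    return parsed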
__log_script_header("Running .NET micro benchmarks for '{}'".format( framework )) # dotnet run BENCHMARKS_CSPROJ.run(configuration, framework, verbose, *args) def __log_script_header(message: str): getLogger().info('-' * len(message)) getLogger().info(message) getLogger().info('-' * len(message)) BENCHMARKS_CSPROJ = dotnet.CSharpProject( working_directory=path.join( get_repo_root_path(), 'src', 'benchmarks', 'micro'), csproj_file='MicroBenchmarks.csproj' ) def __main(args: list) -> int: try: validate_supported_runtime() args = __process_arguments(args) configuration = args.configuration frameworks = args.frameworks incremental = args.incremental verbose = args.verbose setup_loggers(verbose=verbose)