Example #1
def __main(args: list) -> int:
    validate_supported_runtime()
    args = __process_arguments(args)
    verbose = not args.quiet
    setup_loggers(verbose=verbose)

    if not args.frameworks:
        raise Exception("Framework version (-f) must be specified.")

    target_framework_monikers = dotnet \
        .FrameworkAction \
        .get_target_framework_monikers(args.frameworks)
    # Acquire necessary tools (dotnet)
    init_tools(architecture=args.architecture,
               dotnet_versions=args.dotnet_versions,
               target_framework_monikers=target_framework_monikers,
               verbose=verbose)

    # WORKAROUND
    # The MicroBenchmarks.csproj targets .NET Core 2.1, 3.0, 3.1 and 5.0.
    # To avoid a build failure when using older frameworks (error NETSDK1045:
    # "The current .NET SDK does not support targeting .NET Core $XYZ"),
    # we set the TFM to the one the user has provided.
    os.environ['PERFLAB_TARGET_FRAMEWORKS'] = ';'.join(
        target_framework_monikers)

    # dotnet --info
    dotnet.info(verbose=verbose)

    BENCHMARKS_CSPROJ = dotnet.CSharpProject(project=args.csprojfile,
                                             bin_directory=args.bin_directory)

    if not args.run_only:
        # .NET micro-benchmarks
        # Restore and build micro-benchmarks
        micro_benchmarks.build(BENCHMARKS_CSPROJ, args.configuration,
                               target_framework_monikers, args.incremental,
                               verbose)

    # Run micro-benchmarks
    if not args.build_only:
        upload_container = UPLOAD_CONTAINER
        try:
            for framework in args.frameworks:
                micro_benchmarks.run(BENCHMARKS_CSPROJ, args.configuration,
                                     framework, verbose, args)
            globpath = os.path.join(
                get_artifacts_directory() if not args.bdn_artifacts else
                args.bdn_artifacts, '**', '*perf-lab-report.json')
        except CalledProcessError:
            getLogger().info("Run failure registered")
            # Rethrow the caught CalledProcessError so the exception bubbles
            # up correctly.
            raise

        dotnet.shutdown_server(verbose)

        if args.upload_to_perflab_container:
            import upload
            upload.upload(globpath, upload_container, UPLOAD_QUEUE,
                          UPLOAD_TOKEN_VAR, UPLOAD_STORAGE_URI)
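The private `__main(args: list) -> int` functions in these examples are module entry points. A minimal sketch of the wiring that conventionally invokes them (assumed here; it is not part of the snippet above):

import sys

if __name__ == "__main__":
    # Forward everything after the script name; the return value
    # becomes the process exit code.
    sys.exit(__main(sys.argv[1:]))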
Example #2
def main():
    parser = ArgumentParser()
    parser.add_argument('operation', choices=operations)
    args = parser.parse_args()

    setup_loggers(True)

    if args.operation == SETUP_BUILD:
        shutdown_dotnet_servers()
        if not os.path.isdir(const.TMPDIR):
            if not os.path.isdir(const.APPDIR):
                raise Exception(
                    "\'app\' folder should exist. Please run pre.py.")
            getLogger().info("Backing up project directory...")
            shutil.copytree(const.APPDIR,
                            const.TMPDIR)  # backup from app to tmp
        else:
            if os.path.isdir(const.APPDIR):
                shutil.rmtree(const.APPDIR)
            getLogger().info("Copying clean project directory...")
            shutil.copytree(const.TMPDIR, const.APPDIR)  # use the copy

    if args.operation == SETUP_NEW:
        if not os.path.isdir(const.APPDIR):
            getLogger().info("Creating new project directory...")
            os.mkdir(const.APPDIR)

    if args.operation == CLEANUP:
        if os.path.isdir(const.APPDIR):
            getLogger().info("Removing project directory...")
            shutil.rmtree(const.APPDIR)
def __main(args: list) -> int:
    validate_supported_runtime()
    args = __process_arguments(args)
    verbose = not args.quiet
    setup_loggers(verbose=verbose)

    # This validation could be cleaner
    if args.generate_benchview_data and not args.benchview_submission_name:
        raise RuntimeError("""In order to generate BenchView data,
            `--benchview-submission-name` must be provided.""")

    target_framework_monikers = micro_benchmarks \
        .FrameworkAction \
        .get_target_framework_monikers(args.frameworks)
    # Acquire necessary tools (dotnet, and BenchView)
    init_tools(architecture=args.architecture,
               dotnet_versions=args.dotnet_versions,
               target_framework_monikers=target_framework_monikers,
               verbose=verbose)

    # WORKAROUND
    # The MicroBenchmarks.csproj targets .NET Core 2.0, 2.1, 2.2 and 3.0.
    # To avoid a build failure when using older frameworks (error NETSDK1045:
    # "The current .NET SDK does not support targeting .NET Core $XYZ"),
    # we set the TFM to the one the user has provided.
    os.environ['PYTHON_SCRIPT_TARGET_FRAMEWORKS'] = ';'.join(
        target_framework_monikers)

    # dotnet --info
    dotnet.info(verbose=verbose)

    BENCHMARKS_CSPROJ = dotnet.CSharpProject(project=args.csprojfile,
                                             bin_directory=args.bin_directory)

    if not args.run_only:
        # .NET micro-benchmarks
        # Restore and build micro-benchmarks
        micro_benchmarks.build(BENCHMARKS_CSPROJ, args.configuration,
                               target_framework_monikers, args.incremental,
                               verbose)

    # Run micro-benchmarks
    if not args.build_only:
        for framework in args.frameworks:
            micro_benchmarks.run(BENCHMARKS_CSPROJ, args.configuration,
                                 framework, verbose, args)

        benchview.run_scripts(args, verbose, BENCHMARKS_CSPROJ)

        if args.upload_to_perflab_container:
            if args.architecture == 'arm64':
                globpath = os.path.join(
                    get_artifacts_directory() if not args.bdn_artifacts else
                    args.bdn_artifacts, '**', '*perf-lab-report.json')

                upload.upload(globpath, 'results', 'PERFLAB_UPLOAD_TOKEN',
                              'pvscmdupload.blob.core.windows.net')
            else:
                AzCopy.upload_results('', args.bdn_artifacts, verbose=verbose)
Example #4
def __init__(self, traits: TestTraits):
    self.traits = traits
    self.testtype = None
    self.sdktype = None
    self.scenarioname = None
    self.coreroot = None
    self.crossgenfile = None
    setup_loggers(True)
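The constructor above receives a `TestTraits` instance whose definition is not shown. A hypothetical minimal stand-in, for illustration only:

from dataclasses import dataclass

@dataclass
class TestTraits:
    # Hypothetical fields; the real class is defined elsewhere in the repo.
    exename: str = ''
    startupmetric: str = ''
    guiapp: str = 'false'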
Example #5
def __main(args: list) -> int:
    validate_supported_runtime()
    args = __process_arguments(args)
    verbose = not args.quiet
    setup_loggers(verbose=verbose)

    # This validation could be cleaner
    if args.generate_benchview_data and not args.benchview_submission_name:
        raise RuntimeError("""In order to generate BenchView data,
            `--benchview-submission-name` must be provided.""")

    # Acquire necessary tools (dotnet, and BenchView)
    init_tools(architecture=args.architecture,
               frameworks=args.frameworks,
               verbose=verbose)

    # Configure .NET Runtime
    # TODO: Is this still correct across releases?
    #   Does it belong in the script?
    if args.optimization_level == 'min_opt':
        os.environ['COMPlus_JITMinOpts'] = '1'
        os.environ['COMPlus_TieredCompilation'] = '0'
    elif args.optimization_level == 'full_opt':
        os.environ['COMPlus_TieredCompilation'] = '0'

    # dotnet --info
    dotnet.info(verbose=verbose)

    # .NET micro-benchmarks
    # Restore and build micro-benchmarks
    micro_benchmarks.build(args.configuration, args.frameworks,
                           args.incremental, verbose)

    # Run micro-benchmarks
    for framework in args.frameworks:
        run_args = ['--']
        if args.category:
            run_args += ['--allCategories', args.category]
        if args.corerun:
            run_args += ['--coreRun', args.corerun]
        if args.cli:
            run_args += ['--cli', args.cli]
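        # The counters below are hardware performance-monitoring counters
        # (PMCs) surfaced through BenchmarkDotNet; collecting them typically
        # requires elevated privileges.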
        if args.enable_pmc:
            run_args += [
                '--counters',
                'BranchMispredictions+CacheMisses+InstructionRetired',
            ]
        if args.filter:
            run_args += ['--filter'] + args.filter

        # Extra BenchmarkDotNet cli arguments.
        if args.bdn_arguments:
            run_args += args.bdn_arguments

        micro_benchmarks.run(args.configuration, framework, verbose, *run_args)

    __run_benchview_scripts(args, verbose)
def __main(args: list) -> int:
    validate_supported_runtime()
    args = __process_arguments(args)
    verbose = not args.quiet
    setup_loggers(verbose=verbose)

    target_framework_monikers = micro_benchmarks \
        .FrameworkAction \
        .get_target_framework_monikers(args.frameworks)
    # Acquire necessary tools (dotnet)
    init_tools(architecture=args.architecture,
               dotnet_versions=args.dotnet_versions,
               target_framework_monikers=target_framework_monikers,
               verbose=verbose)

    # WORKAROUND
    # The MicroBenchmarks.csproj targets .NET Core 2.0, 2.1, 2.2 and 3.0.
    # To avoid a build failure when using older frameworks (error NETSDK1045:
    # "The current .NET SDK does not support targeting .NET Core $XYZ"),
    # we set the TFM to the one the user has provided.
    os.environ['PERFLAB_TARGET_FRAMEWORKS'] = ';'.join(
        target_framework_monikers)

    # dotnet --info
    dotnet.info(verbose=verbose)

    BENCHMARKS_CSPROJ = dotnet.CSharpProject(project=args.csprojfile,
                                             bin_directory=args.bin_directory)

    if not args.run_only:
        # .NET micro-benchmarks
        # Restore and build micro-benchmarks
        micro_benchmarks.build(BENCHMARKS_CSPROJ, args.configuration,
                               target_framework_monikers, args.incremental,
                               verbose)

    # Run micro-benchmarks
    if not args.build_only:
        for framework in args.frameworks:
            micro_benchmarks.run(BENCHMARKS_CSPROJ, args.configuration,
                                 framework, verbose, args)

        dotnet.shutdown_server(verbose)

        if args.upload_to_perflab_container:
            import upload
            globpath = os.path.join(
                get_artifacts_directory() if not args.bdn_artifacts else
                args.bdn_artifacts, '**', '*perf-lab-report.json')

            # No queue insertion
            upload.upload(globpath, UPLOAD_CONTAINER, None, UPLOAD_TOKEN_VAR,
                          UPLOAD_STORAGE_URI)
            # With queue insertion
            upload.upload(globpath, 'resultsandbox', UPLOAD_QUEUE,
                          UPLOAD_TOKEN_VAR, UPLOAD_STORAGE_URI)
Example #7
def main():
    setup_loggers(True)
    shutdown_server(verbose=True)
    getLogger().info("Removing project directory...")
    if os.path.isdir(const.APPDIR):
        shutil.rmtree(const.APPDIR)
    getLogger().info("Copying clean project directory...")
    copyfrom = const.TMPDIR if os.path.isdir(
        const.TMPDIR) else const.SRCDIR  # tmp dir for dynamic templates
    shutil.copytree(copyfrom, const.APPDIR)
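Several of these snippets rely on a shared `const` module that is not shown. A hypothetical sketch of the directory constants they assume (names taken from the code above; values are purely illustrative):

# const.py -- illustrative stand-in only
APPDIR = 'app'  # working copy of the project under test
TMPDIR = 'tmp'  # backup used to restore a clean copy
SRCDIR = 'src'  # pristine sources for dynamic templates
BINDIR = 'bin'  # build output directory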
Example #8
def __main(args: list) -> int:
    validate_supported_runtime()
    args = __process_arguments(args)
    setup_loggers(verbose=args.verbose)
    install(
        architecture=args.architecture,
        channels=args.channels,
        verbose=args.verbose,
        install_dir=args.install_dir,
    )
def __main(args: list) -> int:
    try:
        validate_supported_runtime()
        args = __process_arguments(args)

        configuration = args.configuration
        frameworks = args.frameworks
        incremental = args.incremental
        verbose = args.verbose

        setup_loggers(verbose=verbose)

        # dotnet --info
        dotnet.info(verbose)

        # dotnet build
        build(configuration, frameworks, incremental, verbose)

        for framework in frameworks:
            run_args = ['--']
            if args.category:
                run_args += ['--allCategories', args.category]
            if args.corerun:
                run_args += ['--coreRun', args.corerun]
            if args.cli:
                run_args += ['--cli', args.cli]
            if args.enable_pmc:
                run_args += [
                    '--counters',
                    'BranchMispredictions+CacheMisses+InstructionRetired',
                ]
            if args.filter:
                run_args += ['--filter'] + args.filter

            # Extra BenchmarkDotNet cli arguments.
            if args.bdn_arguments:
                run_args += args.bdn_arguments

            # dotnet run
            run(configuration, framework, verbose, *run_args)

        return 0
    except CalledProcessError as ex:
        getLogger().error(
            'Command: "%s", exited with status: %s', ex.cmd, ex.returncode)
    except IOError as ex:
        getLogger().error(
            "I/O error (%s): %s: %s", ex.errno, ex.strerror, ex.filename)
    except SystemExit:  # Argparse throws this exception when it exits.
        pass
    except Exception:
        getLogger().error('Unexpected error: %s', sys.exc_info()[0])
        getLogger().error(format_exc())
    return 1
Example #10
def __main(args: list) -> int:
    try:
        validate_supported_runtime()
        args = __process_arguments(args)

        configuration = args.configuration
        frameworks = args.frameworks
        incremental = args.incremental
        verbose = args.verbose
        target_framework_monikers = dotnet.FrameworkAction. \
            get_target_framework_monikers(frameworks)

        setup_loggers(verbose=verbose)

        # dotnet --info
        dotnet.info(verbose)

        BENCHMARKS_CSPROJ = dotnet.CSharpProject(
            project=args.csprojfile,
            bin_directory=args.bin_directory
        )

        # dotnet build
        build(
            BENCHMARKS_CSPROJ,
            configuration,
            target_framework_monikers,
            incremental,
            verbose
        )

        for framework in frameworks:
            # dotnet run
            run(
                BENCHMARKS_CSPROJ,
                configuration,
                framework,
                verbose,
                args
            )

        return 0
    except CalledProcessError as ex:
        getLogger().error(
            'Command: "%s", exited with status: %s', ex.cmd, ex.returncode)
    except IOError as ex:
        getLogger().error(
            "I/O error (%s): %s: %s", ex.errno, ex.strerror, ex.filename)
    except SystemExit:  # Argparse throws this exception when it exits.
        pass
    except Exception:
        getLogger().error('Unexpected error: %s', sys.exc_info()[0])
        getLogger().error(format_exc())
    return 1
Example #11
def __main(args: list) -> int:
    validate_supported_runtime()
    args = __process_arguments(args)
    verbose = not args.quiet
    setup_loggers(verbose=verbose)

    # This validation could be cleaner
    if args.generate_benchview_data and not args.benchview_submission_name:
        raise RuntimeError("""In order to generate BenchView data,
            `--benchview-submission-name` must be provided.""")

    target_framework_monikers = micro_benchmarks \
        .FrameworkAction \
        .get_target_framework_monikers(args.frameworks)
    # Acquire necessary tools (dotnet, and BenchView)
    init_tools(architecture=args.architecture,
               target_framework_monikers=target_framework_monikers,
               verbose=verbose)

    # WORKAROUND
    # The MicroBenchmarks.csproj targets .NET Core 2.0, 2.1, 2.2 and 3.0.
    # To avoid a build failure when using older frameworks (error NETSDK1045:
    # "The current .NET SDK does not support targeting .NET Core $XYZ"),
    # we set the TFM to the one the user has provided.
    os.environ['PYTHON_SCRIPT_TARGET_FRAMEWORKS'] = ';'.join(
        target_framework_monikers)

    # dotnet --info
    dotnet.info(verbose=verbose)

    BENCHMARKS_CSPROJ = dotnet.CSharpProject(project=args.csprojfile,
                                             bin_directory=args.bin_directory)

    if not args.run_only:
        # .NET micro-benchmarks
        # Restore and build micro-benchmarks
        micro_benchmarks.build(BENCHMARKS_CSPROJ, args.configuration,
                               target_framework_monikers, args.incremental,
                               verbose)

    # Run micro-benchmarks
    if not args.build_only:
        for framework in args.frameworks:
            micro_benchmarks.run(BENCHMARKS_CSPROJ, args.configuration,
                                 framework, verbose, args)

        benchview.run_scripts(args, verbose, BENCHMARKS_CSPROJ)
Example #12
def __main(args: list) -> int:
    validate_supported_runtime()
    args = __process_arguments(args)
    verbose = not args.quiet
    setup_loggers(verbose=verbose)

    # If repository is not set, this is a core-sdk run in the performance
    # repo; if repository is set, the user must also supply commit_sha.
    if not ((args.commit_sha is None) == (args.repository is None)):
        raise ValueError(
            'Either both commit_sha and repository should be set or neither')

    target_framework_monikers = micro_benchmarks \
        .FrameworkAction \
        .get_target_framework_monikers(args.frameworks)

    # Acquire necessary tools (dotnet, and BenchView)
    # For arm64 runs, download the x64 version so we can get the information
    # we need, but set all variables as if we were running normally. This is
    # a workaround: arm64 binaries cannot run in the cross containers, so the
    # CI setup script runs in a normal Ubuntu container.
    architecture = 'x64' if args.architecture == 'arm64' else args.architecture

    init_tools(architecture=architecture,
               dotnet_versions=args.dotnet_versions,
               target_framework_monikers=target_framework_monikers,
               verbose=verbose)

    # dotnet --info
    dotnet.info(verbose=verbose)

    # When running on internal repos, the repository URL comes to us in a
    # mangled form (e.g. https://github.com/dotnet-coreclr), so replace
    # dashes with slashes in that case.
    repo_url = None if args.repository is None else args.repository.replace(
        '-', '/')

    variable_format = 'set %s=%s\n' if sys.platform == 'win32' else 'export %s=%s\n'
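    # For example, variable_format % ('PERFLAB_INLAB', '1') yields
    # "set PERFLAB_INLAB=1\n" on Windows and "export PERFLAB_INLAB=1\n"
    # elsewhere.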
    owner, repo = ('dotnet', 'core-sdk') if args.repository is None else (
        dotnet.get_repository(repo_url))
    config_string = ';'.join(
        args.build_configs) if sys.platform == 'win32' else '"%s"' % ';'.join(
            args.build_configs)

    remove_dotnet = False

    output = ''

    with push_dir(get_repo_root_path()):
        output = check_output(['git', 'rev-parse', 'HEAD'])

    decoded_output = ''.join(
        line.decode('utf-8') for line in output.splitlines())

    perfHash = decoded_output if args.get_perf_hash else args.perf_hash

    remove_frameworks = ['netcoreapp3.0', 'netcoreapp5.0']

    for framework in target_framework_monikers:
        if framework.startswith('netcoreapp'):
            if framework in remove_frameworks:
                remove_dotnet = True
            target_framework_moniker = micro_benchmarks.FrameworkAction.get_target_framework_moniker(
                framework)
            dotnet_version = dotnet.get_dotnet_version(
                target_framework_moniker, args.cli)
            commit_sha = dotnet.get_dotnet_sdk(
                target_framework_moniker,
                args.cli) if args.commit_sha is None else args.commit_sha
            source_timestamp = dotnet.get_commit_date(target_framework_moniker,
                                                      commit_sha, repo_url)

            branch = micro_benchmarks.FrameworkAction.get_branch(
                target_framework_moniker) if not args.branch else args.branch

            getLogger().info("Writing script to %s" % args.output_file)

            with open(args.output_file, 'w') as out_file:
                out_file.write(variable_format % ('PERFLAB_INLAB', '1'))
                out_file.write(variable_format %
                               ('PERFLAB_REPO', '/'.join([owner, repo])))
                out_file.write(variable_format % ('PERFLAB_BRANCH', branch))
                out_file.write(variable_format %
                               ('PERFLAB_PERFHASH', perfHash))
                out_file.write(variable_format % ('PERFLAB_HASH', commit_sha))
                out_file.write(variable_format % ('PERFLAB_QUEUE', args.queue))
                out_file.write(variable_format %
                               ('PERFLAB_BUILDNUM', args.build_number))
                out_file.write(variable_format %
                               ('PERFLAB_BUILDARCH', args.architecture))
                out_file.write(variable_format %
                               ('PERFLAB_LOCALE', args.locale))
                out_file.write(variable_format %
                               ('PERFLAB_BUILDTIMESTAMP', source_timestamp))
                out_file.write(variable_format %
                               ('PERFLAB_CONFIGS', config_string))
                out_file.write(variable_format %
                               ('DOTNET_VERSION', dotnet_version))
                out_file.write(variable_format %
                               ('PERFLAB_TARGET_FRAMEWORKS', framework))

        else:
            with open(args.output_file, 'w') as out_file:
                out_file.write(variable_format % ('PERFLAB_INLAB', '0'))
                out_file.write(variable_format %
                               ('PERFLAB_TARGET_FRAMEWORKS', framework))

    # On non-windows platforms, delete dotnet, so that we don't have to deal with chmoding it on the helix machines
    # This is only necessary for netcoreapp3.0 and netcoreapp5.0
    if sys.platform != 'win32' and remove_dotnet:
        dotnet.remove_dotnet(architecture)
Example #13
def __init__(self, traits: TestTraits):
    self.traits = traits
    self.testtype = None
    self.sdktype = None
    setup_loggers(True)
def __main(args: list) -> int:
    setup_loggers(verbose=True)

    args = __process_arguments(args)
    rootPath = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))
    sdkPath = os.path.join(rootPath, 'tools', 'dotnet')

    logPrefix = ''
    logger = getLogger()
    logLevel = logger.getEffectiveLevel()

    def log(text: str):
        logger.log(logLevel, logPrefix + text)

    if args.dry_run:
        logPrefix = '[DRY RUN] '

    if args.run_once:
        if args.bdn_arguments:
            args.bdn_arguments += ' --iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart'
        else:
            args.bdn_arguments = '--iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart'

    versionTarFiles = []

    for versionName in args.versions:
        version = get_version_from_name(versionName)
        moniker = version['tfm'].replace(
            'nativeaot',
            'net')  # results of nativeaotX.0 are stored in netX.0 folder
        resultsPath = os.path.join(rootPath, 'artifacts', 'bin',
                                   'MicroBenchmarks', 'Release', moniker,
                                   'BenchmarkDotNet.Artifacts', 'results')

        if not args.no_clean:
            # Delete any preexisting SDK installations, which allows
            # multiple versions to be run from a single command
            if os.path.isdir(sdkPath):
                log('rmdir -r ' + sdkPath)

                if not args.dry_run:
                    shutil.rmtree(sdkPath)

        benchmarkArgs = [
            '--skip-logger-setup', '--filter', args.filter, '--architecture',
            args.architecture, '-f', version['tfm']
        ]

        if 'build' in version:
            benchmarkArgs += ['--dotnet-versions', version['build']]

        if args.resume:
            benchmarkArgs += ['--resume']
        else:
            if os.path.isdir(resultsPath):
                log('rmdir -r ' + resultsPath)

                if not args.dry_run:
                    shutil.rmtree(resultsPath)

        if args.bdn_arguments:
            if version['tfm'].startswith('nativeaot'):
                benchmarkArgs += [
                    '--bdn-arguments', args.bdn_arguments +
                    ' --ilCompilerVersion ' + version['ilc']
                ]
            else:
                benchmarkArgs += ['--bdn-arguments', args.bdn_arguments]
        elif version['tfm'].startswith('nativeaot'):
            benchmarkArgs += [
                '--bdn-arguments', '--ilCompilerVersion ' + version['ilc']
            ]

        log('Executing: benchmarks_ci.py ' + ' '.join(benchmarkArgs))

        if not args.dry_run:
            try:
                benchmarks_ci.__main(benchmarkArgs)
            except CalledProcessError:
                log('benchmarks_ci exited with a non-zero exit code; '
                    'check the log and report the benchmark failure')
                # don't rethrow if some results were produced, as we want to create the tar file with results anyway
                if not os.path.isdir(resultsPath):
                    raise

        log('Results were created in the following folder:')
        log('  ' + resultsPath)

        timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M')

        if args.device_name:
            resultsName = timestamp + '-' + args.device_name + '-' + versionName
        else:
            resultsName = timestamp + '-' + versionName

        resultsName = args.architecture + '-' + resultsName
        resultsTarPath = os.path.join(rootPath, 'artifacts',
                                      resultsName + '.tar.gz')
        versionTarFiles += [resultsTarPath]

        if not args.dry_run:
            resultsTar = tarfile.open(resultsTarPath, 'w:gz')
            resultsTar.add(resultsPath, arcname=resultsName)
            resultsTar.close()

    log('Results were collected into the following tar archive(s):')

    for versionTarFile in versionTarFiles:
        log('  ' + versionTarFile)
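`get_version_from_name` is defined elsewhere; the loop above assumes entries of roughly this shape (keys taken from the code, values purely illustrative):

version = {
    'tfm': 'nativeaot8.0',  # target framework moniker
    'ilc': '8.0.0-rc.1',    # ILCompiler version, used for nativeaot TFMs
    'build': '8.0.100',     # optional SDK build, passed via --dotnet-versions
}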
Example #15
def __main(args: list) -> int:
    validate_supported_runtime()
    args = __process_arguments(args)
    verbose = not args.quiet
    setup_loggers(verbose=verbose)

    # If repository is not set, this is a core-sdk run in the performance
    # repo; if repository is set, the user must also supply commit_sha.
    if not ((args.commit_sha is None) == (args.repository is None)):
        raise ValueError(
            'Either both commit_sha and repository should be set or neither')

    # Acquire necessary tools (dotnet)
    # For arm64 runs, download the x64 version so we can get the information
    # we need, but set all variables as if we were running normally. This is
    # a workaround: arm64 binaries cannot run in the cross containers, so the
    # CI setup script runs in a normal Ubuntu container.
    architecture = 'x64' if args.architecture == 'arm64' else args.architecture

    init_tools(architecture=architecture,
               dotnet_versions=args.dotnet_versions,
               channel=args.channel,
               verbose=verbose,
               install_dir=args.install_dir)

    # dotnet --info
    dotnet.info(verbose=verbose)

    # When running on internal repos, the repository URL comes to us in a
    # mangled form (e.g. https://github.com/dotnet-coreclr), so replace
    # dashes with slashes in that case.
    repo_url = None if args.repository is None else args.repository.replace(
        '-', '/')

    variable_format = 'set %s=%s\n' if sys.platform == 'win32' else 'export %s=%s\n'
    path_variable = 'set PATH=%%PATH%%;%s\n' if sys.platform == 'win32' else 'export PATH=$PATH:%s\n'
    dotnet_path = '%HELIX_CORRELATION_PAYLOAD%\\dotnet' if sys.platform == 'win32' else '$HELIX_CORRELATION_PAYLOAD/dotnet'
    owner, repo = ('dotnet', 'core-sdk') if args.repository is None else (
        dotnet.get_repository(repo_url))
    config_string = ';'.join(
        args.build_configs) if sys.platform == 'win32' else '"%s"' % ';'.join(
            args.build_configs)

    output = ''

    with push_dir(get_repo_root_path()):
        output = check_output(['git', 'rev-parse', 'HEAD'])

    decoded_output = ''.join(
        line.decode('utf-8') for line in output.splitlines())

    perfHash = decoded_output if args.get_perf_hash else args.perf_hash

    framework = ChannelMap.get_target_framework_moniker(args.channel)
    if framework.startswith('netcoreapp'):
        target_framework_moniker = dotnet.FrameworkAction.get_target_framework_moniker(
            framework)
        dotnet_version = dotnet.get_dotnet_version(target_framework_moniker,
                                                   args.cli)
        commit_sha = dotnet.get_dotnet_sdk(
            target_framework_moniker,
            args.cli) if args.commit_sha is None else args.commit_sha
        source_timestamp = dotnet.get_commit_date(target_framework_moniker,
                                                  commit_sha, repo_url)

        branch = ChannelMap.get_branch(
            args.channel) if not args.branch else args.branch

        getLogger().info("Writing script to %s" % args.output_file)

        with open(args.output_file, 'w') as out_file:
            out_file.write(variable_format % ('PERFLAB_INLAB', '1'))
            out_file.write(variable_format %
                           ('PERFLAB_REPO', '/'.join([owner, repo])))
            out_file.write(variable_format % ('PERFLAB_BRANCH', branch))
            out_file.write(variable_format % ('PERFLAB_PERFHASH', perfHash))
            out_file.write(variable_format % ('PERFLAB_HASH', commit_sha))
            out_file.write(variable_format % ('PERFLAB_QUEUE', args.queue))
            out_file.write(variable_format %
                           ('PERFLAB_BUILDNUM', args.build_number))
            out_file.write(variable_format %
                           ('PERFLAB_BUILDARCH', args.architecture))
            out_file.write(variable_format % ('PERFLAB_LOCALE', args.locale))
            out_file.write(variable_format %
                           ('PERFLAB_BUILDTIMESTAMP', source_timestamp))
            out_file.write(variable_format %
                           ('PERFLAB_CONFIGS', config_string))
            out_file.write(variable_format %
                           ('DOTNET_VERSION', dotnet_version))
            out_file.write(variable_format %
                           ('PERFLAB_TARGET_FRAMEWORKS', framework))
            out_file.write(variable_format %
                           ('DOTNET_CLI_TELEMETRY_OPTOUT', '1'))
            out_file.write(variable_format % ('DOTNET_MULTILEVEL_LOOKUP', '0'))
            out_file.write(variable_format % ('UseSharedCompilation', 'false'))
            out_file.write(variable_format % ('DOTNET_ROOT', dotnet_path))
            out_file.write(path_variable % dotnet_path)

    else:
        with open(args.output_file, 'w') as out_file:
            out_file.write(variable_format % ('PERFLAB_INLAB', '0'))
            out_file.write(variable_format %
                           ('PERFLAB_TARGET_FRAMEWORKS', framework))
            out_file.write(path_variable % dotnet_path)

    # The '_Framework' is needed for specifying frameworks in proj files and for building tools later in the pipeline
    __write_pipeline_variable('_Framework', framework)
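`__write_pipeline_variable` is not shown above. A plausible sketch, assuming Azure Pipelines logging commands are the mechanism (suggested by the comment about the pipeline; illustrative only):

def __write_pipeline_variable(name: str, value: str):
    # Azure Pipelines scans stdout for this logging command and turns it
    # into a pipeline variable visible to later steps.
    print('##vso[task.setvariable variable=%s]%s' % (name, value))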
Example #16
def __main(args: list) -> int:
    validate_supported_runtime()
    args = __process_arguments(args)
    verbose = not args.quiet
    setup_loggers(verbose=verbose)

    # If repository is not set, this is a core-sdk run in the performance
    # repo; if repository is set, the user must also supply commit_sha.
    if not ((args.commit_sha is None) == (args.repository is None)):
        raise ValueError('Either both commit_sha and repository should be set or neither')

    target_framework_monikers = micro_benchmarks \
        .FrameworkAction \
        .get_target_framework_monikers(args.frameworks)
    # Acquire necessary tools (dotnet, and BenchView)
    init_tools(
        architecture=args.architecture,
        dotnet_versions=args.dotnet_versions,
        target_framework_monikers=target_framework_monikers,
        verbose=verbose
    )

    # dotnet --info
    dotnet.info(verbose=verbose)

    variable_format = 'set %s=%s\n' if sys.platform == 'win32' else 'export %s=%s\n'
    owner, repo = ('dotnet', 'core-sdk') if args.repository is None else (dotnet.get_repository(args.repository))
    config_string = ';'.join(args.build_configs) if sys.platform == 'win32' else '"%s"' % ';'.join(args.build_configs)

    is_netcoreapp_30 = False

    output = ''

    with push_dir(get_repo_root_path()):
        output = check_output(['git', 'rev-parse', 'HEAD'])

    decoded_output = ''.join(
        line.decode('utf-8') for line in output.splitlines())

    perfHash = decoded_output if args.get_perf_hash else args.perf_hash

    for framework in target_framework_monikers:
        if framework.startswith('netcoreapp'):
            if framework == 'netcoreapp3.0':
                is_netcoreapp_30 = True
            target_framework_moniker = micro_benchmarks.FrameworkAction.get_target_framework_moniker(framework)
            dotnet_version = dotnet.get_dotnet_version(target_framework_moniker, args.cli)
            commit_sha = dotnet.get_dotnet_sdk(target_framework_moniker, args.cli) if args.commit_sha is None else args.commit_sha
            source_timestamp = dotnet.get_commit_date(target_framework_moniker, commit_sha, args.repository)

            branch = micro_benchmarks.FrameworkAction.get_branch(target_framework_moniker) if not args.branch else args.branch

            getLogger().info("Writing script to %s" % args.output_file)

            with open(args.output_file, 'w') as out_file:
                out_file.write(variable_format % ('PERFLAB_INLAB', '1'))
                out_file.write(variable_format % ('PERFLAB_REPO', '/'.join([owner, repo])))
                out_file.write(variable_format % ('PERFLAB_BRANCH', branch))
                out_file.write(variable_format % ('PERFLAB_PERFHASH', perfHash))
                out_file.write(variable_format % ('PERFLAB_HASH', commit_sha))
                out_file.write(variable_format % ('PERFLAB_QUEUE', args.queue))
                out_file.write(variable_format % ('PERFLAB_BUILDNUM', args.build_number))
                out_file.write(variable_format % ('PERFLAB_BUILDARCH', args.architecture))
                out_file.write(variable_format % ('PERFLAB_LOCALE', args.locale))
                out_file.write(variable_format % ('PERFLAB_BUILDTIMESTAMP', source_timestamp))
                out_file.write(variable_format % ('PERFLAB_CONFIGS', config_string))
                out_file.write(variable_format % ('DOTNET_VERSION', dotnet_version))

        else:
            with open(args.output_file, 'w') as out_file:
                out_file.write(variable_format % ('PERFLAB_INLAB', '0'))

    # On non-windows platforms, delete dotnet, so that we don't have to deal with chmoding it on the helix machines
    # This is only necessary for netcoreapp3.0
    if sys.platform != 'win32' and is_netcoreapp_30:
        dotnet.remove_dotnet(args.architecture)
Example #17
'''
pre-command
'''
import sys
from performance.logger import setup_loggers
from shared.precommands import PreCommands
from shared import const
from test import EXENAME

setup_loggers(True)
precommands = PreCommands()
precommands.new(template='razorclasslib',
                output_dir=const.APPDIR,
                bin_dir=const.BINDIR,
                exename=EXENAME,
                working_directory=sys.path[0])
precommands.execute()
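For context, the scaffolding above corresponds roughly to driving the dotnet template engine directly; an illustrative shell equivalent (assumed, not taken from the snippet):

# Roughly equivalent commands (illustrative only):
#   dotnet new razorclasslib --output <APPDIR> --name <EXENAME>
#   dotnet build --output <BINDIR>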
Example #18
def __main():
    validate_supported_runtime()
    setup_loggers(verbose=True)
    install()
def __main(args: list) -> int:
    setup_loggers(verbose=True)

    args = __process_arguments(args)
    rootPath = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))
    sdkPath = os.path.join(rootPath, 'tools', 'dotnet', args.architecture)

    logPrefix = ''

    if args.dry_run:
        logPrefix = '[DRY RUN] '

    for versionName in args.versions:
        version = get_version_from_name(versionName)
        resultsPath = os.path.join(rootPath, 'artifacts', 'bin',
                                   'MicroBenchmarks', 'Release',
                                   version['tfm'], 'BenchmarkDotNet.Artifacts',
                                   'results')

        if not args.no_clean:
            # Delete any preexisting SDK and results, which allows
            # multiple versions to be run from a single command
            if os.path.isdir(sdkPath):
                getLogger().log(getLogger().getEffectiveLevel(),
                                logPrefix + 'rmdir -r ' + sdkPath)

                if not args.dry_run:
                    shutil.rmtree(sdkPath)

            if os.path.isdir(resultsPath):
                getLogger().log(getLogger().getEffectiveLevel(),
                                logPrefix + 'rmdir -r ' + resultsPath)

                if not args.dry_run:
                    shutil.rmtree(resultsPath)

        benchmarkArgs = [
            '--skip-logger-setup', '--filter', args.filter, '--architecture',
            args.architecture, '-f', version['tfm']
        ]

        if 'build' in version:
            benchmarkArgs += ['--dotnet-versions', version['build']]

        if args.bdn_arguments:
            benchmarkArgs += ['--bdn-arguments', args.bdn_arguments]

        getLogger().log(
            getLogger().getEffectiveLevel(), logPrefix +
            'Executing: benchmarks_ci.py ' + ' '.join(benchmarkArgs))

        if not args.dry_run:
            benchmarks_ci.__main(benchmarkArgs)

        getLogger().log(
            getLogger().getEffectiveLevel(),
            logPrefix + 'Results were created in the following folder:')
        getLogger().log(getLogger().getEffectiveLevel(),
                        logPrefix + '  ' + resultsPath)

        timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M')

        if args.device_name:
            resultsName = timestamp + '-' + args.device_name + '-' + versionName
        else:
            resultsName = timestamp + '-' + versionName

        resultsTarPath = os.path.join(rootPath, 'artifacts',
                                      resultsName + '.tar.gz')

        if not args.dry_run:
            resultsTar = tarfile.open(resultsTarPath, 'w:gz')
            resultsTar.add(resultsPath, arcname=resultsName)
            resultsTar.close()

        getLogger().log(
            getLogger().getEffectiveLevel(), logPrefix +
            'Results were collected into the following tar archive:')
        getLogger().log(getLogger().getEffectiveLevel(),
                        logPrefix + '  ' + resultsTarPath)