Example #1
# Assumed imports: the snippet references os, ASVDb, and utils.getRepoInfo()
# from the asvdb package.
import os

from asvdb import ASVDb, utils

def asv_db():
    # Create an interface to an ASV "database" to write the results to.
    (repo, branch) = utils.getRepoInfo()  # gets repo info from CWD by default
    # The ASVDB_DIR environment variable allows control of the results location.
    db_dir = os.environ.get("ASVDB_DIR", "./benchmarks")
    db = ASVDb(dbDir=db_dir, repo=repo, branches=[branch])

    return db
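
A minimal usage sketch for asv_db() (hedged: the metadata values and benchmark
names below are placeholders; BenchmarkInfo, BenchmarkResult, and addResults()
are used the same way in Example #2):

from asvdb import BenchmarkInfo, BenchmarkResult

db = asv_db()
# Placeholder metadata; a real run would record the actual machine details.
bInfo = BenchmarkInfo(machineName="my-machine", pythonVer="3.8")
bResult = BenchmarkResult(funcName="bench_load_csv",
                          argNameValuePairs=[("rows", 10000)],
                          result=0.42)
bResult.unit = "seconds"
db.addResults(bInfo, [bResult])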
Example #2
# Assumed imports: the snippet references platform, psutil, the pynvml
# bindings (via "smi"), and the asvdb package. _getCudaVersion, _getOSName,
# _getHierBenchNameFromFullname, and _ensureListLike are helpers defined
# elsewhere in the plugin module.
import platform

import psutil
import pynvml as smi
from asvdb import ASVDb, BenchmarkInfo, BenchmarkResult
from asvdb import utils as asvdbUtils

def pytest_sessionfinish(session, exitstatus):
    gpuBenchSess = session.config._gpubenchmarksession
    config = session.config
    asvOutputDir = config.getoption("benchmark_asv_output_dir")
    asvMetadata = config.getoption("benchmark_asv_metadata")
    gpuDeviceNums = config.getoption("benchmark_gpu_device")

    if asvOutputDir and gpuBenchSess.benchmarks:
        # FIXME: do not lookup commit metadata if already specified on the
        # command line.
        (commitHash, commitTime) = asvdbUtils.getCommitInfo()
        (commitRepo, commitBranch) = asvdbUtils.getRepoInfo()

        # FIXME: do not make pynvml calls if all the metadata provided by pynvml
        # was specified on the command line.
        smi.nvmlInit()
        # only supporting 1 GPU
        gpuDeviceHandle = smi.nvmlDeviceGetHandleByIndex(gpuDeviceNums[0])

        uname = platform.uname()
        machineName = asvMetadata.get("machineName", uname.machine)
        cpuType = asvMetadata.get("cpuType", uname.processor)
        arch = asvMetadata.get("arch", uname.machine)
        pythonVer = asvMetadata.get("pythonVer",
            ".".join(platform.python_version_tuple()[:-1]))
        cudaVer = asvMetadata.get("cudaVer", _getCudaVersion() or "unknown")
        # NOTE: platform.linux_distribution() was removed in Python 3.8; it is
        # only used as a fallback when _getOSName() returns nothing.
        osType = asvMetadata.get("osType",
            _getOSName() or platform.linux_distribution()[0])
        gpuType = asvMetadata.get("gpuType",
            smi.nvmlDeviceGetName(gpuDeviceHandle).decode())
        ram = asvMetadata.get("ram", "%d" % psutil.virtual_memory().total)
        gpuRam = asvMetadata.get("gpuRam",
            "%d" % smi.nvmlDeviceGetMemoryInfo(gpuDeviceHandle).total)

        commitHash = asvMetadata.get("commitHash", commitHash)
        commitTime = asvMetadata.get("commitTime", commitTime)
        commitRepo = asvMetadata.get("commitRepo", commitRepo)
        commitBranch = asvMetadata.get("commitBranch", commitBranch)
        requirements = asvMetadata.get("requirements", "{}")

        suffixDict = dict(gpu_util="gpuutil",
                          gpu_mem="gpumem",
                          mean="time")
        unitsDict = dict(gpu_util="percent",
                         gpu_mem="bytes",
                         mean="seconds")

        db = ASVDb(asvOutputDir, commitRepo, [commitBranch])

        bInfo = BenchmarkInfo(machineName=machineName,
                              cudaVer=cudaVer,
                              osType=osType,
                              pythonVer=pythonVer,
                              commitHash=commitHash,
                              commitTime=commitTime,
                              branch=commitBranch,
                              gpuType=gpuType,
                              cpuType=cpuType,
                              arch=arch,
                              ram=ram,
                              gpuRam=gpuRam,
                              requirements=requirements)

        for bench in gpuBenchSess.benchmarks:
            benchName = _getHierBenchNameFromFullname(bench.fullname)
            # build the final params dict by extracting them from the
            # bench.params dictionary. Not all benchmarks are parameterized
            params = {}
            bench_params = bench.params.items() if bench.params is not None else []
            for (paramName, paramVal) in bench_params:
                # If the params come from a fixture, handle them differently:
                # they will be (or should be) stored in a special variable
                # accessible via the name of the fixture.
                #
                # NOTE: "fixture_param_names" must be manually set by the
                # benchmark author/user using the "request" fixture, keyed by
                # the fixture's name (see below)
                #
                # @pytest.fixture(params=[1,2,3])
                # def someFixture(request):
                #     request.keywords["fixture_param_names"] = \
                #         {"someFixture": ["the_param_name"]}
                if (getattr(bench, "fixture_param_names", None) is not None
                        and paramName in bench.fixture_param_names):
                    fixtureName = paramName
                    paramNames = _ensureListLike(bench.fixture_param_names[fixtureName])
                    paramValues = _ensureListLike(paramVal)
                    for (pname, pval) in zip(paramNames, paramValues):
                        params[pname] = pval
                # otherwise, a benchmark/test will have params added to the
                # bench.params dict as a standard key:value (paramName:paramVal)
                else:
                    params[paramName] = paramVal

            # The stats of interest are read below via getattr(); "mean" always
            # exists on bench.stats, while gpu_mem and gpu_util are only
            # present when GPU measurement was enabled.

            resultList = []
            for statType in ["mean", "gpu_mem", "gpu_util"]:
                bn = "%s_%s" % (benchName, suffixDict[statType])
                val = getattr(bench.stats, statType, None)
                if val is not None:
                    bResult = BenchmarkResult(funcName=bn,
                                              argNameValuePairs=list(params.items()),
                                              result=val)
                    bResult.unit = unitsDict[statType]
                    resultList.append(bResult)

            # If there were any custom metrics, add each of those as an
            # individual result to the same bInfo instance.
            for customMetricName in bench.stats.getCustomMetricNames():
                (result, unitString) = bench.stats.getCustomMetric(customMetricName)
                bn = "%s_%s" % (benchName, customMetricName)
                bResult = BenchmarkResult(funcName=bn,
                                          argNameValuePairs=list(params.items()),
                                          result=result)
                bResult.unit = unitString
                resultList.append(bResult)

            db.addResults(bInfo, resultList)
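
For reference, a hedged sketch of how a benchmark author might set
"fixture_param_names" via the "request" fixture, as the NOTE above describes.
Because the loop indexes bench.fixture_param_names by fixture name, a dict
keyed by the fixture's name is assumed; the gpubenchmark fixture and
build_dataframe function are hypothetical stand-ins:

import pytest

# Hypothetical parameterized fixture: each param is a (rows, cols) pair.
# "fixture_param_names" maps this fixture's name to per-value param names so
# the plugin can record "rows" and "cols" as separate ASV params.
@pytest.fixture(params=[(1000, 10), (10000, 20)])
def dataset_shape(request):
    request.keywords["fixture_param_names"] = {"dataset_shape": ["rows", "cols"]}
    return request.param

def bench_build_dataframe(gpubenchmark, dataset_shape):
    rows, cols = dataset_shape
    gpubenchmark(build_dataframe, rows, cols)  # build_dataframe: hypothetical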