Example #1
def getSysInfo(requirements):
    # Use Node Label from Jenkins if possible
    label = os.environ.get('ASV_LABEL')
    uname = platform.uname()
    if label is None:
        label = uname.machine

    commitHash = getCommandOutput("git rev-parse HEAD")
    commitTime = getCommandOutput("git log -n1 --pretty=%%ct %s" % commitHash)
    # ASV expects the commit time in milliseconds
    commitTime = str(int(commitTime) * 1000)
    # Only the first GPU is queried (assumes smi.nvmlInit() was called elsewhere)
    gpuDeviceNums = [0]
    gpuDeviceHandle = smi.nvmlDeviceGetHandleByIndex(gpuDeviceNums[0])

    bInfo = BenchmarkInfo(
        machineName=label,
        cudaVer=getCommandOutput(
            "nvcc --version | grep release | awk '{print $5}' | tr -d ,"),
        osType="%s %s" % (uname.system, uname.release),
        pythonVer=platform.python_version(),
        commitHash=commitHash,
        commitTime=commitTime,
        gpuType=smi.nvmlDeviceGetName(gpuDeviceHandle).decode(),
        cpuType=uname.processor,
        arch=uname.machine,
        ram="%d" % psutil.virtual_memory().total,
        requirements=requirements)

    return bInfo
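
A minimal usage sketch, assuming smi.nvmlInit() has already been called; the database path, repo name, requirements value, and result numbers below are placeholders, and the ASVDb/BenchmarkResult calls mirror the API used in the other examples:

from asvdb import ASVDb, BenchmarkResult

bInfo = getSysInfo(requirements={})  # placeholder requirements value
db = ASVDb("/path/to/asv_dir", "somerepo", ["main"])

# Store one timing result against the environment description
bResult = BenchmarkResult(funcName="bfs",
                          argNameValuePairs=[("dataset", "karate")],
                          result=0.123)
db.addResult(bInfo, bResult)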
Example #2
def test_getFilteredResults():
    from asvdb import ASVDb, BenchmarkInfo

    tmpDir = tempfile.TemporaryDirectory()
    asvDirName = path.join(tmpDir.name, "dir_that_did_not_exist_before")

    db = ASVDb(asvDirName, repo, [branch])
    bInfo1 = BenchmarkInfo(machineName=machineName,
                           cudaVer="9.2",
                           osType="linux",
                           pythonVer="3.6",
                           commitHash=commitHash,
                           commitTime=commitTime)
    bInfo2 = BenchmarkInfo(machineName=machineName,
                           cudaVer="10.1",
                           osType="linux",
                           pythonVer="3.7",
                           commitHash=commitHash,
                           commitTime=commitTime)
    bInfo3 = BenchmarkInfo(machineName=machineName,
                           cudaVer="10.0",
                           osType="linux",
                           pythonVer="3.7",
                           commitHash=commitHash,
                           commitTime=commitTime)

    addResultsForInfo(db, bInfo1)
    addResultsForInfo(db, bInfo2)
    addResultsForInfo(db, bInfo3)

    # should only return results associated with bInfo1
    brList1 = db.getResults(filterInfoObjList=[bInfo1])
    assert len(brList1) == 1
    assert brList1[0][0] == bInfo1
    assert len(brList1[0][1]) == len(algoRunResults)

    # should only return results associated with bInfo1 or bInfo3
    brList2 = db.getResults(filterInfoObjList=[bInfo1, bInfo3])
    assert len(brList2) == 2
    assert brList2[0][0] in [bInfo1, bInfo3]
    assert brList2[1][0] in [bInfo1, bInfo3]
    assert brList2[0][0] != brList2[1][0]
    assert len(brList2[0][1]) == len(algoRunResults)
    assert len(brList2[1][1]) == len(algoRunResults)
Example #3
def cugraph_update_asv(asvDir,
                       datasetName,
                       algoRunResults,
                       cudaVer="",
                       pythonVer="",
                       osType="",
                       machineName="",
                       repo=""):
    """
    algoRunResults is a list of (algoName, exeTime) tuples
    """
    (commitHash, commitTime) = getCommitInfo()
    (actualRepo, branch) = getRepoInfo()
    repo = repo or actualRepo

    db = ASVDb(asvDir, repo, [branch])

    uname = platform.uname()

    prefixDict = dict(
        maxGpuUtil="gpuutil",
        maxGpuMemUsed="gpumem",
        exeTime="time",
    )
    unitsDict = dict(
        maxGpuUtil="percent",
        maxGpuMemUsed="bytes",
        exeTime="seconds",
    )

    bInfo = BenchmarkInfo(machineName=machineName or uname.machine,
                          cudaVer=cudaVer or "unknown",
                          osType=osType or "%s %s" % (uname.system, uname.release),
                          pythonVer=pythonVer or platform.python_version(),
                          commitHash=commitHash,
                          commitTime=commitTime,
                          gpuType="unknown",
                          cpuType=uname.processor,
                          arch=uname.machine,
                          ram="%d" % psutil.virtual_memory().total)

    validKeys = set(list(prefixDict.keys()) + list(unitsDict.keys()))

    for (funcName, metricsDict) in algoRunResults.items():
        for (metricName, val) in metricsDict.items():
            # If an invalid metricName is present (likely due to a benchmark
            # run error), skip
            if metricName in validKeys:
                bResult = BenchmarkResult(
                    funcName="%s_%s" % (funcName, prefixDict[metricName]),
                    argNameValuePairs=[("dataset", datasetName)],
                    result=val)
                bResult.unit = unitsDict[metricName]
                db.addResult(bInfo, bResult)
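
The nested loops above consume algoRunResults as a dict of per-algorithm metric dicts; a sketch of a call with that assumed shape (all names and numbers below are hypothetical):

# Hypothetical input: {algoName: {metricName: value, ...}, ...}
algoRunResults = {
    "bfs": {"exeTime": 0.42, "maxGpuUtil": 67, "maxGpuMemUsed": 1_500_000_000},
    "pagerank": {"exeTime": 1.30, "maxGpuUtil": 81, "maxGpuMemUsed": 2_000_000_000},
}
cugraph_update_asv("/path/to/asv_dir",
                   "karate",
                   algoRunResults,
                   cudaVer="10.1",
                   pythonVer="3.7")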
Example #4
def test_concurrency():
    from asvdb import ASVDb, BenchmarkInfo, BenchmarkResult

    tmpDir = tempfile.TemporaryDirectory()
    asvDirName = path.join(tmpDir.name, "dir_that_does_not_exist")
    repo = "somerepo"
    branch1 = "branch1"

    db1 = ASVDb(asvDirName, repo, [branch1])
    db2 = ASVDb(asvDirName, repo, [branch1])
    db3 = ASVDb(asvDirName, repo, [branch1])
    # Use the writeDelay member var to insert a delay during write to properly
    # test collisions by making writes slow.
    db1.writeDelay = 10
    db2.writeDelay = 10

    bInfo = BenchmarkInfo()
    bResult1 = BenchmarkResult(funcName="somebenchmark1", result=43)
    bResult2 = BenchmarkResult(funcName="somebenchmark2", result=43)
    bResult3 = BenchmarkResult(funcName="somebenchmark3", result=43)

    # db1 or db2 should be actively writing the result (because the writeDelay is long)
    # and db3 should be blocked.
    t1 = threading.Thread(target=db1.addResult, args=(bInfo, bResult1))
    t2 = threading.Thread(target=db2.addResult, args=(bInfo, bResult2))
    t3 = threading.Thread(target=db3.addResult, args=(bInfo, bResult3))
    t1.start()
    t2.start()
    time.sleep(0.5)  # ensure t3 tries to write last
    t3.start()

    # Check that db3 is blocked - if locking wasn't working, it would have
    # finished since it has no writeDelay.
    t3.join(timeout=0.5)
    assert t3.is_alive() is True

    # Cancel db1 and db2, allowing db3 to write and finish
    db1.cancelWrite = True
    db2.cancelWrite = True
    t3.join(timeout=11)
    assert t3.is_alive() is False
    t1.join()
    t2.join()
    t3.join()

    # Check that db3 wrote its result
    with open(path.join(asvDirName, "results", "benchmarks.json")) as fobj:
        jo = json.load(fobj)
        assert "somebenchmark3" in jo
        #print(jo)

    tmpDir.cleanup()
Example #5
def createAndPopulateASVDb(dbDir):
    from asvdb import ASVDb, BenchmarkInfo

    db = ASVDb(dbDir, repo, [branch])
    bInfo = BenchmarkInfo(machineName=machineName,
                          cudaVer="9.2",
                          osType="linux",
                          pythonVer="3.6",
                          commitHash=commitHash,
                          commitTime=commitTime,
                          branch=branch,
                          gpuType="n/a",
                          cpuType="x86_64",
                          arch="my_arch",
                          ram="123456")

    return addResultsForInfo(db, bInfo)
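
addResultsForInfo() is referenced here and in Example #2 but not shown; a plausible sketch, assuming it follows the same pattern as test_addResults (Example #8), that algoRunResults and datasetName are module-level test fixtures, and that returning the db is what the caller expects:

def addResultsForInfo(db, bInfo):
    # Hypothetical helper: one BenchmarkResult per (algoName, exeTime) tuple,
    # all stored against the same BenchmarkInfo.
    resultList = [BenchmarkResult(funcName=algoName,
                                  argNameValuePairs=[("dataset", datasetName)],
                                  result=exeTime)
                  for (algoName, exeTime) in algoRunResults]
    db.addResults(bInfo, resultList)
    return db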
Example #6
def test_concurrency_stress():
    from asvdb import ASVDb, BenchmarkInfo, BenchmarkResult

    tmpDir = tempfile.TemporaryDirectory()
    asvDirName = path.join(tmpDir.name, "dir_that_does_not_exist")
    repo = "somerepo"
    branch1 = "branch1"
    num = 32
    dbs = []
    threads = []
    allFuncNames = []

    bInfo = BenchmarkInfo(machineName=machineName)

    for i in range(num):
        db = ASVDb(asvDirName, repo, [branch1])
        db.writeDelay = 0.5
        dbs.append(db)

        funcName = f"somebenchmark{i}"
        bResult = BenchmarkResult(funcName=funcName, result=43)
        allFuncNames.append(funcName)

        t = threading.Thread(target=db.addResult, args=(bInfo, bResult))
        threads.append(t)

    for i in range(num):
        threads[i].start()

    for i in range(num):
        threads[i].join()

    # There should be num unique results in the db after (re)reading.  Pick any
    # of the db instances to read, they should all see the same results.
    results = dbs[0].getResults()
    assert len(results[0][1]) == num

    # Simply check that all unique func names were read back in.
    allFuncNamesCheck = [r.funcName for r in results[0][1]]
    assert sorted(allFuncNames) == sorted(allFuncNamesCheck)

    tmpDir.cleanup()
Example #7
def bench_info():

    # Create a BenchmarkInfo object describing the benchmarking environment.
    # This can/should be reused when adding multiple results from the same environment.

    uname = platform.uname()
    (commitHash, commitTime) = utils.getCommitInfo()  # gets commit info from CWD by default
    cuda_version = os.environ["CUDA_VERSION"]
    # GPU name below comes from the current CUDA device rather than nvidia-smi

    bInfo = BenchmarkInfo(
        machineName=socket.gethostname(),
        cudaVer=cuda_version,
        osType="%s %s" % (uname.system, uname.release),
        pythonVer=platform.python_version(),
        commitHash=commitHash,
        commitTime=commitTime,
        gpuType=cuda.get_current_device().name.decode("utf-8"),
        cpuType=uname.processor,
        arch=uname.machine,
        ram="%d" % psutil.virtual_memory().total,
    )
    return bInfo
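
As the comments note, one BenchmarkInfo can be reused for several results from the same environment; a brief hedged sketch (database path, repo, function names, and timings are hypothetical):

bInfo = bench_info()
db = ASVDb("/path/to/asv_dir", "somerepo", ["main"])

# The same bInfo backs every addResult call from this environment
for funcName, seconds in [("bfs", 0.12), ("sssp", 0.34)]:
    db.addResult(bInfo, BenchmarkResult(funcName=funcName, result=seconds))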
Example #8
def test_addResults():
    asvDir = tempfile.TemporaryDirectory()
    from asvdb import ASVDb, BenchmarkInfo, BenchmarkResult

    dbDir = asvDir.name
    db = ASVDb(dbDir, repo, [branch])
    bInfo = BenchmarkInfo(machineName=machineName,
                          cudaVer="9.2",
                          osType="linux",
                          pythonVer="3.6",
                          commitHash=commitHash,
                          commitTime=commitTime,
                          branch=branch,
                          gpuType="n/a",
                          cpuType="x86_64",
                          arch="my_arch",
                          ram="123456")

    resultList = []
    for (algoName, exeTime) in algoRunResults:
        bResult = BenchmarkResult(funcName=algoName,
                                  argNameValuePairs=[("dataset", datasetName)],
                                  result=exeTime)
        resultList.append(bResult)

    db.addResults(bInfo, resultList)

    # read back in and check
    dbCheck = ASVDb(dbDir, repo, [branch])
    retList = dbCheck.getResults()
    assert len(retList) == 1
    assert retList[0][0] == bInfo
    assert len(retList[0][1]) == len(algoRunResults)
    assert resultList == retList[0][1]

    asvDir.cleanup()
Example #9
def pytest_sessionfinish(session, exitstatus):
    gpuBenchSess = session.config._gpubenchmarksession
    config = session.config
    asvOutputDir = config.getoption("benchmark_asv_output_dir")
    asvMetadata = config.getoption("benchmark_asv_metadata")
    gpuDeviceNums = config.getoption("benchmark_gpu_device")

    if asvOutputDir and gpuBenchSess.benchmarks:

        # FIXME: do not lookup commit metadata if already specified on the
        # command line.
        (commitHash, commitTime) = asvdbUtils.getCommitInfo()
        (commitRepo, commitBranch) = asvdbUtils.getRepoInfo()

        # FIXME: do not make pynvml calls if all the metadata provided by pynvml
        # was specified on the command line.
        smi.nvmlInit()
        # only supporting 1 GPU
        gpuDeviceHandle = smi.nvmlDeviceGetHandleByIndex(gpuDeviceNums[0])

        uname = platform.uname()
        machineName = asvMetadata.get("machineName", uname.machine)
        cpuType = asvMetadata.get("cpuType", uname.processor)
        arch = asvMetadata.get("arch", uname.machine)
        pythonVer = asvMetadata.get("pythonVer",
            ".".join(platform.python_version_tuple()[:-1]))
        cudaVer = asvMetadata.get("cudaVer", _getCudaVersion() or "unknown")
        osType = asvMetadata.get("osType",
            _getOSName() or platform.linux_distribution()[0])
        gpuType = asvMetadata.get("gpuType",
            smi.nvmlDeviceGetName(gpuDeviceHandle).decode())
        ram = asvMetadata.get("ram", "%d" % psutil.virtual_memory().total)
        gpuRam = asvMetadata.get("gpuRam",
            "%d" % smi.nvmlDeviceGetMemoryInfo(gpuDeviceHandle).total)

        commitHash = asvMetadata.get("commitHash", commitHash)
        commitTime = asvMetadata.get("commitTime", commitTime)
        commitRepo = asvMetadata.get("commitRepo", commitRepo)
        commitBranch = asvMetadata.get("commitBranch", commitBranch)
        requirements = asvMetadata.get("requirements", "{}")

        suffixDict = dict(gpu_util="gpuutil",
                          gpu_mem="gpumem",
                          mean="time",
        )
        unitsDict = dict(gpu_util="percent",
                         gpu_mem="bytes",
                         mean="seconds",
        )

        db = ASVDb(asvOutputDir, commitRepo, [commitBranch])

        bInfo = BenchmarkInfo(machineName=machineName,
                              cudaVer=cudaVer,
                              osType=osType,
                              pythonVer=pythonVer,
                              commitHash=commitHash,
                              commitTime=commitTime,
                              branch=commitBranch,
                              gpuType=gpuType,
                              cpuType=cpuType,
                              arch=arch,
                              ram=ram,
                              gpuRam=gpuRam,
                              requirements=requirements)

        for bench in gpuBenchSess.benchmarks:
            benchName = _getHierBenchNameFromFullname(bench.fullname)
            # build the final params dict by extracting them from the
            # bench.params dictionary. Not all benchmarks are parameterized
            params = {}
            bench_params = bench.params.items() if bench.params is not None else []
            for (paramName, paramVal) in bench_params:
                # If the params are coming from a fixture, handle them
                # differently since they will (should be) stored in a special
                # variable accessible with the name of the fixture.
                #
                # NOTE: "fixture_param_names" must be manually set by the
                # benchmark author/user using the "request" fixture! (see below)
                #
                # @pytest.fixture(params=[1,2,3])
                # def someFixture(request):
                #     request.keywords["fixture_param_names"] = ["the_param_name"]
                if hasattr(bench, "fixture_param_names") and \
                   (bench.fixture_param_names is not None) and \
                   (paramName in bench.fixture_param_names):
                    fixtureName = paramName
                    paramNames = _ensureListLike(bench.fixture_param_names[fixtureName])
                    paramValues = _ensureListLike(paramVal)
                    for (pname, pval) in zip(paramNames, paramValues):
                        params[pname] = pval
                # otherwise, a benchmark/test will have params added to the
                # bench.params dict as a standard key:value (paramName:paramVal)
                else:
                    params[paramName] = paramVal

            resultList = []
            for statType in ["mean", "gpu_mem", "gpu_util"]:
                bn = "%s_%s" % (benchName, suffixDict[statType])
                val = getattr(bench.stats, statType, None)
                if val is not None:
                    bResult = BenchmarkResult(funcName=bn,
                                              argNameValuePairs=list(params.items()),
                                              result=val)
                    bResult.unit = unitsDict[statType]
                    resultList.append(bResult)

            # If there were any custom metrics, add each of those as an
            # individual result to the same bInfo instance as well.
            for customMetricName in bench.stats.getCustomMetricNames():
                (result, unitString) = bench.stats.getCustomMetric(customMetricName)
                bn = "%s_%s" % (benchName, customMetricName)
                bResult = BenchmarkResult(funcName=bn,
                                          argNameValuePairs=list(params.items()),
                                          result=result)
                bResult.unit = unitString
                resultList.append(bResult)

            db.addResults(bInfo, resultList)