def report_asv(results_df, output_dir, cudaVer="", pythonVer="", osType="", machineName=""):
    """Log the dataframe `results_df` in airspeed velocity (ASV) format.

    This writes (or appends to) JSON files in `output_dir`.

    Parameters
    ----------
    results_df : pd.DataFrame
        DataFrame with one row per benchmark run.
    output_dir : str
        Directory for ASV output database.
    cudaVer, pythonVer, osType, machineName : str, optional
        Environment metadata recorded with each result; any value left
        empty is auto-detected from the current machine where possible.
    """
    import asvdb
    import platform
    import psutil

    uname = platform.uname()
    (commitHash, commitTime) = asvdb.utils.getCommitInfo()

    b_info = asvdb.BenchmarkInfo(
        machineName=machineName or uname.machine,
        cudaVer=cudaVer or "unknown",
        osType=osType or "%s %s" % (uname.system, uname.release),
        pythonVer=pythonVer or platform.python_version(),
        commitHash=commitHash,
        commitTime=commitTime,
        gpuType="unknown",
        cpuType=uname.processor,
        arch=uname.machine,
        ram="%d" % psutil.virtual_memory().total,
    )
    (
        repo,
        branch,
    ) = asvdb.utils.getRepoInfo()  # gets repo info from CWD by default

    db = asvdb.ASVDb(dbDir=output_dir, repo=repo, branches=[branch])

    # Hoisted out of the loop (it is invariant across rows) and made a set
    # for O(1) membership tests: these columns hold measured values, so any
    # other column is treated as a benchmark parameter.
    val_keys = {'cu_time', 'cpu_time', 'speedup', 'cuml_acc', 'cpu_acc'}

    for _, row in results_df.iterrows():
        params = [(k, v) for k, v in row.items() if k not in val_keys]
        result = asvdb.BenchmarkResult(row['algo'], params, result=row['cu_time'])
        db.addResult(b_info, result)
def openAsvdbAtPath(dbDir, repo=None, branches=None, projectName=None, commitUrl=None):
    """Return an ASVDb object rooted at `dbDir`.

    If a database directory already exists at `dbDir`, its configuration
    file is read into the new db object; otherwise the configuration file
    is created so the directory is ready for (presumably) writing new
    results to.
    """
    db = asvdb.ASVDb(
        dbDir,
        repo=repo,
        branches=branches,
        projectName=projectName,
        commitUrl=commitUrl,
    )
    # Existing dir -> read its conf; fresh dir -> initialize one.
    initialize = db.loadConfFile if path.isdir(dbDir) else db.updateConfFile
    initialize()
    return db