def _getCommand(self, repo_info):
    platform = repo_info["platform"]
    # Remove it from repo_info to avoid pollution; should clean up later
    del repo_info["platform"]
    dir_path = os.path.dirname(os.path.realpath(__file__))
    unknowns = getUnknowns()
    # A not-so-elegant way of merging the info construct
    if '--info' in unknowns:
        info_idx = unknowns.index('--info')
        info = json.loads(unknowns[info_idx + 1])
        deepMerge(repo_info, info)
        del unknowns[info_idx + 1]
        del unknowns[info_idx]
    info = getString(json.dumps(repo_info))
    command = getPythonInterpreter() + " " + \
        os.path.join(dir_path, "harness.py") + " " + \
        " --platform " + getString(platform) + \
        " --framework " + getString(getArgs().framework) + \
        " --info " + info + " " + \
        ' '.join([getString(u) for u in unknowns])
    if getArgs().env:
        command = command + " --env "
        env_vars = getArgs().env.split()
        for env_var in env_vars:
            command = command + ' ' + env_var + ' '
    return command

def _runCommands(self, output, commands, platform, programs, model, test,
                 model_files, input_files, output_files, shared_libs,
                 test_files, total_num, converter, platform_args=None):
    if platform_args is None:
        platform_args = {}
    if test and test.get("log_output", False):
        platform_args["log_output"] = True
    if getArgs().timeout > 0:
        platform_args["timeout"] = getArgs().timeout
    cmds = self.composeRunCommand(commands, platform, programs, model, test,
                                  model_files, input_files, output_files,
                                  shared_libs, test_files)
    for cmd in cmds:
        one_output = self.runOnPlatform(total_num, cmd, platform,
                                        platform_args, converter)
        deepMerge(output, one_output)

def collectBenchmarks(self, info, source):
    assert os.path.isfile(source), "Source {} is not a file".format(source)
    with open(source, 'r') as f:
        content = json.load(f)
    meta = content["meta"] if "meta" in content else {}
    if "meta" in info:
        deepMerge(meta, info["meta"])
    if hasattr(getArgs(), "timeout"):
        meta["timeout"] = getArgs().timeout
    benchmarks = []
    if "benchmarks" in content:
        path = os.path.abspath(os.path.dirname(source))
        assert "meta" in content, "Meta field is missing in benchmarks"
        for benchmark_file in content["benchmarks"]:
            benchmark_file = os.path.join(path, benchmark_file)
            self._collectOneBenchmark(benchmark_file, meta, benchmarks, info)
    else:
        self._collectOneBenchmark(source, meta, benchmarks, info)
    for b in benchmarks:
        self._verifyBenchmark(b, b["path"], True)
    return benchmarks

def runBenchmark(self, info, platform, benchmarks, framework):
    if getArgs().reboot:
        platform.rebootDevice()
    reporters = getReporters()
    for idx in range(len(benchmarks)):
        benchmark = benchmarks[idx]
        # Check that the framework in the benchmark matches the command line
        if "model" in benchmark and "framework" in benchmark["model"]:
            assert benchmark["model"]["framework"] == getArgs().framework, \
                "Framework specified in the json file " \
                "{} ".format(benchmark["model"]["framework"]) + \
                "does not match the command line argument " \
                "{}".format(getArgs().framework)
        b = copy.deepcopy(benchmark)
        i = copy.deepcopy(info)
        success = runOneBenchmark(i, b, framework, platform,
                                  getArgs().platform, reporters, self._lock)
        self.success = self.success and success
        if idx != len(benchmarks) - 1:
            # Cool-down period between multiple benchmark runs
            cooldown = getArgs().cooldown
            if "model" in benchmark and "cooldown" in benchmark["model"]:
                cooldown = float(benchmark["model"]["cooldown"])
            time.sleep(cooldown)

def _collectOneBenchmark(self, source, meta, benchmarks, info):
    assert os.path.isfile(source), \
        "Benchmark {} does not exist".format(source)
    with open(source, 'r') as b:
        one_benchmark = json.load(b)
    string_map = json.loads(getArgs().string_map) \
        if getArgs().string_map else {}
    for name in string_map:
        value = string_map[name]
        deepReplace(one_benchmark, "{" + name + "}", value)
    self._verifyBenchmark(one_benchmark, source, False)
    self._updateFiles(one_benchmark, source)
    # The following change should not appear in the updated_json file
    if meta:
        deepMerge(one_benchmark["model"], meta)
    self._updateTests(one_benchmark, source)
    # Add fields that should not appear in the saved benchmark file,
    # such as the path to the benchmark file
    one_benchmark["path"] = os.path.abspath(source)
    # Split so that there is one test per benchmark
    if len(one_benchmark["tests"]) == 1:
        benchmarks.append(one_benchmark)
    else:
        tests = copy.deepcopy(one_benchmark["tests"])
        one_benchmark["tests"] = []
        for test in tests:
            new_benchmark = copy.deepcopy(one_benchmark)
            new_benchmark["tests"].append(test)
            benchmarks.append(new_benchmark)

def stopRun():
    if getArgs().status_file and os.path.isfile(getArgs().status_file):
        with open(getArgs().status_file, 'r') as file:
            content = file.read().strip()
            if content == "0":
                return True
    return False

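# Usage sketch (an assumption, not part of the original source): an external
# controller can request a graceful stop by writing "0" to the file passed
# via --status_file; stopRun() then returns True on its next poll and the
# interval loops below exit. The path used here is hypothetical.
#
#   with open("/tmp/faipep_status", "w") as f:
#       f.write("0")
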
def _getBuildScript(self, platform):
    frameworks_dir = getArgs().frameworks_dir
    assert os.path.isdir(frameworks_dir), \
        "Frameworks dir {} does not exist.".format(frameworks_dir)
    framework_dir = frameworks_dir + "/" + getArgs().framework
    assert os.path.isdir(framework_dir), \
        "Framework dir {} does not exist.".format(framework_dir)
    platform_dir = framework_dir + "/" + platform
    build_script = None
    if os.path.isdir(platform_dir):
        if os.path.isfile(platform_dir + "/build.sh"):
            build_script = platform_dir + "/build.sh"
    if build_script is None:
        # Ideally, we should check the parent directories up to the
        # framework directory. Save this for the future.
        build_script = framework_dir + "/build.sh"
        getLogger().warning("Directory {} ".format(platform_dir) +
                            "doesn't exist. Use " +
                            "{} instead".format(framework_dir))
    assert os.path.isfile(build_script), \
        "Cannot find build script in {} for ".format(framework_dir) + \
        "platform {}".format(platform)
    return build_script

def _buildOneCommitExecutable(self, platform, commit_hash):
    repo_info = {}
    repo_info_treatment = self._setupRepoStep(platform, commit_hash)
    if repo_info_treatment is None:
        return None
    repo_info['treatment'] = repo_info_treatment
    if getArgs().ab_testing:
        # Only build the control on regression detection.
        # Figure out the base commit: it is the first commit in the week.
        control_commit_hash = self._getControlCommit(
            repo_info_treatment['commit_time'], getArgs().base_commit)
        repo_info_control = self._setupRepoStep(platform, control_commit_hash)
        if repo_info_control is None:
            return None
        repo_info['control'] = repo_info_control
    if getArgs().regression:
        repo_info["regression_commits"] = \
            self._getCompareCommits(repo_info_treatment['commit'])
    # Use repo_info to pass the value of platform
    repo_info['platform'] = platform
    return repo_info

def _pullNewCommits(self):
    new_commit_hash = None
    if _runIndividual():
        new_commit_hash = self.repo.getCurrentCommitHash()
        if new_commit_hash is None:
            getLogger().error("Commit is not specified")
            return False
    else:
        # First get into the correct branch
        self.repo.checkout(getArgs().branch)
        self.repo.pull(getArgs().remote_repository, getArgs().branch)
        if self.current_commit_hash is None:
            self.current_commit_hash = self._getSavedCommit()
        if self.current_commit_hash is None:
            new_commit_hash = self.repo.getCommitHash(getArgs().commit)
        else:
            new_commit_hash = self.repo.getNextCommitHash(
                self.current_commit_hash, getArgs().step)
    if new_commit_hash == self.current_commit_hash:
        getLogger().info("Commit %s is already processed, sleeping...",
                         new_commit_hash)
        return False
    self.current_commit_hash = new_commit_hash
    return True

def _getSerialno(hash):
    # Look up the Monsoon serial number for a device hash, if a map is given
    serialno = None
    if getArgs().monsoon_map:
        map = json.loads(getArgs().monsoon_map)
        if hash in map:
            serialno = map[hash]
    return serialno

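# Hypothetical example of the JSON carried by --monsoon_map (assumed format:
# device hash -> Monsoon power-monitor serial number); the values below are
# illustrative only.
#
#   --monsoon_map '{"A1B2C3D4": "20001"}'
#
# With that map, _getSerialno("A1B2C3D4") returns "20001"; a hash that is
# absent from the map yields None.
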
def _buildOneCommitExecutable(self, platform, commit_hash):
    repo_info = {}
    repo_info_treatment = self._setupRepoStep(platform, commit_hash)
    if repo_info_treatment is None:
        return None
    repo_info['treatment'] = repo_info_treatment
    if getArgs().ab_testing:
        # Only build the control on regression detection.
        # Figure out the base commit: it is the first commit in the week.
        control_commit_hash = self._getControlCommit(
            repo_info_treatment['commit_time'], getArgs().base_commit)
        repo_info_control = self._setupRepoStep(platform, control_commit_hash)
        if repo_info_control is None:
            return None
        repo_info['control'] = repo_info_control
    # Pass the meta file from build to benchmark
    meta_file = os.path.join(getArgs().frameworks_dir,
                             getArgs().framework, platform, "meta.json")
    if os.path.isfile(meta_file):
        with open(meta_file, "r") as f:
            meta = json.load(f)
            repo_info["meta"] = meta
    if getArgs().regression:
        repo_info["regression_commits"] = \
            self._getCompareCommits(repo_info_treatment['commit'])
    # Use repo_info to pass the value of platform
    repo_info['platform'] = platform
    return repo_info

def report(self, content):
    if not getArgs().remote_reporter:
        return
    access_token = getArgs().remote_access_token
    remote = self._getRemoteInfo()
    logs = self._composeMessages(content, remote['category'])
    self._log(remote['url'], access_token, logs)

def _getSavedCommit(self):
    if getArgs().commit_file and \
            os.path.isfile(getArgs().commit_file):
        with open(getArgs().commit_file, 'r') as file:
            commit_hash = file.read().strip()
            # Verify that the commit exists
            return self.repo.getNextCommitHash(commit_hash)
    return None

def run(self):
    if getArgs().interval:
        while not stopRun():
            self._buildExecutables()
            time.sleep(getArgs().interval)
    else:
        # Single run
        self._buildExecutables()

def getReporters():
    reporters = []
    if getArgs().local_reporter:
        reporters.append(LocalReporter())
    if getArgs().remote_reporter:
        reporters.append(RemoteReporter())
    if getArgs().screen_reporter:
        reporters.append(ScreenReporter())
    return reporters

def runBenchmark(self, info, platform, benchmarks, framework):
    if getArgs().reboot:
        platform.rebootDevice()
    reporters = getReporters()
    for benchmark in benchmarks:
        b = copy.deepcopy(benchmark)
        i = copy.deepcopy(info)
        runOneBenchmark(i, b, framework, platform, getArgs().platform,
                        reporters)

def _getInfo(self):
    info = json.loads(getArgs().info)
    info["run_type"] = "benchmark"
    if getArgs().backend:
        info["commands"] = {}
        info["commands"][getArgs().framework] = {
            "backend": getArgs().backend
        }
    return info

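# Sketch of the resulting info dict, assuming the hypothetical invocation
# --info '{"treatment": {}}' --framework caffe2 --backend cpu:
#
#   {
#       "treatment": {},
#       "run_type": "benchmark",
#       "commands": {"caffe2": {"backend": "cpu"}}
#   }
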
def runOneBenchmark(info, benchmark, framework, platform, backend,
                    reporters, lock):
    assert "treatment" in info, "Treatment is missing in info"
    getLogger().info("Running {}".format(benchmark["path"]))
    minfo = copy.deepcopy(info["treatment"])
    if "shared_libs" in info:
        minfo["shared_libs"] = info["shared_libs"]
    try:
        data = _runOnePass(minfo, benchmark, framework, platform)
        meta = None
        if "control" in info and benchmark["tests"][0]["metric"] == "delay":
            cinfo = copy.deepcopy(info["control"])
            if "shared_libs" in info:
                cinfo["shared_libs"] = info["shared_libs"]
            control = _runOnePass(cinfo, benchmark, framework, platform)
            bname = benchmark["model"]["name"]
            data = _mergeDelayData(data, control, bname)
        if benchmark["tests"][0]["metric"] != "generic":
            data = _adjustData(info, data)
        meta = _retrieveMeta(info, benchmark, platform, framework, backend)
        result = {"meta": meta, "data": data}
    except Exception as e:
        # Catch all exceptions so that a failure in one test does not
        # affect other tests
        getLogger().info("Exception caught when running benchmark")
        getLogger().info(e)
        data = None
    if data is None or len(data) == 0:
        name = platform.getMangledName()
        model_name = ""
        if "model" in benchmark and "name" in benchmark["model"]:
            model_name = benchmark["model"]["name"]
        commit_hash = ""
        if "commit" in info["treatment"]:
            commit_hash = info["treatment"]["commit"]
        getLogger().info("No data collected for {} ".format(model_name) +
                         "on {}. ".format(name) +
                         "The run may have failed for " +
                         "{}".format(commit_hash))
        return False
    with lock:
        for reporter in reporters:
            reporter.report(result)
    if "regression_commits" in info and \
            info["run_type"] == "benchmark" and \
            getArgs().local_reporter:
        from regression_detectors.regression_detectors import \
            checkRegressions
        checkRegressions(info, platform, framework, benchmark, reporters,
                         result['meta'], getArgs().local_reporter)
    time.sleep(5)
    return True

def run(self):
    buildProgramPlatform(getArgs().dst, getArgs().repo_dir,
                         getArgs().framework, getArgs().frameworks_dir,
                         getArgs().platform)


if __name__ == "__main__":
    app = BuildProgram()
    app.run()

def rebootDevice(self):
    self.util.reboot()
    self.waitForDevice(180)
    # Need to wait a bit more after the device is rebooted
    time.sleep(20)
    # May need to set the logcat size again after reboot
    self._setLogCatSize()
    if getArgs().set_freq:
        self.util.setFrequency(getArgs().set_freq)

def getRepo():
    repo = getArgs().repo
    repo_dir = getArgs().repo_dir
    if repo == 'git':
        return GitRepo(repo_dir)
    elif repo == 'hg':
        return HGRepo(repo_dir)
    else:
        assert False, "Repo not recognized"
    return None

def getPlatforms(tempdir):
    platforms = []
    if getArgs().platform.startswith("host") or \
            getArgs().platform.startswith("linux") or \
            getArgs().platform.startswith("mac"):
        platforms.append(HostPlatform(tempdir))
    elif getArgs().platform.startswith("android"):
        driver = AndroidDriver()
        platforms.extend(driver.getAndroidPlatforms(tempdir))
        if getArgs().excluded_devices:
            excluded_devices = getArgs().excluded_devices.strip().split(',')
            platforms = [p for p in platforms
                         if p.platform not in excluded_devices and
                         (p.platform_hash is None or
                          p.platform_hash not in excluded_devices)]
        if getArgs().devices:
            plts = getArgs().devices.strip().split(',')
            platforms = [p for p in platforms
                         if p.platform in plts or p.platform_hash in plts]
    elif getArgs().platform.startswith("ios"):
        driver = IOSDriver()
        platforms.extend(driver.getIOSPlatforms(tempdir))
    elif os.name == "nt":
        platforms.append(WindowsPlatform(tempdir))
    if not platforms:
        getLogger().error("No platform or physical device detected.")
    return platforms

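# Illustrative filtering behavior (the device names below are made up): with
# --platform android --devices "SM-G960U,PIXEL3", only Android platforms
# whose platform name or platform_hash matches one of those two entries are
# kept; --excluded_devices removes matches in the same way beforehand.
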
def _getAbsFilename(self, filename, source):
    if filename[0:2] == "//":
        # Paths starting with "//" are relative to --root_model_dir
        assert getArgs().root_model_dir is not None, \
            "When specifying a relative directory, " \
            "--root_model_dir must be specified."
        return getArgs().root_model_dir + filename[1:]
    elif filename[0] != "/":
        abs_dir = os.path.dirname(os.path.abspath(source)) + "/"
        return abs_dir + filename
    else:
        return filename

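# Illustrative resolutions (the paths are assumptions, not from the source):
#   "//models/m.json" with --root_model_dir /data -> "/data/models/m.json"
#   "m.json" next to /specs/bench.json            -> "/specs/m.json"
#   "/abs/m.json"                                 -> "/abs/m.json"
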
def __init__(self, tempdir):
    super(HostPlatform, self).__init__()
    if getArgs().platform_sig is not None:
        self.setPlatform(str(getArgs().platform_sig))
    else:
        self.setPlatform(platform.platform() + "-" +
                         self._getProcessorName())
    self.setPlatformHash(str(socket.gethostname()))
    self.tempdir = tempdir + "/" + self.platform + '_' + \
        str(self.platform_hash)
    os.makedirs(self.tempdir, 0o777)
    self.type = "host"

def _replaceStringMap(self, root, platform):
    string_map = json.loads(getArgs().string_map) \
        if getArgs().string_map else {}
    string_map["TGTDIR"] = platform.getOutputDir()
    string_map["HOSTDIR"] = self._createHostDir()
    string_map["FAIPEPROOT"] = getFAIPEPROOT()
    for name in string_map:
        value = string_map[name]
        deepReplace(root, "{" + name + "}", value)

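# Minimal substitution sketch, assuming a hypothetical benchmark field
# {"output": "{TGTDIR}/result.txt"}: after _replaceStringMap runs, deepReplace
# rewrites it in place to the platform's output directory, for example
# {"output": "/sdcard/faipep/result.txt"} (the directory is illustrative).
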
def run(self):
    try:
        if getArgs().interval:
            while not stopRun():
                self._buildExecutables()
                time.sleep(getArgs().interval)
        else:
            # Single run
            self._buildExecutables()
    except Exception:
        setRunStatus(2)
        getLogger().error(traceback.format_exc())

def _runBenchmarkSuites(self):
    # Initially sleep 10 seconds in case there is no need to build the binary
    time.sleep(10)
    if getArgs().interval:
        while not stopRun():
            self._runBenchmarkSuitesInQueue()
            time.sleep(getArgs().interval)
    else:
        # Single run: wait until the executables finish building
        while self.executables_builder.is_alive():
            time.sleep(10)
        self._runBenchmarkSuitesInQueue()

def reboot():
    parse()
    device = getArgs().device
    platform = getArgs().platform
    if platform.startswith("ios"):
        util = IDB(device)
    elif platform.startswith("android"):
        util = ADB(device)
    else:
        assert False, "Platform {} not recognized".format(platform)
    util.reboot()
    print("Reboot Success")

def _buildProgram(self, platform, repo_info):
    directory = "/" + \
        getDirectory(repo_info['commit'], repo_info['commit_time'])
    dst = getArgs().exec_dir + "/" + getArgs().framework + "/" + \
        platform + "/" + directory + getArgs().framework + \
        "_benchmark"
    repo_info["program"] = dst
    if not _runIndividual() and os.path.isfile(dst):
        return True
    else:
        return self._buildProgramPlatform(repo_info, dst, platform)

def _runOneBenchmarkSuite(self, repo_info):
    cmd = self._getCommand(repo_info)
    getLogger().info("Running: %s", cmd)
    # Always sleep 10 seconds to put the phone in a more consistent state
    time.sleep(10)
    # Cannot use subprocess because it conflicts with requests
    os.system(cmd)
    if getArgs().commit_file:
        with open(getArgs().commit_file, 'w') as file:
            file.write(repo_info['treatment']['commit'])
    getLogger().info("Done one benchmark run for " +
                     repo_info['treatment']['commit'])