Example #1
    def run(self):
        raw_args = self._getRawArgs()
        if "--remote" in raw_args or "--lab" in raw_args:

            # server address must start with http
            assert "--server_addr" in raw_args
            idx = raw_args.index("--server_addr")
            assert (raw_args[idx + 1].startswith("http")
                    or len(raw_args[idx + 1]) == 0)
            if "--lab" in raw_args and "--remote_reporter" not in raw_args:
                raw_args.extend([
                    "--remote_reporter",
                    raw_args[idx + 1] +
                    ("" if raw_args[idx + 1][-1] == "/" else "/") +
                    "benchmark/store-result|oss",
                ])
        app = self.repoCls(raw_args=raw_args)
        ret = app.run()
        if "--query_num_devices" in self.unknowns:
            return ret
        if "--fetch_status" in self.unknowns or "--fetch_result" in self.unknowns:
            return ret
        if "--list_devices" in self.unknowns:
            return ret
        if ret is not None:
            setRunStatus(ret >> 8)
        return getRunStatus()
Example #2
    def preprocess(self, *args, **kwargs):
        assert "programs" in kwargs, "Must have programs specified"

        programs = kwargs["programs"]

        # find the first zipped app file
        assert "program" in programs, "program is not specified"
        program = programs["program"]
        assert program.endswith(".ipa"), \
            "IOS program must be an ipa file"

        processRun(["unzip", "-o", "-d", self.tempdir, program])
        # get the app name
        app_dir = os.path.join(self.tempdir, "Payload")
        dirs = [f for f in os.listdir(app_dir)
                if os.path.isdir(os.path.join(app_dir, f))]
        assert len(dirs) == 1, "Only one app in the Payload directory"
        app_name = dirs[0]
        self.app = os.path.join(app_dir, app_name)
        del programs["program"]

        bundle_id, _ = processRun(["osascript", "-e",
                                   "id of app \"" + self.app + "\""])
        assert len(bundle_id) > 0, "bundle id cannot be found"
        self.util.setBundleId(bundle_id[0].strip())

        # We know this command will fail. Avoid propagating this
        # failure upstream.
        success = getRunStatus()
        self.util.run(["--bundle", self.app, "--uninstall", "--justlaunch"])
        setRunStatus(success, overwrite=True)
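A note on the pattern above: saving getRunStatus() before a command that is expected to fail and restoring it afterwards with setRunStatus(success, overwrite=True) recurs throughout these examples. Below is a minimal sketch of a context manager wrapping the same idea; the name preserveRunStatus is hypothetical and not part of FAI-PEP, and getRunStatus/setRunStatus are assumed to be importable from the harness utilities.

from contextlib import contextmanager

@contextmanager
def preserveRunStatus():
    # Hypothetical helper: remember the run status before a command
    # that is allowed to fail, and restore it afterwards so that the
    # failure is not propagated upstream.
    saved = getRunStatus()
    try:
        yield
    finally:
        setRunStatus(saved, overwrite=True)

With such a helper, the uninstall step above could read:

with preserveRunStatus():
    self.util.run(["--bundle", self.app, "--uninstall", "--justlaunch"])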
Example #3
    def runBenchmark(self, cmd, *args, **kwargs):
        if not isinstance(cmd, list):
            cmd = shlex.split(cmd)

        # profiling is not supported on android
        if "platform_args" in kwargs and \
           "enable_profiling" in kwargs["platform_args"]:
            del kwargs["platform_args"]["enable_profiling"]
        if "platform_args" in kwargs and \
           "profiler_args" in kwargs["platform_args"]:
            del kwargs["platform_args"]["profiler_args"]

        # meta is used to store any data about the benchmark run
        # that is not the output of the command
        meta = {}

        # We know this command may fail. Avoid propagating this
        # failure upstream.
        success = getRunStatus()
        self.util.logcat('-b', 'all', '-c')
        setRunStatus(success, overwrite=True)
        if self.app:
            log = self.runAppBenchmark(cmd, *args, **kwargs)
        else:
            log = self.runBinaryBenchmark(cmd, *args, **kwargs)
        return log, meta
Example #4
 def run(self):
     raw_args = self._getRawArgs()
     app = self.repoCls(raw_args=raw_args)
     ret = app.run()
     if ret and json.loads(ret):
         return ret
     if ret is not None:
         setRunStatus(ret >> 8)
     return getRunStatus()
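The ret >> 8 above (also used in Examples #1, #10, and #18) assumes that app.run() returns a POSIX-style wait status, in which the child's exit code sits in the high byte. A small POSIX-only illustration of the same shift, independent of FAI-PEP:

import os

# os.system returns a wait status on POSIX; the exit code is in the high byte.
ret = os.system("exit 3")
print(ret >> 8)             # 3
print(os.WEXITSTATUS(ret))  # 3, the portable spelling of the same shift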
Example #5
 def _setLogCatSize(self):
     repeat = True
     size = 131072
     while (repeat and size > 256):
         repeat = False
         # We know this command may fail. Avoid propagating this
         # failure upstream.
         success = getRunStatus()
         ret = self.util.logcat("-G", str(size) + "K")
         setRunStatus(success, overwrite=True)
         if len(ret) > 0 and ret[0].find("failed to") >= 0:
             repeat = True
             size = int(size / 2)
Example #6
 def runBenchmark(self, cmd, *args, **kwargs):
     if not isinstance(cmd, list):
         cmd = shlex.split(cmd)
     # We know this command may fail. Avoid propagating this
     # failure upstream.
     success = getRunStatus()
     self.util.logcat('-b', 'all', '-c')
     setRunStatus(success, overwrite=True)
     if self.app:
         log = self.runAppBenchmark(cmd, *args, **kwargs)
     else:
         log = self.runBinaryBenchmark(cmd, *args, **kwargs)
     return log
Example #7
File: caffe2.py  Project: xta0/FAI-PEP
    def runOnPlatform(self, total_num, cmd, platform, platform_args,
                      converter):
        if converter is None:
            converter = {
                "name": "json_with_identifier_converter",
                "args": {
                    "identifier": self.IDENTIFIER
                }
            }

        converter_obj = self.converters[converter["name"]]()
        args = converter.get("args")
        results = []
        num = 0
        # emulate do...while... loop
        while True:
            output, meta = platform.runBenchmark(cmd,
                                                 platform_args=platform_args)
            one_result, valid_run_idxs = \
                converter_obj.collect(output, args)
            valid_run_idxs = [num + idx for idx in valid_run_idxs]
            num += len(valid_run_idxs)
            results.extend(one_result)
            if getRunStatus() != 0:
                getLogger().info("Execution failed, terminating")
                break
            if num < total_num:
                num_items = len(valid_run_idxs)
                if num_items > 0:
                    getLogger().info("%d items collected, Still missing %d "
                                     "runs. Collect again." %
                                     (num_items, total_num - num))

                    continue
                else:
                    getLogger().info("No new items collected, "
                                     "finish collecting...")
            elif total_num >= 0 and num > total_num:
                # if collect more than the needed number, get the
                # latest entries. This may happen when the data in
                # the previous runs are not cleared. e.g. on some
                # android 5 devices. Or, it may happen when multiple
                # runs are needed to collect the desired number of
                # iterations
                results = results[valid_run_idxs[num - total_num]:]
            break
        metric = converter_obj.convert(results)
        metric["meta"] = meta
        return metric
Example #8
    def run(self):
        tempdir = tempfile.mkdtemp(prefix="_".join(
            ["aibench", str(self.args.user_identifier), ""]))
        getLogger().info("Temp directory: {}".format(tempdir))
        info = self._getInfo()
        frameworks = getFrameworks()
        assert (self.args.framework
                in frameworks), "Framework {} is not supported".format(
                    self.args.framework)
        framework = frameworks[self.args.framework](tempdir, self.args)
        bcollector = BenchmarkCollector(framework,
                                        self.args.model_cache,
                                        args=self.args)
        benchmarks = bcollector.collectBenchmarks(info,
                                                  self.args.benchmark_file,
                                                  self.args.user_identifier)
        platforms = getPlatforms(self.args, tempdir, self.usb_controller)
        threads = []
        for platform in platforms:
            t = threading.Thread(target=self.runBenchmark,
                                 args=(info, platform, benchmarks))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()

        if not self.args.debug:
            shutil.rmtree(tempdir, True)

        status = self.status | getRunStatus()
        if getRunKilled():
            status_str = "killed"
        elif getRunTimeout():
            status_str = "timeout"
        elif status == 0:
            status_str = "success"
        elif status == 1:
            status_str = "user error"
        elif status == 2:
            status_str = "harness error"
        else:
            status_str = "user and harness error"
        getLogger().info(" ======= {} =======".format(status_str))
        if getRunKilled():
            return RUN_KILLED
        if getRunTimeout():
            return RUN_TIMEOUT
        return status
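The status decoding above (and in Examples #13 and #19) treats the run status as a bit mask: bit 0 marks a user error and bit 1 a harness error, so a value of 3 means both occurred. The same mapping as a small helper function; the name statusToString is made up for illustration and does not appear in FAI-PEP.

def statusToString(status):
    # Run status bit mask: bit 0 = user error, bit 1 = harness error.
    if status == 0:
        return "success"
    if status == 1:
        return "user error"
    if status == 2:
        return "harness error"
    return "user and harness error"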
Example #9
 def killProgram(self, program):
     basename = os.path.basename(program)
     # if the program doesn't exist, the grep may fail
     # do not update status code
     success = getRunStatus()
     res = self.util.shell(["ps", "|", "grep", basename])
     setRunStatus(success, overwrite=True)
     if len(res) == 0:
         return
     results = res[0].split("\n")
     pattern = re.compile(r"^shell\s+(\d+)\s+")
     for result in results:
         match = pattern.match(result)
         if match:
             pid = match.group(1)
             self.util.shell(["kill", pid])
Example #10
    def run(self):
        raw_args = self._getRawArgs()
        if "--remote" in raw_args:

            # server address must start with http
            assert "--server_addr" in raw_args
            idx = raw_args.index("--server_addr")
            assert raw_args[idx+1].startswith("http") or len(raw_args[idx+1]) == 0
        app = self.repoCls(raw_args=raw_args)
        ret = app.run()
        if "--query_num_devices" in self.unknowns:
            return ret
        if "--fetch_status" in self.unknowns or "--fetch_result" in self.unknowns:
            return ret
        if ret is not None:
            setRunStatus(ret >> 8)
        return getRunStatus()
Example #11
def _runOnePass(info, benchmark, framework, platform):
    assert len(benchmark["tests"]) == 1, \
        "At this moment, only one test exists in the benchmark"
    to = benchmark["model"]["repeat"] if "repeat" in benchmark["model"] else 1
    output = None
    for idx in range(to):
        benchmark["tests"][0]["INDEX"] = idx
        one_output, output_files = \
            framework.runBenchmark(info, benchmark, platform)
        if output:
            deepMerge(output, one_output)
        else:
            output = copy.deepcopy(one_output)
        if getRunStatus() != 0:
            # early exit if there is an error
            break
    data = _processDelayData(output)
    return data
Example #12
    def runBenchmark(self, cmd, *args, **kwargs):
        if not isinstance(cmd, list):
            cmd = shlex.split(cmd)

        # meta is used to store any data about the benchmark run
        # that is not the output of the command
        meta = {}

        # We know this command may fail. Avoid propagating this
        # failure upstream.
        success = getRunStatus()
        self.util.logcat('-b', 'all', '-c')
        setRunStatus(success, overwrite=True)
        if self.app:
            log, meta = self.runAppBenchmark(cmd, *args, **kwargs)
        else:
            log, meta = self.runBinaryBenchmark(cmd, *args, **kwargs)
        return log, meta
Example #13
    def run(self):
        tempdir = tempfile.mkdtemp()
        getLogger().info("Temp directory: {}".format(tempdir))
        info = self._getInfo()
        frameworks = getFrameworks()
        assert self.args.framework in frameworks, \
            "Framework {} is not supported".format(self.args.framework)
        framework = frameworks[self.args.framework](tempdir, self.args)
        bcollector = BenchmarkCollector(framework,
                                        self.args.model_cache,
                                        args=self.args)
        benchmarks = bcollector.collectBenchmarks(info,
                                                  self.args.benchmark_file)
        platforms = getPlatforms(tempdir, self.args)
        threads = []
        for platform in platforms:
            t = threading.Thread(target=self.runBenchmark,
                                 args=(info, platform, benchmarks))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()

        if not self.args.debug:
            shutil.rmtree(tempdir, True)

        status = self.status | getRunStatus()
        if status == 0:
            status_str = "success"
        elif status == 1:
            status_str = "user error"
        elif status == 2:
            status_str = "harness error"
        else:
            status_str = "user and harness error"
        getLogger().info(" ======= {} =======".format(status_str))
        return status
Example #14
        if getArgs().reset_options or \
                not os.path.isdir(self.root_dir) or \
                not os.path.isfile(os.path.join(self.root_dir, "config.txt")):
            args = self._saveDefaultArgs(new_args)
        else:
            with open(os.path.join(self.root_dir, "config.txt"), "r") as f:
                args = json.load(f)
        for v in new_args:
            if v in args:
                del args[v]
        return args

    def _getCMD(self):
        args = self._getSavedArgs()
        unknowns = getUnknowns()
        dir_path = os.path.dirname(os.path.realpath(__file__))
        command = getPythonInterpreter() + " " + \
            os.path.join(dir_path, "repo_driver.py") + " " + \
            ' '.join([getString(u) + ' ' +
                     (getString(args[u])
                      if args[u] is not None else "")
                      for u in args]) + ' ' + \
            ' '.join([getString(u) for u in unknowns])
        return command


if __name__ == "__main__":
    app = RunBench()
    app.run()
    sys.exit(getRunStatus())
Example #15
def runOneBenchmark(info,
                    benchmark,
                    framework,
                    platform,
                    backend,
                    reporters,
                    lock,
                    cooldown=None,
                    user_identifier=None,
                    local_reporter=None):
    assert "treatment" in info, "Treatment is missing in info"
    getLogger().info("Running {}".format(benchmark["path"]))

    status = 0
    minfo = copy.deepcopy(info["treatment"])
    mbenchmark = copy.deepcopy(benchmark)
    if "shared_libs" in info:
        minfo["shared_libs"] = info["shared_libs"]
    try:
        # invalidate CPU cache
        [1.0 for _ in range(20 << 20)]
        gc.collect()
        data = _runOnePass(minfo, mbenchmark, framework, platform)
        status = status | getRunStatus()
        meta = None
        if "control" in info:
            cinfo = copy.deepcopy(info["control"])
            if "shared_libs" in info:
                cinfo["shared_libs"] = info["shared_libs"]
            # cool down between treatment and control
            if "model" in benchmark and "cooldown" in benchmark["model"]:
                cooldown = float(benchmark["model"]["cooldown"])
            time.sleep(cooldown)
            # invalidate CPU cache
            [1.0 for _ in range(20 << 20)]
            gc.collect()
            control = _runOnePass(cinfo, benchmark, framework, platform)
            status = status | getRunStatus()
            bname = benchmark["model"]["name"]
            data = _mergeDelayData(data, control, bname)
        if benchmark["tests"][0]["metric"] != "generic":
            data = _adjustData(info, data)
        meta = _retrieveMeta(info, benchmark, platform, framework, backend,
                             user_identifier)
        data = _retrieveInfo(info, data)
        result = {"meta": meta, "data": data}
    except Exception as e:
        # Catch all exceptions so that failure in one test does not
        # affect other tests
        getLogger().info("Exception caught when running benchmark")
        getLogger().info(e)
        data = None
        status = 2
        setRunStatus(status)
        getLogger().error(traceback.format_exc())

        # Set result meta and data to default values so that
        # the reporter will not try to key into a None
        result = {"meta": {}, "data": []}

    if data is None or len(data) == 0:
        name = platform.getMangledName()
        model_name = ""
        if "model" in benchmark and "name" in benchmark["model"]:
            model_name = benchmark["model"]["name"]
        commit_hash = ""
        if "commit" in info["treatment"]:
            commit_hash = info["treatment"]["commit"]
        getLogger().info("No data collected for ".format(model_name) +
                         "on {}. ".format(name) +
                         "The run may be failed for " +
                         "{}".format(commit_hash))
        return status

    with lock:
        for reporter in reporters:
            reporter.report(result)

    if "regression_commits" in info and \
            info["run_type"] == "benchmark" and local_reporter:
        from regression_detectors.regression_detectors import checkRegressions
        checkRegressions(info, platform, framework, benchmark, reporters,
                         result['meta'], local_reporter)
    return status
Example #16
 def postprocess(self, *args, **kwargs):
     success = getRunStatus()
     self.util.run(["--bundle", self.app, "--uninstall_only"])
     setRunStatus(success, overwrite=True)
Example #17
 def run(self):
     getLogger().info("Start benchmark run @ %s" %
                      datetime.datetime.now().strftime("%Y_%m_%d_%H_%M"))
     self.executables_builder.start()
     self._runBenchmarkSuites()
     sys.exit(getRunStatus())
Example #18
 def run(self):
     raw_args = self._getRawArgs()
     app = RepoDriver(raw_args=raw_args)
     ret = app.run()
     setRunStatus(ret >> 8)
     sys.exit(getRunStatus())
Example #19
        info["meta"]["command_args"] = getArgs().command_args \
            if getArgs().command_args else ""

        # for backward compatible purpose
        if getArgs().backend:
            info["meta"]["command_args"] += \
                " --backend {}".format(getArgs().backend)
        if getArgs().wipe_cache:
            info["meta"]["command_args"] += \
                " --wipe_cache {}".format(getArgs().wipe_cache)
        if getArgs().user_string:
            info["user"] = getArgs().user_string

        return info


if __name__ == "__main__":
    app = BenchmarkDriver()
    app.run()
    status = app.status | getRunStatus()
    if status == 0:
        status_str = "success"
    elif status == 1:
        status_str = "user error"
    elif status == 2:
        status_str = "harness error"
    else:
        status_str = "user and harness error"
    getLogger().info(" ======= {} =======".format(status_str))
    sys.exit(status)
Example #20
 def runBinaryBenchmark(self, cmd, *args, **kwargs):
     log_to_screen_only = 'log_to_screen_only' in kwargs and \
         kwargs['log_to_screen_only']
     platform_args = {}
     meta = {}
     if "platform_args" in kwargs:
         platform_args = kwargs["platform_args"]
         if "taskset" in platform_args:
             taskset = platform_args["taskset"]
             cmd = ["taskset", taskset] + cmd
             del platform_args["taskset"]
         if "sleep_before_run" in platform_args:
             sleep_before_run = str(platform_args["sleep_before_run"])
             cmd = ["sleep", sleep_before_run, "&&"] + cmd
             del platform_args["sleep_before_run"]
         if "power" in platform_args and platform_args["power"]:
             # launch the settings page to prevent the phone
             # from going into sleep mode
             self.util.shell(
                 ["am", "start", "-a", "android.settings.SETTINGS"])
             time.sleep(1)
             cmd = ["nohup"] + ["sh", "-c", "'" + " ".join(cmd) + "'"] + \
                 [">", "/dev/null", "2>&1"]
             platform_args["non_blocking"] = True
             del platform_args["power"]
         if platform_args.get("enable_profiling", False):
             # attempt to run with profiling, else fallback to standard run
             success = getRunStatus()
             try:
                 simpleperf = getProfilerByUsage(
                     "android",
                     None,
                     platform=self,
                     model_name=platform_args.get("model_name", None),
                     cmd=cmd)
                 if simpleperf:
                     f = simpleperf.start()
                     output, meta = f.result()
                     log_logcat = []
                     if not log_to_screen_only:
                         log_logcat = self.util.logcat('-d')
                     return output + log_logcat, meta
             # If profiling has not succeeded for some reason, reset the
             # run status and run without profiling.
             except RuntimeError as ex:
                 getLogger().error(
                     "An error occurred when running Simpleperf profiler. {}"
                     .format(ex))
                 setRunStatus(success, overwrite=True)
             except FileNotFoundError as ex:
                 getLogger().error(
                     "An error occurred when running Simpleperf profiler. {}"
                     .format(ex))
                 setRunStatus(success, overwrite=True)
             except Exception:
                 getLogger().exception(
                     "An error has occurred when running Simpleperf profiler."
                 )
                 setRunStatus(success, overwrite=True)
     log_screen = self.util.shell(cmd, **platform_args)
     log_logcat = []
     if not log_to_screen_only:
         log_logcat = self.util.logcat('-d')
     return log_screen + log_logcat, meta
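For reference, Example #7 shows how these platform methods are driven by the framework via platform.runBenchmark(cmd, platform_args=platform_args). A hypothetical call against the variant above, with a made-up binary path and flags, might look like:

# `platform` is an instance of the Android platform class shown above;
# the keys mirror the platform_args handled in runBinaryBenchmark.
platform_args = {
    "taskset": "f0",        # pin the benchmark to selected cores
    "sleep_before_run": 5,  # let the device settle before the run
}
log, meta = platform.runBenchmark(
    ["/data/local/tmp/benchmark", "--warmup", "1", "--iter", "10"],
    platform_args=platform_args,
)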