Example No. 1
    def runBenchmark(self, cmd, *args, **kwargs):
        if not isinstance(cmd, list):
            cmd = shlex.split(cmd)

        # profiling is not supported on android
        if "platform_args" in kwargs and \
           "enable_profiling" in kwargs["platform_args"]:
            del kwargs["platform_args"]["enable_profiling"]
        if "platform_args" in kwargs and \
           "profiler_args" in kwargs["platform_args"]:
            del kwargs["platform_args"]["profiler_args"]

        # meta is used to store any data about the benchmark run
        # that is not the output of the command
        meta = {}

        # We know this command may fail. Avoid propagating this
        # failure upstream
        success = getRunStatus()
        self.util.logcat('-b', 'all', '-c')
        setRunStatus(success, overwrite=True)
        if self.app:
            log = self.runAppBenchmark(cmd, *args, **kwargs)
        else:
            log = self.runBinaryBenchmark(cmd, *args, **kwargs)
        return log, meta
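
Several of these examples repeat the same idiom: save the global run status with getRunStatus(), run a command that is expected to fail (such as clearing logcat), then restore the status with setRunStatus(..., overwrite=True). Below is a minimal sketch of that idiom as a reusable context manager; preserveRunStatus is a hypothetical helper name, and getRunStatus/setRunStatus are assumed to be the same status accessors the examples already import.

from contextlib import contextmanager

@contextmanager
def preserveRunStatus():
    # Hypothetical helper: remember the current global run status ...
    saved = getRunStatus()
    try:
        yield
    finally:
        # ... and restore it, so a command that is expected to fail
        # does not propagate its failure upstream.
        setRunStatus(saved, overwrite=True)

# Usage sketch:
# with preserveRunStatus():
#     self.util.logcat('-b', 'all', '-c')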
Example No. 2
    def run(self):
        raw_args = self._getRawArgs()
        if "--remote" in raw_args or "--lab" in raw_args:

            # server address must start with http
            assert "--server_addr" in raw_args
            idx = raw_args.index("--server_addr")
            assert raw_args[idx + 1].startswith("http") or len(
                raw_args[idx + 1]) == 0
            if "--lab" in raw_args and "--remote_reporter" not in raw_args:
                raw_args.extend([
                    "--remote_reporter",
                    raw_args[idx + 1] +
                    ("" if raw_args[idx + 1][-1] == "/" else "/") +
                    "benchmark/store-result|oss",
                ])
        app = self.repoCls(raw_args=raw_args)
        ret = app.run()
        if "--query_num_devices" in self.unknowns:
            return ret
        if "--fetch_status" in self.unknowns or "--fetch_result" in self.unknowns:
            return ret
        if "--list_devices" in self.unknowns:
            return ret
        if ret is not None:
            setRunStatus(ret >> 8)
        return getRunStatus()
Example No. 3
 def __init__(self, **kwargs):
     setRunStatus(0, overwrite=True)
     self.status = 0
     raw_args = kwargs.get("raw_args", None)
     self.usb_controller = kwargs.get("usb_controller")
     self.args, self.unknowns = parser.parse_known_args(raw_args)
     self._lock = threading.Lock()
Example No. 4
    def preprocess(self, *args, **kwargs):
        assert "programs" in kwargs, "Must have programs specified"

        programs = kwargs["programs"]

        # find the first zipped app file
        assert "program" in programs, "program is not specified"
        program = programs["program"]
        assert program.endswith(".ipa"), \
            "IOS program must be an ipa file"

        processRun(["unzip", "-o", "-d", self.tempdir, program])
        # get the app name
        app_dir = os.path.join(self.tempdir, "Payload")
        dirs = [f for f in os.listdir(app_dir)
                if os.path.isdir(os.path.join(app_dir, f))]
        assert len(dirs) == 1, "Only one app in the Payload directory"
        app_name = dirs[0]
        self.app = os.path.join(app_dir, app_name)
        del programs["program"]

        bundle_id, _ = processRun(["osascript", "-e",
                                   "id of app \"" + self.app + "\""])
        assert len(bundle_id) > 0, "bundle id cannot be found"
        self.util.setBundleId(bundle_id[0].strip())

        # We know this command will fail. Avoid propagating this
        # failure upstream
        success = getRunStatus()
        self.util.run(["--bundle", self.app, "--uninstall", "--justlaunch"])
        setRunStatus(success, overwrite=True)
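
For reference, the bundle-id lookup above can be reproduced with the standard library alone. This is a minimal sketch assuming macOS and a hypothetical app path, using subprocess directly instead of the processRun helper:

import subprocess

# Hypothetical app path; preprocess() obtains the real one from the unpacked .ipa.
app_path = "/tmp/Payload/Example.app"
out = subprocess.run(
    ["osascript", "-e", 'id of app "{}"'.format(app_path)],
    capture_output=True, text=True, check=False)
bundle_id = out.stdout.strip()  # e.g. "com.example.Example"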
Example No. 5
 def run(self):
     raw_args = self._getRawArgs()
     app = self.repoCls(raw_args=raw_args)
     ret = app.run()
     if ret and json.loads(ret):
         return ret
     if ret is not None:
         setRunStatus(ret >> 8)
     return getRunStatus()
Example No. 6
 def run(self):
     try:
         if self.args.interval:
             while not stopRun(self.args.status_file):
                 self._buildExecutables()
                 time.sleep(self.args.interval)
         else:
             # single run
             self._buildExecutables()
     except Exception:
         setRunStatus(2)
         getLogger().error(traceback.format_exc())
Example No. 7
 def runBenchmark(self, cmd, *args, **kwargs):
     if not isinstance(cmd, list):
         cmd = shlex.split(cmd)
     # We know this command may fail. Avoid propagating this
     # failure upstream
     success = getRunStatus()
     self.util.logcat('-b', 'all', '-c')
     setRunStatus(success, overwrite=True)
     if self.app:
         log = self.runAppBenchmark(cmd, *args, **kwargs)
     else:
         log = self.runBinaryBenchmark(cmd, *args, **kwargs)
     return log
Example No. 8
 def _setLogCatSize(self):
     repeat = True
     size = 131072
     while (repeat and size > 256):
         repeat = False
         # We know this command may fail. Avoid propagating this
         # failure upstream
         success = getRunStatus()
         ret = self.util.logcat("-G", str(size) + "K")
         setRunStatus(success, overwrite=True)
         if len(ret) > 0 and ret[0].find("failed to") >= 0:
             repeat = True
             size = int(size / 2)
Example No. 9
 def _runOneBenchmarkSuite(self, repo_info):
     cmd = self._getCommand(repo_info)
     getLogger().info("Running: %s", cmd)
     if not _runIndividual():
         # always sleep 10 seconds to put the phone in a more
         # consistent state
         time.sleep(10)
     # cannot use subprocess because it conflicts with requests
     ret = os.system(cmd)
     setRunStatus(ret >> 8)
     if getArgs().commit_file and getArgs().regression:
         with open(getArgs().commit_file, 'w') as file:
             file.write(repo_info['treatment']['commit'])
     getLogger().info("One benchmark run {} for ".format(
         "successful" if ret == 0 else "failed") +
                      repo_info['treatment']['commit'])
Example No. 10
 def killProgram(self, program):
     basename = os.path.basename(program)
     # if the program doesn't exist, the grep may fail
     # do not update status code
     success = getRunStatus()
     res = self.util.shell(["ps", "|", "grep", basename])
     setRunStatus(success, overwrite=True)
     if len(res) == 0:
         return
     results = res[0].split("\n")
     pattern = re.compile(r"^shell\s+(\d+)\s+")
     for result in results:
         match = pattern.match(result)
         if match:
             pid = match.group(1)
             self.util.shell(["kill", pid])
Example No. 11
    def run(self):
        raw_args = self._getRawArgs()
        if "--remote" in raw_args:

            # server address must start with http
            assert "--server_addr" in raw_args
            idx = raw_args.index("--server_addr")
            assert raw_args[idx+1].startswith("http") or len(raw_args[idx+1]) == 0
        app = self.repoCls(raw_args=raw_args)
        ret = app.run()
        if "--query_num_devices" in self.unknowns:
            return ret
        if "--fetch_status" in self.unknowns or "--fetch_result" in self.unknowns:
            return ret
        if ret is not None:
            setRunStatus(ret >> 8)
        return getRunStatus()
Example No. 12
 def _runOneBenchmarkSuite(self, repo_info):
     raw_args = self._getRawArgs(repo_info)
     if not _runIndividual(self.args.interval, self.args.regression,
                           self.args.ab_testing):
         # always sleep 10 seconds to put the phone in a more
         # consistent state
         time.sleep(10)
     # cannot use subprocess because it conflicts with requests
     app = BenchmarkDriver(raw_args=raw_args)
     app.run()
     ret = 0
     setRunStatus(ret >> 8)
     if self.args.commit_file and self.args.regression:
         with open(self.args.commit_file, 'w') as file:
             file.write(repo_info['treatment']['commit'])
     getLogger().info("One benchmark run {} for ".format(
         "successful" if ret == 0 else "failed") +
                      repo_info['treatment']['commit'])
Example No. 13
    def runBenchmark(self, cmd, *args, **kwargs):
        if not isinstance(cmd, list):
            cmd = shlex.split(cmd)

        # meta is used to store any data about the benchmark run
        # that is not the output of the command
        meta = {}

        # We know this command may fail. Avoid propagating this
        # failure upstream
        success = getRunStatus()
        self.util.logcat('-b', 'all', '-c')
        setRunStatus(success, overwrite=True)
        if self.app:
            log, meta = self.runAppBenchmark(cmd, *args, **kwargs)
        else:
            log, meta = self.runBinaryBenchmark(cmd, *args, **kwargs)
        return log, meta
Example No. 14
 def run(self):
     cmd = self._getCMD()
     getLogger().info("Running: %s", cmd)
     ret = os.system(cmd)
     setRunStatus(ret >> 8)
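
The ret >> 8 used here (and in several other examples) relies on POSIX semantics: os.system returns the raw wait status, and the child's exit code occupies the high byte. A minimal sketch, assuming a POSIX shell:

import os

status = os.system("exit 3")       # raw wait status from the shell
exit_code = status >> 8            # high byte holds the exit code -> 3
assert exit_code == os.WEXITSTATUS(status)  # portable equivalent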
Example No. 15
def runOneBenchmark(info,
                    benchmark,
                    framework,
                    platform,
                    backend,
                    reporters,
                    lock,
                    cooldown=None,
                    user_identifier=None,
                    local_reporter=None):
    assert "treatment" in info, "Treatment is missing in info"
    getLogger().info("Running {}".format(benchmark["path"]))

    status = 0
    minfo = copy.deepcopy(info["treatment"])
    mbenchmark = copy.deepcopy(benchmark)
    if "shared_libs" in info:
        minfo["shared_libs"] = info["shared_libs"]
    try:
        data = _runOnePass(minfo, mbenchmark, framework, platform)
        meta = None
        if "control" in info:
            cinfo = copy.deepcopy(info["control"])
            if "shared_libs" in info:
                cinfo["shared_libs"] = info["shared_libs"]
            # cool down between treatment and control
            if "model" in benchmark and "cooldown" in benchmark["model"]:
                cooldown = float(benchmark["model"]["cooldown"])
            time.sleep(cooldown)
            control = _runOnePass(cinfo, benchmark, framework, platform)
            bname = benchmark["model"]["name"]
            data = _mergeDelayData(data, control, bname)
        if benchmark["tests"][0]["metric"] != "generic":
            data = _adjustData(info, data)
        meta = _retrieveMeta(info, benchmark, platform, framework, backend,
                             user_identifier)

        result = {"meta": meta, "data": data}
    except Exception as e:
        # Catch all exceptions so that failure in one test does not
        # affect other tests
        getLogger().info("Exception caught when running benchmark")
        getLogger().info(e)
        data = None
        status = 2
        setRunStatus(status)
        getLogger().error(traceback.format_exc())

    if data is None or len(data) == 0:
        name = platform.getMangledName()
        model_name = ""
        if "model" in benchmark and "name" in benchmark["model"]:
            model_name = benchmark["model"]["name"]
        commit_hash = ""
        if "commit" in info["treatment"]:
            commit_hash = info["treatment"]["commit"]
        getLogger().info("No data collected for ".format(model_name) +
                         "on {}. ".format(name) +
                         "The run may be failed for " +
                         "{}".format(commit_hash))
        return status

    with lock:
        for reporter in reporters:
            reporter.report(result)

    if "regression_commits" in info and \
            info["run_type"] == "benchmark" and local_reporter:
        from regression_detectors.regression_detectors import checkRegressions
        checkRegressions(info, platform, framework, benchmark, reporters,
                         result['meta'], local_reporter)
    return status
Example No. 16
 def postprocess(self, *args, **kwargs):
     success = getRunStatus()
     self.util.run(["--bundle", self.app, "--uninstall_only"])
     setRunStatus(success, overwrite=True)
Example No. 17
 def run(self):
     raw_args = self._getRawArgs()
     app = RepoDriver(raw_args=raw_args)
     ret = app.run()
     setRunStatus(ret >> 8)
     sys.exit(getRunStatus())
Example No. 18
def runOneBenchmark(
    info,
    benchmark,
    framework,
    platform,
    backend,
    reporters,
    lock,
    cooldown=None,
    user_identifier=None,
    local_reporter=None,
):
    assert "treatment" in info, "Treatment is missing in info"
    getLogger().info("Running {}".format(benchmark["path"]))

    status = 0
    minfo = copy.deepcopy(info["treatment"])
    mbenchmark = copy.deepcopy(benchmark)
    if "shared_libs" in info:
        minfo["shared_libs"] = info["shared_libs"]
    try:
        # invalidate CPU cache
        [1.0 for _ in range(20 << 20)]
        gc.collect()
        data = _runOnePass(minfo, mbenchmark, framework, platform)
        status = status | getRunStatus()
        meta = None
        if "control" in info:
            cinfo = copy.deepcopy(info["control"])
            if "shared_libs" in info:
                cinfo["shared_libs"] = info["shared_libs"]
            # cool down between treatment and control
            if "model" in benchmark and "cooldown" in benchmark["model"]:
                cooldown = float(benchmark["model"]["cooldown"])
            time.sleep(cooldown)
            # invalidate CPU cache
            [1.0 for _ in range(20 << 20)]
            gc.collect()
            control = _runOnePass(cinfo, benchmark, framework, platform)
            status = status | getRunStatus()
            bname = benchmark["model"]["name"]
            data = _mergeDelayData(data, control, bname)
        if benchmark["tests"][0]["metric"] != "generic":
            data = _adjustData(info, data)
        meta = _retrieveMeta(info, benchmark, platform, framework, backend,
                             user_identifier)
        data = _retrieveInfo(info, data)
        result = {"meta": meta, "data": data}
    except Exception as e:
        # Catch all exceptions so that failure in one test does not
        # affect other tests
        getLogger().info("Exception caught when running benchmark")
        getLogger().info(e)
        data = None
        status = 2
        setRunStatus(status)
        getLogger().error(traceback.format_exc())

        # Set result meta and data to default values so that
        # the reporter does not try to key into a None
        result = {"meta": {}, "data": []}

    if data is None or len(data) == 0:
        _logNoData(benchmark, info, platform.getMangledName())
        return status

    with lock:
        for reporter in reporters:
            reporter.report(result)

    if ("regression_commits" in info and info["run_type"] == "benchmark"
            and local_reporter):
        from regression_detectors.regression_detectors import checkRegressions

        checkRegressions(
            info,
            platform,
            framework,
            benchmark,
            reporters,
            result["meta"],
            local_reporter,
        )
    return status
Example No. 19
 def runBinaryBenchmark(self, cmd, *args, **kwargs):
     log_to_screen_only = 'log_to_screen_only' in kwargs and \
         kwargs['log_to_screen_only']
     platform_args = {}
     meta = {}
     if "platform_args" in kwargs:
         platform_args = kwargs["platform_args"]
         if "taskset" in platform_args:
             taskset = platform_args["taskset"]
             cmd = ["taskset", taskset] + cmd
             del platform_args["taskset"]
         if "sleep_before_run" in platform_args:
             sleep_before_run = str(platform_args["sleep_before_run"])
             cmd = ["sleep", sleep_before_run, "&&"] + cmd
             del platform_args["sleep_before_run"]
         if "power" in platform_args and platform_args["power"]:
             # launch the Settings page to prevent the phone
             # from going into sleep mode
             self.util.shell(
                 ["am", "start", "-a", "android.settings.SETTINGS"])
             time.sleep(1)
             cmd = ["nohup"] + ["sh", "-c", "'" + " ".join(cmd) + "'"] + \
                 [">", "/dev/null", "2>&1"]
             platform_args["non_blocking"] = True
             del platform_args["power"]
         if platform_args.get("enable_profiling", False):
             # attempt to run with profiling; fall back to a standard run on failure
             success = getRunStatus()
             try:
                 simpleperf = getProfilerByUsage(
                     "android",
                     None,
                     platform=self,
                     model_name=platform_args.get("model_name", None),
                     cmd=cmd)
                 if simpleperf:
                     f = simpleperf.start()
                     output, meta = f.result()
                     log_logcat = []
                     if not log_to_screen_only:
                         log_logcat = self.util.logcat('-d')
                     return output + log_logcat, meta
             # If profiling has not succeeded for some reason, reset the run status and run without profiling.
             except RuntimeError as ex:
                 getLogger().error(
                     "An error occurred when running Simpleperf profiler. {}"
                     .format(ex))
                 setRunStatus(success, overwrite=True)
             except FileNotFoundError as ex:
                 getLogger().error(
                     "An error occurred when running Simpleperf profiler. {}"
                     .format(ex))
                 setRunStatus(success, overwrite=True)
             except Exception:
                 getLogger().exception(
                     "An error has occurred when running Simpleperf profiler."
                 )
                 setRunStatus(success, overwrite=True)
     log_screen = self.util.shell(cmd, **platform_args)
     log_logcat = []
     if not log_to_screen_only:
         log_logcat = self.util.logcat('-d')
     return log_screen + log_logcat, meta