Example #1
    def runBenchmark(self, info, platform, benchmarks):
        if getArgs().reboot:
            platform.rebootDevice()
        tempdir = tempfile.mkdtemp()
        # we need to get a different framework instance per thread
        # will consolidate later. For now create a new framework
        frameworks = getFrameworks()
        framework = frameworks[getArgs().framework](tempdir)
        reporters = getReporters()
        for idx in range(len(benchmarks)):
            benchmark = benchmarks[idx]
            # check the framework matches
            if "model" in benchmark and "framework" in benchmark["model"]:
                assert(benchmark["model"]["framework"] ==
                       getArgs().framework), \
                    "Framework specified in the json file " \
                    "{} ".format(benchmark["model"]["framework"]) + \
                    "does not match the command line argument " \
                    "{}".format(getArgs().framework)
            b = copy.deepcopy(benchmark)
            i = copy.deepcopy(info)
            success = runOneBenchmark(i, b, framework, platform,
                                      getArgs().platform, reporters,
                                      self._lock)
            self.success = self.success and success
            if idx != len(benchmarks) - 1:
                # cool down period between multiple benchmark runs
                cooldown = getArgs().cooldown
                if "model" in benchmark and "cooldown" in benchmark["model"]:
                    cooldown = float(benchmark["model"]["cooldown"])
                time.sleep(cooldown)
        shutil.rmtree(tempdir, True)
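
The cooldown handling in Example #1 can be read in isolation: a per-model "cooldown" value in the benchmark JSON, when present, overrides the command-line default. A minimal sketch of that selection, assuming an illustrative helper name and a hypothetical default of 10 seconds (only the dictionary lookup mirrors the code above):

import time


def effective_cooldown(benchmark, default_cooldown):
    # Per-model "cooldown" in the benchmark JSON overrides the
    # command-line default, as in runBenchmark above.
    if "model" in benchmark and "cooldown" in benchmark["model"]:
        return float(benchmark["model"]["cooldown"])
    return default_cooldown


# example: the model section overrides the hypothetical 10 s default
time.sleep(effective_cooldown({"model": {"cooldown": "0.5"}}, 10.0))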
Example #2
    def runBenchmark(self, info, platform, benchmarks):
        if self.args.reboot:
            platform.rebootDevice()
        for idx in range(len(benchmarks)):
            tempdir = tempfile.mkdtemp(prefix="_".join(
                ["aibench", str(self.args.user_identifier), ""]))
            # we need to get a different framework instance per thread
            # will consolidate later. For now create a new framework
            frameworks = getFrameworks()
            framework = frameworks[self.args.framework](tempdir, self.args)
            reporters = getReporters(self.args)

            benchmark = benchmarks[idx]
            # check the framework matches
            if "model" in benchmark and "framework" in benchmark["model"]:
                assert benchmark["model"][
                    "framework"] == self.args.framework, (
                        "Framework specified in the json file "
                        "{} ".format(benchmark["model"]["framework"]) +
                        "does not match the command line argument "
                        "{}".format(self.args.framework))
            if self.args.debug:
                for test in benchmark["tests"]:
                    test["log_output"] = True
            if self.args.env:
                for test in benchmark["tests"]:
                    cmd_env = dict(self.args.env)
                    if "env" in test:
                        cmd_env.update(test["env"])
                    test["env"] = cmd_env

            b = copy.deepcopy(benchmark)
            i = copy.deepcopy(info)
            status = runOneBenchmark(
                i,
                b,
                framework,
                platform,
                self.args.platform,
                reporters,
                self._lock,
                self.args.cooldown,
                self.args.user_identifier,
                self.args.local_reporter,
            )
            self.status = self.status | status
            if idx != len(benchmarks) - 1:
                # cool down period between multiple benchmark runs
                cooldown = self.args.cooldown
                if "model" in benchmark and "cooldown" in benchmark["model"]:
                    cooldown = float(benchmark["model"]["cooldown"])
                time.sleep(cooldown)
            if not self.args.debug:
                shutil.rmtree(tempdir, True)
                for test in benchmark["tests"]:
                    if "preprocess" in test and "files" in test["preprocess"]:
                        for f in test["preprocess"]["files"].values():
                            shutil.rmtree(f["location"], True)
Example #3
    def run(self):
        tempdir = tempfile.mkdtemp(prefix="_".join(
            ["aibench", str(self.args.user_identifier), ""]))
        getLogger().info("Temp directory: {}".format(tempdir))
        info = self._getInfo()
        frameworks = getFrameworks()
        assert (self.args.framework
                in frameworks), "Framework {} is not supported".format(
                    self.args.framework)
        framework = frameworks[self.args.framework](tempdir, self.args)
        bcollector = BenchmarkCollector(framework,
                                        self.args.model_cache,
                                        args=self.args)
        benchmarks = bcollector.collectBenchmarks(info,
                                                  self.args.benchmark_file,
                                                  self.args.user_identifier)
        platforms = getPlatforms(self.args, tempdir, self.usb_controller)
        threads = []
        for platform in platforms:
            t = threading.Thread(target=self.runBenchmark,
                                 args=(info, platform, benchmarks))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()

        if not self.args.debug:
            shutil.rmtree(tempdir, True)

        status = self.status | getRunStatus()
        if getRunKilled():
            status_str = "killed"
        elif getRunTimeout():
            status_str = "timeout"
        elif status == 0:
            status_str = "success"
        elif status == 1:
            status_str = "user error"
        elif status == 2:
            status_str = "harness error"
        else:
            status_str = "user and harness error"
        getLogger().info(" ======= {} =======".format(status_str))
        if getRunKilled():
            return RUN_KILLED
        if getRunTimeout():
            return RUN_TIMEOUT
        return status
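
The status reported at the end of Example #3 reads as a two-bit mask: judging from the if/elif chain, bit 0 marks a user error and bit 1 a harness error, while killed and timed-out runs are reported before the numeric mapping. A small sketch of just that mapping (the helper name and keyword flags are illustrative, not the harness API):

def describe_status(status, killed=False, timeout=False):
    # Killed/timeout take precedence, then the accumulated bitmask:
    # bit 0 = user error, bit 1 = harness error.
    if killed:
        return "killed"
    if timeout:
        return "timeout"
    return {
        0: "success",
        1: "user error",
        2: "harness error",
    }.get(status, "user and harness error")


assert describe_status(0) == "success"
assert describe_status(3) == "user and harness error"
assert describe_status(2, timeout=True) == "timeout"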
Example #4
    def run(self):
        tempdir = tempfile.mkdtemp()
        getLogger().info("Temp directory: {}".format(tempdir))
        info = self._getInfo()
        bcollector = BenchmarkCollector(getArgs().model_cache)
        benchmarks = bcollector.collectBenchmarks(info,
                                                  getArgs().benchmark_file)
        frameworks = getFrameworks()
        assert getArgs().framework in frameworks, \
            "Framework {} is not supported".format(getArgs().framework)
        framework = frameworks[getArgs().framework](tempdir)
        platforms = getPlatforms(tempdir)
        threads = []
        for platform in platforms:
            t = threading.Thread(target=self.runBenchmark,
                                 args=(info, platform, benchmarks, framework))
            threads.append(t)
            t.start()
        # wait for all benchmark threads before removing the shared temp dir
        for t in threads:
            t.join()
        shutil.rmtree(tempdir, True)
Example #5
    def run(self):
        tempdir = tempfile.mkdtemp()
        getLogger().info("Temp directory: {}".format(tempdir))
        info = self._getInfo()
        frameworks = getFrameworks()
        assert self.args.framework in frameworks, \
            "Framework {} is not supported".format(self.args.framework)
        framework = frameworks[self.args.framework](tempdir, self.args)
        bcollector = BenchmarkCollector(framework,
                                        self.args.model_cache,
                                        args=self.args)
        benchmarks = bcollector.collectBenchmarks(info,
                                                  self.args.benchmark_file)
        platforms = getPlatforms(tempdir, self.args)
        threads = []
        for platform in platforms:
            t = threading.Thread(target=self.runBenchmark,
                                 args=(info, platform, benchmarks))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()

        if not self.args.debug:
            shutil.rmtree(tempdir, True)

        status = self.status | getRunStatus()
        if status == 0:
            status_str = "success"
        elif status == 1:
            status_str = "user error"
        elif status == 2:
            status_str = "harness error"
        else:
            status_str = "user and harness error"
        getLogger().info(" ======= {} =======".format(status_str))
        return status
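
Examples #3 through #5 all follow the same concurrency shape: fan out one thread per platform, join them all, then clean up the shared temp directory. A minimal, self-contained sketch of that pattern with a placeholder worker (the worker body and platform names are illustrative, not the harness API):

import shutil
import tempfile
import threading


def run_on_platform(platform, tempdir):
    # stand-in for self.runBenchmark(info, platform, benchmarks)
    print("running benchmarks on", platform, "using", tempdir)


tempdir = tempfile.mkdtemp(prefix="aibench_")
threads = []
for platform in ["android-device-1", "host"]:
    t = threading.Thread(target=run_on_platform, args=(platform, tempdir))
    t.start()
    threads.append(t)
for t in threads:
    t.join()  # every platform must finish before tempdir is removed
shutil.rmtree(tempdir, True)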