Example #1
def runBenchmark(self, info, platform, benchmarks):
    if getArgs().reboot:
        platform.rebootDevice()
    tempdir = tempfile.mkdtemp()
    # we need to get a different framework instance per thread
    # will consolidate later. For now create a new framework
    frameworks = getFrameworks()
    framework = frameworks[getArgs().framework](tempdir)
    reporters = getReporters()
    for idx in range(len(benchmarks)):
        benchmark = benchmarks[idx]
        # check the framework matches
        if "model" in benchmark and "framework" in benchmark["model"]:
            assert benchmark["model"]["framework"] == getArgs().framework, (
                "Framework specified in the json file "
                "{} ".format(benchmark["model"]["framework"]) +
                "does not match the command line argument "
                "{}".format(getArgs().framework))
        b = copy.deepcopy(benchmark)
        i = copy.deepcopy(info)
        success = runOneBenchmark(i, b, framework, platform,
                                  getArgs().platform, reporters,
                                  self._lock)
        self.success = self.success and success
        if idx != len(benchmarks) - 1:
            # cool down period between multiple benchmark runs
            cooldown = getArgs().cooldown
            if "model" in benchmark and "cooldown" in benchmark["model"]:
                cooldown = float(benchmark["model"]["cooldown"])
            time.sleep(cooldown)
    shutil.rmtree(tempdir, True)
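Example #1 builds one temp directory and one framework instance per call, runs the benchmarks sequentially with an optional cooldown between runs, and removes the temp directory at the end. The snippet depends on module-level helpers and standard-library imports that are not shown; the stubs below are a hypothetical sketch inferred only from the call sites above, not the real implementations.

# Hypothetical stubs for what Example #1 assumes; signatures are inferred from
# the call sites only and do not reflect the real implementations.
import copy
import shutil
import tempfile
import time


def getArgs():
    """Assumed: returns the parsed command-line namespace
    (fields used above: .reboot, .framework, .platform, .cooldown)."""
    raise NotImplementedError


def getFrameworks():
    """Assumed: returns a dict mapping framework name to a factory that
    takes a temp directory and returns a framework instance."""
    raise NotImplementedError


def getReporters():
    """Assumed: returns the list of reporter objects."""
    raise NotImplementedError


def runOneBenchmark(info, benchmark, framework, platform,
                    platform_name, reporters, lock):
    """Assumed: runs a single benchmark and returns a success/status value."""
    raise NotImplementedError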
Example #2
def runBenchmark(self, info, platform, benchmarks, framework):
    if getArgs().reboot:
        platform.rebootDevice()
    reporters = getReporters()
    for idx in range(len(benchmarks)):
        benchmark = benchmarks[idx]
        # check the framework matches
        if "model" in benchmark and "framework" in benchmark["model"]:
            assert benchmark["model"]["framework"] == getArgs().framework, (
                "Framework specified in the json file "
                "{} ".format(benchmark["model"]["framework"]) +
                "does not match the command line argument "
                "{}".format(getArgs().framework))
        b = copy.deepcopy(benchmark)
        i = copy.deepcopy(info)
        success = runOneBenchmark(i, b, framework, platform,
                                  getArgs().platform, reporters,
                                  self._lock)
        self.success = self.success and success
        if idx != len(benchmarks) - 1:
            # cool down period between multiple benchmark runs
            cooldown = getArgs().cooldown
            if "model" in benchmark and "cooldown" in benchmark["model"]:
                cooldown = float(benchmark["model"]["cooldown"])
            time.sleep(cooldown)
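Example #2 drops the per-call temp directory and accepts the framework instance as a parameter, so each calling thread can construct and own its framework. A minimal caller sketch under that assumption, reusing the helper stubs above (the runForThread name is illustrative, not from the source):

# Hypothetical caller for Example #2: the per-thread framework instance is now
# created by the caller and passed in, mirroring what Example #1 did internally.
import tempfile


def runForThread(runner, info, platform, benchmarks):
    tempdir = tempfile.mkdtemp()
    framework = getFrameworks()[getArgs().framework](tempdir)
    runner.runBenchmark(info, platform, benchmarks, framework)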
Example #3
    def runBenchmark(self, info, platform, benchmarks):
        if self.args.reboot:
            platform.rebootDevice()
        for idx in range(len(benchmarks)):
            tempdir = tempfile.mkdtemp(prefix="_".join(
                ["aibench", str(self.args.user_identifier), ""]))
            # we need to get a different framework instance per thread
            # will consolidate later. For now create a new framework
            frameworks = getFrameworks()
            framework = frameworks[self.args.framework](tempdir, self.args)
            reporters = getReporters(self.args)

            benchmark = benchmarks[idx]
            # check the framework matches
            if "model" in benchmark and "framework" in benchmark["model"]:
                assert benchmark["model"][
                    "framework"] == self.args.framework, (
                        "Framework specified in the json file "
                        "{} ".format(benchmark["model"]["framework"]) +
                        "does not match the command line argument "
                        "{}".format(self.args.framework))
            if self.args.debug:
                for test in benchmark["tests"]:
                    test["log_output"] = True
            if self.args.env:
                for test in benchmark["tests"]:
                    cmd_env = dict(self.args.env)
                    if "env" in test:
                        cmd_env.update(test["env"])
                    test["env"] = cmd_env

            b = copy.deepcopy(benchmark)
            i = copy.deepcopy(info)
            status = runOneBenchmark(
                i,
                b,
                framework,
                platform,
                self.args.platform,
                reporters,
                self._lock,
                self.args.cooldown,
                self.args.user_identifier,
                self.args.local_reporter,
            )
            self.status = self.status | status
            if idx != len(benchmarks) - 1:
                # cool down period between multiple benchmark runs
                cooldown = self.args.cooldown
                if "model" in benchmark and "cooldown" in benchmark["model"]:
                    cooldown = float(benchmark["model"]["cooldown"])
                time.sleep(cooldown)
            if not self.args.debug:
                shutil.rmtree(tempdir, True)
                for test in benchmark["tests"]:
                    if "preprocess" in test and "files" in test["preprocess"]:
                        for f in test["preprocess"]["files"].values():
                            shutil.rmtree(f["location"], True)
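Example #3 reads its configuration from self.args instead of a global getArgs(), creates a fresh temp directory and framework per benchmark, accumulates a bitwise status rather than a boolean, and removes temp and preprocess files unless debug is set. A hypothetical sketch of the fields the method reads from self.args, inferred solely from the attribute accesses above (the real argument parser is not shown):

# Hypothetical container for the fields Example #3 reads from self.args.
# Field names come from the attribute accesses above; defaults are illustrative.
from dataclasses import dataclass, field


@dataclass
class RunnerArgs:
    framework: str                  # must match benchmark["model"]["framework"]
    platform: str                   # forwarded to runOneBenchmark
    reboot: bool = False            # reboot the device before running
    debug: bool = False             # log test output and keep temp files
    cooldown: float = 0.0           # default sleep between benchmarks (seconds)
    user_identifier: str = ""       # used in the temp-directory prefix
    local_reporter: str = ""        # forwarded to runOneBenchmark
    env: dict = field(default_factory=dict)  # extra env vars merged into each test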
Example #4
def runBenchmark(self, info, platform, benchmarks, framework):
    if getArgs().reboot:
        platform.rebootDevice()
    reporters = getReporters()
    for benchmark in benchmarks:
        b = copy.deepcopy(benchmark)
        i = copy.deepcopy(info)
        runOneBenchmark(i, b, framework, platform,
                        getArgs().platform, reporters)
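Example #4 is the simplest variant: no framework check, no cooldown, and no success tracking. Across all four variants the benchmark entries are plain dicts; the sketch below lists only the keys the methods above actually read, with illustrative values rather than data from any real benchmark file.

# Hypothetical benchmark entry covering the keys consulted by the variants above.
example_benchmark = {
    "model": {
        "framework": "some_framework",  # checked against the command-line framework
        "cooldown": 5.0,                # optional per-benchmark cooldown (seconds)
    },
    "tests": [
        {
            "log_output": False,         # forced to True in debug mode (Example #3)
            "env": {"SOME_VAR": "1"},    # merged with command-line env (Example #3)
            "preprocess": {              # locations removed after the run (Example #3)
                "files": {"input": {"location": "/tmp/example_input"}},
            },
        },
    ],
}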