def run(self):
    # Describe each detected platform by its kind and hash, and emit the
    # list as JSON.
    platforms = getPlatforms("/tmp")
    devices = []
    for p in platforms:
        devices.append({
            "kind": p.platform,
            "hash": p.platform_hash,
        })
    json_str = json.dumps(devices)
    print(json_str)
    return json_str
Example No. 2
def run(self):
    platforms = getPlatforms(self.args, tempdir="/tmp")
    devices = []
    for p in platforms:
        devices.append({
            "kind": p.getKind(),
            "name": p.getName(),
            "hash": p.platform_hash,
            "abi": p.getABI(),
            "os": p.getOS(),
        })
    json_str = json.dumps(devices)
    print(json_str)
    return json_str
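
For reference, the JSON these two variants print and return is a flat list of per-platform dictionaries. A minimal sketch of the output shape, with placeholder values standing in for whatever getPlatforms() actually reports:

import json

devices = [{
    "kind": "android",   # placeholder values; the real ones come from the
    "name": "Pixel 3",   # platform objects returned by getPlatforms()
    "hash": "abc123",
    "abi": "arm64-v8a",
    "os": "10",
}]
print(json.dumps(devices))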
Example No. 3
    def run(self):
        tempdir = tempfile.mkdtemp(prefix="_".join(
            ["aibench", str(self.args.user_identifier), ""]))
        getLogger().info("Temp directory: {}".format(tempdir))
        info = self._getInfo()
        frameworks = getFrameworks()
        assert (self.args.framework
                in frameworks), "Framework {} is not supported".format(
                    self.args.framework)
        framework = frameworks[self.args.framework](tempdir, self.args)
        bcollector = BenchmarkCollector(framework,
                                        self.args.model_cache,
                                        args=self.args)
        benchmarks = bcollector.collectBenchmarks(info,
                                                  self.args.benchmark_file,
                                                  self.args.user_identifier)
        platforms = getPlatforms(self.args, tempdir, self.usb_controller)
        threads = []
        for platform in platforms:
            t = threading.Thread(target=self.runBenchmark,
                                 args=(info, platform, benchmarks))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()

        if not self.args.debug:
            shutil.rmtree(tempdir, True)

        status = self.status | getRunStatus()
        if getRunKilled():
            status_str = "killed"
        elif getRunTimeout():
            status_str = "timeout"
        elif status == 0:
            status_str = "success"
        elif status == 1:
            status_str = "user error"
        elif status == 2:
            status_str = "harness error"
        else:
            status_str = "user and harness error"
        getLogger().info(" ======= {} =======".format(status_str))
        if getRunKilled():
            return RUN_KILLED
        if getRunTimeout():
            return RUN_TIMEOUT
        return status
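
The status handling above treats the run status as a bit field: bit 0 flags a user error and bit 1 a harness error, which is why OR-ing getRunStatus() into self.status can yield 3 ("user and harness error"). A small standalone sketch of that mapping; the describe() helper is hypothetical, only the bit layout comes from the code above:

USER_ERROR = 1     # bit 0
HARNESS_ERROR = 2  # bit 1

def describe(status):
    # Mirrors the if/elif chain in run() above.
    if status == 0:
        return "success"
    if status == USER_ERROR:
        return "user error"
    if status == HARNESS_ERROR:
        return "harness error"
    return "user and harness error"

print(describe(USER_ERROR | HARNESS_ERROR))  # -> user and harness error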
Example No. 4
def run(self):
    tempdir = tempfile.mkdtemp()
    getLogger().info("Temp directory: {}".format(tempdir))
    info = self._getInfo()
    bcollector = BenchmarkCollector(getArgs().model_cache)
    benchmarks = bcollector.collectBenchmarks(info,
                                              getArgs().benchmark_file)
    frameworks = getFrameworks()
    assert getArgs().framework in frameworks, \
        "Framework {} is not supported".format(getArgs().framework)
    framework = frameworks[getArgs().framework](tempdir)
    platforms = getPlatforms(tempdir)
    threads = []
    for platform in platforms:
        t = threading.Thread(target=self.runBenchmark,
                             args=(info, platform, benchmarks, framework))
        threads.append(t)
        t.start()
    # Wait for every benchmark thread to finish before deleting the temp
    # directory they may still be using.
    for t in threads:
        t.join()
    shutil.rmtree(tempdir, True)
Example No. 5
    def run(self):
        tempdir = tempfile.mkdtemp()
        getLogger().info("Temp directory: {}".format(tempdir))
        info = self._getInfo()
        frameworks = getFrameworks()
        assert self.args.framework in frameworks, \
            "Framework {} is not supported".format(self.args.framework)
        framework = frameworks[self.args.framework](tempdir, self.args)
        bcollector = BenchmarkCollector(framework,
                                        self.args.model_cache,
                                        args=self.args)
        benchmarks = bcollector.collectBenchmarks(info,
                                                  self.args.benchmark_file)
        platforms = getPlatforms(tempdir, self.args)
        threads = []
        for platform in platforms:
            t = threading.Thread(target=self.runBenchmark,
                                 args=(info, platform, benchmarks))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()

        if not self.args.debug:
            shutil.rmtree(tempdir, True)

        status = self.status | getRunStatus()
        if status == 0:
            status_str = "success"
        elif status == 1:
            status_str = "user error"
        elif status == 2:
            status_str = "harness error"
        else:
            status_str = "user and harness error"
        getLogger().info(" ======= {} =======".format(status_str))
        return status
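
All of the variants above share the same dispatch pattern: one worker thread per platform, started eagerly, then joined before the temporary directory is cleaned up. A stripped-down, self-contained sketch of that pattern; run_one() and the platform list are hypothetical stand-ins, not the aibench API:

import shutil
import tempfile
import threading

def run_one(platform, benchmarks):
    # Stand-in for self.runBenchmark(info, platform, benchmarks).
    print("running {} benchmarks on {}".format(len(benchmarks), platform))

def run_all(platforms, benchmarks):
    tempdir = tempfile.mkdtemp()
    threads = []
    for platform in platforms:
        t = threading.Thread(target=run_one, args=(platform, benchmarks))
        t.start()
        threads.append(t)
    # Join before cleanup so no worker sees the directory disappear.
    for t in threads:
        t.join()
    shutil.rmtree(tempdir, True)

run_all(["android-pixel3", "ios-iphone11"], ["model_a", "model_b"])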