Example 1
    def _runBenchmarks(self, jobs_queue):
        # mark the queued jobs as running in the db
        run_ids = ",".join([str(job["id"]) for job in jobs_queue])
        self.db.runBenchmarks(self.args.claimer_id, run_ids)
        run_devices = [
            self.devices[job["device"]][job["hash"]] for job in jobs_queue
        ]
        getLogger().info("Updating devices status")
        self.db.updateDevices(self.args.claimer_id,
                              getDevicesString(run_devices), False)
        getLogger().info("Downloading files")
        self._downloadFiles(jobs_queue)

        # run the benchmarks
        for job in jobs_queue:
            identifier = job["identifier"]
            getLogger().info(
                "Running job with identifier {}".format(identifier))
            tempdir = tempfile.mkdtemp(
                prefix="_".join(["aibench", str(identifier), ""]))
            raw_args = self._getRawArgs(job, tempdir)
            self.devices[job["device"]][
                job["hash"]]["start_time"] = time.ctime()
            async_runner = runAsync(self.args, self.devices, self.db, job,
                                    tempdir)

            # Watchdog will be used to kill currently running jobs
            # based on user requests
            app = WatchDog(async_runner, async_runner.didUserRequestJobKill,
                           async_runner.killJob)

            global RUNNING_JOBS
            RUNNING_JOBS += 1
            """
            Python's multiprocessing need to pickle things to sling them
            in different processes. However, bounded methods are not pickable,
            so the way it's doing it here doesn't work.
            Thus, I added __call__ method to the class we are passing into the
            apply_async method.
            Ref: https://stackoverflow.com/a/6975654
            """

            self.pool.apply_async(app,
                                  args=[raw_args],
                                  callback=app.main.callback)
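
A minimal, self-contained sketch of the workaround the comment describes (the
names JobRunner, on_done, and the argument values are invented for
illustration): a class with a __call__ method is a picklable callable, so
apply_async can ship an instance, together with its state, to a worker
process, whereas a bound method could not be pickled on Python 2.

import multiprocessing


class JobRunner:
    """Picklable callable: the instance carries the job state into the worker."""

    def __init__(self, identifier):
        self.identifier = identifier

    def __call__(self, raw_args):
        # Runs in the worker process; self and raw_args were pickled across.
        return "job {} ran with {}".format(self.identifier, raw_args)


def on_done(result):
    # apply_async's callback runs in the parent process with the return value.
    print(result)


if __name__ == "__main__":
    pool = multiprocessing.Pool(processes=2)
    for i in range(3):
        pool.apply_async(JobRunner(i), args=[["--flag"]], callback=on_done)
    pool.close()
    pool.join()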
Example 2
    def _runBenchmarks(self, jobs_queue):
        """Given a queue of jobs, update run statuses and device statuses in db, and spawn job processes."""
        run_ids = ",".join([str(job["id"]) for job in jobs_queue])
        self.db.runBenchmarks(self.args.claimer_id, run_ids)
        run_devices = [
            self.devices[job["device"]][job["hash"]] for job in jobs_queue
        ]
        getLogger().info("Updating devices status")
        self.db.updateDevices(self.args.claimer_id,
                              getDevicesString(run_devices), False)

        # run the benchmarks
        for job in jobs_queue:
            getLogger().info(
                f"Running job with identifier {job['identifier']} and id {job['id']}"
            )
            device = self.devices[job["device"]][job["hash"]]
            device["start_time"] = time.ctime()
            async_runner = runAsync(
                self.args,
                device,
                self.db,
                job,
                self.benchmark_downloader,
                self.device_manager.usb_controller,
            )

            # Watchdog will be used to kill currently running jobs
            # based on user requests
            app = WatchDog(async_runner, async_runner.didUserRequestJobKill,
                           async_runner.killJob)

            global RUNNING_JOBS
            RUNNING_JOBS += 1
            """
            Python's multiprocessing need to pickle things to sling them
            in different processes. However, bounded methods are not pickable,
            so the way it's doing it here doesn't work.
            Thus, I added __call__ method to the class we are passing into the
            apply_async method.
            Ref: https://stackoverflow.com/a/6975654
            """
            future = self.pool.submit(app)
            future.add_done_callback(self.callback)
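
A minimal sketch of the futures-based pattern Example 2 switches to (JobRunner
and callback are invented names): submit() returns a Future, and
add_done_callback runs in the submitting process once the job completes, which
is why the real code can pass the bound method self.callback directly; only
the submitted callable must be picklable when the pool is process-backed.

import concurrent.futures


class JobRunner:
    def __init__(self, identifier):
        self.identifier = identifier

    def __call__(self):
        # Executed by a pool worker; with a ProcessPoolExecutor the instance
        # (and everything it holds) must still be picklable.
        return "job {} finished".format(self.identifier)


def callback(future):
    # Unlike apply_async's callback, this receives the Future, not the result.
    print(future.result())


if __name__ == "__main__":
    with concurrent.futures.ProcessPoolExecutor(max_workers=2) as pool:
        for i in range(3):
            future = pool.submit(JobRunner(i))
            future.add_done_callback(callback)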