def run(self, name, options, log_filepath):
    if "gpu" in options:
        # translate the boolean "gpu" option into docker's nvidia runtime
        gpu_ready = self.gpu_enabled()
        if options["gpu"] is True:
            if not gpu_ready:
                raise GPUSupportNotEnabled('nvidia')
            else:
                options['runtime'] = 'nvidia'
        options.pop("gpu", None)
    run_return_code, run_id = \
        self.run_container(image_name=name, **options)
    log_return_code, logs = \
        self.log_container(run_id, filepath=log_filepath)
    # success only if both the run and the log capture succeeded
    final_return_code = run_return_code and log_return_code
    return final_return_code, run_id, logs
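# Usage sketch (assumption, not part of the original source): a caller holding
# an instance of this driver might invoke run() roughly as below; the image
# name, option keys, and log path are hypothetical and simply mirror the
# keyword arguments that run_container() is expected to accept.
#
#     status, run_id, logs = driver.run(
#         "my-image",
#         {"command": ["python", "script.py"], "detach": False, "gpu": True},
#         "/tmp/task.log")
#
# With "gpu": True the method requires gpu_enabled() to succeed and then
# rewrites the option as runtime='nvidia' before starting the container.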
def gpu_enabled(self):
    # test if this image works
    # docker run --runtime=nvidia --rm nvidia/cuda nvidia-smi
    process = subprocess.Popen(
        [
            "docker", "run", "--runtime=nvidia", "--rm",
            "nvidia/cuda", "nvidia-smi",
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    stderr = stderr.decode("utf-8")
    if "Unknown runtime specified nvidia" in stderr:
        return False
    if "OCI runtime create failed" in stderr:
        return False
    if len(stderr) > 2:
        raise GPUSupportNotEnabled(stderr)
    # this may mean we're good to go. Untested though.
    return True
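# Usage sketch (assumption, not part of the original source): gpu_enabled()
# shells out to Docker, so it assumes `subprocess` is imported at module level,
# docker is on PATH, and the nvidia/cuda image is available locally or pullable.
# A caller might probe it before scheduling GPU work:
#
#     if driver.gpu_enabled():
#         ...start the container with runtime='nvidia'...
#     else:
#         ...fall back to a CPU-only run...
#
# Note that only the two known "nvidia runtime not available" messages are
# treated as a clean False; any other stderr output raises
# GPUSupportNotEnabled rather than being silently treated as success.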