def _start_single_tfs(self, instance_id):
    cmd = tfs_utils.tfs_command(
        self._tfs_grpc_ports[instance_id],
        self._tfs_rest_ports[instance_id],
        self._tfs_config_path,
        self._tfs_enable_batching,
        self._tfs_batching_config_path,
        tfs_intra_op_parallelism=self._tfs_intra_op_parallelism,
        tfs_inter_op_parallelism=self._tfs_inter_op_parallelism,
        tfs_enable_gpu_memory_fraction=self._enable_per_process_gpu_memory_fraction(),
        tfs_gpu_memory_fraction=self._calculate_per_process_gpu_memory_fraction(),
    )
    log.info("tensorflow serving command: {}".format(cmd))
    num_gpus = self._get_number_of_gpu_on_host()
    if num_gpus > 1:
        # utilizing multi-gpu: pin this worker to one device, round-robin by instance id
        worker_env = os.environ.copy()
        worker_env["CUDA_VISIBLE_DEVICES"] = str(instance_id % num_gpus)
        p = subprocess.Popen(cmd.split(), env=worker_env)
        log.info("started tensorflow serving (pid: {}) on GPU: {}".format(
            p.pid, instance_id % num_gpus))
    else:
        # cpu and single gpu: inherit the parent environment unchanged
        p = subprocess.Popen(cmd.split())
        log.info("started tensorflow serving (pid: {})".format(p.pid))
    return p
def _start_single_tfs(self, instance_id):
    cmd = tfs_utils.tfs_command(
        self._tfs_grpc_port[instance_id],
        self._tfs_rest_port[instance_id],
        self._tfs_config_path,
        self._tfs_enable_batching,
        self._tfs_batching_config_path,
        self._tfs_intra_op_parallelism,
        self._tfs_inter_op_parallelism,
        self._enable_per_process_gpu_memory_fraction(),
        self._calculate_per_process_gpu_memory_fraction(),
    )
    log.info("tensorflow serving command: {}".format(cmd))
    p = subprocess.Popen(cmd.split())
    log.info("started tensorflow serving (pid: %d)", p.pid)
    return p
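# Hypothetical driver for the per-instance starters above; a minimal sketch only.
# `_start_all_tfs` and `self._tfs_instance_count` are illustrative names, not
# confirmed parts of the container, and restart/monitoring logic is omitted.
def _start_all_tfs(self):
    self._tfs = [
        self._start_single_tfs(instance_id)
        for instance_id in range(self._tfs_instance_count)
    ]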
def _start_tfs(self):
    self._log_version('tensorflow_model_server --version', 'tensorflow version info:')
    cmd = tfs_utils.tfs_command(
        self._tfs_grpc_port,
        self._tfs_rest_port,
        self._tfs_config_path,
        self._tfs_enable_batching,
        self._tfs_batching_config_path,
    )
    log.info('tensorflow serving command: {}'.format(cmd))
    p = subprocess.Popen(cmd.split())
    log.info('started tensorflow serving (pid: %d)', p.pid)
    self._tfs = p
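# The `_log_version` helper used above is not shown here; a minimal sketch,
# assuming it simply shells out to the given command and logs its output with
# the supplied message. The real helper may differ.
def _log_version(self, command, message):
    output = subprocess.check_output(
        command.split()).decode('utf-8', 'backslashreplace').strip()
    log.info('{}\n{}'.format(message, output))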
def _start_tfs(self): self._log_version("tensorflow_model_server_neuron --version", "tensorflow version info:") cmd = tfs_utils.tfs_command( self._tfs_grpc_port, self._tfs_rest_port, self._tfs_config_path, self._tfs_enable_batching, self._tfs_batching_config_path, ) log.info("tensorflow serving command: {}".format(cmd)) p = subprocess.Popen(cmd.split()) log.info("started tensorflow serving (pid: %d)", p.pid) self._tfs.append(p.pid)
def _handle_load_model_post(self, res, data):  # noqa: C901
    model_name = data['model_name']
    base_path = data['url']

    # model is already loaded
    if model_name in self._model_tfs_pid:
        res.status = falcon.HTTP_409
        res.body = json.dumps(
            {'error': 'Model {} is already loaded.'.format(model_name)})
        return

    # check if there are available ports
    if not self._ports_available():
        res.status = falcon.HTTP_507
        res.body = json.dumps({
            'error': 'Memory exhausted: no available ports to load the model.'
        })
        return

    with lock():
        self._model_tfs_rest_port[model_name] = self._tfs_ports['rest_port'].pop()
        self._model_tfs_grpc_port[model_name] = self._tfs_ports['grpc_port'].pop()

    # validate model files are in the specified base_path
    if self.validate_model_dir(base_path):
        try:
            # install custom dependencies, import handlers
            self._import_custom_modules(model_name)

            tfs_config = tfs_utils.create_tfs_config_individual_model(
                model_name, base_path)
            tfs_config_file = '/sagemaker/tfs-config/{}/model-config.cfg'.format(
                model_name)
            log.info('tensorflow serving model config: \n%s\n', tfs_config)
            os.makedirs(os.path.dirname(tfs_config_file))
            with open(tfs_config_file, 'w') as f:
                f.write(tfs_config)

            batching_config_file = '/sagemaker/batching/{}/batching-config.cfg'.format(
                model_name)
            if self._tfs_enable_batching:
                tfs_utils.create_batching_config(batching_config_file)

            cmd = tfs_utils.tfs_command(
                self._model_tfs_grpc_port[model_name],
                self._model_tfs_rest_port[model_name],
                tfs_config_file,
                self._tfs_enable_batching,
                batching_config_file,
            )
            p = subprocess.Popen(cmd.split())
            self._wait_for_model(model_name)

            log.info('started tensorflow serving (pid: %d)', p.pid)
            # update model name <-> tfs pid map
            self._model_tfs_pid[model_name] = p

            res.status = falcon.HTTP_200
            res.body = json.dumps({
                'success': 'Successfully loaded model {}, '
                           'listening on rest port {} '
                           'and grpc port {}.'.format(
                               model_name,
                               self._model_tfs_rest_port[model_name],
                               self._model_tfs_grpc_port[model_name],
                           )
            })
        except MultiModelException as multi_model_exception:
            self._cleanup_config_file(tfs_config_file)
            self._cleanup_config_file(batching_config_file)
            if multi_model_exception.code == 409:
                res.status = falcon.HTTP_409
                res.body = multi_model_exception.msg
            elif multi_model_exception.code == 408:
                res.status = falcon.HTTP_408
                res.body = multi_model_exception.msg
            else:
                raise MultiModelException(falcon.HTTP_500,
                                          multi_model_exception.msg)
        except FileExistsError as e:
            res.status = falcon.HTTP_409
            res.body = json.dumps({
                'error': 'Model {} is already loaded. {}'.format(
                    model_name, str(e))
            })
        except OSError as os_error:
            self._cleanup_config_file(tfs_config_file)
            self._cleanup_config_file(batching_config_file)
            # errno 12 == ENOMEM: the host ran out of memory while forking TFS
            if os_error.errno == 12:
                raise MultiModelException(
                    falcon.HTTP_507,
                    'Memory exhausted: not enough memory to start TFS instance')
            else:
                raise MultiModelException(falcon.HTTP_500, os_error.strerror)
    else:
        res.status = falcon.HTTP_404
        res.body = json.dumps({
            'error': 'Could not find valid base path {} for servable {}'.format(
                base_path, model_name)
        })
def _handle_load_model_post(self, res, data): # noqa: C901 model_name = data["model_name"] base_path = data["url"] # model is already loaded if model_name in self._model_tfs_pid: res.status = falcon.HTTP_409 res.body = json.dumps( {"error": "Model {} is already loaded.".format(model_name)}) # check if there are available ports if not self._ports_available(): res.status = falcon.HTTP_507 res.body = json.dumps({ "error": "Memory exhausted: no available ports to load the model." }) with lock(): self._model_tfs_rest_port[model_name] = self._tfs_ports[ "rest_port"].pop() self._model_tfs_grpc_port[model_name] = self._tfs_ports[ "grpc_port"].pop() # validate model files are in the specified base_path if self.validate_model_dir(base_path): try: tfs_config = tfs_utils.create_tfs_config_individual_model( model_name, base_path) tfs_config_file = "/sagemaker/tfs-config/{}/model-config.cfg".format( model_name) log.info("tensorflow serving model config: \n%s\n", tfs_config) os.makedirs(os.path.dirname(tfs_config_file)) with open(tfs_config_file, "w") as f: f.write(tfs_config) batching_config_file = "/sagemaker/batching/{}/batching-config.cfg".format( model_name) if self._tfs_enable_batching: tfs_utils.create_batching_config(batching_config_file) cmd = tfs_utils.tfs_command( self._model_tfs_grpc_port[model_name], self._model_tfs_rest_port[model_name], tfs_config_file, self._tfs_enable_batching, batching_config_file, ) p = subprocess.Popen(cmd.split()) self._wait_for_model(model_name) log.info("started tensorflow serving (pid: %d)", p.pid) # update model name <-> tfs pid map self._model_tfs_pid[model_name] = p res.status = falcon.HTTP_200 res.body = json.dumps({ "success": "Successfully loaded model {}, " "listening on rest port {} " "and grpc port {}.".format( model_name, self._model_tfs_rest_port, self._model_tfs_grpc_port, ) }) except MultiModelException as multi_model_exception: self._cleanup_config_file(tfs_config_file) self._cleanup_config_file(batching_config_file) if multi_model_exception.code == 409: res.status = falcon.HTTP_409 res.body = multi_model_exception.msg elif multi_model_exception.code == 408: res.status = falcon.HTTP_408 res.body = multi_model_exception.msg else: raise MultiModelException(falcon.HTTP_500, multi_model_exception.msg) except FileExistsError as e: res.status = falcon.HTTP_409 res.body = json.dumps({ "error": "Model {} is already loaded. {}".format( model_name, str(e)) }) except OSError as os_error: self._cleanup_config_file(tfs_config_file) self._cleanup_config_file(batching_config_file) if os_error.errno == 12: raise MultiModelException( falcon.HTTP_507, "Memory exhausted: " "not enough memory to start TFS instance") else: raise MultiModelException(falcon.HTTP_500, os_error.strerror) else: res.status = falcon.HTTP_404 res.body = json.dumps({ "error": "Could not find valid base path {} for servable {}".format( base_path, model_name) })