Example #1
 def serve(self, config, model_path, gpuid=0):
     # Write the JSON configuration file consumed by onmt_server.
     server_config_path = os.path.join(self._output_dir, "conf.json")
     with open(server_config_path, "w") as server_config_file:
         json.dump(
             {
                 "models_root": model_path,
                 "models": [{
                     "id": 0,
                     "model": _RELEASED_MODEL_NAME,
                     "opt": _trans_options(config, gpuid),
                 }],
             },
             server_config_file,
         )
     port = serving.pick_free_port()
     process = utils.run_cmd(
         [
             "onmt_server",
             "--ip",
             "127.0.0.1",
             "--port",
             str(port),
             "--url_root",
             "/translator-backend",
             "--config",
             server_config_path,
         ],
         background=True,
     )
     return process, {"port": port}
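All of the examples on this page rely on a pick_free_port helper to choose the port the server binds to. A minimal sketch of such a helper, assuming the standard approach of letting the OS assign an ephemeral port (the module layout here is illustrative, not the actual serving module):

 import socket

 def pick_free_port():
     # Bind to port 0 so the OS picks an unused ephemeral port,
     # then close the socket and report the chosen port number.
     with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
         s.bind(("127.0.0.1", 0))
         return s.getsockname()[1]

Note the small race window with this approach: the port is released before the server binds it, so another process could in principle grab it in between.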
Example #2
 def serve(self, config, model_path, gpuid=0):
     # Start a new tensorflow_model_server instance.
     batching_parameters = self._generate_batching_parameters(config.get('serving'))
     port = serving.pick_free_port()
     model_name = '%s%s' % (config['source'], config['target'])
     cmd = ['tensorflow_model_server',
            '--port=%d' % port,
            '--model_name=%s' % model_name,
            '--model_base_path=%s' % model_path,
            '--enable_batching=true',
            '--batching_parameters_file=%s' % batching_parameters]
     process = utils.run_cmd(cmd, background=True)
     info = {'port': port, 'model_name': model_name}
     return process, info
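The --batching_parameters_file flag points tensorflow_model_server at a file of TensorFlow Serving batching options in protobuf text format. A hypothetical sketch of what _generate_batching_parameters might write (max_batch_size and batch_timeout_micros are real TensorFlow Serving batching fields; the helper's signature, defaults, and output path are assumptions):

 import os

 def generate_batching_parameters(serving_config, output_dir):
     # Hypothetical sketch: serialize batching options into the
     # protobuf text format read by --batching_parameters_file.
     serving_config = serving_config or {}
     path = os.path.join(output_dir, "batching_parameters")
     with open(path, "w") as f:
         f.write("max_batch_size { value: %d }\n"
                 % serving_config.get("max_batch_size", 32))
         f.write("batch_timeout_micros { value: %d }\n"
                 % serving_config.get("batch_timeout_micros", 1000))
     return path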
Example #3
 def serve(self, config, model_path, gpuid=0):
     if isinstance(gpuid, list):
        logger.warning('multi-GPU is not supported for opennmt_lua serve')
         gpuid = gpuid[0]
     model_file = os.path.join(model_path, 'model_released.t7')
     host_ip = '127.0.0.1'
     port = pick_free_port()
     options = self._get_translation_options(config, model_file, gpuid=gpuid)
     options['host'] = host_ip
     options['port'] = port
     options['withAttn'] = 'true'
     options['mode'] = 'space'
     options = _build_cmd_line_options(options)
     process = self._run_command(
         ['th', 'tools/rest_translation_server.lua'] + options, background=True)
     info = {'endpoint': 'http://{}:{}/translator/translate'.format(host_ip, port)}
     return process, info
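Here _build_cmd_line_options flattens the options dict into the single-dash flags that Torch command-line scripts expect. A minimal sketch under that assumption (the actual helper is not shown in the example):

 def build_cmd_line_options(options):
     # Illustrative sketch: {"port": 5000, "host": "127.0.0.1"} becomes
     # ["-port", "5000", "-host", "127.0.0.1"], the Torch-style flag format.
     cmd = []
     for key, value in options.items():
         cmd.extend(["-%s" % key, str(value)])
     return cmd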
Example #4
 def serve(self, config, model_path, gpuid=0):
     # Export model (deleting any previously exported models).
     export_base_dir = os.path.join(model_path, "export")
     if os.path.exists(export_base_dir):
        shutil.rmtree(export_base_dir)
     export_dir = self._export_model(config, model_path)
     # Start a new tensorflow_model_server instance.
     batching_parameters = self._generate_batching_parameters(
         config.get('serving'))
     port = serving.pick_free_port()
     model_name = '%s%s' % (config['source'], config['target'])
     cmd = [
         'tensorflow_model_server',
         '--port=%d' % port,
         '--model_name=%s' % model_name,
         '--model_base_path=%s' % os.path.dirname(export_dir),
         '--enable_batching=true',
         '--batching_parameters_file=%s' % batching_parameters
     ]
     process = utils.run_cmd(cmd, background=True)
     info = {'port': port, 'model_name': model_name}
     return process, info
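Each example delegates process management to a run_cmd helper called with background=True. A minimal sketch of such a helper on top of subprocess, together with how a caller might shut the server down (a sketch only; the real utils.run_cmd may differ):

 import subprocess

 def run_cmd(cmd, background=False):
     # Sketch: launch the command, detached when background=True so the
     # caller gets a Popen handle it can later terminate().
     if background:
         return subprocess.Popen(cmd)
     return subprocess.run(cmd, check=True)

 # Usage sketch (backend.serve is hypothetical shorthand for any of the
 # serve() implementations above):
 # process, info = backend.serve(config, model_path)
 # ... send translation requests to the running server ...
 # process.terminate()
 # process.wait()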