Code example #1
0
def test_load_dependencies(popen, serve):
    """load_dependencies should launch tensorflow_model_server on the default port.

    ``popen`` and ``serve`` are pytest fixtures (``popen`` patches the process
    launcher; ``serve`` is the module under test).
    """
    with patch('os.environ') as env:
        # os.environ is replaced by a MagicMock, so these assignments only
        # record the expected hosting variables on the mock.
        env['SAGEMAKER_PROGRAM'] = 'script.py'
        env['SAGEMAKER_SUBMIT_DIRECTORY'] = 's3://what/ever'

        # Stub out Transformer construction so no user module is loaded.
        serve.Transformer.from_module = Mock()
        serve.load_dependencies()

        expected_command = [
            'tensorflow_model_server',
            '--port=9000',
            '--model_name=generic_model',
            '--model_base_path=/opt/ml/model/export/Servo',
        ]
        popen.assert_called_with(expected_command)
Code example #2
0
def test_load_dependencies(popen, serve):
    """Verify load_dependencies starts the model server with default arguments."""
    with patch('os.environ') as env:
        # The patched (mock) environment carries the SageMaker hosting vars.
        env['SAGEMAKER_PROGRAM'] = 'script.py'
        env['SAGEMAKER_SUBMIT_DIRECTORY'] = 's3://what/ever'

        # Avoid importing a real user script during the test.
        serve.Transformer.from_module = Mock()

        serve.load_dependencies()

        popen.assert_called_with(
            [
                'tensorflow_model_server',
                '--port=9000',
                '--model_name=generic_model',
                '--model_base_path=/opt/ml/model/export/Servo',
            ]
        )
Code example #3
0
def test_load_dependencies_with_safe_port(hosting_env, popen, serve):
    """When the hosting environment restricts the port range, the model server
    must be launched on the first port of that range instead of the default.
    """
    with patch('os.environ') as env:
        # os.environ is mocked; assignments record the expected hosting vars.
        env['SAGEMAKER_PROGRAM'] = 'script.py'
        env['SAGEMAKER_SUBMIT_DIRECTORY'] = 's3://what/ever'

        # Constrain the environment the code under test reads.
        mocked_env = hosting_env.return_value
        mocked_env.port_range = SAFE_PORT_RANGE
        mocked_env.model_dir = '/opt/ml/model'

        # Prevent loading of any real user module.
        serve.Transformer.from_module = Mock()
        serve.load_dependencies()

        expected_command = [
            'tensorflow_model_server',
            '--port={}'.format(FIRST_PORT),
            '--model_name=generic_model',
            '--model_base_path=/opt/ml/model/export/Servo',
        ]
        popen.assert_called_with(expected_command)
Code example #4
0
def test_load_dependencies_with_safe_port(hosting_env, popen, serve):
    """When the hosting environment restricts the port range, the model server
    must be launched on the first port of that range instead of the default.

    Fix: the body references ``serve`` (module under test) but the original
    signature did not request it as a fixture, which raises NameError unless a
    module-level import exists (none is visible here); the sibling example #3
    takes ``serve`` as a fixture, so it is added here for consistency.
    """
    with patch('os.environ') as env:
        # os.environ is mocked; assignments record the expected hosting vars.
        env['SAGEMAKER_PROGRAM'] = 'script.py'
        env['SAGEMAKER_SUBMIT_DIRECTORY'] = 's3://what/ever'

        # Constrain the port range and model dir the code under test reads.
        hosting_env.return_value.port_range = SAFE_PORT_RANGE
        hosting_env.return_value.model_dir = '/opt/ml/model'

        # Prevent loading of any real user module.
        serve.Transformer.from_module = Mock()
        serve.load_dependencies()

        popen.assert_called_with(['tensorflow_model_server',
                                  '--port={}'.format(FIRST_PORT),
                                  '--model_name=generic_model',
                                  '--model_base_path=/opt/ml/model/export/Servo'])