def start_server_single_model_from_gc(request, get_image, get_test_dir,
                                      get_docker_context):
    """Fixture: start an OVMS container serving a single resnet model
    pulled from a public Google Cloud Storage bucket (gRPC port 9006).

    Returns the running container; it is killed at fixture teardown.
    """
    client = get_docker_context
    # plugin_config is passed as an escaped JSON string so that it
    # survives the shell invocation inside start_server.sh.
    command = "/ie-serving-py/start_server.sh ie_serving model " \
              "--model_name resnet " \
              "--model_path " \
              "gs://public-artifacts/intelai_public_models/resnet_50_i8/ " \
              "--port 9006 --target_device CPU --nireq 4 --plugin_config " \
              "\"{\\\"CPU_THROUGHPUT_STREAMS\\\": \\\"2\\\", " \
              "\\\"CPU_THREADS_NUM\\\": \\\"4\\\"}\""
    # Forward the host proxy setting so the container can reach GCS.
    envs = ['https_proxy=' + os.getenv('https_proxy', "")]
    container = client.containers.run(image=get_image,
                                      detach=True,
                                      name='ie-serving-py-test-single-gs',
                                      ports={'9006/tcp': 9006},
                                      remove=True,
                                      environment=envs,
                                      command=command)

    # Kill the container when the fixture goes out of scope.
    request.addfinalizer(container.kill)

    # Block until the serving endpoint responds (or times out).
    running = wait_endpoint_setup(container)
    assert running is True, "docker container was not started successfully"

    return container
def start_server_update_flow_specific(request, get_image, get_test_dir,
                                      get_docker_context):
    """Fixture: start an OVMS container serving /opt/ml/update with a
    specific model_version_policy (versions 1, 3 and 4) on ports
    9008 (gRPC) and 5563 (REST).

    Returns the running container; it is killed at fixture teardown.
    """
    docker_client = get_docker_context
    mount_dir = get_test_dir + '/saved_models/'
    # Expose the host saved_models directory read-only at /opt/ml.
    volumes = {mount_dir: {'bind': '/opt/ml', 'mode': 'ro'}}
    command = ('/ie-serving-py/start_server.sh ie_serving model '
               '--model_name resnet --model_path /opt/ml/update '
               '--port 9008 --model_version_policy'
               ' \'{"specific": { "versions":[1, 3, 4] }}\' '
               '--rest_port 5563')

    container = docker_client.containers.run(
        image=get_image,
        detach=True,
        name='ie-serving-py-test-update-specific',
        ports={'9008/tcp': 9008, '5563/tcp': 5563},
        remove=True,
        volumes=volumes,
        command=command)
    request.addfinalizer(container.kill)

    # Wait for the serving endpoint to come up before yielding.
    running = wait_endpoint_setup(container)
    assert running is True, "docker container was not started successfully"

    return container
# --- Exemplo n.º 3 (scraped listing separator; score: 0) ---
def start_server_single_model(request, get_image, get_test_dir,
                              get_docker_context):
    """Fixture: start an OVMS container serving resnet_V1_50 on ports
    9000 (gRPC) and 5555 (REST) with CPU_THROUGHPUT_AUTO streams.

    Returns the running container; it is killed at fixture teardown.
    """
    client = get_docker_context
    path_to_mount = get_test_dir + '/saved_models/'
    # Expose the host saved_models directory read-only at /opt/ml.
    volumes_dict = {'{}'.format(path_to_mount): {'bind': '/opt/ml',
                                                 'mode': 'ro'}}
    # plugin_config is an escaped JSON string so that it survives the
    # shell invocation inside start_server.sh.
    command = "/ie-serving-py/start_server.sh ie_serving model " \
              "--model_name resnet --model_path /opt/ml/resnet_V1_50 " \
              "--port 9000 --rest_port 5555 --plugin_config " \
              "\"{\\\"CPU_THROUGHPUT_STREAMS\\\": " \
              "\\\"CPU_THROUGHPUT_AUTO\\\"}\""

    container = \
        client.containers.run(
            image=get_image,
            detach=True,
            name='ie-serving-py-test-single',
            ports={'9000/tcp': 9000,
                   '5555/tcp': 5555},
            remove=True,
            volumes=volumes_dict,
            # In this case, slower,
            # non-default serialization method is used
            environment=[
                'SERIALIZATON=_prepare_output_as_AppendArrayToTensorProto'],
            command=command)
    request.addfinalizer(container.kill)

    # Block until the serving endpoint responds (or times out).
    running = wait_endpoint_setup(container)
    assert running is True, "docker container was not started successfully"

    return container
# --- Exemplo n.º 4 (scraped listing separator; score: 0) ---
def start_server_with_mapping(request, get_image, get_test_dir,
                              get_docker_context):
    """Fixture: start an OVMS container serving the two-output
    resnet_2_out model on ports 9002 (gRPC) and 5556 (REST), after
    copying a mapping_config.json into version 1 of the model.

    Returns the running container; it is killed at fixture teardown.
    """
    # Drop the output-name mapping file into the model version directory.
    shutil.copyfile('tests/functional/mapping_config.json',
                    get_test_dir + '/saved_models/resnet_2_out/1/'
                                   'mapping_config.json')
    docker_client = get_docker_context
    mount_dir = get_test_dir + '/saved_models/'
    volumes = {mount_dir: {'bind': '/opt/ml', 'mode': 'ro'}}
    command = ("/ie-serving-py/start_server.sh ie_serving model "
               "--model_name resnet_2_out --model_path /opt/ml/resnet_2_out "
               "--port 9002 --rest_port 5556")

    container = docker_client.containers.run(
        image=get_image,
        detach=True,
        name='ie-serving-py-test-2-out',
        ports={'9002/tcp': 9002, '5556/tcp': 5556},
        remove=True,
        volumes=volumes,
        command=command)
    request.addfinalizer(container.kill)

    # Wait for the serving endpoint to come up before yielding.
    running = wait_endpoint_setup(container)
    assert running is True, "docker container was not started successfully"

    return container
def start_server_single_model_from_s3(request, get_image, get_test_dir,
                                      get_docker_context):
    """Fixture: start an OVMS container serving resnet from an S3 bucket
    on gRPC port 9000, forwarding AWS credentials from the host env.

    Returns the running container; it is killed at fixture teardown.
    """
    AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
    AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
    AWS_REGION = os.getenv('AWS_REGION')

    client = get_docker_context
    # Forward AWS credentials so the server can read the S3 model path.
    envs = [
        'AWS_ACCESS_KEY_ID=' + AWS_ACCESS_KEY_ID,
        'AWS_SECRET_ACCESS_KEY=' + AWS_SECRET_ACCESS_KEY,
        'AWS_REGION=' + AWS_REGION
    ]
    command = "/ie-serving-py/start_server.sh ie_serving model " \
              "--model_name resnet " \
              "--model_path s3://inference-test-aipg/resnet_v1_50 " \
              "--port 9000"

    container = client.containers.run(image=get_image,
                                      detach=True,
                                      name='ie-serving-py-test-single-s3',
                                      # BUG FIX: the server listens on 9000
                                      # (see --port above); the previous
                                      # mapping published 9002, leaving the
                                      # gRPC endpoint unreachable from the
                                      # host.
                                      ports={'9000/tcp': 9000},
                                      remove=True,
                                      environment=envs,
                                      command=command)
    request.addfinalizer(container.kill)

    # Block until the serving endpoint responds (or times out).
    running = wait_endpoint_setup(container)
    assert running is True, "docker container was not started successfully"

    return container
# --- Exemplo n.º 6 (scraped listing separator; score: 0) ---
def start_server_update_flow_latest(request, get_image, get_test_dir,
                                    get_docker_context):
    """Fixture: start an OVMS container watching /opt/ml/update with the
    default (latest) version policy on ports 9007 (gRPC) and 5562 (REST).

    Returns the running container; it is killed at fixture teardown.
    """
    docker_client = get_docker_context
    mount_dir = get_test_dir + '/saved_models/'
    # ensure model dir is empty before starting OVMS
    shutil.rmtree(mount_dir + '/update', ignore_errors=True)

    volumes = {mount_dir: {'bind': '/opt/ml', 'mode': 'ro'}}
    command = ("/ie-serving-py/start_server.sh ie_serving model "
               "--model_name resnet --model_path /opt/ml/update "
               "--port 9007 --rest_port 5562 --grpc_workers 1 --nireq 1")

    container = docker_client.containers.run(
        image=get_image,
        detach=True,
        name='ie-serving-py-test-update-latest',
        ports={'9007/tcp': 9007, '5562/tcp': 5562},
        remove=True,
        volumes=volumes,
        command=command)
    request.addfinalizer(container.kill)

    # Wait for the serving endpoint to come up before yielding.
    running = wait_endpoint_setup(container)
    assert running is True, "docker container was not started successfully"

    return container
# --- Exemplo n.º 7 (scraped listing separator; score: 0) ---
def start_server_batch_model_bs4(request, get_image, get_test_dir,
                                 get_docker_context):
    """Fixture: start an OVMS container serving resnet with a fixed
    batch size of 4 on ports 9004 (gRPC) and 5558 (REST).

    Returns the running container; it is killed at fixture teardown.
    """
    docker_client = get_docker_context
    mount_dir = get_test_dir + '/saved_models/'
    # Expose the host saved_models directory read-only at /opt/ml.
    volumes = {mount_dir: {'bind': '/opt/ml', 'mode': 'ro'}}
    command = ("/ie-serving-py/start_server.sh ie_serving model "
               "--model_name resnet --model_path /opt/ml/resnet_V1_50_batch8 "
               "--port 9004 --batch_size 4 --rest_port 5558")

    container = docker_client.containers.run(
        image=get_image,
        detach=True,
        name='ie-serving-py-test-batch4',
        ports={'9004/tcp': 9004, '5558/tcp': 5558},
        remove=True,
        volumes=volumes,
        command=command)
    request.addfinalizer(container.kill)

    # Wait for the serving endpoint to come up before yielding.
    running = wait_endpoint_setup(container)
    assert running is True, "docker container was not started successfully"

    return container
# --- Exemplo n.º 8 (scraped listing separator; score: 0) ---
def start_server_single_model_from_s3(request, get_image, get_test_dir,
                                      get_docker_context):
    """Fixture: start an OVMS container serving resnet from an S3 bucket,
    using ports allocated via get_ports_for_fixture.

    Returns (container, {"grpc_port": ..., "rest_port": ...}). Only the
    gRPC port is published; rest_port is allocated and returned but not
    exposed by this fixture.
    """
    AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
    AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
    AWS_REGION = os.getenv('AWS_REGION')

    client = get_docker_context
    # Forward AWS credentials so the server can read the S3 model path.
    envs = [
        'AWS_ACCESS_KEY_ID=' + AWS_ACCESS_KEY_ID,
        'AWS_SECRET_ACCESS_KEY=' + AWS_SECRET_ACCESS_KEY,
        'AWS_REGION=' + AWS_REGION
    ]

    grpc_port, rest_port = get_ports_for_fixture(port_suffix="09")

    command = "/ie-serving-py/start_server.sh ie_serving model " \
              "--model_name resnet " \
              "--model_path s3://inference-test-aipg/resnet_v1_50 " \
              "--port {}".format(grpc_port)

    container = client.containers.run(
        image=get_image,
        detach=True,
        name='ie-serving-py-test-single-s3-{}'.format(get_tests_suffix()),
        ports={'{}/tcp'.format(grpc_port): grpc_port},
        remove=True,
        environment=envs,
        command=command)
    request.addfinalizer(container.kill)

    # Block until the serving endpoint responds (or times out).
    running = wait_endpoint_setup(container)
    assert running is True, "docker container was not started successfully"

    return container, {"grpc_port": grpc_port, "rest_port": rest_port}
def start_server_update_flow_latest(request, get_image, get_test_dir,
                                    get_docker_context):
    """Fixture: start an OVMS container watching the suffixed
    update-<suffix> model directory with the default (latest) version
    policy, on ports allocated via get_ports_for_fixture.

    Returns (container, {"grpc_port": ..., "rest_port": ...}).
    """
    docker_client = get_docker_context
    suffix = get_tests_suffix()
    mount_dir = get_test_dir + '/saved_models'
    # ensure model dir is empty before starting OVMS
    shutil.rmtree(mount_dir + '/update-{}/'.format(suffix),
                  ignore_errors=True)

    volumes = {mount_dir: {'bind': '/opt/ml', 'mode': 'ro'}}

    grpc_port, rest_port = get_ports_for_fixture(port_suffix="03")

    command = ("/ie-serving-py/start_server.sh ie_serving model "
               "--model_name resnet --model_path /opt/ml/update-{} "
               "--port {} --rest_port {} --grpc_workers 1 --nireq 1"
               .format(suffix, grpc_port, rest_port))

    container = docker_client.containers.run(
        image=get_image,
        detach=True,
        name='ie-serving-py-test-update-latest-{}'.format(suffix),
        ports={'{}/tcp'.format(grpc_port): grpc_port,
               '{}/tcp'.format(rest_port): rest_port},
        remove=True,
        volumes=volumes,
        command=command)
    request.addfinalizer(container.kill)

    # Wait for the serving endpoint to come up before yielding.
    running = wait_endpoint_setup(container)
    assert running is True, "docker container was not started successfully"

    return container, {"grpc_port": grpc_port, "rest_port": rest_port}
# --- Exemplo n.º 10 (scraped listing separator; score: 0) ---
def start_server_batch_model_auto(request, get_image, get_test_dir,
                                  get_docker_context):
    """Fixture: start an OVMS container serving resnet with
    --batch_size auto (server adapts batch size to incoming requests),
    on ports allocated via get_ports_for_fixture.

    Returns (container, {"grpc_port": ..., "rest_port": ...}).
    """
    docker_client = get_docker_context
    mount_dir = get_test_dir + '/saved_models/'
    # Expose the host saved_models directory read-only at /opt/ml.
    volumes = {mount_dir: {'bind': '/opt/ml', 'mode': 'ro'}}

    grpc_port, rest_port = get_ports_for_fixture(port_suffix="14")

    command = ("/ie-serving-py/start_server.sh ie_serving model "
               "--model_name resnet --model_path /opt/ml/resnet_V1_50_batch8 "
               "--port {} --batch_size auto --rest_port {}"
               .format(grpc_port, rest_port))

    container = docker_client.containers.run(
        image=get_image,
        detach=True,
        name='ie-serving-py-test-autobatch-{}'.format(get_tests_suffix()),
        ports={'{}/tcp'.format(grpc_port): grpc_port,
               '{}/tcp'.format(rest_port): rest_port},
        remove=True,
        volumes=volumes,
        command=command)
    request.addfinalizer(container.kill)

    # Wait for the serving endpoint to come up before yielding.
    running = wait_endpoint_setup(container)
    assert running is True, "docker container was not started successfully"

    return container, {"grpc_port": grpc_port, "rest_port": rest_port}
def start_server_multi_model(request, get_image, get_test_dir,
                             get_docker_context):
    """Fixture: start an OVMS container in config mode serving the models
    described by config.json on ports 9001 (gRPC) and 5561 (REST).

    Returns the running container; it is killed at fixture teardown.
    """
    # Place the multi-model config where the mounted volume exposes it.
    shutil.copyfile('tests/functional/config.json',
                    get_test_dir + '/saved_models/config.json')
    AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
    AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
    AWS_REGION = os.getenv('AWS_REGION')

    docker_client = get_docker_context
    # Forward AWS credentials for any S3-hosted models in the config.
    envs = ['AWS_ACCESS_KEY_ID=' + AWS_ACCESS_KEY_ID,
            'AWS_SECRET_ACCESS_KEY=' + AWS_SECRET_ACCESS_KEY,
            'AWS_REGION=' + AWS_REGION]
    volumes = {get_test_dir + '/saved_models/':
               {'bind': '/opt/ml', 'mode': 'ro'}}
    command = ("/ie-serving-py/start_server.sh ie_serving config "
               "--config_path /opt/ml/config.json --port 9001 "
               "--rest_port 5561 --grpc_workers 2 --rest_workers 2")

    container = docker_client.containers.run(
        image=get_image,
        detach=True,
        name='ie-serving-py-test-multi',
        ports={'9001/tcp': 9001, '5561/tcp': 5561},
        remove=True,
        volumes=volumes,
        environment=envs,
        command=command)
    request.addfinalizer(container.kill)

    # Wait for the serving endpoint to come up before yielding.
    running = wait_endpoint_setup(container)
    assert running is True, "docker container was not started successfully"

    return container
def start_server_model_ver_policy(request, get_image, get_test_dir,
                                  get_docker_context):
    """Fixture: start an OVMS container in config mode exercising model
    version policies, with a mapping_config.json copied into version 3
    of the model_ver model.

    Returns (container, {"grpc_port": ..., "rest_port": ...}).
    """
    # Stage the version-policy config and the output-name mapping file.
    shutil.copyfile('tests/functional/model_version_policy_config.json',
                    get_test_dir +
                    '/saved_models/model_ver_policy_config.json')

    shutil.copyfile('tests/functional/mapping_config.json',
                    get_test_dir + '/saved_models/model_ver/3/'
                                   'mapping_config.json')

    docker_client = get_docker_context
    volumes = {get_test_dir + '/saved_models/':
               {'bind': '/opt/ml', 'mode': 'ro'}}

    grpc_port, rest_port = get_ports_for_fixture(port_suffix="18")

    command = ("/ie-serving-py/start_server.sh ie_serving config "
               "--config_path /opt/ml/model_ver_policy_config.json "
               "--port {} --rest_port {}".format(grpc_port, rest_port))

    container = docker_client.containers.run(
        image=get_image,
        detach=True,
        name='ie-serving-py-test-policy-{}'.format(get_tests_suffix()),
        ports={'{}/tcp'.format(grpc_port): grpc_port,
               '{}/tcp'.format(rest_port): rest_port},
        remove=True,
        volumes=volumes,
        command=command)
    request.addfinalizer(container.kill)

    # Wait for the serving endpoint to come up before yielding.
    running = wait_endpoint_setup(container)
    assert running is True, "docker container was not started successfully"

    return container, {"grpc_port": grpc_port, "rest_port": rest_port}
def start_server_single_model(request, get_image, get_test_dir,
                              get_docker_context):
    """Fixture: start an OVMS container serving resnet_V1_50 on ports
    9000 (gRPC) and 5555 (REST) with CPU_THROUGHPUT_AUTO streams.

    Returns the running container; it is killed at fixture teardown.
    """
    client = get_docker_context
    path_to_mount = get_test_dir + '/saved_models/'
    # Expose the host saved_models directory read-only at /opt/ml.
    volumes_dict = {
        '{}'.format(path_to_mount): {
            'bind': '/opt/ml',
            'mode': 'ro'
        }
    }
    # plugin_config is an escaped JSON string so that it survives the
    # shell invocation inside start_server.sh.
    command = "/ie-serving-py/start_server.sh ie_serving model " \
              "--model_name resnet --model_path /opt/ml/resnet_V1_50 " \
              "--port 9000 --rest_port 5555 --plugin_config " \
              "\"{\\\"CPU_THROUGHPUT_STREAMS\\\": " \
              "\\\"CPU_THROUGHPUT_AUTO\\\"}\""

    container = client.containers.run(image=get_image,
                                      detach=True,
                                      name='ie-serving-py-test-single',
                                      ports={
                                          '9000/tcp': 9000,
                                          '5555/tcp': 5555
                                      },
                                      remove=True,
                                      volumes=volumes_dict,
                                      command=command)
    request.addfinalizer(container.kill)

    # Block until the serving endpoint responds (or times out).
    running = wait_endpoint_setup(container)
    assert running is True, "docker container was not started successfully"

    return container
# --- Exemplo n.º 14 (scraped listing separator; score: 0) ---
def start_server_batch_model_2out(request, get_image, get_test_dir,
                                  get_docker_context):
    """Fixture: start an OVMS container serving the two-output
    age/gender model on ports 9006 (gRPC) and 5560 (REST).

    Returns the running container; it is killed at fixture teardown.
    """
    docker_client = get_docker_context
    mount_dir = get_test_dir + '/saved_models/'
    # Expose the host saved_models directory read-only at /opt/ml.
    volumes = {mount_dir: {'bind': '/opt/ml', 'mode': 'ro'}}
    command = ("/ie-serving-py/start_server.sh ie_serving model "
               "--model_name age_gender "
               "--model_path /opt/ml/age-gender-recognition-retail-0013 "
               "--port 9006 --rest_port 5560")

    container = docker_client.containers.run(
        image=get_image,
        detach=True,
        name='ie-serving-py-test-batch-2out',
        ports={'9006/tcp': 9006, '5560/tcp': 5560},
        remove=True,
        volumes=volumes,
        command=command)
    request.addfinalizer(container.kill)

    # Wait for the serving endpoint to come up before yielding.
    running = wait_endpoint_setup(container)
    assert running is True, "docker container was not started successfully"

    return container
# --- Exemplo n.º 15 (scraped listing separator; score: 0) ---
def start_server_single_model_from_gc(request, get_image, get_test_dir,
                                      get_docker_context):
    """Fixture: start an OVMS container serving resnet from a private GCS
    bucket, mounting the host GCP credentials file into the container.

    Returns the running container; it is killed at fixture teardown.
    """
    creds_path = os.getenv('GOOGLE_APPLICATION_CREDENTIALS')

    docker_client = get_docker_context
    # The credentials file is mounted read-only at /etc/gcp.json and
    # advertised to the server via GOOGLE_APPLICATION_CREDENTIALS.
    envs = ['GOOGLE_APPLICATION_CREDENTIALS=/etc/gcp.json']
    volumes = {creds_path: {'bind': '/etc/gcp.json', 'mode': 'ro'}}
    command = ("/ie-serving-py/start_server.sh ie_serving model "
               "--model_name resnet "
               "--model_path gs://inference-eu/ml-test "
               "--port 9000")

    container = docker_client.containers.run(
        image=get_image,
        detach=True,
        name='ie-serving-py-test-single-gs',
        ports={'9000/tcp': 9000},
        remove=True,
        volumes=volumes,
        environment=envs,
        command=command)
    request.addfinalizer(container.kill)

    # Wait for the serving endpoint to come up before yielding.
    running = wait_endpoint_setup(container)
    assert running is True, "docker container was not started successfully"

    return container
# --- Exemplo n.º 16 (scraped listing separator; score: 0) ---
def start_server_face_detection_model_nonamed_shape(request, get_image,
                                                    get_test_dir,
                                                    get_docker_context):
    """Fixture: start an OVMS container serving the face-detection model
    with an anonymous --shape override, on ports 9012 (gRPC) and
    5567 (REST).

    Returns the running container; it is killed at fixture teardown.
    """
    docker_client = get_docker_context
    mount_dir = get_test_dir + '/saved_models/'
    volumes = {mount_dir: {'bind': '/opt/ml', 'mode': 'ro'}}
    # The shape tuple is quoted so start_server.sh passes it verbatim.
    command = ('/ie-serving-py/start_server.sh ie_serving model '
               '--model_name face_detection --model_path '
               '/opt/ml/face-detection-retail-0004 '
               '--port 9012 --rest_port 5567 '
               '--shape "(1, 3, 600, 600)" '
               '--rest_workers 4 --nireq 2')

    container = docker_client.containers.run(
        image=get_image,
        detach=True,
        name='ie-serving-py-test-nonamed-shape',
        ports={'9012/tcp': 9012, '5567/tcp': 5567},
        remove=True,
        volumes=volumes,
        command=command)
    request.addfinalizer(container.kill)

    # Wait for the serving endpoint to come up before yielding.
    running = wait_endpoint_setup(container)
    assert running is True, "docker container was not started successfully"

    return container
# --- Exemplo n.º 17 (scraped listing separator; score: 0) ---
def start_server_single_model_from_minio(request, get_docker_network,
                                         get_minio_server_s3, get_image,
                                         get_test_dir, get_docker_context):
    """Fixture: start an OVMS container serving resnet from the local
    MinIO (S3-compatible) server running on the shared docker network.

    Returns (container, {"grpc_port": ..., "rest_port": ...}).
    """
    network = get_docker_network

    AWS_ACCESS_KEY_ID = os.getenv('MINIO_ACCESS_KEY')
    AWS_SECRET_ACCESS_KEY = os.getenv('MINIO_SECRET_KEY')
    AWS_REGION = os.getenv('AWS_REGION')

    # The MinIO fixture reports the port its server listens on; it is
    # baked into the S3_ENDPOINT URL below.
    _, ports = get_minio_server_s3
    grpc_port = ports["grpc_port"]
    minio_endpoint = 'http://minio.locals3-{}.com:{}'.format(
        get_tests_suffix(), grpc_port)

    envs = [
        'MINIO_ACCESS_KEY=' + AWS_ACCESS_KEY_ID,
        'MINIO_SECRET_KEY=' + AWS_SECRET_ACCESS_KEY,
        'AWS_ACCESS_KEY_ID=' + AWS_ACCESS_KEY_ID,
        'AWS_SECRET_ACCESS_KEY=' + AWS_SECRET_ACCESS_KEY,
        'AWS_REGION=' + AWS_REGION, 'S3_ENDPOINT=' + minio_endpoint
    ]

    client = get_docker_context

    # NOTE(review): grpc_port is rebound here — from this point on it is
    # the OVMS fixture port, no longer the MinIO port used above.
    grpc_port, rest_port = get_ports_for_fixture(port_suffix="11")

    command = "/ie-serving-py/start_server.sh ie_serving model " \
              "--model_name resnet " \
              "--model_path s3://inference/resnet_v1_50 " \
              "--port {}".format(grpc_port)

    container = client.containers.run(
        image=get_image,
        detach=True,
        name='ie-serving-test-'
        'single-minio-{}'.format(get_tests_suffix()),
        ports={'{}/tcp'.format(grpc_port): grpc_port},
        remove=True,
        environment=envs,
        command=command,
        network=network.name)

    request.addfinalizer(container.kill)

    # Block until the serving endpoint responds (or times out).
    running = wait_endpoint_setup(container)

    assert running is True, "docker container was not started successfully"

    return container, {"grpc_port": grpc_port, "rest_port": rest_port}
# --- Exemplo n.º 18 (scraped listing separator; score: 0) ---
def start_server_with_mapping(request, get_image, get_test_dir,
                              get_docker_context):
    """Fixture: start an OVMS container serving the age/gender model with
    a mapping_config.json copied into version 1 of the model.

    Returns (container, {"grpc_port": ..., "rest_port": ...}). Both the
    container and the copied mapping file are cleaned up at teardown.
    """
    # Drop the output-name mapping file into the model version directory.
    shutil.copyfile(
        'tests/functional/mapping_config.json', get_test_dir + '/saved_models/'
        'age-gender-recognition-retail-0013/1/'
        'mapping_config.json')
    client = get_docker_context
    path_to_mount = get_test_dir + '/saved_models/'
    # Expose the host saved_models directory read-only at /opt/ml.
    volumes_dict = {
        '{}'.format(path_to_mount): {
            'bind': '/opt/ml',
            'mode': 'ro'
        }
    }

    grpc_port, rest_port = get_ports_for_fixture(port_suffix="06")

    command = "/ie-serving-py/start_server.sh ie_serving model " \
              "--model_name age_gender " \
              "--model_path /opt/ml/age-gender-recognition-retail-0013 " \
              "--port {} --rest_port {}".format(grpc_port, rest_port)

    container = client.containers.run(
        image=get_image,
        detach=True,
        name='ie-serving-py-test-2-out-{}'.format(get_tests_suffix()),
        ports={
            '{}/tcp'.format(grpc_port): grpc_port,
            '{}/tcp'.format(rest_port): rest_port
        },
        remove=True,
        volumes=volumes_dict,
        command=command)

    def delete_mapping_file():
        # Remove the mapping file copied in above so later fixtures see
        # a clean model directory.
        path = get_test_dir + '/saved_models/' \
                              'age-gender-recognition-retail-0013/1/' \
                              'mapping_config.json'
        if os.path.exists(path):
            os.remove(path)

    request.addfinalizer(delete_mapping_file)
    request.addfinalizer(container.kill)

    # Block until the serving endpoint responds (or times out).
    running = wait_endpoint_setup(container)
    assert running is True, "docker container was not started successfully"

    return container, {"grpc_port": grpc_port, "rest_port": rest_port}
# --- Exemplo n.º 19 (scraped listing separator; score: 0) ---
def start_server_single_vehicle_attrib_model(
        request, get_image, get_test_dir, vehicle_attributes_model_downloader,
        get_docker_context):
    """Fixture: start an OVMS container serving the vehicle-attributes
    model (downloaded by vehicle_attributes_model_downloader) with
    CPU_THROUGHPUT_AUTO streams.

    Returns (container, {"grpc_port": ..., "rest_port": ...}).
    """
    print("Downloaded model files:", vehicle_attributes_model_downloader)
    client = get_docker_context
    path_to_mount = get_test_dir + '/saved_models/'
    # Expose the host saved_models directory read-only at /opt/ml.
    volumes_dict = {
        '{}'.format(path_to_mount): {
            'bind': '/opt/ml',
            'mode': 'ro'
        }
    }

    grpc_port, rest_port = get_ports_for_fixture(port_suffix="05")

    # plugin_config is an escaped JSON string so that it survives the
    # shell invocation inside start_server.sh.
    command = "/ie-serving-py/start_server.sh ie_serving model " \
              "--model_name vehicle-attributes " \
              "--model_path /opt/ml/vehicle-attributes-recognition-barrier-0039 " \
              "--port " + str(grpc_port) + " --rest_port " + str(rest_port) + \
              " --plugin_config " \
              "\"{\\\"CPU_THROUGHPUT_STREAMS\\\": " \
              "\\\"CPU_THROUGHPUT_AUTO\\\"}\""

    container = \
        client.containers.run(
            image=get_image,
            detach=True,
            name='ie-serving-py-test-single-{}'.format(get_tests_suffix()),
            ports={'{}/tcp'.format(grpc_port): grpc_port,
                   '{}/tcp'.format(rest_port): rest_port},
            remove=True,
            volumes=volumes_dict,
            # In this case, slower,
            # non-default serialization method is used
            environment=[
                'SERIALIZATON=_prepare_output_as_AppendArrayToTensorProto'],
            command=command)
    request.addfinalizer(container.kill)

    # Block until the serving endpoint responds (or times out).
    running = wait_endpoint_setup(container)
    assert running is True, "docker container was not started successfully"

    return container, {"grpc_port": grpc_port, "rest_port": rest_port}
# --- Exemplo n.º 20 (scraped listing separator; score: 0) ---
def start_minio_server(request, get_test_dir, get_docker_network,
                       get_docker_context):
    """Fixture: start a MinIO (S3-compatible) server container on the
    shared docker network.

    Equivalent to: sudo docker run -d -p 9099:9000 minio/minio server /data

    Returns (container, {"grpc_port": ..., "rest_port": ...}). Only
    grpc_port is used for the MinIO listener; rest_port is allocated and
    returned but not published.

    Side effect: if MINIO_ACCESS_KEY / MINIO_SECRET_KEY are unset, they
    are written into os.environ so dependent fixtures can read them.
    """
    client = get_docker_context

    grpc_port, rest_port = get_ports_for_fixture(port_suffix="10")

    command = 'server --address ":{}" /data'.format(grpc_port)

    client.images.pull('minio/minio:latest')

    network = get_docker_network

    MINIO_ACCESS_KEY = os.getenv('MINIO_ACCESS_KEY')
    MINIO_SECRET_KEY = os.getenv('MINIO_SECRET_KEY')

    # Fall back to fixed test credentials and publish them to the host
    # environment for the OVMS-from-MinIO fixtures.
    if MINIO_ACCESS_KEY is None or MINIO_SECRET_KEY is None:
        MINIO_ACCESS_KEY = "MINIO_A_KEY"
        MINIO_SECRET_KEY = "MINIO_S_KEY"
        os.environ["MINIO_ACCESS_KEY"] = "MINIO_A_KEY"
        os.environ["MINIO_SECRET_KEY"] = "MINIO_S_KEY"

    envs = [
        'MINIO_ACCESS_KEY=' + MINIO_ACCESS_KEY,
        'MINIO_SECRET_KEY=' + MINIO_SECRET_KEY
    ]

    container = client.containers.run(
        image='minio/minio:latest',
        detach=True,
        name='minio.locals3-{}.com'.format(get_tests_suffix()),
        ports={'{}/tcp'.format(grpc_port): grpc_port},
        remove=True,
        environment=envs,
        command=command,
        network=network.name)

    request.addfinalizer(container.kill)

    # Wait up to 30s for MinIO's readiness condition, not the default
    # OVMS endpoint check.
    running = wait_endpoint_setup(container, minio_condition, 30)
    assert running is True, "minio container was not started successfully"

    return container, {"grpc_port": grpc_port, "rest_port": rest_port}
def start_server_single_model_from_minio(request, get_docker_network,
                                         get_minio_server_s3, get_image,
                                         get_test_dir, get_docker_context):
    """Fixture: start an OVMS container serving resnet from a local MinIO
    (S3-compatible) server on the shared docker network, gRPC port 9099.

    Returns the running container; it is killed at fixture teardown.
    """
    network = get_docker_network

    AWS_ACCESS_KEY_ID = os.getenv('MINIO_ACCESS_KEY')
    AWS_SECRET_ACCESS_KEY = os.getenv('MINIO_SECRET_KEY')
    AWS_REGION = os.getenv('AWS_REGION')

    envs = [
        # BUG FIX: the '=' separator was missing from the two MINIO_*
        # entries, producing malformed docker environment strings such as
        # 'MINIO_ACCESS_KEY<value>' instead of 'MINIO_ACCESS_KEY=<value>'.
        'MINIO_ACCESS_KEY=' + AWS_ACCESS_KEY_ID,
        'MINIO_SECRET_KEY=' + AWS_SECRET_ACCESS_KEY,
        'AWS_ACCESS_KEY_ID=' + AWS_ACCESS_KEY_ID,
        'AWS_SECRET_ACCESS_KEY=' + AWS_SECRET_ACCESS_KEY,
        'AWS_REGION=' + AWS_REGION,
        'S3_ENDPOINT=' + 'http://minio.locals3.com:9000'
    ]

    client = get_docker_context
    command = "/ie-serving-py/start_server.sh ie_serving model " \
              "--model_name resnet " \
              "--model_path s3://inference/resnet_v1_50 " \
              "--port 9099"

    container = client.containers.run(image=get_image,
                                      detach=True,
                                      name='ie-serving-py-test-single-minio',
                                      ports={'9099/tcp': 9099},
                                      remove=True,
                                      environment=envs,
                                      command=command,
                                      network=network.name)

    request.addfinalizer(container.kill)

    # Block until the serving endpoint responds (or times out).
    running = wait_endpoint_setup(container)

    assert running is True, "docker container was not started successfully"

    return container
# --- Exemplo n.º 22 (scraped listing separator; score: 0) ---
def start_ams_service(request, get_image, get_test_dir, get_docker_context):
    """Fixture: start the AMS wrapper service container with DEBUG
    logging.

    Returns (container, {"port": ...}); the container is killed at
    fixture teardown.
    """
    docker_client = get_docker_context
    # Only the second port from the fixture allocator is used.
    _, ams_port = get_ports_for_fixture(port_suffix="01")

    command = "/ams_wrapper/start_ams.py --ams_port={}".format(ams_port)

    container = docker_client.containers.run(
        image=get_image,
        detach=True,
        name='ams-service',
        ports={'{}/tcp'.format(ams_port): ams_port},
        remove=True,
        environment=['LOG_LEVEL=DEBUG'],
        command=command)
    request.addfinalizer(container.kill)

    # Wait for the service endpoint to come up before yielding.
    running = wait_endpoint_setup(container)
    assert running is True, "docker container was not started successfully"
    return container, {"port": ams_port}
# --- Exemplo n.º 23 (scraped listing separator; score: 0) ---
def start_server_face_detection_model_named_shape(request, get_image,
                                                  get_test_dir,
                                                  get_docker_context):
    """Run the model server with the face-detection model and a named
    ``--shape`` override for its "data" input.

    Returns the container together with the grpc/rest ports it exposes.
    """
    docker_client = get_docker_context
    models_dir = get_test_dir + '/saved_models/'
    mounts = {models_dir: {'bind': '/opt/ml', 'mode': 'ro'}}

    grpc_port, rest_port = get_ports_for_fixture(port_suffix="01")

    # The shape argument carries escaped quotes so the JSON survives the
    # shell invocation inside the container.
    command = (
        "/ie-serving-py/start_server.sh ie_serving model "
        "--model_name face_detection --model_path "
        "/opt/ml/face-detection-retail-0004 "
        "--port " + str(grpc_port) + " --rest_port " + str(rest_port) +
        ' --shape "{\\"data\\": \\"(1, 3, 600, 600)\\"}" '
        "--grpc_workers 4 --rest_workers 2 "
        "--nireq 2")

    container = docker_client.containers.run(
        image=get_image,
        detach=True,
        name='ie-serving-py-test-named-shape-{}'.format(get_tests_suffix()),
        ports={'{}/tcp'.format(grpc_port): grpc_port,
               '{}/tcp'.format(rest_port): rest_port},
        remove=True,
        volumes=mounts,
        command=command)
    # Tear the container down when the requesting test finishes.
    request.addfinalizer(container.kill)

    assert wait_endpoint_setup(container) is True, \
        "docker container was not started successfully"

    return container, {"grpc_port": grpc_port, "rest_port": rest_port}
def start_minio_server(request, get_image, get_test_dir, get_docker_network,
                       get_docker_context):
    """Start a minio container (equivalent of
    ``sudo docker run -d -p 9099:9000 minio/minio server /data``) on the
    test docker network and return the running container.
    """
    docker_client = get_docker_context
    network = get_docker_network

    docker_client.images.pull('minio/minio:latest')

    access_key = os.getenv('MINIO_ACCESS_KEY')
    secret_key = os.getenv('MINIO_SECRET_KEY')
    if access_key is None or secret_key is None:
        # Fall back to a fixed credential pair and export it so later
        # fixtures read the same values from the environment.
        access_key = "MINIO_A_KEY"
        secret_key = "MINIO_S_KEY"
        os.environ["MINIO_ACCESS_KEY"] = "MINIO_A_KEY"
        os.environ["MINIO_SECRET_KEY"] = "MINIO_S_KEY"

    container = docker_client.containers.run(
        image='minio/minio:latest',
        detach=True,
        name='minio.locals3.com',
        ports={'9000/tcp': 9000},
        remove=True,
        environment=['MINIO_ACCESS_KEY=' + access_key,
                     'MINIO_SECRET_KEY=' + secret_key],
        command="server /data",
        network=network.name)

    # Tear the container down when the requesting test finishes.
    request.addfinalizer(container.kill)

    assert wait_endpoint_setup(container, minio_condition, 30) is True, \
        "minio container was not started successfully"

    return container
# Exemplo n.º 25
def start_server_model_ver_policy(request, get_image, get_test_dir,
                                  get_docker_context):
    """Start the model server in config mode with a model-version-policy
    config file, exposing grpc on 9006 and rest on 5560.

    The policy config and the version-3 mapping file are staged into the
    mounted saved_models directory before the container starts.
    """
    shutil.copyfile(
        'tests/functional/model_version_policy_config.json',
        get_test_dir + '/saved_models/model_ver_policy_config.json')
    shutil.copyfile(
        'tests/functional/mapping_config.json',
        get_test_dir + '/saved_models/model_ver/3/mapping_config.json')

    docker_client = get_docker_context
    mounts = {get_test_dir + '/saved_models/': {'bind': '/opt/ml',
                                                'mode': 'ro'}}
    command = ("/ie-serving-py/start_server.sh ie_serving config "
               "--config_path /opt/ml/model_ver_policy_config.json "
               "--port 9006 --rest_port 5560")

    container = docker_client.containers.run(
        image=get_image,
        detach=True,
        name='ie-serving-py-test-policy',
        ports={'9006/tcp': 9006, '5560/tcp': 5560},
        remove=True,
        volumes=mounts,
        command=command)
    # Tear the container down when the requesting test finishes.
    request.addfinalizer(container.kill)

    assert wait_endpoint_setup(container) is True, \
        "docker container was not started successfully"

    return container
def start_server_update_flow_specific(request, get_image, get_test_dir,
                                      get_docker_context):
    """Start the model server watching /opt/ml/update-<suffix> with a
    'specific' version policy (serve versions 1, 3 and 4 only).

    Returns the container and the grpc/rest ports it exposes.
    """
    docker_client = get_docker_context
    models_dir = get_test_dir + '/saved_models'
    update_dir = models_dir + '/update-{}/'.format(get_tests_suffix())
    # ensure model dir is empty before starting OVMS
    shutil.rmtree(update_dir, ignore_errors=True)

    mounts = {models_dir: {'bind': '/opt/ml', 'mode': 'ro'}}

    grpc_port, rest_port = get_ports_for_fixture(port_suffix="04")

    # Single-quoted inside the command so the JSON survives the shell.
    version_policy = '{"specific": { "versions":[1, 3, 4] }}'
    command = ("/ie-serving-py/start_server.sh ie_serving model "
               "--model_name resnet "
               "--model_path /opt/ml/update-" + get_tests_suffix() +
               " --port " + str(grpc_port) +
               " --model_version_policy '" + version_policy + "' "
               "--rest_port " + str(rest_port))

    container = docker_client.containers.run(
        image=get_image,
        detach=True,
        name='ie-serving-py-test-update-specific-{}'.format(
            get_tests_suffix()),
        ports={'{}/tcp'.format(grpc_port): grpc_port,
               '{}/tcp'.format(rest_port): rest_port},
        remove=True,
        volumes=mounts,
        command=command)
    # Tear the container down when the requesting test finishes.
    request.addfinalizer(container.kill)

    assert wait_endpoint_setup(container) is True, \
        "docker container was not started successfully"

    return container, {"grpc_port": grpc_port, "rest_port": rest_port}
def start_server_multi_model(request, get_docker_network, start_minio_server,
                             get_minio_server_s3, get_image, get_test_dir,
                             get_docker_context):
    """Start the model server in config (multi-model) mode, backed by the
    minio fixture for the S3-hosted models.

    Returns the container and the grpc/rest ports it exposes.
    """
    shutil.copyfile('tests/functional/config.json',
                    get_test_dir + '/saved_models/config.json')

    # Credentials were exported into the environment by the minio fixture.
    access_key = os.getenv('MINIO_ACCESS_KEY')
    secret_key = os.getenv('MINIO_SECRET_KEY')
    aws_region = os.getenv('AWS_REGION')

    docker_client = get_docker_context
    network = get_docker_network

    # NOTE(review): the minio fixture reports its service port under the
    # key "grpc_port" — presumably a naming quirk; verify against fixture.
    _, minio_ports = start_minio_server
    minio_endpoint = 'http://minio.locals3-{}.com:{}'.format(
        get_tests_suffix(), minio_ports["grpc_port"])

    envs = [
        'MINIO_ACCESS_KEY=' + access_key,
        'MINIO_SECRET_KEY=' + secret_key,
        'AWS_ACCESS_KEY_ID=' + access_key,
        'AWS_SECRET_ACCESS_KEY=' + secret_key,
        'AWS_REGION=' + aws_region,
        'S3_ENDPOINT=' + minio_endpoint,
        'https_proxy=' + os.getenv('https_proxy', ""),
        'no_proxy={}'.format(minio_endpoint),
    ]

    mounts = {get_test_dir + '/saved_models/': {'bind': '/opt/ml',
                                                'mode': 'ro'}}

    grpc_port, rest_port = get_ports_for_fixture(port_suffix="07")

    command = ("/ie-serving-py/start_server.sh ie_serving config "
               "--config_path /opt/ml/config.json --port {} "
               "--rest_port {} --grpc_workers 2 --rest_workers 2"
               .format(grpc_port, rest_port))

    container = docker_client.containers.run(
        image=get_image,
        detach=True,
        name='ie-serving-py-test-multi-{}'.format(get_tests_suffix()),
        ports={'{}/tcp'.format(grpc_port): grpc_port,
               '{}/tcp'.format(rest_port): rest_port},
        remove=True,
        volumes=mounts,
        environment=envs,
        command=command,
        network=network.name)

    # Tear the container down when the requesting test finishes.
    request.addfinalizer(container.kill)

    assert wait_endpoint_setup(container) is True, \
        "docker container was not started successfully"

    return container, {"grpc_port": grpc_port, "rest_port": rest_port}