Code example #1
0
import os

import test.test_utils.ec2 as ec2_utils

from test import test_utils
from test.test_utils import CONTAINER_TESTS_PREFIX, get_framework_and_version_from_tag
from test.test_utils.ec2 import get_ec2_instance_type, execute_ec2_inference_test, get_ec2_accelerator_type
from test.dlc_tests.conftest import LOGGER

# Names of the pre-trained models the MXNet inference tests serve.
SQUEEZENET_MODEL = "squeezenet"
BERT_MODEL = "bert_sst"
RESNET_EIA_MODEL = "resnet-152-eia"

# Instance/accelerator types for each processor flavor. The helpers resolve
# the concrete type at collection time, with the given value as the default
# for that processor kind.
MX_EC2_GPU_INSTANCE_TYPE = get_ec2_instance_type(default="g3.8xlarge",
                                                 processor="gpu")
MX_EC2_CPU_INSTANCE_TYPE = get_ec2_instance_type(default="c5.4xlarge",
                                                 processor="cpu")
MX_EC2_EIA_ACCELERATOR_TYPE = get_ec2_accelerator_type(default="eia1.large",
                                                       processor="eia")
# NOTE(review): filter_function presumably prunes the candidate list —
# filter_not_heavy_instance_types looks like it excludes large/expensive GPU
# instance types for the EIA pairing; confirm against test_utils.ec2.
MX_EC2_GPU_EIA_INSTANCE_TYPE = get_ec2_instance_type(
    default="g3.8xlarge",
    processor="gpu",
    filter_function=ec2_utils.filter_not_heavy_instance_types,
)
# Restrict to single-GPU instance types (default p3.2xlarge).
MX_EC2_SINGLE_GPU_INSTANCE_TYPE = get_ec2_instance_type(
    default="p3.2xlarge",
    processor="gpu",
    filter_function=ec2_utils.filter_only_single_gpu,
)
MX_EC2_NEURON_INSTANCE_TYPE = get_ec2_instance_type(default="inf1.xlarge",
                                                    processor="neuron")

# Container-side path of the MXNet telemetry test script, rooted at the
# shared container-tests prefix.
MX_TELEMETRY_CMD = os.path.join(CONTAINER_TESTS_PREFIX,
                                "test_mx_dlc_telemetry_test")
Code example #2
0
import os

import pytest

from test import test_utils
from test.test_utils import CONTAINER_TESTS_PREFIX, get_framework_and_version_from_tag
from test.test_utils.ec2 import get_ec2_instance_type, execute_ec2_inference_test, get_ec2_accelerator_type
from test.dlc_tests.conftest import LOGGER

# Instance/accelerator types for each processor flavor. The helpers resolve
# the concrete type at collection time, with the given value as the default
# for that processor kind.
PT_EC2_GPU_INSTANCE_TYPE = get_ec2_instance_type(default="g3.8xlarge",
                                                 processor="gpu")
PT_EC2_CPU_INSTANCE_TYPE = get_ec2_instance_type(default="c5.9xlarge",
                                                 processor="cpu")
PT_EC2_EIA_ACCELERATOR_TYPE = get_ec2_accelerator_type(default="eia1.large",
                                                       processor="eia")
# Container-side path of the PyTorch telemetry test script, rooted at the
# shared container-tests prefix.
PT_TELEMETRY_CMD = os.path.join(CONTAINER_TESTS_PREFIX, "pytorch_tests",
                                "test_pt_dlc_telemetry_test")
# Neuron (AWS Inferentia) accelerator type; default inf1.xlarge.
PT_EC2_NEURON_ACCELERATOR_TYPE = get_ec2_accelerator_type(
    default="inf1.xlarge", processor="neuron")


@pytest.mark.model("resnet")
@pytest.mark.parametrize("ec2_instance_ami", [test_utils.NEURON_AL2_DLAMI],
                         indirect=True)
@pytest.mark.parametrize("ec2_instance_type",
                         PT_EC2_NEURON_ACCELERATOR_TYPE,
                         indirect=True)
def test_ec2_pytorch_inference_neuron(pytorch_inference, ec2_connection, region,
                                      neuron_only):
    """Smoke-test PyTorch inference on an EC2 Neuron (inf1) instance.

    Bug fix: this test is parametrized with the Neuron DLAMI and the Neuron
    accelerator type and invokes the helper with processor ``"neuron"``, but
    was named ``..._gpu`` and requested the ``gpu_only`` image filter — a
    copy-paste mismatch that selected the wrong image class. Renamed and
    switched to the ``neuron_only`` fixture to match the parametrization.
    """
    ec2_pytorch_inference(pytorch_inference, "neuron", ec2_connection, region)