예제 #1
0
def test_huggingface_bert_batch_inference():
    """Register a BERT sequence-classification model with batching enabled and
    verify that two parallel inference requests are served from the same batch.
    """
    batch_size = 2
    batch_delay = 10000  # max batch delay in milliseconds (10 seconds)
    params = (('model_name', 'BERTSeqClassification'), (
        'url',
        'https://torchserve.pytorch.org/mar_files/BERTSeqClassification.mar'),
              ('initial_workers', '1'), ('batch_size', str(batch_size)),
              ('max_batch_delay', str(batch_delay)))
    test_utils.start_torchserve(no_config_snapshots=True)
    test_utils.register_model_with_params(params)
    input_text = os.path.join(
        REPO_ROOT,
        'examples/Huggingface_Transformers/Seq_classification_artifacts/sample_text.txt'
    )

    # Make 2 curl requests in parallel with &
    # curl --header \"X-Forwarded-For: 1.2.3.4\" won't work since you can't access local host anymore
    # Use a context manager so the pipe returned by os.popen is always closed
    # (the original leaked the file object).
    with os.popen(
        f"curl http://127.0.0.1:8080/predictions/BERTSeqClassification -T {input_text} & curl http://127.0.0.1:8080/predictions/BERTSeqClassification -T {input_text}"
    ) as pipe:
        response = pipe.read()

    ## Assert that 2 responses are returned from the same batch
    assert response == 'Not AcceptedNot Accepted'
    test_utils.unregister_model('BERTSeqClassification')
예제 #2
0
def test_replace_mar_file_with_dummy():
    """Validates that torchserve will fail to start in the following scenario:
        1) We use a snapshot file to start torchserve. The snapshot contains reference to "A" model file
        2) "A" model file gets corrupted or is replaced by some dummy mar file with same name"""

    snapshot_created_on_management_api_invoke()

    # Start Torchserve using last snapshot state
    snapshot_cfg = glob.glob('logs/config/*snap*.cfg')[0]
    test_utils.start_torchserve(snapshot_file=snapshot_cfg)
    response = requests.get('http://127.0.0.1:8081/models/')
    assert json.loads(
        response.content)['models'][0]['modelName'] == "densenet161"
    test_utils.stop_torchserve()

    # Now replace the registered model mar with dummy file
    replace_mar_file_with_dummy_mar_in_model_store(
        model_store="/workspace/model_store", model_mar="densenet161.mar")
    snapshot_cfg = glob.glob('logs/config/*snap*.cfg')[0]
    test_utils.start_torchserve(snapshot_file=snapshot_cfg)
    try:
        # Startup should fail because the snapshot references a mar file that
        # has been replaced by a dummy, so this request is expected to raise.
        response = requests.get('http://127.0.0.1:8081/models/')
        assert json.loads(
            response.content)['models'][0]['modelName'] == "densenet161"
    except Exception:  # narrowed from bare `except:` so SystemExit/KeyboardInterrupt propagate
        assert True, "Correct Model mar file not found"
    else:
        assert False, "Something is not right!! Successfully started Torchserve with a dummy mar file"
    finally:
        test_utils.delete_all_snapshots()
        test_utils.delete_model_store()
예제 #3
0
def logs_created(no_config_snapshots=False):
    """Start torchserve (using the module-level ``snapshot_file``) and assert
    that each standard log file was created exactly once under ``logs/``."""
    test_utils.start_torchserve(snapshot_file=snapshot_file,
                                no_config_snapshots=no_config_snapshots)
    for log_name in ('access_log.log', 'model_log.log', 'ts_log.log'):
        assert len(glob.glob('logs/' + log_name)) == 1
예제 #4
0
def run_metrics_location_var(custom_path=test_utils.ROOT_DIR, no_config_snapshots=False):
    """Start torchserve and check that both metrics log files appear under
    *custom_path* (the check is skipped when that directory is not writable)."""
    test_utils.delete_all_snapshots()
    test_utils.start_torchserve(no_config_snapshots=no_config_snapshots)

    # If custom_path is read-only the server could not have written there,
    # so only assert when the directory is writable.
    if os.access(custom_path, os.W_OK):
        for metrics_file in ('ts_metrics.log', 'model_metrics.log'):
            assert len(glob.glob(custom_path + '/' + metrics_file)) == 1
예제 #5
0
def test_MMF_activity_recognition_model_register_and_inference_on_valid_model(
):
    """Register the MMF activity-recognition model and validate the predicted
    activity labels for a sample video clip."""
    test_utils.start_torchserve(snapshot_file=snapshot_file_tf)
    test_utils.register_model(
        'MMF_activity_recognition_v2',
        'https://torchserve.pytorch.org/mar_files/MMF_activity_recognition_v2.mar'
    )
    os.system(
        'wget https://mmfartifacts.s3-us-west-2.amazonaws.com/372CC.mp4 -P ../../examples/MMF-activity-recognition'
    )
    input_json = "../../examples/MMF-activity-recognition/372CC.info.json"
    with open(input_json) as jsonfile:
        info = json.load(jsonfile)

    # Open the video in a context manager so the handle is closed even if
    # the inference request fails (the original leaked this file object).
    with open('../../examples/MMF-activity-recognition/372CC.mp4',
              'rb') as video:
        files = {
            'data': video,
            'script': info['script'],
            'labels': info['action_labels']
        }
        response = run_inference_using_url_with_data(
            TF_INFERENCE_API + '/v1/models/MMF_activity_recognition_v2:predict',
            pfiles=files)
    response = response.content.decode("utf-8")
    # The handler returns a Python-literal list of labels; parse it safely.
    response = ast.literal_eval(response)
    response = [n.strip() for n in response]
    assert response == [
        'Sitting at a table', 'Someone is sneezing',
        'Watching a laptop or something on a laptop'
    ]
    test_utils.unregister_model("MMF_activity_recognition_v2")
예제 #6
0
def snapshot_created_on_management_api_invoke(model_mar="densenet161.mar"):
    """Start a clean torchserve instance, register *model_mar* through the
    management API (which triggers snapshot creation), then stop the server."""
    test_utils.delete_all_snapshots()
    test_utils.start_torchserve()
    registration_url = (
        'http://127.0.0.1:8081/models?url=https://torchserve.pytorch.org/mar_files/'
        + model_mar)
    requests.post(registration_url)
    # Give the server time to finish registration and write the snapshot.
    time.sleep(10)
    test_utils.stop_torchserve()
예제 #7
0
def test_start_from_default():
    """
    Validates that Default config is used if we dont use a config explicitly.
    """
    test_utils.delete_all_snapshots()
    test_utils.start_torchserve()
    response = requests.get('http://127.0.0.1:8081/models/')
    registered_models = json.loads(response.content)['models']
    # With the default config and no snapshots, nothing should be registered.
    assert len(registered_models) == 0
예제 #8
0
def test_start_from_latest():
    """
    Validates if latest snapshot file is picked if we dont pass snapshot arg explicitly.
    """
    test_utils.start_torchserve()
    models = json.loads(
        requests.get('http://127.0.0.1:8081/models/').content)['models']
    # The latest snapshot is expected to have densenet161 registered.
    assert models[0]['modelName'] == "densenet161"
    test_utils.stop_torchserve()
예제 #9
0
def run_log_location_var(custom_path=test_utils.ROOT_DIR, no_config_snapshots=False):
    """Start torchserve and check that each standard log file appears under
    *custom_path* (only asserted when that directory is writable)."""
    test_utils.delete_all_snapshots()
    test_utils.start_torchserve(no_config_snapshots=no_config_snapshots)

    # This check warrants that we are not accidentally monitoring a readonly logs/snapshot directory
    if os.access(custom_path, os.W_OK):
        for log_name in ('access_log.log', 'model_log.log', 'ts_log.log'):
            assert len(glob.glob(custom_path + '/' + log_name)) == 1
예제 #10
0
def test_snapshot_created_on_start_and_stop():
    """
    Validates that startup .cfg & shutdown.cfg are created upon start & stop.
    """
    test_utils.delete_all_snapshots()
    test_utils.start_torchserve()
    test_utils.stop_torchserve()
    # Exactly one startup and one shutdown snapshot should exist.
    for pattern in ('logs/config/*startup.cfg', 'logs/config/*shutdown.cfg'):
        assert len(glob.glob(pattern)) == 1
예제 #11
0
def test_start_from_snapshot():
    """
    Validates if we can restore state from snapshot.
    """
    snapshot_cfg = glob.glob('logs/config/*snap*.cfg')[0]
    test_utils.start_torchserve(snapshot_file=snapshot_cfg)
    models = json.loads(
        requests.get('http://127.0.0.1:8081/models/').content)['models']
    # The snapshot should restore the previously registered densenet161.
    assert models[0]['modelName'] == "densenet161"
    test_utils.stop_torchserve()
예제 #12
0
def test_no_config_snapshots_cli_option():
    """
    Validates that --no-config-snapshots works as expected.
    """
    # Required to stop torchserve here so that all config files gets deleted
    test_utils.stop_torchserve()
    test_utils.delete_all_snapshots()
    test_utils.start_torchserve(no_config_snapshots=True)
    test_utils.stop_torchserve()
    # No snapshot/config files should have been written during the run.
    assert glob.glob('logs/config/*.cfg') == []
예제 #13
0
def validate_metrics_created(no_config_snapshots=False):
    """Start torchserve and assert that both metrics log files were created,
    while maintaining the module-level NUM_STARTUP_CFG counter."""
    test_utils.delete_all_snapshots()
    global NUM_STARTUP_CFG
    # Reset NUM_STARTUP_CFG as we are deleting snapshots in the previous step
    NUM_STARTUP_CFG = 0
    test_utils.start_torchserve(no_config_snapshots=no_config_snapshots)
    if not no_config_snapshots:
        NUM_STARTUP_CFG += 1

    for metrics_log in ('logs/model_metrics.log', 'logs/ts_metrics.log'):
        assert len(glob.glob(metrics_log)) == 1
예제 #14
0
def snapshot_created_on_management_api_invoke(model_mar="densenet161.mar"):
    """Start a clean torchserve instance and register *model_mar* through the
    management API, preferring a locally generated mar file when available."""
    test_utils.delete_all_snapshots()
    test_utils.start_torchserve()
    mar_path = "mar_path_{}".format(model_mar[0:-4])  # key without ".mar"
    if mar_path in test_utils.mar_file_table:
        # A locally built mar exists: register it directly by file name.
        registration_url = 'http://127.0.0.1:8081/models?url=' + model_mar
    else:
        # Fall back to the hosted mar file from the torchserve model zoo.
        registration_url = (
            'http://127.0.0.1:8081/models?url=https://torchserve.pytorch.org/mar_files/'
            + model_mar)
    requests.post(registration_url)
    # Allow registration to finish and the snapshot to be written.
    time.sleep(10)
    test_utils.stop_torchserve()
예제 #15
0
def test_mnist_model_register_and_inference_on_valid_model_explain():
    """
    Register the mnist model and validate the shape of the explanations
    returned by the /explanations endpoint for a sample digit image.
    """
    test_utils.start_torchserve(no_config_snapshots=True)
    test_utils.register_model('mnist', 'mnist.mar')
    # Open the sample image in a context manager so the file handle is
    # closed even if the request fails (the original leaked it).
    with open(data_file_mnist, 'rb') as data_file:
        files = {
            'data': (data_file_mnist, data_file),
        }
        response = run_inference_using_url_with_data(
            TF_INFERENCE_API + '/explanations/mnist', files)

    # One explanation map per 28x28 MNIST image.
    assert np.array(json.loads(response.content)).shape == (1, 28, 28)
    test_utils.unregister_model("mnist")
예제 #16
0
def test_start_from_non_existing_snapshot():
    """
    Validates that Torchserve should fail to start when we pass a non-existent snapshot
     as an input snapshot while starting Torchserve.
    """
    test_utils.stop_torchserve()
    test_utils.start_torchserve(snapshot_file="logs/config/junk-snapshot.cfg")
    try:
        # The server should not be up, so this request is expected to raise.
        response = requests.get('http://127.0.0.1:8081/models/')
    except Exception:  # narrowed from bare `except:` so SystemExit/KeyboardInterrupt propagate
        assert True, "Failed to start Torchserve using a Non Existing Snapshot"
    else:
        assert False, "Something is not right!! Successfully started Torchserve " \
                      "using Non Existing Snapshot File!!"
예제 #17
0
def test_async_logging():
    """Validates that we can use async_logging flag while starting Torchserve"""
    # Need to stop torchserve as we need to check if log files get generated with 'aysnc_logging' flag
    test_utils.stop_torchserve()
    for stale_log in glob.glob("logs/*.log"):
        os.remove(stale_log)
    # delete_all_snapshots()
    async_config_file = test_utils.ROOT_DIR + 'async-log-config.properties'
    with open(async_config_file, "w+") as config:
        config.write("async_logging=true")
    test_utils.start_torchserve(snapshot_file=async_config_file)
    # With async logging enabled the usual log files must still be produced.
    for expected_log in ('logs/access_log.log', 'logs/model_log.log',
                         'logs/ts_log.log'):
        assert len(glob.glob(expected_log)) == 1
예제 #18
0
def test_async_logging_non_boolean():
    '''Validates that Torchserve uses default value for async_logging flag
    if we assign a non boolean value to this flag'''
    test_utils.stop_torchserve()
    for stale_log in glob.glob("logs/*.log"):
        os.remove(stale_log)
    # delete_all_snapshots()
    async_config_file = test_utils.ROOT_DIR + 'async-log-config.properties'
    with open(async_config_file, "w+") as config:
        # Deliberately non-boolean: the server should fall back to the
        # default async_logging value and still start normally.
        config.write("async_logging=2")
    test_utils.start_torchserve(snapshot_file=async_config_file)
    for expected_log in ('logs/access_log.log', 'logs/model_log.log',
                         'logs/ts_log.log'):
        assert len(glob.glob(expected_log)) == 1
    test_utils.stop_torchserve()
예제 #19
0
def test_start_from_read_only_snapshot():
    """
    Validates if we can start and restore Torchserve state using a read-only snapshot.
    """
    snapshot_cfg = glob.glob('logs/config/*snap*.cfg')[0]
    file_status = os.stat(snapshot_cfg)
    # Make the snapshot read-only for everyone before starting the server.
    os.chmod(snapshot_cfg, 0o444)
    test_utils.start_torchserve(snapshot_file=snapshot_cfg)
    # Restore the original permission bits for subsequent tests.
    os.chmod(snapshot_cfg, (file_status.st_mode & 0o777))
    try:
        response = requests.get('http://127.0.0.1:8081/models/')
    except Exception:  # narrowed from bare `except:` so SystemExit/KeyboardInterrupt propagate
        assert False, "Something is not right!! Failed to start Torchserve using Read Only Snapshot!!"
    else:
        assert True, "Successfully started and restored Torchserve state using a Read Only Snapshot"
예제 #20
0
def test_torchserve_init_with_non_existent_model_store():
    """Validates that Torchserve fails to start if the model store directory is non existent """

    test_utils.start_torchserve(model_store="/invalid_model_store",
                                snapshot_file=None,
                                no_config_snapshots=True)
    try:
        # The server should not be up, so this request is expected to raise.
        response = requests.get('http://127.0.0.1:8081/models/')
    except Exception:  # narrowed from bare `except:` so SystemExit/KeyboardInterrupt propagate
        assert True, "Failed to start Torchserve using non existent model-store directory"
    else:
        assert False, "Something is not right!! Successfully started Torchserve " \
                      "using non existent directory!!"
    finally:
        test_utils.delete_model_store()
        test_utils.delete_all_snapshots()
예제 #21
0
def test_kfserving_mnist_model_register_and_inference_on_valid_model_explain():
    """
    Validates the kfserving model explanations.
    """
    test_utils.start_torchserve(snapshot_file=snapshot_file_kf)
    test_utils.register_model('mnist', 'mnist.mar')
    with open(input_json_mnist, 'r') as f:
        raw = f.read()
        # The sample file uses single quotes; normalize to valid JSON first.
        data = json.loads(raw.replace('\'', '\"'))

    response = run_inference_using_url_with_data_json(
        KF_INFERENCE_API + '/v1/models/mnist:explain', data)

    explanations = json.loads(response.content)['explanations']
    # One explanation map per image: (batch, channel, height, width).
    assert np.array(explanations).shape == (1, 1, 28, 28)
    test_utils.unregister_model("mnist")
예제 #22
0
def test_kfserving_mnist_model_register_and_inference_on_valid_model():
    """
    Validates that snapshot.cfg is created when management apis are invoked for kfserving.
    """
    test_utils.start_torchserve(snapshot_file=snapshot_file_kf)
    test_utils.register_model('mnist', 'mnist.mar')

    with open(input_json_mnist, 'r') as f:
        raw = f.read()
        # The sample file uses single quotes; normalize to valid JSON first.
        data = json.loads(raw.replace('\'', '\"'))

    response = run_inference_using_url_with_data_json(
        KF_INFERENCE_API + '/v1/models/mnist:predict', data)

    # The sample digit image should be classified as "2".
    assert (json.loads(response.content)['predictions'][0]) == 2
    test_utils.unregister_model("mnist")
예제 #23
0
def test_mnist_model_register_and_inference_on_valid_model():
    """
    Register the mnist model and validate the prediction returned for a
    sample digit image served through the inference API.
    """
    test_utils.start_torchserve(no_config_snapshots=True)
    test_utils.register_model('mnist', 'mnist.mar')

    image_path = '../../examples/image_classifier/mnist/test_data/1.png'
    # Open the image in a context manager so the file handle is closed even
    # if the request fails (the original leaked it).
    with open(image_path, 'rb') as image:
        files = {
            'data': (image_path, image),
        }
        response = run_inference_using_url_with_data(
            TF_INFERENCE_API + '/predictions/mnist', files)

    # The sample image contains the digit "1".
    assert (json.loads(response.content)) == 1
    test_utils.unregister_model("mnist")
예제 #24
0
def test_multiple_model_versions_registration():
    """Register two versions of resnet-18 under one model name and verify the
    list-models API reports both versions."""
    # Download resnet-18 model
    create_resnet_archive("resnet-18", "1.0")
    create_resnet_archive("resnet-18_v2", "2.0")

    test_utils.start_torchserve(no_config_snapshots=True)

    versions_url = 'http://localhost:8081/models/resnet18/all'
    # Debug aid: dump whatever is registered before we register anything.
    response = requests.get(versions_url)
    print(response.content)

    test_utils.register_model("resnet18", "resnet-18.mar")
    test_utils.register_model("resnet18", "resnet-18_v2.mar")

    response = requests.get(versions_url)
    time.sleep(5)
    # Verify that we can use the list models api to get all versions of resnet-18
    assert len(json.loads(response.content)) == 2
예제 #25
0
def test_restart_torchserve_with_last_snapshot_with_model_mar_removed():
    """Validates that torchserve will fail to start in the following scenario:
        1) We use a snapshot file to start torchserve. The snapshot contains reference to "A" model file
        2) The "A" model mar file is accidentally deleted from the model store"""

    # Register model using mgmt api
    snapshot_created_on_management_api_invoke()

    # Now remove the registered model mar file (delete_mar_ fn)
    test_utils.delete_mar_file_from_model_store(
        model_store="/workspace/model_store", model_mar="densenet")

    # Start Torchserve with last generated snapshot file
    snapshot_cfg = glob.glob('logs/config/*snap*.cfg')[0]
    test_utils.start_torchserve(snapshot_file=snapshot_cfg)
    try:
        # The server should not be up, so this request is expected to raise.
        response = requests.get('http://127.0.0.1:8081/models/')
    except Exception:  # narrowed from bare `except:` so SystemExit/KeyboardInterrupt propagate
        assert True, "Failed to start Torchserve properly as reqd model mar file is missing!!"
    else:
        assert False, "Something is not right!! Successfully started Torchserve without reqd mar file"
    finally:
        test_utils.delete_model_store()
        test_utils.delete_all_snapshots()
예제 #26
0
def test_restart_torchserve_with_one_of_model_mar_removed():
    """Validates that torchserve will fail to start in the following scenario:
        1) We use a snapshot file to start torchserve. The snapshot contains reference to few model files
        2) One of these model mar files are accidentally deleted from the model store"""
    # Register multiple models
    # 1st model
    test_utils.delete_model_store()
    test_utils.start_torchserve()
    requests.post(
        'http://127.0.0.1:8081/models?url=https://torchserve.pytorch.org/mar_files/densenet161.mar'
    )
    time.sleep(15)
    # 2nd model
    requests.post(
        'http://127.0.0.1:8081/models?url=https://torchserve.pytorch.org/mar_files/mnist.mar'
    )
    time.sleep(15)
    test_utils.stop_torchserve()

    # Start Torchserve
    test_utils.start_torchserve()
    response = requests.get('http://127.0.0.1:8081/models/')
    num_of_regd_models = len(json.loads(response.content)['models'])
    test_utils.stop_torchserve()

    # Now remove the registered model mar file (delete_mar_ fn)
    test_utils.delete_mar_file_from_model_store(
        model_store="/workspace/model_store", model_mar="densenet")

    # Start Torchserve with existing snapshot file containing reference to one of the model mar file
    # which is now missing from the model store
    snapshot_cfg = glob.glob('logs/config/*snap*.cfg')[1]
    test_utils.start_torchserve(snapshot_file=snapshot_cfg)
    try:
        # The server should not be up, so this request is expected to raise.
        response = requests.get('http://127.0.0.1:8081/models/')
    except Exception:  # narrowed from bare `except:` so SystemExit/KeyboardInterrupt propagate
        assert True, "Failed to start Torchserve as one of reqd model mar file is missing"
    else:
        assert False, "Something is not right!! Started Torchserve successfully with a " \
                      "reqd model mar file missing from the model store!!"
    finally:
        test_utils.torchserve_cleanup()
예제 #27
0
def setup_module(module):
    """Pytest module-level fixture: reset to a clean state (snapshots and
    model store removed) and bring up torchserve for this module's tests."""
    test_utils.torchserve_cleanup()
    test_utils.start_torchserve()
예제 #28
0
def validate_config_file(config_file):
    """Start torchserve with *config_file* and assert the ping endpoint
    reports a healthy server."""
    test_utils.start_torchserve(snapshot_file=config_file)
    ping = requests.get('http://localhost:8080/ping')
    assert json.loads(ping.content)['status'] == 'Healthy'