def test_MMF_activity_recognition_model_register_and_inference_on_valid_model():
    """Register the MMF activity-recognition model from a remote mar file,
    run a video inference through the TF-style inference API and verify the
    predicted activity labels.
    """
    test_utils.start_torchserve(snapshot_file=snapshot_file_tf)
    test_utils.register_model(
        'MMF_activity_recognition_v2',
        'https://torchserve.pytorch.org/mar_files/MMF_activity_recognition_v2.mar'
    )
    # NOTE(review): shells out to wget to fetch the sample video; assumes
    # wget is available on the test host.
    os.system(
        'wget https://mmfartifacts.s3-us-west-2.amazonaws.com/372CC.mp4 -P ../../examples/MMF-activity-recognition'
    )
    input_json = "../../examples/MMF-activity-recognition/372CC.info.json"
    with open(input_json) as jsonfile:
        info = json.load(jsonfile)
    # Fix: open the video inside a context manager so the handle is closed
    # after the request instead of being leaked.
    with open('../../examples/MMF-activity-recognition/372CC.mp4', 'rb') as video:
        files = {
            'data': video,
            'script': info['script'],
            'labels': info['action_labels']
        }
        response = run_inference_using_url_with_data(
            TF_INFERENCE_API + '/v1/models/MMF_activity_recognition_v2:predict',
            pfiles=files)
    # Response body is a python-literal list of label strings.
    response = response.content.decode("utf-8")
    response = ast.literal_eval(response)
    response = [n.strip() for n in response]
    assert response == [
        'Sitting at a table',
        'Someone is sneezing',
        'Watching a laptop or something on a laptop'
    ]
    test_utils.unregister_model("MMF_activity_recognition_v2")
def test_mnist_model_register_and_inference_on_valid_model_explain():
    """Register the mnist model and validate that the /explanations endpoint
    returns a (1, 28, 28) explanation array.
    """
    test_utils.start_torchserve(no_config_snapshots=True)
    test_utils.register_model('mnist', 'mnist.mar')
    # Fix: close the input file after the request instead of leaking it.
    with open(data_file_mnist, 'rb') as input_data:
        files = {
            'data': (data_file_mnist, input_data),
        }
        response = run_inference_using_url_with_data(
            TF_INFERENCE_API + '/explanations/mnist', files)
    assert np.array(json.loads(response.content)).shape == (1, 28, 28)
    test_utils.unregister_model("mnist")
def test_kfserving_mnist_model_register_and_inference_on_valid_model_explain():
    """ Validates the kfserving model explanations. """
    test_utils.start_torchserve(snapshot_file=snapshot_file_kf)
    test_utils.register_model('mnist', 'mnist.mar')
    # The sample request file uses single quotes; normalize to valid JSON.
    with open(input_json_mnist, 'r') as f:
        raw = f.read()
    data = json.loads(raw.replace('\'', '\"'))
    response = run_inference_using_url_with_data_json(
        KF_INFERENCE_API + '/v1/models/mnist:explain', data)
    explanations = json.loads(response.content)['explanations']
    assert np.array(explanations).shape == (1, 1, 28, 28)
    test_utils.unregister_model("mnist")
def test_kfserving_mnist_model_register_and_inference_on_valid_model():
    """ Validates that snapshot.cfg is created when management apis are invoked for kfserving. """
    test_utils.start_torchserve(snapshot_file=snapshot_file_kf)
    test_utils.register_model('mnist', 'mnist.mar')
    # The sample request file uses single quotes; normalize to valid JSON.
    with open(input_json_mnist, 'r') as f:
        raw = f.read()
    data = json.loads(raw.replace('\'', '\"'))
    response = run_inference_using_url_with_data_json(
        KF_INFERENCE_API + '/v1/models/mnist:predict', data)
    predictions = json.loads(response.content)['predictions']
    assert predictions[0] == 2
    test_utils.unregister_model("mnist")
def test_duplicate_model_registration_using_http_url_followed_by_local_url():
    """Registering the same model name twice (first via http url, then via a
    local mar file) must be rejected with a 409 ConflictStatusException.
    """
    # Register using http url
    clean_mar_file("resnet-18.mar")
    test_utils.register_model(
        "resnet18",
        "https://torchserve.pytorch.org/mar_files/resnet-18.mar")
    create_resnet_archive()
    # Second registration under the same model name must conflict.
    response = test_utils.register_model("resnet18", "resnet-18.mar")
    content = json.loads(response.content)
    # Fix: assert the condition directly instead of the no-op
    # `assert True` / `assert False` pattern; failure semantics are identical
    # (cleanup is skipped on failure, as before).
    assert content['code'] == 409 and \
        content['type'] == "ConflictStatusException", \
        "Something is not right!! Successfully re-registered existing model "
    response = test_utils.unregister_model("resnet18")
    time.sleep(10)
def test_mnist_model_register_and_inference_on_valid_model():
    """Register the mnist model and validate that the /predictions endpoint
    classifies the sample digit image as 1.
    """
    test_utils.start_torchserve(no_config_snapshots=True)
    test_utils.register_model('mnist', 'mnist.mar')
    image_path = '../../examples/image_classifier/mnist/test_data/1.png'
    # Fix: close the image file after the request instead of leaking it.
    with open(image_path, 'rb') as image:
        files = {
            'data': (image_path, image),
        }
        response = run_inference_using_url_with_data(
            TF_INFERENCE_API + '/predictions/mnist', files)
    assert json.loads(response.content) == 1
    test_utils.unregister_model("mnist")
def test_multiple_model_versions_registration():
    """Register two versions of resnet-18 and verify the list-models api
    reports both versions.
    """
    # Download resnet-18 model
    create_resnet_archive("resnet-18", "1.0")
    create_resnet_archive("resnet-18_v2", "2.0")
    test_utils.start_torchserve(no_config_snapshots=True)
    response = requests.get('http://localhost:8081/models/resnet18/all')
    print(response.content)
    test_utils.register_model("resnet18", "resnet-18.mar")
    test_utils.register_model("resnet18", "resnet-18_v2.mar")
    # Fix: wait for registrations to settle BEFORE querying the list api;
    # the original slept after already fetching the asserted response,
    # making the wait useless.
    time.sleep(5)
    response = requests.get('http://localhost:8081/models/resnet18/all')
    # Verify that we can use the list models api to get all versions of resnet-18
    assert len(json.loads(response.content)) == 2
def test_duplicate_model_registration_using_local_url_followed_by_http_url():
    """Registering via http url a model already registered from a local mar
    must fail with a 500 InternalServerException (model file already exists).
    """
    # Registration through local mar url is already complete in previous test case.
    # Now try to register same model using http url in this next step
    response = test_utils.register_model(
        "resnet18",
        "https://torchserve.pytorch.org/mar_files/resnet-18.mar")
    time.sleep(15)
    content = json.loads(response.content)
    # Fix: assert the condition directly instead of the no-op
    # `assert True` / `assert False` pattern; failure semantics are identical
    # (cleanup is skipped on failure, as before).
    assert content['code'] == 500 and \
        content['type'] == "InternalServerException", \
        "Something is not right!! Successfully re-registered existing model "
    test_utils.unregister_model("resnet18")
    time.sleep(10)