def consume_file_path(
    number: int,
    number_1a_path: str,
    number_1b_file: str,
    number_1c_file_path: str,
    number_1d_path_file: str,
    number_2a_path: InputPath(str),
    number_2b_file: InputPath(str),
    number_2c_file_path: InputPath(str),
    number_2d_path_file: InputPath(str),
    number_3a_path: InputTextFile(str),
    number_3b_file: InputTextFile(str),
    number_3c_file_path: InputTextFile(str),
    number_3d_path_file: InputTextFile(str),
    number_4a_path: InputBinaryFile(str),
    number_4b_file: InputBinaryFile(str),
    number_4c_file_path: InputBinaryFile(str),
    number_4d_path_file: InputBinaryFile(str),
    output_number_2a_path: OutputPath(str),
    output_number_2b_file: OutputPath(str),
    output_number_2c_file_path: OutputPath(str),
    output_number_2d_path_file: OutputPath(str),
    output_number_3a_path: OutputTextFile(str),
    output_number_3b_file: OutputTextFile(str),
    output_number_3c_file_path: OutputTextFile(str),
    output_number_3d_path_file: OutputTextFile(str),
    output_number_4a_path: OutputBinaryFile(str),
    output_number_4b_file: OutputBinaryFile(str),
    output_number_4c_file_path: OutputBinaryFile(str),
    output_number_4d_path_file: OutputBinaryFile(str),
):
    """Do-nothing function covering every input/output annotation variant.

    NOTE(review): the body is intentionally empty — this appears to be a
    fixture exercising how parameter names (`*_path`, `*_file`, etc.) interact
    with the `InputPath`/`OutputPath`/`*TextFile`/`*BinaryFile` annotations
    during component compilation; confirm against the calling test harness.
    """
    pass
def train_task(data: InputBinaryFile(str), epochs: int, batch_size: int, model_path: OutputBinaryFile(str)):
    """Train CNN model on MNIST dataset.

    Args:
        data: opened binary file containing an ``.npz`` archive with the
            arrays ``train_x``, ``train_y``, ``test_x`` and ``test_y``.
        epochs: number of training epochs.
        batch_size: mini-batch size for ``fit``.
        model_path: opened binary file the trained Keras model is saved to.
    """
    from tensorflow.python import keras
    from tensorflow.python.keras import Sequential, backend as K
    from tensorflow.python.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
    import numpy as np

    dataset = np.load(data)
    train_x, train_y = dataset['train_x'], dataset['train_y']
    test_x, test_y = dataset['test_x'], dataset['test_y']

    n_classes = 10
    img_h, img_w = 28, 28

    # Reshape the flat image arrays to 4-D, honouring the backend's
    # preferred channel position. Assigning to ``.shape`` reshapes in place.
    if K.image_data_format() == 'channels_first':
        train_x.shape = (-1, 1, img_h, img_w)
        test_x.shape = (-1, 1, img_h, img_w)
        input_shape = (1, img_h, img_w)
    else:
        train_x.shape = (-1, img_h, img_w, 1)
        test_x.shape = (-1, img_h, img_w, 1)
        input_shape = (img_h, img_w, 1)

    # Small conv net: two conv layers, one pooling stage, two dense layers.
    net = Sequential([
        Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(n_classes, activation='softmax'),
    ])

    net.compile(
        loss=keras.losses.categorical_crossentropy,
        optimizer=keras.optimizers.Adadelta(),
        metrics=['accuracy'],
    )

    net.fit(
        train_x,
        train_y,
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
        validation_data=(test_x, test_y),
    )

    evaluation = net.evaluate(test_x, test_y)
    print('Test loss & accuracy: %s' % (evaluation, ))
    net.save(model_path)
def consume_file_path(number_file: InputBinaryFile(int)) -> int:
    """Read an integer from an opened binary file.

    Args:
        number_file: opened binary file object whose entire content is the
            decimal text of an integer.

    Returns:
        The parsed integer.

    Raises:
        TypeError: if ``read()`` yields something other than ``bytes``
            (i.e. the file was opened in text mode).
        ValueError: if the content is not a valid integer literal.
    """
    raw = number_file.read()
    # Explicit check instead of `assert`: asserts are stripped under -O,
    # which would silently drop this validation.
    if not isinstance(raw, bytes):
        raise TypeError('expected bytes from binary file, got %s' % type(raw).__name__)
    return int(raw)
def test_task(
    model_file: InputBinaryFile(str),
    examples_file: InputBinaryFile(str),
    confusion_matrix: OutputTextFile(str),
    results: OutputTextFile(str),
):
    """Connects to served model and tests example MNIST images."""
    import time
    import json
    import numpy as np
    import requests
    from tensorflow.python.keras.backend import get_session
    from tensorflow.python.keras.saving import load_model
    from tensorflow.python.saved_model.simple_save import simple_save

    # Export the trained Keras model in SavedModel layout where the
    # TF-Serving sidecar expects to find version 1.
    with get_session() as sess:
        loaded = load_model(model_file)
        simple_save(
            sess,
            '/output/mnist/1/',
            inputs={'input_image': loaded.input},
            outputs={t.name: t for t in loaded.outputs},
        )

    model_url = 'http://localhost:9001/v1/models/mnist'

    # Poll for up to ~5 minutes (60 tries, 5 s apart) until the sidecar serves
    # version 1; the loop's `else` fires only if we never `break`.
    for _ in range(60):
        try:
            requests.get(f'{model_url}/versions/1').raise_for_status()
            break
        except requests.RequestException:
            time.sleep(5)
    else:
        raise Exception("Waited too long for sidecar to come up!")

    resp = requests.get(f'{model_url}/metadata')
    resp.raise_for_status()
    # The served model must expose exactly this signature: one float image
    # input (-1, 28, 28, 1) and one float softmax output (-1, 10).
    assert resp.json() == {
        'model_spec': {
            'name': 'mnist',
            'signature_name': '',
            'version': '1'
        },
        'metadata': {
            'signature_def': {
                'signature_def': {
                    'serving_default': {
                        'inputs': {
                            'input_image': {
                                'dtype': 'DT_FLOAT',
                                'tensor_shape': {
                                    'dim': [
                                        {
                                            'size': '-1',
                                            'name': ''
                                        },
                                        {
                                            'size': '28',
                                            'name': ''
                                        },
                                        {
                                            'size': '28',
                                            'name': ''
                                        },
                                        {
                                            'size': '1',
                                            'name': ''
                                        },
                                    ],
                                    'unknown_rank': False,
                                },
                                'name': 'conv2d_input:0',
                            }
                        },
                        'outputs': {
                            'dense_1/Softmax:0': {
                                'dtype': 'DT_FLOAT',
                                'tensor_shape': {
                                    'dim': [{
                                        'size': '-1',
                                        'name': ''
                                    }, {
                                        'size': '10',
                                        'name': ''
                                    }],
                                    'unknown_rank': False,
                                },
                                'name': 'dense_1/Softmax:0',
                            }
                        },
                        'method_name': 'tensorflow/serving/predict',
                    }
                }
            }
        },
    }

    examples = np.load(examples_file)
    assert examples['val_x'].shape == (100, 28, 28, 1)
    assert examples['val_y'].shape == (100, 10)

    resp = requests.post(f'{model_url}:predict', json={'instances': examples['val_x'].tolist()})
    resp.raise_for_status()

    # Compare argmax class predictions against one-hot ground truth.
    preds = np.argmax(resp.json()['predictions'], axis=1).tolist()
    truth = np.argmax(examples['val_y'], axis=1).tolist()
    accuracy = sum(p == t for p, t in zip(preds, truth)) / len(preds)
    print(f"Accuracy: {accuracy:0.2f}")