Example #1
# Assumed TF 1.x-style imports; the enclosing class is not shown in this snippet.
import tensorflow as tf
from tensorflow.keras.backend import set_session
from tensorflow.keras.models import load_model

def __init__(self):
    # Register the session with Keras so both models load into the same graph.
    self.sess = tf.Session()
    self.graph = tf.get_default_graph()
    set_session(self.sess)
    self.trump_model = load_model(
        'my_jass/ModelCreation/models/dave/final_model_82_games_025_mean_03_std_06_without_schieben.h5')
    self.card_model = load_model(
        'my_jass/ModelCreation/models/matt/card/card_model_e120_b256_sgd__66_15.h5')
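
For context, a minimal hedged sketch of how such a TF 1.x wrapper is typically used: predictions must run inside the same graph and session that were active when the models were loaded. The method and argument names below are illustrative, not from the original code.

def predict_trump(self, features):
    # Reactivate the stored graph and session before predict(); in TF 1.x,
    # a Keras model is bound to the graph it was loaded in.
    with self.graph.as_default():
        set_session(self.sess)
        return self.trump_model.predict(features)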
Example #2
    def test_save_model_with_dynamic_loss_scaling(self, strategy_fn, h5=False):
        if not self._is_strategy_supported(strategy_fn):
            return
        strategy = strategy_fn()
        if (isinstance(strategy, mirrored_strategy.MirroredStrategy)
                and not context.executing_eagerly()):
            # TODO(b/121381184): Enable running the test in this case.
            return

        # Create and run model.
        with strategy.scope():
            x = layers.Input(shape=(2, ), batch_size=2, dtype=dtypes.float32)
            y = AddLayer()(x)
            model = models.Model(inputs=x, outputs=y)

            loss_scale = loss_scale_module.DynamicLossScale(
                initial_loss_scale=1., increment_period=2., multiplier=2.)
            opt = gradient_descent.SGD(1.)
            opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
            model.compile(
                optimizer=opt,
                loss='mse',
                run_eagerly=testing_utils.should_run_eagerly(),
                experimental_run_tf_function=testing_utils.should_run_tf_function())
        # Run for 3 steps (6 examples with a batch size of 2)
        model.fit(np.zeros((6, 2)), np.zeros((6, 2)), batch_size=2)
        self.assertEqual(backend.get_value(loss_scale()), 2)
        self.assertEqual(backend.get_value(loss_scale._num_good_steps), 1)
        (weight, ) = model.trainable_weights
        orig_weight = backend.get_value(weight)

        # Save model weights.
        save_path = os.path.join(self.get_temp_dir(), 'model')
        model.save(save_path, save_format='h5' if h5 else 'tf')

        # Run model again for 1 step (2 examples with a batch size of 2)
        model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2)
        new_weight = backend.get_value(weight)
        self.assertNotEqual(new_weight, orig_weight)
        self.assertEqual(backend.get_value(loss_scale()), 4)
        self.assertEqual(backend.get_value(loss_scale._num_good_steps), 0)

        # Load model weights and ensure loss scale weights are restored.
        model = saving.load_model(save_path,
                                  custom_objects={'AddLayer': AddLayer})
        loss_scale = model.optimizer.loss_scale
        (weight, ) = model.trainable_weights
        loaded_weight = backend.get_value(weight)
        self.assertEqual(loaded_weight, orig_weight)
        # Currently the loss scale isn't always saved when the model is saved with
        # Model.save(). So we assert the loss scale either has the value when it was
        # saved, or the value it was initialized with.
        # TODO(reedwm): Always save/restore the loss scale with Model.save().
        self.assertIn(backend.get_value(loss_scale()), (1, 2))
        self.assertIn(backend.get_value(loss_scale._num_good_steps), (0, 1))
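
The assertions above follow directly from the dynamic loss-scale bookkeeping: with increment_period=2, every second step with finite gradients doubles the scale and resets the good-step counter. A pure-Python sketch of that bookkeeping (a model of the behavior, not the TF implementation):

scale, good_steps = 1.0, 0
increment_period, multiplier = 2, 2.0
for _ in range(3):            # three steps with finite gradients
    good_steps += 1
    if good_steps >= increment_period:
        scale *= multiplier   # 1.0 -> 2.0 after the second step
        good_steps = 0
assert scale == 2.0 and good_steps == 1  # matches the post-fit assertions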
Example #3
def test_task(endpoint: str, bucket: str, model_file: str, examples_file: str) -> str:
    """Connects to served model and tests example MNIST images."""

    from minio import Minio
    from pathlib import Path
    from retrying import retry
    from tensorflow.python.keras.backend import get_session
    from tensorflow.python.keras.saving import load_model
    from tensorflow.python.saved_model.simple_save import simple_save
    import numpy as np
    import requests

    mclient = Minio(
        endpoint,
        access_key=Path('/secrets/accesskey').read_text(),
        secret_key=Path('/secrets/secretkey').read_text(),
        secure=False,
    )

    print('Downloading model')

    mclient.fget_object(bucket, model_file, '/models/model.h5')
    mclient.fget_object(bucket, examples_file, '/models/examples.npz')

    print('Downloaded model, converting it to serving format')

    with get_session() as sess:
        model = load_model('/models/model.h5')
        simple_save(
            sess,
            '/output/mnist/1/',
            inputs={'input_image': model.input},
            outputs={t.name: t for t in model.outputs},
        )

    model_url = 'http://localhost:9001/v1/models/mnist'

    @retry(stop_max_delay=30 * 1000)
    def wait_for_model():
        requests.get(f'{model_url}/versions/1').raise_for_status()

    wait_for_model()

    response = requests.get(f'{model_url}/metadata')
    response.raise_for_status()
    assert response.json() == {
        'model_spec': {'name': 'mnist', 'signature_name': '', 'version': '1'},
        'metadata': {
            'signature_def': {
                'signature_def': {
                    'serving_default': {
                        'inputs': {
                            'input_image': {
                                'dtype': 'DT_FLOAT',
                                'tensor_shape': {
                                    'dim': [
                                        {'size': '-1', 'name': ''},
                                        {'size': '28', 'name': ''},
                                        {'size': '28', 'name': ''},
                                        {'size': '1', 'name': ''},
                                    ],
                                    'unknown_rank': False,
                                },
                                'name': 'conv2d_input:0',
                            }
                        },
                        'outputs': {
                            'dense_1/Softmax:0': {
                                'dtype': 'DT_FLOAT',
                                'tensor_shape': {
                                    'dim': [{'size': '-1', 'name': ''}, {'size': '10', 'name': ''}],
                                    'unknown_rank': False,
                                },
                                'name': 'dense_1/Softmax:0',
                            }
                        },
                        'method_name': 'tensorflow/serving/predict',
                    }
                }
            }
        },
    }

    examples = np.load('/models/examples.npz')
    assert examples['X'].shape == (10, 28, 28, 1)
    assert examples['y'].shape == (10, 10)

    response = requests.post(
        f'{model_url}:predict', json={'instances': examples['X'].tolist()}
    )
    response.raise_for_status()

    predicted = np.argmax(response.json()['predictions'], axis=1).tolist()
    actual = np.argmax(examples['y'], axis=1).tolist()
    accuracy = sum(1 for (p, a) in zip(predicted, actual) if p == a) / len(predicted)

    if accuracy >= 0.8:
        print(f'Got accuracy of {accuracy:0.2f} in mnist model')
    else:
        raise Exception(f'Low accuracy in mnist model: {accuracy}')
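
The @retry decorator above comes from the retrying package: stop_max_delay is given in milliseconds, so the health probe is retried for up to 30 seconds, and any exception raised in the body (including raise_for_status) triggers another attempt. The same pattern in isolation, with a hypothetical URL parameter:

from retrying import retry
import requests

@retry(stop_max_delay=30 * 1000)  # keep retrying for up to 30 s
def probe(url):
    # Any exception (connection error, non-2xx status) causes another attempt.
    requests.get(url).raise_for_status()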
Example #4
import os
from tensorflow.keras.models import load_model  # assumed import for this snippet


def load_most_recent_model(model_dir):
    """Return the model whose filename contains the largest embedded number."""
    models = os.listdir(model_dir)  # all checkpoint files in the directory
    # Sort by the number embedded in each filename; the last entry is the newest.
    models.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))
    return load_model(os.path.join(model_dir, models[-1]))
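
A hypothetical call, assuming the directory holds files such as model_1.h5, model_2.h5, and so on. Note that the sort key raises ValueError for any filename that contains no digits.

latest = load_most_recent_model('checkpoints/')  # path is an assumption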
Example #5

if args.perc_lines > 0.0:
    loader = Loader()
    args.num_batches = int((args.perc_lines * loader.get_num_lines() * 26) /
                           (args.batch_size * args.num_epochs))
    print('To see {} of the dataset, {} batches of size {} are run for {} epochs.'
          .format(args.perc_lines, args.num_batches, args.batch_size,
                  args.num_epochs))

if args.configuration:
    try:
        model = load_model(
            'checkpoints/checkpoint_{}.best_val_acc.hdf5'.format(args.configuration))

        with open('tokenizers/tokenizer_{}.pickle'.format(args.configuration),
                  'rb') as handle:
            tokenizer = pickle.load(handle)

    except OSError:
        sys.stderr.write('Configuration {} not found!\n'.format(args.configuration))
        exit(1)

if args.configuration is None or args.force_train:
Example #6
def test_task(
        model_file: InputBinaryFile(str),
        examples_file: InputBinaryFile(str),
        confusion_matrix: OutputTextFile(str),
        results: OutputTextFile(str),
):
    """Connects to served model and tests example MNIST images."""

    import time
    import json

    import numpy as np
    import requests
    from tensorflow.python.keras.backend import get_session
    from tensorflow.python.keras.saving import load_model
    from tensorflow.python.saved_model.simple_save import simple_save

    with get_session() as sess:
        model = load_model(model_file)
        simple_save(
            sess,
            '/output/mnist/1/',
            inputs={'input_image': model.input},
            outputs={t.name: t for t in model.outputs},
        )

    model_url = 'http://localhost:9001/v1/models/mnist'

    for _ in range(60):
        try:
            requests.get(f'{model_url}/versions/1').raise_for_status()
            break
        except requests.RequestException:
            time.sleep(5)
    else:
        raise Exception("Waited too long for sidecar to come up!")

    response = requests.get(f'{model_url}/metadata')
    response.raise_for_status()
    assert response.json() == {
        'model_spec': {'name': 'mnist', 'signature_name': '', 'version': '1'},
        'metadata': {
            'signature_def': {
                'signature_def': {
                    'serving_default': {
                        'inputs': {
                            'input_image': {
                                'dtype': 'DT_FLOAT',
                                'tensor_shape': {
                                    'dim': [
                                        {'size': '-1', 'name': ''},
                                        {'size': '28', 'name': ''},
                                        {'size': '28', 'name': ''},
                                        {'size': '1', 'name': ''},
                                    ],
                                    'unknown_rank': False,
                                },
                                'name': 'conv2d_input:0',
                            }
                        },
                        'outputs': {
                            'dense_1/Softmax:0': {
                                'dtype': 'DT_FLOAT',
                                'tensor_shape': {
                                    'dim': [{'size': '-1', 'name': ''}, {'size': '10', 'name': ''}],
                                    'unknown_rank': False,
                                },
                                'name': 'dense_1/Softmax:0',
                            }
                        },
                        'method_name': 'tensorflow/serving/predict',
                    }
                }
            }
        },
    }

    examples = np.load(examples_file)
    assert examples['val_x'].shape == (100, 28, 28, 1)
    assert examples['val_y'].shape == (100, 10)

    response = requests.post(f'{model_url}:predict',
                             json={'instances': examples['val_x'].tolist()})
    response.raise_for_status()

    predicted = np.argmax(response.json()['predictions'], axis=1).tolist()
    actual = np.argmax(examples['val_y'], axis=1).tolist()
    zipped = list(zip(predicted, actual))
    accuracy = sum(1 for (p, a) in zipped if p == a) / len(predicted)

    print(f"Accuracy: {accuracy:0.2f}")