def test_pickler_keras():
    '''Tests keras dump / load functionality'''
    iris = load_iris()
    features = iris.data
    labels_onehot = pd.get_dummies(iris.target).values

    # small feed-forward classifier over the 4 iris features
    net = Sequential()
    net.add(Dense(3, input_dim=4, activation='relu'))
    net.add(Dense(3, activation='softmax'))
    net.compile(loss='categorical_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])
    net.fit(features, labels_onehot, verbose=0)

    with TemporaryDirectory() as root:

        with AcumosContextManager(root) as context:
            pkl_path = context.build_path('model.pkl')
            with open(pkl_path, 'wb') as fout:
                dump_model(net, fout)

            # keras serialization should pull in its full dependency set
            expected_pkgs = {'keras', 'dill', 'acumos', 'h5py', 'tensorflow'}
            assert expected_pkgs == context.package_names

        with AcumosContextManager(root) as context:
            with open(pkl_path, 'rb') as fin:
                restored = load_model(fin)

    # round-tripped model must predict identically to the original
    original_preds = net.predict_classes(features, verbose=0)
    restored_preds = restored.predict_classes(features, verbose=0)
    assert (original_preds == restored_preds).all()
# --- example 2 (scraper separator) ---
def test_user_script():
    '''Tests that user scripts are identified as dependencies'''
    def predict(x: int) -> int:
        return user_function(x)

    wrapped = Model(predict=predict)

    with AcumosContextManager() as context:
        pkl_path = context.build_path('model.pkl')
        with open(pkl_path, 'wb') as fout:
            dump_model(wrapped, fout)

            # the module providing `user_function` must be captured
            assert 'user_module' in context.script_names

        # unpickling should fail because `user_module` is not available
        with pytest.raises(Exception, match="No module named 'user_module'"):
            run_command([sys.executable, _UNPICKLER_HELPER, context.abspath])
# --- example 3 (scraper separator) ---
def test_function_import():
    '''Tests that a module used by a function is captured correctly'''
    import numpy as np

    def foo():
        return np.arange(5)

    with AcumosContextManager() as context:
        pkl_path = context.build_path('model.pkl')
        with open(pkl_path, 'wb') as fout:
            dump_model(foo, fout)

        # numpy is used only inside `foo`, yet must still be detected
        assert context.package_names == {'dill', 'acumos', 'numpy'}

        with open(pkl_path, 'rb') as fin:
            restored = load_model(fin)

    # restored closure must behave identically
    assert (restored() == np.arange(5)).all()
# --- example 4 (scraper separator) ---
def _dump_model(model, name, requirements=None):
    '''
    Generates model artifacts and serializes the model.

    Builds a complete model bundle inside a temporary directory: the
    pickled model, a generated protobuf definition plus compiled source,
    JSON metadata, and any user-provided scripts/packages. The bundle
    directory is zipped and the unzipped copy removed before yielding.

    NOTE(review): this is a generator (it ``yield``s the root path) —
    presumably consumed via ``contextlib.contextmanager`` or an explicit
    ``next()``/``close()`` by callers; confirm at the call sites.

    Parameters
    ----------
    model : object
        Model to serialize via ``dump_model``
    name : str
        Model name passed through to ``create_model_meta``
    requirements : Requirements, optional
        User-declared dependencies; defaults to an empty ``Requirements``

    Yields
    ------
    str
        Path of a temporary root directory containing ``model.zip`` and
        the artifacts written via ``dump_artifact`` (``model.proto``,
        ``metadata.json``). The directory is deleted when the enclosing
        ``TemporaryDirectory`` exits.
    '''
    requirements = Requirements() if requirements is None else requirements

    with TemporaryDirectory() as rootdir:

        model_dir = path_join(rootdir, 'model')
        mkdir(model_dir)

        with AcumosContextManager(model_dir) as context:

            # serialize the model itself inside the bundle directory
            with open(context.build_path('model.pkl'), 'wb') as f:
                dump_model(model, f)

            # generate protobuf definition
            # random package name avoids collisions between generated protos
            proto_pkg = context.parameters['protobuf_package'] = _random_string()
            protostr = model2proto(model, proto_pkg)
            dump_artifact(rootdir, 'model.proto', data=protostr, module=None, mode='w')

            # generate protobuf source code
            module_name = 'model'
            proto_dir = context.create_subdir('scripts', 'acumos_gen', proto_pkg)
            compile_protostr(protostr, proto_pkg, module_name, proto_dir)

            # generate model metadata
            # merge auto-detected packages into the user-declared requirements
            requirements.reqs.update(context.package_names)
            metadata = create_model_meta(model, name, requirements)
            dump_artifact(rootdir, 'metadata.json', data=metadata, module=json, mode='w')

            # bundle user-provided code
            code_dir = context.create_subdir('scripts', 'user_provided')
            Path(code_dir, '.keep').touch()  # may resolve pruning issues when unzipping

            # copy packages and modules
            pkg_scripts = _gather_package_scripts(requirements.packages)
            _copy_package_scripts(context, pkg_scripts, code_dir)

            scripts = set(_gather_scripts(context, requirements))
            _copy_scripts(scripts, code_dir)

        shutil.make_archive(model_dir, 'zip', model_dir)  # create zip at same level as parent
        shutil.rmtree(model_dir)  # clean up model directory

        yield rootdir
# --- example 5 (scraper separator) ---
def test_nested_model():
    '''Tests nested models'''
    iris = load_iris()
    features = iris.data
    labels = iris.target
    labels_onehot = pd.get_dummies(iris.target).values

    # keras classifier trained on one-hot targets
    keras_clf = Sequential()
    keras_clf.add(Dense(3, input_dim=4, activation='relu'))
    keras_clf.add(Dense(3, activation='softmax'))
    keras_clf.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
    keras_clf.fit(features, labels_onehot, verbose=0)

    # sklearn classifier trained on the raw labels
    svm_clf = SVC()
    svm_clf.fit(features, labels)

    # lambda watch out
    crazy_good_model = lambda x: keras_clf.predict_classes(x) + svm_clf.predict(x)  # noqa
    out1 = crazy_good_model(features)

    with TemporaryDirectory() as root:

        with AcumosContextManager(root) as context:
            pkl_path = context.build_path('model.pkl')
            with open(pkl_path, 'wb') as fout:
                dump_model(crazy_good_model, fout)

            # both frameworks' dependency sets must be captured
            expected_pkgs = {'sklearn', 'keras', 'dill', 'acumos', 'numpy',
                             'h5py', 'tensorflow'}
            assert expected_pkgs == context.package_names

        with AcumosContextManager(root) as context:
            with open(pkl_path, 'rb') as fin:
                restored = load_model(fin)

    # combined output must survive the round trip
    out2 = restored(features)
    assert (out1 == out2).all()
# --- example 6 (scraper separator) ---
def test_keras_contrib():
    '''Tests keras_contrib layer is saved correctly'''
    net = Sequential()
    net.add(Dense(10, input_shape=(10, )))
    net.add(PELU())  # layer from keras_contrib, not core keras

    net.compile(loss='mse', optimizer='adam')
    net.fit(x=np.random.random((10, 10)),
            y=np.random.random((10, 10)),
            epochs=1,
            verbose=0)

    with AcumosContextManager() as context:
        pkl_path = context.build_path('model.pkl')
        with open(pkl_path, 'wb') as fout:
            dump_model(net, fout)
            # keras_contrib must be detected alongside the core keras deps
            expected_pkgs = {'keras', 'dill', 'acumos', 'h5py', 'tensorflow',
                             'keras_contrib'}
            assert expected_pkgs == context.package_names

        # verify that the contrib layers don't cause a load error
        run_command([sys.executable, _UNPICKLER_HELPER, context.abspath])
# --- example 7 (scraper separator) ---
def test_pickler_sklearn():
    '''Tests sklearn dump / load functionality'''
    iris = load_iris()
    features = iris.data
    labels = iris.target

    clf = SVC()
    clf.fit(features, labels)

    with TemporaryDirectory() as root:

        with AcumosContextManager(root) as context:
            pkl_path = context.build_path('model.pkl')
            with open(pkl_path, 'wb') as fout:
                dump_model(clf, fout)

            # sklearn serialization should pull in its dependency set
            expected_pkgs = {'sklearn', 'dill', 'acumos', 'numpy'}
            assert expected_pkgs == context.package_names

        with AcumosContextManager(root) as context:
            with open(pkl_path, 'rb') as fin:
                restored = load_model(fin)

    # predictions must be identical after the round trip
    assert (clf.predict(features) == restored.predict(features)).all()
# --- example 8 (scraper separator) ---
def test_pickler_tensorflow():
    '''
    Tests tensorflow session and graph serialization.

    Covers two cases: (1) pickling a live session with trained
    variables, and (2) pickling a session whose graph was frozen
    (variables folded into constants) and re-imported.
    NOTE(review): uses the TF1-style API (tf.Session, tf.train.Saver) —
    incompatible with TF2 eager mode unless run under tf.compat.v1.
    '''
    tf.set_random_seed(0)  # make training deterministic across runs

    iris = load_iris()
    data = iris.data
    target = iris.target
    target_onehot = pd.get_dummies(target).values.astype(float)

    with tf.Graph().as_default():

        # test pickling a session with trained weights

        session = tf.Session()
        # _build_tf_model presumably trains the model and returns the
        # input/label placeholders plus the prediction tensor — confirm
        x, y, prediction = _build_tf_model(session, data, target_onehot)
        # reference predictions from the original session
        yhat = session.run([prediction], {x: data})[0]

        with TemporaryDirectory() as model_root:
            with AcumosContextManager(model_root) as context:
                model_path = context.build_path('model.pkl')
                with open(model_path, 'wb') as f:
                    dump_model(session, f)

                # only tensorflow-related packages should be detected
                assert {'acumos', 'dill',
                        'tensorflow'} == context.package_names

            with AcumosContextManager(model_root) as context:
                with open(model_path, 'rb') as f:
                    loaded_session = load_model(f)

            # look up tensors by name in the restored graph and re-run
            loaded_graph = loaded_session.graph
            loaded_prediction = loaded_graph.get_tensor_by_name(
                prediction.name)
            loaded_x = loaded_graph.get_tensor_by_name(x.name)
            loaded_yhat = loaded_session.run([loaded_prediction],
                                             {loaded_x: data})[0]

            # restored session/graph are new objects, not references
            assert loaded_session is not session
            assert loaded_graph is not session.graph
            assert (yhat == loaded_yhat).all()

        # tests pickling a session with a frozen graph

        with TemporaryDirectory() as frozen_root:
            save_path = path_join(frozen_root, 'model')

            # checkpoint the restored session so it can be frozen
            with loaded_session.graph.as_default():
                saver = tf.train.Saver()
                saver.save(loaded_session, save_path)

            # fold variables into constants, keeping the 'prediction' output
            frozen_path = _freeze_graph(frozen_root, ['prediction'])
            frozen_graph = _unfreeze_graph(frozen_path)
            frozen_session = tf.Session(graph=frozen_graph)

        with TemporaryDirectory() as model_root:
            with AcumosContextManager(model_root) as context:
                model_path = context.build_path('model.pkl')
                with open(model_path, 'wb') as f:
                    dump_model(frozen_session, f)

            with AcumosContextManager(model_root) as context:
                with open(model_path, 'rb') as f:
                    loaded_frozen_session = load_model(f)

            # same tensor-name lookup against the frozen round-trip
            loaded_frozen_graph = loaded_frozen_session.graph
            loaded_frozen_prediction = loaded_frozen_graph.get_tensor_by_name(
                prediction.name)
            loaded_frozen_x = loaded_frozen_graph.get_tensor_by_name(x.name)
            loaded_frozen_yhat = loaded_frozen_session.run(
                [loaded_frozen_prediction], {loaded_frozen_x: data})[0]

            assert loaded_frozen_session is not frozen_session
            assert loaded_frozen_graph is not frozen_session.graph
            assert (yhat == loaded_frozen_yhat).all()