def test_script_req():
    '''Tests that Python scripts can be included using Requirements'''
    def predict(x: int) -> int:
        return x

    model = Model(predict=predict)
    model_name = 'my-model'

    # tests that individual script and directory of scripts are both gathered
    reqs = Requirements(scripts=_abspath('./user_module.py', './user_package'))

    with _dump_model(model, model_name, reqs) as dump_dir:
        _verify_files(dump_dir,
                      ('scripts/user_provided/user_package_module.py',
                       'scripts/user_provided/__init__.py',
                       'scripts/user_provided/user_module.py'))

    bad_reqs = Requirements(
        scripts=_abspath('./user_module.py', './user_package', 'not_real.py'))

    with pytest.raises(AcumosError, match='does not exist'):
        with _dump_model(model, model_name, bad_reqs) as dump_dir:
            pass

    bad_reqs = Requirements(
        scripts=_abspath('./user_module.py', './user_package', './att.png'))

    with pytest.raises(AcumosError, match='is invalid'):
        with _dump_model(model, model_name, bad_reqs) as dump_dir:
            pass
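
The `_abspath` and `_verify_files` helpers used above are test-suite utilities, not part of the acumos API. A minimal sketch of what they might look like, assuming fixture files live alongside the test module:

from os.path import abspath, dirname, isfile, join as path_join

_TEST_DIR = dirname(abspath(__file__))  # assumed location of test fixtures


def _abspath(*rel_paths):
    '''Resolves paths relative to the test directory (hypothetical helper)'''
    return tuple(path_join(_TEST_DIR, p) for p in rel_paths)


def _verify_files(dump_dir, rel_paths):
    '''Asserts that each expected file exists within the model dump (hypothetical helper)'''
    for rel_path in rel_paths:
        assert isfile(path_join(dump_dir, rel_path)), '{} is missing'.format(rel_path)
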
def test_dump_model():
    '''Tests dump model utility, including generated artifacts'''
    def predict(x: int) -> int:
        return user_function(x)

    model = Model(predict=predict)
    model_name = 'my-model'

    reqs = Requirements(reqs=['wronglib'],
                        req_map={'wronglib': 'scipy'},
                        packages=[_USER_PACKAGE_DIR])

    with _dump_model(model, model_name, reqs) as dump_dir:

        assert set(listdir(dump_dir)) == set(_REQ_FILES)

        metadata = load_artifact(dump_dir,
                                 'metadata.json',
                                 module=json,
                                 mode='r')
        schema = _load_schema(SCHEMA_VERSION)
        validate(metadata, schema)

        # test that a user-provided library was included and correctly mapped
        assert 'scipy' in {
            r['name']
            for r in metadata['runtime']['dependencies']['pip']['requirements']
        }

        # test that custom package was bundled
        _verify_files(
            dump_dir,
            ('scripts/user_provided/user_package/user_package_module.py',
             'scripts/user_provided/user_package/__init__.py',
             'scripts/user_provided/user_module.py'))
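
`load_artifact` is likewise a test helper. A plausible sketch, assuming it deserializes a named artifact from the dump directory with the given serialization module, and that `path_join` is `os.path.join` as imported above:

def load_artifact(dump_dir, name, module, mode):
    '''Deserializes a dumped artifact, e.g. with the json module (hypothetical helper)'''
    with open(path_join(dump_dir, name), mode) as f:
        return module.load(f)
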
def test_custom_package():
    '''Tests that custom packages can be included, wrapped, and loaded'''
    def my_transform(x: int, y: int) -> int:
        return user_package_module.add_numbers(x, y)

    model = Model(transform=my_transform)
    model_name = 'my-model'

    # load should fail without requirements
    with pytest.raises(
            Exception,
            match='Module user_package was detected as a dependency'):
        with _dump_model(model, model_name) as dump_dir:
            pass

    reqs = Requirements(packages=[_USER_PACKAGE_DIR])

    with _dump_model(model, model_name, reqs) as dump_dir:
        run_command(
            [sys.executable, _MODEL_LOADER_HELPER, dump_dir, 'user_package'])

def test_custom_script():
    '''Tests that custom modules can be included, wrapped, and loaded'''
    def predict(x: int) -> int:
        return user_function(x)

    model = Model(predict=predict)
    model_name = 'my-model'

    with _dump_model(model, model_name) as dump_dir:
        run_command(
            [sys.executable, _MODEL_LOADER_HELPER, dump_dir, 'user_module'])
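
`_MODEL_LOADER_HELPER` points to a small script executed in a subprocess by `run_command`. A hedged sketch of what such a script might contain, assuming its job is to load the dumped model and confirm that the named user module was imported along with it:

# model_loader_helper.py (hypothetical) -- usage:
#   python model_loader_helper.py <dump_dir> <module_name>
import sys

from acumos.wrapped import load_model

if __name__ == '__main__':
    dump_dir, module_name = sys.argv[1:3]
    load_model(dump_dir)  # loading the model should import bundled user code
    assert module_name in sys.modules, '{} was not loaded'.format(module_name)
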
Example #5
def test_raw_type(func, f_in, f_out, in_media_type, out_media_type, in_is_raw,
                  out_is_raw):
    '''Tests that models with supported raw data types work correctly'''
    model = Model(transform=func)
    model_name = 'my-model'

    with TemporaryDirectory() as tdir:
        with _dump_model(model, model_name) as dump_dir:
            _copy_dir(dump_dir, tdir, model_name)

        copied_dump_dir = path_join(tdir, model_name)
        metadata_file_path = path_join(copied_dump_dir, 'metadata.json')

        with open(metadata_file_path) as metadata_file:
            metadata_json = json.load(metadata_file)

            methods = metadata_json['methods']['transform']
            assert methods['input']['media_type'] == in_media_type
            assert methods['output']['media_type'] == out_media_type

        wrapped_model = load_model(copied_dump_dir)
        if in_is_raw:
            wrapped_return = wrapped_model.transform.from_raw(f_in)
        else:
            arguments = model.transform.input_type(*f_in)
            arguments_pb_msg = _pack_pb_msg(arguments,
                                            wrapped_model.transform._module)
            wrapped_return = wrapped_model.transform.from_pb_msg(
                arguments_pb_msg)

        if out_is_raw:
            ret = wrapped_return.as_raw()
        else:
            ret_pb_msg = wrapped_return.as_pb_msg()
            ret = _unpack_pb_msg(model.transform.output_type, ret_pb_msg).value

        assert ret == f_out
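
`test_raw_type` is written for `pytest.mark.parametrize`; the decorator is omitted in this listing. A hedged example of one parameter row it could accept, assuming plain `bytes` is treated as a raw type with an 'application/octet-stream' media type:

def repeat_bytes(data: bytes) -> bytes:
    '''Raw transform: bytes in, bytes out (hypothetical test case)'''
    return data * 2


# (func, f_in, f_out, in_media_type, out_media_type, in_is_raw, out_is_raw)
_RAW_BYTES_CASE = (repeat_bytes, b'ab', b'abab',
                   'application/octet-stream', 'application/octet-stream',
                   True, True)
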
Example #6
def _generic_test(func,
                  in_,
                  out,
                  wrapped_eq=eq,
                  pb_mg_eq=eq,
                  pb_bytes_eq=eq,
                  dict_eq=eq,
                  json_eq=eq,
                  preload=None,
                  reqs=None,
                  skip=None):
    '''Reusable wrap test routine with swappable equality functions'''

    model = Model(transform=func)
    model_name = 'my-model'

    with TemporaryDirectory() as tdir:
        with _dump_model(model, model_name, reqs) as dump_dir:
            _copy_dir(dump_dir, tdir, model_name)

        if preload is not None:
            preload()

        copied_dump_dir = path_join(tdir, model_name)
        wrapped_model = load_model(copied_dump_dir)

        TransIn = model.transform.input_type
        TransOut = model.transform.output_type

        trans_in = TransIn(*in_)
        trans_out = TransOut(*out)

        trans_in_pb = _pack_pb_msg(trans_in, wrapped_model.transform._module)
        trans_out_pb = _pack_pb_msg(trans_out, wrapped_model.transform._module)

        trans_in_pb_bytes = trans_in_pb.SerializeToString()
        trans_out_pb_bytes = trans_out_pb.SerializeToString()

        trans_in_dict = MessageToDict(trans_in_pb)
        trans_out_dict = MessageToDict(trans_out_pb)

        trans_in_json = MessageToJson(trans_in_pb, indent=0)
        trans_out_json = MessageToJson(trans_out_pb, indent=0)

        # test all from / as combinations
        for as_method_name, as_data_expected, eq_func in (
                ('as_wrapped', trans_out, wrapped_eq),
                ('as_pb_msg', trans_out_pb, pb_mg_eq),
                ('as_pb_bytes', trans_out_pb_bytes, pb_bytes_eq),
                ('as_dict', trans_out_dict, dict_eq),
                ('as_json', trans_out_json, json_eq)):
            for from_method_name, from_data in (
                    ('from_wrapped', trans_in),
                    ('from_pb_msg', trans_in_pb),
                    ('from_pb_bytes', trans_in_pb_bytes),
                    ('from_dict', trans_in_dict),
                    ('from_json', trans_in_json)):

                if skip is not None and skip(as_method_name, from_method_name):
                    logger.info("Skipping {} -> {}".format(
                        from_method_name, as_method_name))
                    continue

                from_method = getattr(wrapped_model.transform,
                                      from_method_name)
                resp = from_method(from_data)
                as_data_method = getattr(resp, as_method_name)
                as_data = as_data_method()
                assert eq_func(as_data, as_data_expected)
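
A hedged usage example for `_generic_test`: wrapping a simple typed function and exercising every from/as combination with the default equality checks:

def test_wrap_add():
    '''Round-trips a trivial model through all from/as conversions'''
    def add(x: int, y: int) -> int:
        return x + y

    _generic_test(add, in_=(1, 2), out=(3, ))
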
def keras_evaluate(config):
    taskComplete = False
    useSklearn = True
    listImages = []

    if 'image_list' in config and config['image_list']:
        dfImages = pd.read_csv(config['image_list'], header=None, names=['file'], delimiter=",")
        listImages = dfImages['file'].tolist()
        config['image'] = listImages[0]
    X = create_sample(config['image'])

    if useSklearn:
        # formulate the pipeline to be used
        from image_classifier.keras_model.prediction_formatter import Formatter
        model, reqs = model_create_pipeline(config['model_path'], config['label_path'],
                                            config['num_top_predictions'])

        if 'push_address' in config and 'auth_address' in config and config['push_address']:
            from acumos.session import AcumosSession
            session = AcumosSession(push_api=config['push_address'], auth_api=config['auth_address'])
            print("Pushing new model to upload '{:}', auth '{:}'...".format(config['push_address'], config['auth_address']))
            session.push(model, MODEL_NAME, reqs)  # uploads the model to the Acumos instance
            taskComplete = True

        if 'dump_model' in config and config['dump_model']:
            from acumos.session import AcumosSession
            import os
            if not os.path.exists(config['dump_model']):
                os.makedirs(config['dump_model'])
            print("Dumping new model to '{:}'...".format(config['dump_model']))
            session = AcumosSession()
            session.dump(model, MODEL_NAME, config['dump_model'], reqs)  # writes the model dump under that directory
            taskComplete = True

        preds = None
        if not taskComplete:   # means we need to run a prediction/classify
            import tempfile
            from acumos.session import _dump_model, _copy_dir
            from os.path import join as path_join
            from acumos.wrapped import load_model

            if not listImages:
                listImages = [config['image']]

            # temporarily wrap model to a temp directory (to get 'wrapped' functionality)
            with tempfile.TemporaryDirectory() as tdir:   # create temp dir
                with _dump_model(model, MODEL_NAME, reqs) as dump_dir:  # dump model to temp dir
                    _copy_dir(dump_dir, tdir, MODEL_NAME)   # relocate for load_model below

                model_dir = path_join(tdir, MODEL_NAME)
                wrapped_model = load_model(model_dir)  # load to wrapped model
                type_in = wrapped_model.classify._input_type

                for idx, curImage in enumerate(listImages):
                    print("Attempting classification of image [{:}]: {:}...".format(idx, curImage))

                    X = create_sample(curImage)
                    classify_in = type_in(*tuple(col for col in X.values.T))
                    pred_raw = wrapped_model.classify.from_wrapped(classify_in).as_wrapped()
                    # already a wrapped response
                    predNew = pd.DataFrame(np.column_stack(pred_raw), columns=pred_raw._fields)
                    predNew[Formatter.COL_NAME_IDX] = idx
                    if preds is None:
                        preds = predNew
                    else:
                        preds = pd.concat([preds, predNew], ignore_index=True)
                preds.reset_index(drop=True, inplace=True)

    """
    Disable non-sklearn path for now
    else:
        from image_classifier.keras import inception_v4
        from image_classifier.keras.image_decoder import ImageDecoder

        # Load test image!
        img = ImageDecoder.get_processed_image_keras_file(config['image'])  # load image through keras
        # img = evaluate_image.get_processed_image_cv(config['image'])

        # Run prediction on test image
        model, model_path = inception_v4.create_model(weights='imagenet', include_top=True, model_path=model_path)
        preds = model.predict(img)
    """

    return preds
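
A hedged example of driving `keras_evaluate` for local classification only (no push or dump), with hypothetical paths:

config = {
    'image': 'data/example.jpg',                 # hypothetical test image
    'image_list': None,                          # or a CSV listing one image path per line
    'model_path': 'model/inception_v4.h5',       # hypothetical Keras weights file
    'label_path': 'data/keras_class_names.txt',  # hypothetical label file
    'num_top_predictions': 5,
}
preds = keras_evaluate(config)  # returns a DataFrame of top predictions per image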