Example #1
    def test_non_ascii_variable_name_pipeline(self):

        data = dedent("""
            pclass,survived,name,sex,age,sibsp,parch,ticket,fare,cabin,embarked,boat,body,home.dest
            1,1,"A",female,29.0,0,0,24160,211.3375,B5,S,2,,"MO"
            1,1,"B",male,0.9167,1,2,113781,151.55,C22 C26,S,11,,"Can"
            1,0,"C",female,2.0,1,2,113781,151.55,C22 C26,S,,,"Can"
            1,0,"D",male,30.0,1,2,113781,151.55,C22 C26,S,,135.0,"Can"
            1,0,"E",female,25.0,1,2,113781,151.55,C22 C26,S,,,"Can"
            1,1,"F",male,48.0,0,0,19952,26.55,E12,S,3,,"NY"
            1,1,"G",female,63.0,1,0,13502,77.9583,D7,S,10,,"NY"
            1,0,"H",male,39.0,0,0,112050,0.0,A36,S,,,"NI"
            1,1,"I",female,53.0,2,0,11769,51.4792,C101,S,D,,"NY"
            1,0,"J",male,71.0,0,0,PC 17609,49.5042,,C,,22.0,"Uruguay"
            1,0,"K",male,47.0,1,0,PC 17757,227.525,C62 C64,C,,124.0,"NY"
            1,1,"L",female,18.0,1,0,PC 17757,227.525,C62 C64,C,4,,"NY"
            1,1,"M",female,24.0,0,0,PC 17477,69.3,B35,C,9,,"F"
            1,1,"N",female,26.0,0,0,19877,78.85,,S,6,,
            1,1,"L",male,80.0,0,0,27042,30.0,A23,S,B,,"Yorks"
            1,0,"O",male,,0,0,PC 17318,25.925,,S,,,"NY"
            1,0,"P",male,24.0,0,1,PC 17558,247.5208,B58 B60,C,,,"PQ"
            1,1,"Q",female,50.0,0,1,PC 17558,247.5208,B58 B60,C,6,,"PQ"
            1,1,"R",female,32.0,0,0,11813,76.2917,D15,C,8,,
            1,0,"S",male,36.0,0,0,13050,75.2417,C6,C,A,,"MN"
        """).strip(" \n")
        data = pd.read_csv(StringIO(data))
        data.rename(columns={"age": "年齢"}, inplace=True)
        X = data.drop('survived', axis=1)
        # y = data['survived']
        cols = ['embarked', 'sex', 'pclass', '年齢', 'fare']
        X = X[cols]
        for cat in ['embarked', 'sex', 'pclass']:
            # Assign back instead of calling fillna(inplace=True) on a selection,
            # which may not modify X and triggers chained-assignment warnings.
            X[cat] = X[cat].fillna('missing')
        numeric_features = ['年齢', 'fare']
        numeric_transformer = Pipeline(
            steps=[('imputer', SimpleImputer(
                strategy='median')), ('scaler', StandardScaler())])
        categorical_features = ['embarked', 'sex', 'pclass']
        categorical_transformer = Pipeline(
            steps=[('onehot', OneHotEncoder(handle_unknown='ignore'))])
        preprocessor = ColumnTransformer(
            transformers=[('num', numeric_transformer, numeric_features),
                          ('cat', categorical_transformer,
                           categorical_features)])
        preprocessor.fit_transform(X)
        initial_type = [('pclass', Int64TensorType(shape=[None, 1])),
                        ('sex', StringTensorType(shape=[None, 1])),
                        ('年齢', FloatTensorType(shape=[None, 1])),
                        ('fare', FloatTensorType(shape=[None, 1])),
                        ('embarked', StringTensorType(shape=[None, 1]))]

        onnx_object = convert_sklearn(preprocessor,
                                      initial_types=initial_type,
                                      target_opset=TARGET_OPSET)
        sess = InferenceSession(onnx_object.SerializeToString())
        self.assertIsNotNone(sess)
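
For reference, a minimal inference sketch (not part of the test above; it assumes sess and numpy as np from the test are still in scope and that the input names match the declared initial_types), feeding each declared column as an [n, 1] array:

# Hypothetical feed dictionary; the row values are illustrative only.
feeds = {
    'pclass': np.array([[1], [1]], dtype=np.int64),
    'sex': np.array([['female'], ['male']], dtype=object),
    '年齢': np.array([[29.0], [48.0]], dtype=np.float32),
    'fare': np.array([[211.3375], [26.55]], dtype=np.float32),
    'embarked': np.array([['S'], ['S']], dtype=object),
}
transformed = sess.run(None, feeds)[0]  # dense matrix produced by the ColumnTransformer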
Example #2
def _guess_type_proto_str(data_type, dims):
    # This could be moved to onnxconverter_common.
    if data_type == "tensor(float)":
        return FloatTensorType(dims)
    if data_type == "tensor(double)":
        return DoubleTensorType(dims)
    if data_type == "tensor(string)":
        return StringTensorType(dims)
    if data_type == "tensor(int64)":
        return Int64TensorType(dims)
    if data_type == "tensor(int32)":
        return Int32TensorType(dims)
    if data_type == "tensor(bool)":
        return BooleanTensorType(dims)
    if data_type == "tensor(int8)":
        return Int8TensorType(dims)
    if data_type == "tensor(uint8)":
        return UInt8TensorType(dims)
    if Complex64TensorType is not None:
        if data_type == "tensor(complex64)":
            return Complex64TensorType(dims)
        if data_type == "tensor(complex128)":
            return Complex128TensorType(dims)
    raise NotImplementedError(
        "Unsupported data_type '{}'. You may raise an issue "
        "at https://github.com/onnx/sklearn-onnx/issues."
        "".format(data_type))
Example #3
def _guess_type_proto(data_type, dims):
    # This could be moved to onnxconverter_common.
    for d in dims:
        if d == 0:
            raise RuntimeError("Dimension should not be null: {}.".format(
                list(dims)))
    if data_type == onnx_proto.TensorProto.FLOAT:
        return FloatTensorType(dims)
    if data_type == onnx_proto.TensorProto.DOUBLE:
        return DoubleTensorType(dims)
    if data_type == onnx_proto.TensorProto.STRING:
        return StringTensorType(dims)
    if data_type == onnx_proto.TensorProto.INT64:
        return Int64TensorType(dims)
    if data_type == onnx_proto.TensorProto.INT32:
        return Int32TensorType(dims)
    if data_type == onnx_proto.TensorProto.BOOL:
        return BooleanTensorType(dims)
    if data_type == onnx_proto.TensorProto.INT8:
        return Int8TensorType(dims)
    if data_type == onnx_proto.TensorProto.UINT8:
        return UInt8TensorType(dims)
    if Complex64TensorType is not None:
        if data_type == onnx_proto.TensorProto.COMPLEX64:
            return Complex64TensorType(dims)
        if data_type == onnx_proto.TensorProto.COMPLEX128:
            return Complex128TensorType(dims)
    raise NotImplementedError(
        "Unsupported data_type '{}'. You may raise an issue "
        "at https://github.com/onnx/sklearn-onnx/issues."
        "".format(data_type))
Example #4
def _declare_input_variables(topology, raw_model_container, extra_config):
    # Declare input variables.
    inputs = []
    n_inputs = extra_config[
        constants.N_INPUTS] if constants.N_INPUTS in extra_config else 1
    if constants.INPUT_NAMES in extra_config:
        assert n_inputs == len(extra_config[constants.INPUT_NAMES])
    if constants.TEST_INPUT in extra_config:
        from onnxconverter_common.data_types import (
            FloatTensorType,
            DoubleTensorType,
            Int32TensorType,
            Int64TensorType,
            StringTensorType,
        )

        test_input = extra_config[constants.TEST_INPUT] if n_inputs > 1 else [
            extra_config[constants.TEST_INPUT]
        ]
        for i in range(n_inputs):
            input = test_input[i]
            input_name = (extra_config[constants.INPUT_NAMES][i]
                          if constants.INPUT_NAMES in extra_config else
                          "input_{}".format(i))
            if input.dtype == np.float32:
                input_type = FloatTensorType(input.shape)
            elif input.dtype == np.float64:
                input_type = DoubleTensorType(input.shape)
            elif input.dtype == np.int32:
                input_type = Int32TensorType(input.shape)
            elif input.dtype == np.int64:
                input_type = Int64TensorType(input.shape)
            elif input.dtype.kind in constants.SUPPORTED_STRING_TYPES:
                input_type = StringTensorType(input.shape)
            else:
                raise NotImplementedError(
                    "Type {} not supported. Please fill an issue on https://github.com/microsoft/hummingbird/."
                    .format(input.dtype))
            inputs.append(
                topology.declare_logical_variable(input_name, type=input_type))
    else:
        # We have no information on the input. Sklearn/Spark-ML always gets as input a single dataframe,
        # therefore by default we start with a single `input` variable
        # In this branch TEST_INPUT is known to be absent, so check INPUT_NAMES
        # instead; otherwise the name would unconditionally fall back to "input".
        input_name = extra_config[constants.INPUT_NAMES][
            0] if constants.INPUT_NAMES in extra_config else "input"
        var = topology.declare_logical_variable(input_name)
        inputs.append(var)

    # The object raw_model_container is a part of the topology we're going to return.
    # We use it to store the inputs of the Sklearn/Spark-ML's computational graph.
    for variable in inputs:
        raw_model_container.add_input(variable)

    return inputs
Example #5
def _guess_numpy_type(data_type, dims):
    # This could be moved to onnxconverter_common.
    if data_type == np.float32:
        return FloatTensorType(dims)
    if data_type == np.float64:
        return DoubleTensorType(dims)
    if data_type in (np.str_, str, object) or str(data_type) in ('<U1', ) or (
            hasattr(data_type, 'type') and data_type.type is np.str_):  # noqa
        return StringTensorType(dims)
    if data_type in (np.int64, ) or str(data_type) == '<U6':
        return Int64TensorType(dims)
    if data_type in (np.int32, ) or str(data_type) in ('<U4', ):  # noqa
        return Int32TensorType(dims)
    if data_type == np.uint8:
        return UInt8TensorType(dims)
    if data_type in (np.bool_, bool):
        return BooleanTensorType(dims)
    if data_type in (np.str_, str):
        return StringTensorType(dims)
    if data_type == np.int8:
        return Int8TensorType(dims)
    if data_type == np.int16:
        return Int16TensorType(dims)
    if data_type == np.uint64:
        return UInt64TensorType(dims)
    if data_type == np.uint32:
        return UInt32TensorType(dims)
    if data_type == np.uint16:
        return UInt16TensorType(dims)
    if data_type == np.float16:
        return Float16TensorType(dims)
    if Complex64TensorType is not None:
        if data_type == np.complex64:
            return Complex64TensorType(dims)
        if data_type == np.complex128:
            return Complex128TensorType(dims)
    raise NotImplementedError(
        "Unsupported data_type %r (type=%r). You may raise an issue "
        "at https://github.com/onnx/sklearn-onnx/issues."
        "" % (data_type, type(data_type)))
Example #6
    def test_onnx_no_test_data_string(self):
        warnings.filterwarnings("ignore")
        model = OneHotEncoder()
        X = np.array([["a", "b", "c"]])
        model.fit(X)

        # Create ONNX-ML model
        onnx_ml_model = convert_sklearn(
            model, initial_types=[("input", StringTensorType([X.shape[0], X.shape[1]]))], target_opset=11
        )

        # Converting without test data should fail for string inputs
        self.assertRaises(RuntimeError, hummingbird.ml.convert, onnx_ml_model, "onnx")
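
For contrast, a hedged sketch of how representative test data would normally be supplied so the converter can infer the input schema (assuming the usual hummingbird.ml.convert(model, backend, test_input, ...) signature); whether this particular string model then converts cleanly depends on backend support:

hb_model = hummingbird.ml.convert(onnx_ml_model, "onnx", X)  # X serves as the test input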
Example #7
def _guess_type_proto(data_type, dims):
    # This could be moved to onnxconverter_common.
    if data_type == onnx_proto.TensorProto.FLOAT:
        return FloatTensorType(dims)
    elif data_type == onnx_proto.TensorProto.DOUBLE:
        return DoubleTensorType(dims)
    elif data_type == onnx_proto.TensorProto.STRING:
        return StringTensorType(dims)
    elif data_type == onnx_proto.TensorProto.INT64:
        return Int64TensorType(dims)
    elif data_type == onnx_proto.TensorProto.INT32:
        return Int32TensorType(dims)
    elif data_type == onnx_proto.TensorProto.BOOL:
        return BooleanTensorType(dims)
    else:
        raise NotImplementedError(
            "Unsupported data_type '{}'. You may raise an issue "
            "at https://github.com/onnx/sklearn-onnx/issues."
            "".format(data_type))
Example #8
def _guess_numpy_type(data_type, dims):
    # This could be moved to onnxconverter_common.
    if data_type == np.float32:
        return FloatTensorType(dims)
    elif data_type in (np.str_, str,
                       object) or str(data_type) in ('<U1', ):  # noqa
        return StringTensorType(dims)
    elif data_type in (np.int64, np.uint64) or str(data_type) == '<U6':
        return Int64TensorType(dims)
    elif data_type in (np.int32,
                       np.uint32) or str(data_type) in ('<U4', ):  # noqa
        return Int32TensorType(dims)
    elif data_type in (np.bool_, bool):
        return BooleanTensorType(dims)
    else:
        raise NotImplementedError(
            "Unsupported data_type '{}'. You may raise an issue "
            "at https://github.com/onnx/sklearn-onnx/issues."
            "".format(data_type))
Example #9
    def from_pb(obj):
        """
        Creates a data type from a protobuf object.
        """
        def get_shape(tt):
            return [
                tt.shape.dim[i].dim_value for i in range(len(tt.shape.dim))
            ]

        if hasattr(obj, 'extend'):
            return [Variable.from_pb(o) for o in obj]
        name = obj.name
        if obj.type.tensor_type:
            tt = obj.type.tensor_type
            elem = tt.elem_type
            shape = get_shape(tt)
            if elem == onnx_proto.TensorProto.FLOAT:
                ty = FloatTensorType(shape)
            elif elem == onnx_proto.TensorProto.BOOL:
                ty = BooleanTensorType(shape)
            elif elem == onnx_proto.TensorProto.DOUBLE:
                ty = DoubleTensorType(shape)
            elif elem == onnx_proto.TensorProto.STRING:
                ty = StringTensorType(shape)
            elif elem == onnx_proto.TensorProto.INT64:
                ty = Int64TensorType(shape)
            elif elem == onnx_proto.TensorProto.INT32:
                ty = Int32TensorType(shape)
            else:
                raise NotImplementedError("Unsupported type '{}' "
                                          "(elem_type={}).".format(
                                              type(obj.type.tensor_type),
                                              elem))
        else:
            raise NotImplementedError("Unsupported type '{}' as "
                                      "a string ({}).".format(type(obj), obj))

        return Variable(name, name, None, ty)
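
A hypothetical usage sketch: applying from_pb to the inputs of a loaded model (the file path is illustrative); because of the hasattr(obj, 'extend') branch, passing the repeated graph.input field returns a list of Variable objects:

model = onnx.load("model.onnx")
variables = Variable.from_pb(model.graph.input)
for var in variables:
    print(var.raw_name, var.type)  # attribute names assumed from skl2onnx's Variable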
Example #10
import joblib
import lightgbm as lgb
import onnx
import xgboost as xgb
from environs import Env  # assumption: Env comes from the environs package
from skl2onnx import convert_sklearn
from onnxconverter_common.data_types import FloatTensorType, StringTensorType
from onnxmltools.convert import convert_xgboost, convert_lightgbm, \
    convert_tensorflow

env = Env()
TRANS_PATH = env.str('TRANS_PATH', './outputs/trans')
MODEL_PATH_XGB = env.str('MODEL_PATH_XGB', './outputs/xgb')
MODEL_PATH_LGB = env.str('MODEL_PATH_LGB', './outputs/lgb')
MODEL_PATH_DCN = env.str('MODEL_PATH_DCN', './outputs/dcn')
ONNX_TRANS_PATH = env.str('ONNX_TRANS_PATH', './outputs/trans.onnx')
ONNX_MODEl_PATH_XGB = env.str('ONNX_MODEl_PATH_XGB', './outputs/xgb.onnx')
ONNX_MODEl_PATH_LGB = env.str('ONNX_MODEl_PATH_LGB', './outputs/lgb.onnx')
ONNX_MODEl_PATH_DCN = env.str('ONNX_MODEl_PATH_DCN', './outputs/dcn.onnx')

trans_initial_type = [('num_feat', FloatTensorType([None, 13])),
                      ('cat_feat', StringTensorType([None, 26]))]
model_initial_type = [('num_feat', FloatTensorType([None, 39]))]

print('convert sklearn transformer')
trans = joblib.load(TRANS_PATH)
onx = convert_sklearn(trans, initial_types=trans_initial_type)
onnx.save(onx, ONNX_TRANS_PATH)

print('convert XGBoost model')
model = xgb.XGBClassifier()
model.load_model(MODEL_PATH_XGB)
onx = convert_xgboost(model, initial_types=model_initial_type)
onnx.save(onx, ONNX_MODEl_PATH_XGB)

print('convert LightGBM model')
model = lgb.Booster(model_file=MODEL_PATH_LGB)