import coremltools as ct

# Note: `user_assert` and `transform_shape` are helpers defined elsewhere in
# this module; `transform_shape` replaces range strings such as "1..50" with
# ct.RangeDim(1, 50).
def get_shape(inp: dict):
    if 'shape' not in inp:
        raise ValueError("shape key not found")
    shape = inp['shape']
    default_shape = inp.get('default_shape')
    if not isinstance(shape, list):
        raise ValueError("shape should be list, found {}, type {}".format(
            shape, type(shape)))

    list_items = sum(isinstance(s, list) for s in shape)
    is_enum_shapes = list_items == len(shape)
    # All or none of the shape elements should be lists:
    #   Regular shape: [1, 20, 20, 3]
    #   Enumerated shapes: [[1, 20, 20, 3], [1, 40, 40, 3]]
    user_assert(list_items == 0 or is_enum_shapes,
                "Cannot parse shape {}".format(shape))
    if is_enum_shapes:
        enum_shapes = [transform_shape(s) for s in shape]
        print("enum_shapes:", enum_shapes)
        return ct.EnumeratedShapes(shapes=enum_shapes, default=default_shape)
    # Replace string elements such as "1..50" with RangeDim(1, 50)
    tx_shape = transform_shape(shape)
    print("tx_shape:", tx_shape)
    return ct.Shape(shape=tx_shape, default=default_shape)
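
For illustration, a minimal sketch of how get_shape might be called; the dict layouts are assumptions based on the comments above, and transform_shape is the external helper that expands range strings:

# Hypothetical inputs for get_shape; transform_shape is assumed to map
# "25..100" to ct.RangeDim(25, 100).
fixed = get_shape({'shape': [1, 3, 224, 224]})                  # ct.Shape
ranged = get_shape({'shape': [1, 3, "25..100", "25..100"]})     # ct.Shape with RangeDims
enum = get_shape({'shape': [[1, 20, 20, 3], [1, 40, 40, 3]]})   # ct.EnumeratedShapes

# The following test methods are from a pytest suite; TestConvModule and
# _assert_torch_coreml_output_shapes are defined elsewhere in that suite.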
    def test_image_input_rangedim(self, convert_to):
        example_input = torch.rand(1, 3, 50, 50) * 255
        traced_model = torch.jit.trace(TestConvModule().eval(), example_input)

        input_shape = ct.Shape(shape=(1, 3, ct.RangeDim(25, 100, default=45),
                                      ct.RangeDim(25, 100, default=45)))
        model = ct.convert(traced_model,
                           inputs=[ct.ImageType(shape=input_shape)],
                           convert_to=convert_to)

        spec = model.get_spec()
        image_type = spec.description.input[0].type.imageType
        assert image_type.width == 45
        assert image_type.height == 45
        assert image_type.imageSizeRange.widthRange.lowerBound == 25
        assert image_type.imageSizeRange.widthRange.upperBound == 100
        _assert_torch_coreml_output_shapes(model,
                                           spec,
                                           traced_model,
                                           example_input,
                                           is_image_input=True)

    def test_multiarray_input_rangedim(self, convert_to):
        if convert_to == "mlprogram" and ct.utils._macos_version() < (12, 0):
            return  # ML programs require macOS 12 or later to load

        example_input = torch.rand(1, 3, 50, 50) * 100
        traced_model = torch.jit.trace(TestConvModule().eval(), example_input)

        input_shape = ct.Shape(shape=(1, 3, ct.RangeDim(25, 100, default=45),
                                      ct.RangeDim(25, 100, default=45)))
        model = ct.convert(traced_model,
                           inputs=[ct.TensorType(shape=input_shape)],
                           convert_to=convert_to)

        spec = model.get_spec()
        array_type = spec.description.input[0].type.multiArrayType
        assert list(array_type.shape) == [1, 3, 45, 45]
        assert array_type.shapeRange.sizeRanges[2].lowerBound == 25
        assert array_type.shapeRange.sizeRanges[2].upperBound == 100
        _assert_torch_coreml_output_shapes(model, spec, traced_model,
                                           example_input)
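
Both tests above exercise RangeDim; as a hypothetical counterpart (not part of the original suite), an EnumeratedShapes version might look like this:

    def test_multiarray_input_enumerated_shapes(self, convert_to):
        # Hypothetical sketch: EnumeratedShapes fixes a discrete set of legal
        # input shapes instead of a continuous range.
        example_input = torch.rand(1, 3, 50, 50) * 100
        traced_model = torch.jit.trace(TestConvModule().eval(), example_input)

        shapes = ct.EnumeratedShapes(shapes=[(1, 3, 50, 50), (1, 3, 100, 100)],
                                     default=(1, 3, 50, 50))
        model = ct.convert(traced_model,
                           inputs=[ct.TensorType(shape=shapes)],
                           convert_to=convert_to)

        spec = model.get_spec()
        # The default shape is expected to appear as the base multiArrayType shape.
        assert list(spec.description.input[0].type.multiArrayType.shape) == [
            1, 3, 50, 50
        ]
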
Example #4
import torch
import coremltools as ct

# BlazeFace and BlazeFaceScaled are assumed to be defined or imported
# elsewhere in this script (e.g. from the BlazeFace-PyTorch project).

bfModel = BlazeFace()
bfModel.load_weights("./blazeface.pth")
bfModel.load_anchors("./anchors.npy")

bfs = BlazeFaceScaled(bfModel)
bfs.eval()

traced_model = torch.jit.trace(bfs,
                               torch.rand(1, 3, 128, 128),
                               check_trace=True)
# print(traced_model)
mlmodel = ct.convert(traced_model,
                     inputs=[
                         ct.ImageType(name="image",
                                      shape=ct.Shape(shape=(
                                          1,
                                          3,
                                          128,
                                          128,
                                      )),
                                      bias=[-1, -1, -1],
                                      scale=1 / 127.5)
                     ])
# Save the converted Core ML model
mlmodel.save('../App/BlazeFace CoreML/BlazeFaceScaled.mlmodel')

print(mlmodel)

# result = mlmodel.predict({"betas_pose_trans": x, "v_personal": y}, usesCPUOnly=True)
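
Since the converted model takes an ImageType named "image", a prediction can be run directly on a PIL image; a minimal sketch (the file name is a placeholder):

from PIL import Image

test_img = Image.open("face.jpg").resize((128, 128))  # placeholder file name
out = mlmodel.predict({"image": test_img})  # key matches the ImageType name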
Example #5
import numpy as np
import tensorflow as tf
import coremltools as ct

# Convert to Core ML (`tf_model` is assumed to be loaded earlier in the script)
model = ct.convert([tf_model], source='tensorflow')

x = np.random.rand(1, 256, 256, 3)
# MLModel.predict takes a dict keyed by input name, not a list; the name
# "input_1" here is a stand-in for the model's actual input name.
coreml_out = model.predict({"input_1": x})

# convert functions
predict_model = tf.saved_model.load('./models/magenta_colab/predict_incepv3')
transfer_model = tf.saved_model.load('./models/magenta_colab/transfer_incepv3')
concrete_func = predict_model.signatures[
    tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
print(concrete_func.inputs)
concrete_func.inputs[0].set_shape([1, 256, 256, 3])

# Range for the height/width dimensions is "arbitrary"
input_shape = ct.Shape(shape=(1, ct.RangeDim(), ct.RangeDim(), 3))
model_input = ct.TensorType(shape=input_shape)

# Convert the model
predict_mlmodel = ct.convert(model=[concrete_func],
                             source='tensorflow',
                             inputs=[model_input])

# tf.saved_model.load() returns a restored object without Keras APIs such as
# .summary(); converting directly from the SavedModel directory avoids
# reloading the model here.
model = ct.convert('./models/magenta_colab/predict_incepv3',
                   source='tensorflow')


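A note on the unbounded dimensions used above: ct.RangeDim() with no arguments defaults to lower_bound=1 and upper_bound=-1, where -1 means no upper limit, so the explicit equivalent is:

# Equivalent to ct.RangeDim() above; -1 means the dimension is unbounded.
input_shape = ct.Shape(shape=(1, ct.RangeDim(1, -1), ct.RangeDim(1, -1), 3))
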
def convert_coreml_model_dynamic(saved_model_path,
Example #6
import os

import torch
import coremltools as ct
from torchvision import transforms

# `model`, `img`, `hint`, `opt`, and `util` come from the surrounding
# colorization script (not shown here).

# model(data)

# transforms.ToPILImage()(image[0]).show(command='fim')
# to_visualize = ['gray', 'hint', 'hint_ab', 'fake_entr',
#                 'real', 'fake_reg', 'real_ab', 'fake_ab_reg', ]

# visuals = util.get_subset_dict(
#     model.model.get_current_visuals(), to_visualize)

# for key, value in visuals.items():
#     print(key)
#     transforms.ToPILImage()(value[0]).show(command='fim')
output = model(img, hint)
output = util.lab2rgb(output, opt=opt)
transforms.ToPILImage()(output[0]).show(command='fim')

traced_model = torch.jit.trace(model, (img, hint), check_trace=False)

mlmodel = ct.convert(
    model=traced_model,
    inputs=[
        ct.TensorType(name="image",
                      shape=ct.Shape(shape=(1, 3, ct.RangeDim(1, 4096),
                                            ct.RangeDim(1, 4096)))),
        ct.TensorType(name="hint",
                      shape=ct.Shape(shape=(1, 3, ct.RangeDim(1, 4096),
                                            ct.RangeDim(1, 4096)))),
    ])
mlmodel.save("~/color.mlmodel")