Example #1
def test_nested_model():
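    # A Sequential model (model1) nested inside another Sequential model (model2)
    # should be flattened into three Linear operators by the converter.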
    model1 = keras.models.Sequential()
    model1.add(
        keras.layers.Dense(8,
                           use_bias=False,
                           activation=None,
                           input_shape=(4, )))

    model2 = keras.models.Sequential()
    model2.add(
        keras.layers.Dense(4,
                           use_bias=False,
                           activation=None,
                           input_shape=(2, )))
    model2.add(model1)
    model2.add(keras.layers.Dense(16, use_bias=False, activation=None))
    model2.build()

    graph = KerasConverter(batch_size=1).convert(model2)

    assert_equal(len(graph.inputs), 1)

    ops = traverse.listup_operators(graph)
    assert_equal(len(ops), 3)
    assert_equal(type(ops[0]), Linear)
    assert_equal(type(ops[1]), Linear)
    assert_equal(type(ops[2]), Linear)

    assert_equal(len(graph.outputs), 1)
Example #2
def test_use_same_layer_twice():
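    # The same Dense layer instance is added twice; the converter should still emit
    # one Linear operator per application (three in total).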
    model = keras.models.Sequential()
    model.add(
        keras.layers.Dense(4,
                           use_bias=False,
                           activation=None,
                           input_shape=(2, )))

    layer = keras.layers.Dense(4, use_bias=False, activation=None)

    model.add(layer)
    model.add(layer)

    model.build()

    graph = KerasConverter(batch_size=1).convert(model)

    assert_equal(len(graph.inputs), 1)

    ops = traverse.listup_operators(graph)
    assert_equal(len(ops), 3)
    assert_equal(type(ops[0]), Linear)
    assert_equal(type(ops[1]), Linear)
    assert_equal(type(ops[2]), Linear)
    assert_equal(len(graph.outputs), 1)
Example #3
def main():
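    # Convert Keras ResNet50 (ImageNet weights) into WebDNN graph descriptors
    # for the webgpu, webassembly, and fallback backends.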
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default="resnet50", choices=["resnet50"])
    parser.add_argument('--out',
                        '-o',
                        default='output_keras',
                        help='Directory to output the graph descriptor')
    parser.add_argument("--encoding", help="name of weight encoder")
    args = parser.parse_args()

    model = resnet50.ResNet50(include_top=True, weights='imagenet')

    sys.setrecursionlimit(10000)
    graph = KerasConverter(batch_size=1).convert(model)
    for backend in ["webgpu", "webassembly", "fallback"]:
        graph_exec_data = generate_descriptor(
            backend, graph, constant_encoder_name=args.encoding)
        graph_exec_data.save(args.out)

    console.stderr("Done.")
Example #4
def test_residual():
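    # A branching (residual) graph built with the functional API: two Dense branches
    # merged with add() should convert into three Linear operators and one ElementwiseAdd.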
    x = keras.layers.Input(shape=(4, ))
    h1 = keras.layers.Dense(8, use_bias=False, activation=None)(x)
    h21 = keras.layers.Dense(4, use_bias=False, activation=None)(h1)
    h22 = keras.layers.Dense(4, use_bias=False, activation=None)(h1)
    y = keras.layers.add([h21, h22])
    model = keras.models.Model([x], [y])
    model.build(input_shape=(1, 4))

    graph = KerasConverter(batch_size=1).convert(model)

    assert_equal(len(graph.inputs), 1)

    ops = traverse.listup_operators(graph)
    assert_equal(len(ops), 4)
    assert_equal(type(ops[0]), Linear)
    assert_equal(type(ops[1]), Linear)
    assert_equal(type(ops[2]), Linear)
    assert_equal(type(ops[3]), ElementwiseAdd)

    assert_equal(len(graph.outputs), 1)
Example #5
def main():
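    # CLI entry point: load a saved Keras model (optionally with plugin-defined custom
    # layers), resolve the input shape, and generate graph descriptors per backend.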
    sys.setrecursionlimit(10000)  # workaround for deep copying large graph
    parser = argparse.ArgumentParser()
    parser.add_argument("kerasmodel")
    parser.add_argument("--backend", default="webgpu,webassembly,fallback",
                        help="comma-separated list of backends")
    parser.add_argument("--input_shape", required=True,
                        help="shape of blobs for inputs (example: '(1,3,224,224)')")
    # parser.add_argument("--input_data_format", choices=["channels_first", "channels_last"])
    parser.add_argument("--out",
                        help="output directory (default: <model>/webdnn_graph_descriptor)")
    parser.add_argument("--encoding", help="name of weight encoder")
    parser.add_argument("--visualize_ir", action="store_true")
    parser.add_argument("--plugin", action="append", help="plugin python files which are imported before transpiling")
    args = parser.parse_args()

    console.stderr(f"[{path.basename(__file__)}] Generating feedforward graph")
    class_list = []
    if args.plugin:
        for plugin_path in args.plugin:
            class_list += _load_plugin(plugin_path)
    # custom_objects is a dictionary for load_model to resolve user-defined custom layers;
    # it must be defined even when no plugin is given, because load_model below always receives it.
    custom_objects = {}
    for k, v in class_list:
        custom_objects[k] = v

    input_shape, _ = Shape.parse(args.input_shape)
    input_shapes = [input_shape]

    model = keras.models.load_model(args.kerasmodel, custom_objects=custom_objects)
    model.build()
    converter = KerasConverter()
    graph = converter.convert(model)

    for graph_input, input_shape in zip(graph.inputs, input_shapes):
        for p1, p2 in zip(graph_input.shape, input_shape):
            if not Placeholder.check_resolved(p1) and Placeholder.check_resolved(p2):
                p1.value = Placeholder.force_int(p2)

            elif Placeholder.check_resolved(p1) and not Placeholder.check_resolved(p2):
                raise ValueError(f'Shape mismatch: {p1} != {p2}')

            elif Placeholder.check_resolved(p1) and Placeholder.check_resolved(p2):
                assert p1 == p2, f'Shape mismatch: {p1} != {p2}'

    if args.out:
        output_dir = args.out
    else:
        output_dir = path.join(path.dirname(args.kerasmodel), "webdnn_graph_descriptor")
    os.makedirs(output_dir, exist_ok=True)

    if args.visualize_ir:
        ir_dot_path = path.join(output_dir, "ir.dot")
        with open(ir_dot_path, "w") as f:
            f.write(dump_dot(graph))
        console.stderr(f"IR graph can be visualized with graphviz command: 'dot {ir_dot_path} -T png -o output.png'")

    console.stderr(f"[{path.basename(__file__)}] Generating graph descriptor")

    any_backend_failed = False
    backends = args.backend.split(",")
    for backend in backends:
        console.stderr(f"[{path.basename(__file__)}] Backend: {console.colorize(backend, console.Color.Cyan)}")
        try:
            graph_exec_data = generate_descriptor(backend, graph, constant_encoder_name=args.encoding)
            graph_exec_data.save(output_dir)
        except Exception as ex:
            if flags.DEBUG:
                raise ex

            any_backend_failed = True
            console.error(f"[{path.basename(__file__)}] Failed generating descriptor for {backend} backend")
            console.stderr(traceback.format_exc())
            continue

    if any_backend_failed:
        exit(1)
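
The script above calls a helper _load_plugin(plugin_path) that is not shown in this snippet. As a rough, hypothetical sketch (not WebDNN's actual implementation), a loader compatible with the "for k, v in class_list" usage could import the plugin file and return (name, class) pairs for every Keras Layer subclass it defines:

import importlib.util
import inspect

import keras


def _load_plugin_sketch(plugin_path):
    # Hypothetical helper for illustration only: import the plugin source file and
    # collect (name, class) pairs for each keras.layers.Layer subclass it defines,
    # so they can be passed to keras.models.load_model via custom_objects.
    spec = importlib.util.spec_from_file_location("webdnn_plugin", plugin_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)

    pairs = []
    for name, obj in inspect.getmembers(module, inspect.isclass):
        if issubclass(obj, keras.layers.Layer) and obj is not keras.layers.Layer:
            pairs.append((name, obj))
    return pairs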
Example #6
File: test.py  Project: zhangaz1/webdnn
import keras
import square

from webdnn.backend.fallback.generator import FallbackDescriptorGenerator
from webdnn.backend.webassembly.generator import WebassemblyDescriptorGenerator
from webdnn.backend.webgpu.generator import WebGPUDescriptorGenerator
from webdnn.frontend.keras.converter import KerasConverter

# ---------------------------------------------------------------------------------------------------
# Define Keras model

x = keras.layers.Input((10, ))
y = square.SquareLayer()(x)
model = keras.models.Model([x], [y])

# ---------------------------------------------------------------------------------------------------
# Convert Keras model into WebDNN graph IR
graph = KerasConverter(batch_size=1).convert(model)

# ---------------------------------------------------------------------------------------------------
# Generate graph descriptors
WebGPUDescriptorGenerator.generate(graph).save("./output")
WebassemblyDescriptorGenerator.generate(graph).save("./output")
FallbackDescriptorGenerator.generate(graph).save("./output")
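
The square module imported above defines SquareLayer, a user-defined Keras layer, but its source is not part of this snippet. A minimal sketch of such a layer, assuming it simply squares its input element-wise, might look like the following (the actual square.py used in the WebDNN custom-operator example may differ, and converting it also requires a converter handler to be registered with KerasConverter, which is omitted here):

import keras
import keras.backend as K


class SquareLayer(keras.layers.Layer):
    # Minimal custom-layer sketch: the output is the element-wise square of the input,
    # so the output shape is identical to the input shape.
    def call(self, x):
        return K.square(x)

    def compute_output_shape(self, input_shape):
        return input_shape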