import os

import keras

from webdnn.backend import generate_descriptor
from webdnn.frontend.keras import KerasConverter


def main():
    # `model_path` and `train_model` are assumed to be defined elsewhere in this script.
    if os.path.exists(model_path):
        model = keras.models.load_model(model_path)
    else:
        model = train_model()

    # Convert the Keras model to a WebDNN IR graph and save a WebGL graph descriptor.
    graph = KerasConverter(batch_size=1).convert(model)
    generate_descriptor('webgl', graph).save('./output')
Example #2
def generate_graph(model_type, output_dir):
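    # Load the cached Keras model for `model_type` (training and saving it first
    # if it is missing), then convert it to a WebDNN graph.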
    model_path = os.path.join(output_dir, f"keras_model/{model_type}.h5")
    sample_path = os.path.join(output_dir, "test_samples.json")

    if not os.path.exists(model_path):
        _train_and_save(model_type, model_path, sample_path)

    model = keras.models.load_model(model_path, compile=False)
    graph = KerasConverter(batch_size=1).convert(model)
    return model, graph
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default="resnet50", choices=["resnet50"])
    parser.add_argument('--out', '-o', default='output_keras',
                        help='Directory to output the graph descriptor')
    parser.add_argument("--encoding", help="name of weight encoder")
    parser.add_argument("--backend", default="webgpu,webassembly,fallback", help="backend")
    args = parser.parse_args()

    model = resnet50.ResNet50(include_top=True, weights='imagenet')

    sys.setrecursionlimit(10000)  # workaround for deep copying a large graph
    graph = KerasConverter(batch_size=1).convert(model)
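    # Generate and save a graph descriptor for each requested backend.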
    for backend in args.backend.split(","):
        graph_exec_data = generate_descriptor(backend, graph, constant_encoder_name=args.encoding)
        graph_exec_data.save(args.out)

    console.stderr("Done.")
Example #4
def generate_graph():
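    # Build a SqueezeNet model and convert it to a WebDNN IR graph.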
    model = SqueezeNet()
    graph = KerasConverter(batch_size=1).convert(model)

    return model, graph
Example #5
import keras

from webdnn.backend import generate_descriptor
from webdnn.frontend.keras import KerasConverter

model = keras.models.load_model(
    '/home/hlin/Ubuntu16VM_shared_folder/projects/colorization_main_net.h5')

models = [model]
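# Only one model here; the list/loop structure makes it easy to check several models in one run.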
for modelf in models:
    print('>>>>> {}'.format(modelf))
    # model = modelf(include_top=True, weights=None)

    # data_l = Input(shape=(256,256,1))
    # data_ab_mask = Input(shape=(256,256,3))
    # model = modelf(inputs = [data_l, data_ab_mask])
    model = modelf

    # model.save("resnet50.h5")
    # model.summary()

    try:
        graph = KerasConverter(batch_size=1).convert(model)
    except Exception as e:
        print('*********Failed on {}'.format(modelf))
        print(e)
        # Conversion failed: stop here instead of falling through to the
        # descriptor generation below, which would reference an undefined `graph`.
        exit(1)
    print('--------------------------------')

exec_info = generate_descriptor(
    "webgl", graph)  # "webgpu", "webassembly" and "fallback" are also available.

output_dir = "./keras_resnet50_output"
print("Saving to {}".format(output_dir))
exec_info.save(output_dir)
Example #6
import argparse
import os
import sys
import traceback
from os import path

import keras

# The webdnn import paths below assume WebDNN's package layout; adjust them to
# match your WebDNN version. `_load_plugin` is assumed to be defined earlier in
# this script.
from webdnn.backend import generate_descriptor
from webdnn.frontend.keras import KerasConverter
from webdnn.graph import traverse
from webdnn.graph.placeholder import Placeholder
from webdnn.graph.shape import Shape
from webdnn.graph.traverse import dump_dot
from webdnn.util import console, flags


def main():
    sys.setrecursionlimit(10000)  # workaround for deep copying large graph
    parser = argparse.ArgumentParser()
    parser.add_argument("kerasmodel")
    parser.add_argument("--backend",
                        default="webgpu,webgl,webassembly,fallback",
                        help="comma-separated list of backends")
    parser.add_argument("--input_shape", required=True, action="append",
                        help="shape of blobs for inputs (example: '(1,3,224,224)'), "
                             "can be specified multiple times")
    # parser.add_argument("--input_data_format", choices=["channels_first", "channels_last"])
    parser.add_argument(
        "--out",
        help="output directory (default: <model>/webdnn_graph_descriptor)")
    parser.add_argument("--encoding", help="name of weight encoder")
    parser.add_argument("--visualize_ir", action="store_true")
    parser.add_argument(
        "--plugin",
        action="append",
        help="plugin python files which are imported before transpiling")
    args = parser.parse_args()

    console.stderr(f"[{path.basename(__file__)}] Generating feedforward graph")
    class_list = []
    if args.plugin:
        for plugin_path in args.plugin:
            class_list += _load_plugin(plugin_path)
    custom_objects = {}
    if len(class_list) > 0:
        # custom_objects is a dictionary for load_model to load user-defined custom layers
        for k, v in class_list:
            custom_objects[k] = v

    # Parse each --input_shape string (e.g. "(1,3,224,224)") into a shape, which may contain placeholders.
    input_shapes = [
        Shape.parse(input_shape)[0] for input_shape in args.input_shape
    ]

    model = keras.models.load_model(args.kerasmodel,
                                    custom_objects=custom_objects,
                                    compile=False)
    model.build(input_shape=None)
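    # A Placeholder labeled 'N' keeps the batch dimension symbolic so it can be resolved at runtime.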
    converter = KerasConverter(batch_size=Placeholder(label='N'))
    graph = converter.convert(model)
    traverse.dump(graph)

    # Bind the user-supplied input shapes to unresolved placeholders in the graph
    # inputs, and check that dimensions which are already resolved agree.
    for graph_input, input_shape in zip(graph.inputs, input_shapes):
        for p1, p2 in zip(graph_input.shape, input_shape):
            if not Placeholder.check_resolved(p1) and Placeholder.check_resolved(p2):
                p1.value = Placeholder.force_int(p2)

            elif Placeholder.check_resolved(p1) and not Placeholder.check_resolved(p2):
                raise ValueError(f'Shape mismatch: expected:{input_shape}, real:{graph_input.shape}, {p1} != {p2}')

            elif Placeholder.check_resolved(p1) and Placeholder.check_resolved(p2):
                assert p1 == p2, f'Shape mismatch: expected:{input_shape}, real:{graph_input.shape}, {p1} != {p2}'

    if args.out:
        output_dir = args.out
    else:
        output_dir = path.join(path.dirname(args.kerasmodel),
                               "webdnn_graph_descriptor")
    os.makedirs(output_dir, exist_ok=True)

    if args.visualize_ir:
        ir_dot_path = path.join(output_dir, "ir.dot")
        with open(ir_dot_path, "w") as f:
            f.write(dump_dot(graph))
        console.stderr(
            f"IR graph can be visualized with graphviz command: 'dot {ir_dot_path} -T png -o output.png'"
        )

    console.stderr(f"[{path.basename(__file__)}] Generating graph descriptor")

    any_backend_failed = False
    backends = args.backend.split(",")
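    # Generate a descriptor per backend; a failure is logged and skipped so the remaining backends still run.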
    for backend in backends:
        console.stderr(
            f"[{path.basename(__file__)}] BackendName: {console.colorize(backend, console.Color.Cyan)}"
        )
        try:
            graph_exec_data = generate_descriptor(
                backend, graph, constant_encoder_name=args.encoding)
            graph_exec_data.save(output_dir)
        except Exception as ex:
            if flags.DEBUG:
                raise ex

            any_backend_failed = True
            console.error(
                f"[{path.basename(__file__)}] Failed generating descriptor for {backend} backend"
            )
            console.stderr(traceback.format_exc())
            continue

    if any_backend_failed:
        exit(1)
Example #7
def generate_graph():
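    # Build ResNet50 with pretrained ImageNet weights and convert it to a WebDNN IR graph.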
    model = resnet50.ResNet50(include_top=True, weights='imagenet')
    graph = KerasConverter(batch_size=1).convert(model)
    return model, graph