Example #1
def main():
    sys.setrecursionlimit(10000)  # workaround for deep copying large graph

    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default="resnet50", choices=["resnet50"])
    parser.add_argument('--out',
                        '-o',
                        default='output_tensorflow',
                        help='Directory to output the graph descriptor')
    parser.add_argument("--encoding", help="name of weight encoder")
    parser.add_argument("--backend",
                        default="webgpu,webgl,webassembly,fallback",
                        help="backend")
    args = parser.parse_args()

    os.makedirs(args.out, exist_ok=True)
    slim_dir = os.path.join(args.out, "models/slim")
    if not os.path.exists(slim_dir):
        clone_slim(args.out)

    model_path = download_model(args.out)

    sys.path.append(slim_dir)
    from nets import resnet_v1
    image_size = resnet_v1.resnet_v1.default_image_size

    checkpoints_dir = args.out
    sess = tf.Session()
    processed_images = tf.placeholder(tf.float32,
                                      [1, image_size, image_size, 3])

    # Create the model, use the default arg scope to configure the batch norm parameters.
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        logits, _ = resnet_v1.resnet_v1_50(processed_images,
                                           num_classes=1000,
                                           is_training=False)
    probabilities = tf.nn.softmax(logits)

    init_fn = slim.assign_from_checkpoint_fn(model_path,
                                             slim.get_model_variables())

    init_fn(sess)

    graph = TensorFlowConverter(sess, batch_size=1).convert([processed_images],
                                                            [probabilities])

    from webdnn.graph import traverse
    traverse.dump(graph)

    for backend in args.backend.split(","):
        graph_exec_data = generate_descriptor(
            backend, graph, constant_encoder_name=args.encoding)
        graph_exec_data.save(args.out)

    console.stderr("Done.")
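The example above is not self-contained: its import block was stripped from the excerpt. A plausible reconstruction is sketched below; the webdnn module paths (TensorFlowConverter, generate_descriptor, console) are assumptions based on WebDNN's frontend/backend layout, and slim comes from tensorflow.contrib in TF 1.x.

# Assumed imports for the example above (module paths are best guesses, not the original header)
import argparse
import os
import sys

import tensorflow as tf
import tensorflow.contrib.slim as slim  # TF 1.x only

from webdnn.backend import generate_descriptor  # assumed WebDNN API path
from webdnn.frontend.tensorflow import TensorFlowConverter
from webdnn.util import console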
Example #2
def validate_kernel_source(descriptor: GraphDescriptor):
    # FIXME: WebGPU supports multiple shading languages, but this validation assumes the source is Metal.

    source = descriptor.concat_kernel_sources()

    if os.name != 'posix':
        # 'xcrun' is only available on macOS (where os.name is 'posix'),
        # so skip validation on other platforms.
        console.warning(
            "[WebGPUDescriptorGenerator] 'xcrun' command is not available. Validation of the generated WebGPU "
            "kernel source is skipped.")
        return

    with tmp.TemporaryDirectory() as tmpdir:
        source_path = path.join(tmpdir, "kernel.metal")
        lib_path = path.join(tmpdir, "kernel.air")

        with open(source_path, "w+") as f:
            f.write(source)

        try:
            result = subprocess.run(
                ["xcrun", "-sdk", "macosx", "metal", source_path, "-o", lib_path],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)

            if result.returncode == 0:
                if result.stderr == b"":
                    console.debug(
                        "[WebGPUDescriptorGenerator] Generated kernel source is valid."
                    )

                else:
                    console.warning(
                        "[WebGPUDescriptorGenerator] Warnings were emitted while validating the generated kernel source."
                    )
                    console.stderr(result.stderr.decode("utf-8"))

            else:
                console.error(
                    "[WebGPUDescriptorGenerator] Generated kernel source is invalid."
                )
                console.stderr(result.stderr.decode("utf-8"))
                exit(result.returncode)

        except FileNotFoundError:
            console.warning(
                "[WebGPUDescriptorGenerator] 'xcrun' command is not found. Validation of the generated WebGPU "
                "kernel source is skipped.")
            return
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--out', '-o', default='output_keras', help='Directory to output the graph descriptor')
    parser.add_argument("--encoding", help="name of weight encoder")
    parser.add_argument("--backend", default="webgpu,webgl,webassembly,fallback", help="backend")
    args = parser.parse_args()

    _, graph = generate_graph()

    for backend in args.backend.split(","):
        graph_exec_data = generate_descriptor(backend, graph, constant_encoder_name=args.encoding)
        graph_exec_data.save(args.out)

    console.stderr("Done.")
Example #4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default="resnet50", choices=["resnet50"])
    parser.add_argument('--out', '-o', default='output_keras',
                        help='Directory to output the graph descriptor')
    parser.add_argument("--encoding", help="name of weight encoder")
    parser.add_argument("--backend", default="webgpu,webassembly,fallback", help="backend")
    args = parser.parse_args()

    model = resnet50.ResNet50(include_top=True, weights='imagenet')

    sys.setrecursionlimit(10000)  # workaround for deep copying large graph
    graph = KerasConverter(batch_size=1).convert(model)
    for backend in args.backend.split(","):
        graph_exec_data = generate_descriptor(backend, graph, constant_encoder_name=args.encoding)
        graph_exec_data.save(args.out)

    console.stderr("Done.")
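This Keras example likewise omits its imports. A minimal sketch of the likely import block follows; the webdnn paths are assumptions, and resnet50 is the keras.applications module.

# Assumed imports for the Keras ResNet-50 example above
import argparse
import sys

from keras.applications import resnet50

from webdnn.backend import generate_descriptor  # assumed WebDNN API path
from webdnn.frontend.keras import KerasConverter
from webdnn.util import console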
Example #5
def download_model(out_dir):
    import urllib.request
    model_dir = os.path.join(out_dir, "pretrained_model")
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    model_path = os.path.join(model_dir, PRETRAINED_MODEL_FILENAME)
    if not os.path.exists(model_path):
        console.stderr(f"Downloading ResNet pretrained model...")
        tgz_filename, _ = urllib.request.urlretrieve(PRETRAINED_MODEL_URL)
        console.stderr(f"Extracting ResNet pretrained model (tar.gz)...")
        subprocess.check_call(["tar", "xf", tgz_filename], cwd=model_dir)
        os.remove(tgz_filename)
    else:
        console.stderr(
            f"Using already downloaded pretrained model {model_path}")
    return model_path
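download_model refers to PRETRAINED_MODEL_URL and PRETRAINED_MODEL_FILENAME, which are defined elsewhere in the original script. Assuming the standard TF-slim ResNet v1 50 checkpoint, plausible values are:

# Assumed constants used by download_model (verify against the original script)
PRETRAINED_MODEL_URL = "http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz"
PRETRAINED_MODEL_FILENAME = "resnet_v1_50.ckpt"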
Example #6
def main():
    sys.setrecursionlimit(10000)  # workaround for deep copying large graph
    parser = argparse.ArgumentParser()
    # e.g. CaffeNet from the Caffe examples
    parser.add_argument("caffemodel")
    parser.add_argument("--backend", default="webgpu,webassembly,fallback",
                        help="comma-separated list of backends")
    parser.add_argument("--input_name",
                        help="blob name for input (mandatory)")
    parser.add_argument("--input_shape",
                        help="shape of blobs for inputs (example: '(1,3,224,224)')")
    parser.add_argument("--input_npy",
                        help="npy file containing sample inputs")
    parser.add_argument("--output_names", required=True,
                        help="comma-separated blob name for output (mandatory)")
    parser.add_argument("--out",
                        help="output directory (default: <model>/webdnn_graph_descriptor)")
    parser.add_argument("--encoding", help="name of weight encoder")
    args = parser.parse_args()

    # multiple input blobs could easily be supported, but the command-line arguments would become complicated.
    input_blob, input_filled = parse_input_blob(args)
    output_names = args.output_names.split(",")

    console.stderr("[convert_caffe] Loading caffe model... (usually takes several minutes)")
    link = chainer.links.caffe.CaffeFunction(args.caffemodel)

    console.stderr("[convert_caffe] Generating feedforward graph")
    if chainer.__version__ >= "2.":
        # chainer.using_config is a context manager; it must wrap the forward pass
        # for the "train" flag to take effect
        with chainer.using_config("train", False):
            output_blobs = list(
                link(inputs={args.input_name: input_blob}, outputs=output_names))  # list of Variable
    else:
        output_blobs = list(
            link(inputs={args.input_name: input_blob}, outputs=output_names, train=False))  # list of Variable
    chainer_cg = chainer.computational_graph.build_computational_graph(output_blobs)
    converter = ChainerConverter()
    graph = converter.convert(chainer_cg, [input_blob], output_blobs)  # type: Graph

    if args.out:
        output_dir = args.out
    else:
        output_dir = path.join(path.dirname(args.caffemodel), "webdnn_graph_descriptor")
    os.makedirs(output_dir, exist_ok=True)

    if input_filled:
        # save output of Caffe Network (not required for inference)
        output_arrays = {output_name: output_blob.data for output_name, output_blob in zip(output_names, output_blobs)}
        np.savez(path.join(output_dir, "example_output.npz"), **output_arrays)

    console.stderr("[convert_caffe] Generating descriptors")
    any_backend_failed = False
    for backend in args.backend.split(","):
        try:
            graph_exec_data = generate_descriptor(backend, graph, constant_encoder_name=args.encoding)
            graph_exec_data.save(output_dir)
        except Exception as ex:
            any_backend_failed = True
            console.error(f"[convert_caffe] Failed generating descriptor for backend {backend}: {str(ex)}")

    if any_backend_failed:
        sys.exit(1)
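parse_input_blob is referenced above but not included in this excerpt. A hypothetical sketch, inferred only from the --input_shape and --input_npy arguments, could look like the following; the real helper may differ.

# Hypothetical sketch of parse_input_blob (not from the original source).
# Returns the input Variable and whether it was filled with real sample data.
import ast

import chainer
import numpy as np


def parse_input_blob(args):
    if args.input_npy:
        data = np.load(args.input_npy).astype(np.float32)
        input_filled = True
    else:
        shape = ast.literal_eval(args.input_shape)  # e.g. "(1,3,224,224)"
        data = np.zeros(shape, dtype=np.float32)
        input_filled = False
    return chainer.Variable(data), input_filled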
Example #7
def clone_slim(out_dir):
    console.stderr(f"Git cloning {SLIM_GITHUB} into {out_dir}...")
    subprocess.check_call(["git", "clone", SLIM_GITHUB, "models"], cwd=out_dir)
    subprocess.check_call(["git", "checkout", SLIM_COMMIT],
                          cwd=os.path.join(out_dir, './models'))
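clone_slim uses SLIM_GITHUB and SLIM_COMMIT from elsewhere in the original script. A sketch of plausible values follows; the URL is the TensorFlow models repository that contained the slim nets, and the commit hash is left as a placeholder rather than guessed.

# Assumed constants for clone_slim (commit hash is a placeholder, not the original value)
SLIM_GITHUB = "https://github.com/tensorflow/models"
SLIM_COMMIT = "<known-good commit where the slim directory is at the repository root>"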
Example #8
def main():
    sys.setrecursionlimit(10000)  # workaround for deep copying large graph
    parser = argparse.ArgumentParser()
    parser.add_argument("kerasmodel")
    parser.add_argument("--backend", default="webgpu,webassembly,fallback",
                        help="comma-separated list of backends")
    parser.add_argument("--input_shape", required=True,
                        help="shape of blobs for inputs (example: '(1,3,224,224)')")
    # parser.add_argument("--input_data_format", choices=["channels_first", "channels_last"])
    parser.add_argument("--out",
                        help="output directory (default: <model>/webdnn_graph_descriptor)")
    parser.add_argument("--encoding", help="name of weight encoder")
    parser.add_argument("--visualize_ir", action="store_true")
    parser.add_argument("--plugin", action="append", help="plugin python files which are imported before transpiling")
    args = parser.parse_args()

    console.stderr(f"[{path.basename(__file__)}] Generating feedforward graph")
    class_list = []
    if args.plugin:
        for plugin_path in args.plugin:
            class_list += _load_plugin(plugin_path)
    # custom_objects maps user-defined layer names to classes so that
    # keras.models.load_model can deserialize custom layers
    custom_objects = {}
    for k, v in class_list:
        custom_objects[k] = v

    input_shape, _ = Shape.parse(args.input_shape)
    input_shapes = [input_shape]

    model = keras.models.load_model(args.kerasmodel, custom_objects=custom_objects)
    model.build()
    converter = KerasConverter()
    graph = converter.convert(model)

    for graph_input, input_shape in zip(graph.inputs, input_shapes):
        for p1, p2 in zip(graph_input.shape, input_shape):
            if not Placeholder.check_resolved(p1) and Placeholder.check_resolved(p2):
                p1.value = Placeholder.force_int(p2)

            elif Placeholder.check_resolved(p1) and not Placeholder.check_resolved(p2):
                raise ValueError(f'Shape mismatch: {p1} != {p2}')

            elif Placeholder.check_resolved(p1) and Placeholder.check_resolved(p2):
                assert p1 == p2, f'Shape mismatch: {p1} != {p2}'

    if args.out:
        output_dir = args.out
    else:
        output_dir = path.join(path.dirname(args.kerasmodel), "webdnn_graph_descriptor")
    os.makedirs(output_dir, exist_ok=True)

    if args.visualize_ir:
        ir_dot_path = path.join(output_dir, "ir.dot")
        with open(ir_dot_path, "w") as f:
            f.write(dump_dot(graph))
        console.stderr(f"IR graph can be visualized with graphviz command: 'dot {ir_dot_path} -T png -o output.png'")

    console.stderr(f"[{path.basename(__file__)}] Generating graph descriptor")

    any_backend_failed = False
    backends = args.backend.split(",")
    for backend in backends:
        console.stderr(f"[{path.basename(__file__)}] Backend: {console.colorize(backend, console.Color.Cyan)}")
        try:
            graph_exec_data = generate_descriptor(backend, graph, constant_encoder_name=args.encoding)
            graph_exec_data.save(output_dir)
        except Exception as ex:
            if flags.DEBUG:
                raise ex

            any_backend_failed = True
            console.error(f"[{path.basename(__file__)}] Failed generating descriptor for {backend} backend")
            console.stderr(traceback.format_exc())
            continue

    if any_backend_failed:
        exit(1)
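_load_plugin is referenced above but not defined in this excerpt. A hypothetical implementation, which imports a plugin file and collects the classes it defines (for example custom Keras layers), might look like this; names and behaviour are assumptions.

# Hypothetical sketch of _load_plugin (not from the original source).
import importlib.util
import inspect


def _load_plugin(plugin_path):
    spec = importlib.util.spec_from_file_location("webdnn_plugin", plugin_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    # keep only classes defined in the plugin module itself
    return [(name, cls) for name, cls in inspect.getmembers(module, inspect.isclass)
            if cls.__module__ == module.__name__]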
Example #9
def main():
    sys.setrecursionlimit(10000)  # workaround for deep copying large graph

    parser = argparse.ArgumentParser()
    parser.add_argument("--backend", default="webgpu,webassembly")
    parser.add_argument("--encoding", default="eightbit")
    parser.add_argument('--out',
                        '-o',
                        default='webdnn/image-caption-model',
                        help='Directory to output the graph descriptor')
    parser.add_argument('--sentence',
                        '-s',
                        required=True,
                        type=str,
                        help='sentence dataset file path')
    parser.add_argument('--model',
                        '-m',
                        required=True,
                        type=str,
                        help='input model file path')
    parser.add_argument("--example_image",
                        help="example image for comparing output")
    parser.add_argument("--visualize_ir", action="store_true")

    args = parser.parse_args()

    os.makedirs(args.out, exist_ok=True)
    out_dir_graph1 = os.path.join(args.out, "image-feature")
    out_dir_graph2 = os.path.join(args.out, "caption-generation")

    hidden_num = 512
    with open(args.sentence, 'rb') as f:
        sentence_dataset = pickle.load(f)
    word_ids = sentence_dataset['word_ids']
    word_num = len(word_ids)
    id_to_word = [""] * word_num
    for k, v in word_ids.items():
        id_to_word[v] = k

    with open(os.path.join(args.out, "word_data.json"), "w") as f:
        json.dump(
            {
                "id_to_word": id_to_word,
                "bos_id": word_ids["<S>"],
                "eos_id": word_ids["</S>"],
                "word_num": word_num,
                "hidden_num": hidden_num
            }, f)

    caption_net = ImageCaption(word_num=word_num,
                               feature_num=2048,
                               hidden_num=hidden_num)
    chainer.serializers.load_hdf5(args.model, caption_net)
    graph1 = generate_graph_model1(caption_net)
    graph2 = generate_graph_model2(caption_net, hidden_num=hidden_num)

    if args.example_image:
        example_io = generate_example_io(caption_net, word_ids,
                                         args.example_image)
        with open(os.path.join(args.out, "example_io.json"), "w") as f:
            json.dump(example_io, f)

    if args.visualize_ir:
        ir_dot_path = os.path.join(args.out, "ir.dot")
        with open(ir_dot_path, "w") as f:
            f.write(dump_dot(graph2))
        console.stderr(
            f"IR graph can be visualized with graphviz command: 'dot {ir_dot_path} -T png -o output.png'"
        )

    any_backend_failed = False
    last_backend_exception = None
    for backend in args.backend.split(","):
        try:
            graph_exec_data = generate_descriptor(
                backend, graph1, constant_encoder_name=args.encoding)
            graph_exec_data.save(out_dir_graph1)
            graph_exec_data = generate_descriptor(
                backend, graph2, constant_encoder_name=args.encoding)
            graph_exec_data.save(out_dir_graph2)
        except Exception as ex:
            any_backend_failed = True
            last_backend_exception = ex
            console.error(
                f"Failed generating descriptor for backend {backend}: {str(ex)}\n"
            )

    if any_backend_failed:
        raise last_backend_exception