コード例 #1
0
def main():
    """Convert a pretrained TF-slim ResNet-50 into WebDNN graph descriptors.

    Clones the TF-slim repository into the output directory (if not already
    present), downloads the pretrained checkpoint, builds the ResNet-50
    inference graph, and emits one WebDNN graph descriptor per backend
    listed in ``--backend``.
    """
    # Graph traversal inside the converter can recurse deeply on large models.
    sys.setrecursionlimit(10000)

    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default="resnet50", choices=["resnet50"])
    parser.add_argument('--out',
                        '-o',
                        default='output_tensorflow',
                        help='Directory to output the graph descriptor')
    parser.add_argument("--encoding", help="name of weight encoder")
    parser.add_argument("--backend",
                        default="webgpu,webgl,webassembly,fallback",
                        help="backend")
    args = parser.parse_args()

    os.makedirs(args.out, exist_ok=True)
    slim_dir = os.path.join(args.out, "models/slim")
    if not os.path.exists(slim_dir):
        clone_slim(args.out)

    model_path = download_model(args.out)

    # The slim network definitions only become importable after the clone above.
    sys.path.append(slim_dir)
    from nets import resnet_v1
    image_size = resnet_v1.resnet_v1.default_image_size

    sess = tf.Session()
    processed_images = tf.placeholder(tf.float32,
                                      [1, image_size, image_size, 3])

    # Create the model, use the default arg scope to configure the batch norm parameters.
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        logits, _ = resnet_v1.resnet_v1_50(processed_images,
                                           num_classes=1000,
                                           is_training=False)
    probabilities = tf.nn.softmax(logits)

    # Restore the pretrained weights into the session.
    init_fn = slim.assign_from_checkpoint_fn(model_path,
                                             slim.get_model_variables())
    init_fn(sess)

    graph = TensorFlowConverter(sess, batch_size=1).convert([processed_images],
                                                            [probabilities])

    from webdnn.graph import traverse
    traverse.dump(graph)

    for backend in args.backend.split(","):
        graph_exec_data = generate_descriptor(
            backend, graph, constant_encoder_name=args.encoding)
        graph_exec_data.save(args.out)

    console.stderr("Done.")
コード例 #2
0
def generate_graph(model_type, output_dir):
    """Load or train an MNIST model and convert it into a WebDNN IR graph.

    If a checkpoint for ``model_type`` exists under ``output_dir`` it is
    restored; otherwise the model is trained for 1000 steps, the session is
    saved, and ten test samples are dumped as JSON for browser-side checks.

    Args:
        model_type: model identifier understood by ``setup_model``.
        output_dir: directory holding the session checkpoint, the sample
            JSON file, and the MNIST data cache.

    Returns:
        Tuple ``(sess, x, y, graph)`` — the live TF session, input/output
        tensors, and the converted WebDNN IR graph.
    """
    session_path = os.path.join(output_dir, f"session/session_{model_type}")
    sample_path = os.path.join(output_dir, "test_samples.json")
    data_path = os.path.join(output_dir, "data")

    x, y, t, loss, accuracy, optimizer = setup_model(model_type)

    sess = tf.Session()

    if os.path.exists(session_path + ".index"):
        # -------------------------------------------------------------------------------
        # Load pretrained model

        saver = tf.train.Saver()
        saver.restore(sess, session_path)

    else:
        # -------------------------------------------------------------------------------
        # Train model

        mnist = input_data.read_data_sets(data_path, one_hot=True)

        with sess.as_default():
            tf.global_variables_initializer().run()

            for step in range(1000):
                batch_xs, batch_ys = mnist.train.next_batch(100)
                _, loss_val = sess.run([optimizer, loss],
                                       feed_dict={
                                           x: batch_xs,
                                           t: batch_ys
                                       })

                if step % 100 == 0:
                    print(f"Step {step}: loss = {loss_val}")

            print(
                f"accuracy: {sess.run(accuracy, feed_dict={x: mnist.test.images, t: mnist.test.labels})}"
            )

            saver = tf.train.Saver()
            saver.save(sess, session_path)

        # BUG FIX: this dump references `mnist`, which is only bound in this
        # training branch, but it was previously indented at function level —
        # so restoring a pretrained session raised NameError. Keep it inside
        # the `else` branch (matching the sibling example that trains main()).
        with open(sample_path, "w") as f:
            json.dump([{
                "x": mnist.test.images[i].flatten().tolist(),
                "y": int(mnist.test.labels[i].argmax())
            } for i in range(10)], f)

    # -------------------------------------------------------------------------------
    # Convert

    graph = TensorFlowConverter(sess, batch_size=1).convert([x], [y])
    return sess, x, y, graph
コード例 #3
0
ファイル: converter.py プロジェクト: zhangaz1/webdnn
    def convert(self, model: "keras.models.Model") -> Graph:
        """convert(model, input_orders=None)

        Convert a Keras model into a WebDNN IR Graph. The model's backend
        TensorFlow graph is first handed to TensorFlowConverter; if that
        conversion raises, this converter falls back to its own Keras-level
        conversion and disables the TensorFlow path for all subsequent calls.

        Args:
            model (`keras.models.Model`): keras model

        .. admonition:: example

            Convert pre-trained keras ResNet model.

            .. code::

                import keras
                from webdnn.frontend.keras import KerasConverter

                model = keras.applications.resnet50.ResNet50(include_top=True, weights='imagenet')
                graph = KerasConverter(batch_size=1).convert(model)

        Returns:
            (:class:`~webdnn.graph.graph.Graph`): WebDNN IR Graph
        """
        if self._use_tensorflow_converter:
            # noinspection PyBroadException
            try:
                tf_converter = TensorFlowConverter(
                    session=K.get_session(), batch_size=self._batch_size)
                return tf_converter.convert(model.inputs, model.outputs)

            except Exception:
                # Remember the failure so later calls jump straight to the
                # Keras-level fallback instead of retrying TensorFlow.
                self._use_tensorflow_converter = False
                console.debug(traceback.format_exc())
                console.debug(
                    "[KerasConverter] TensorflowConverter failed to convert.")

        return self._convert_fallback(model)
コード例 #4
0
ファイル: convert_webdnn.py プロジェクト: rexl2018/AZ.js
    # Parse the network file given on the command line into architecture
    # parameters (block/filter counts) and the flat weight list.
    # NOTE(review): `version` and `training` are captured but unused below.
    version, blocks, filters, weights = read_net(sys.argv[1])

    # Input placeholders; tensor layout depends on the requested data format.
    # NOTE(review): BOARD_SIZE / FEATURES / data_format appear to be defined
    # earlier in this function or module — outside this excerpt; confirm.
    if data_format == 'NHWC':
        planes = tf.placeholder(tf.float32, [None, BOARD_SIZE, BOARD_SIZE, FEATURES])
        probs = tf.placeholder(tf.float32, [None, BOARD_SIZE * BOARD_SIZE + 1])
        winner = tf.placeholder(tf.float32, [None, 1])
    else:
        planes = tf.placeholder(tf.float32, [None, FEATURES, BOARD_SIZE, BOARD_SIZE])
        probs = tf.placeholder(tf.float32, [None, BOARD_SIZE * BOARD_SIZE + 1])
        winner = tf.placeholder(tf.float32, [None, 1])

    # Configure the TF graph builder with the parameters read from the net file.
    tfprocess = TFProcess()
    tfprocess.INPUT_DIM = 2
    tfprocess.DATA_FORMAT = data_format
    tfprocess.BOARD_SIZE = BOARD_SIZE
    tfprocess.FEATURES = FEATURES
    tfprocess.RESIDUAL_FILTERS = filters
    tfprocess.RESIDUAL_BLOCKS = blocks
    training = tfprocess.training
    tfprocess.training = False # in order to convert batch normalization (inference mode)
    tfprocess.init_net(planes, probs, winner)
    tfprocess.replace_weights(weights)
    # Convert both heads (y_conv: policy, z_conv: value — presumably; verify
    # against TFProcess) into a single WebDNN graph.
    graph = TensorFlowConverter(tfprocess.session).convert([planes], [tfprocess.y_conv, tfprocess.z_conv])
    print("generating webgpu...")
    exec_info = generate_descriptor("webgpu", graph)
    exec_info.save("./ELF_OpenGo")
    print("done")
    print("generating webgl...")
    exec_info = generate_descriptor("webgl", graph)
    exec_info.save("./ELF_OpenGo")
コード例 #5
0
def main():
    """Train (or restore) a small MNIST model and export WebDNN descriptors.

    Restores a saved session for the chosen model if one exists; otherwise
    trains for 1000 steps, saves the session, and dumps ten test samples as
    JSON. Finally converts the graph and saves one descriptor per backend.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default="fc", choices=["fc", "conv"])
    parser.add_argument("--out", default="output_tensorflow")
    # BUG FIX: with action="append", argparse appends user values to a
    # non-empty default list instead of replacing it, so passing
    # "--backends webgpu" previously produced all four defaults plus webgpu.
    # Use default=None and fill in the full set only when the flag is absent.
    parser.add_argument("--backends",
                        action="append",
                        help="backend (repeatable); defaults to all backends")
    args = parser.parse_args()
    if args.backends is None:
        args.backends = ["webgpu", "webgl", "webassembly", "fallback"]

    session_path = os.path.join(args.out, "session")
    sample_path = os.path.join(args.out, "test_samples.json")
    data_path = os.path.join(args.out, "data")

    x, y, t, loss, accuracy, optimizer = setup_model(args.model)

    sess = tf.Session()

    if os.path.exists(session_path):
        # -------------------------------------------------------------------------------
        # Load pretrained model

        saver = tf.train.Saver()
        saver.restore(sess, os.path.join(session_path,
                                         f"session_{args.model}"))

    else:
        # -------------------------------------------------------------------------------
        # Train model

        mnist = input_data.read_data_sets(data_path, one_hot=True)

        with sess.as_default():
            tf.global_variables_initializer().run()

            for step in range(1000):
                batch_xs, batch_ys = mnist.train.next_batch(100)
                _, loss_val = sess.run([optimizer, loss],
                                       feed_dict={
                                           x: batch_xs,
                                           t: batch_ys
                                       })

                if step % 100 == 0:
                    print(f"Step {step}: loss = {loss_val}")

            print(
                f"accuracy: {sess.run(accuracy, feed_dict={x: mnist.test.images, t: mnist.test.labels})}"
            )

            saver = tf.train.Saver()
            saver.save(sess, os.path.join(session_path,
                                          f"session_{args.model}"))

        # Dump ten test samples for browser-side verification of the graph.
        with open(sample_path, "w") as f:
            json.dump([{
                "x": mnist.test.images[i].flatten().tolist(),
                "y": int(mnist.test.labels[i].argmax())
            } for i in range(10)], f)

    # -------------------------------------------------------------------------------
    # Convert

    webdnn_graph = TensorFlowConverter(sess, batch_size=1).convert([x], [y])

    # # When you try to convert more complex model, maybe WebDNN failed to infer the data order.
    # # In this case, you can give "data-order hints" to WebDNN graph converter.
    #
    # webdnn_graph = TensorFlowConverter(sess, batch_size=1).convert([x], [y], order_hints={
    #     x: OrderNC,
    #     W: OrderCN,
    #     b: OrderC,
    #     y: OrderNC
    # })

    for backend in args.backends:
        desc = generate_descriptor(backend, webdnn_graph)
        desc.save(args.out)
コード例 #6
0
def main():
    """Train (or restore) a softmax-regression MNIST model and export it.

    Builds a single-layer softmax classifier inline, restores a saved
    session if one exists (training for 1000 steps otherwise), then
    converts the graph with WebDNN and saves a descriptor per backend.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--out", default="output_tensorflow")
    # BUG FIX: with action="append", argparse appends user values to a
    # non-empty default list instead of replacing it. Use default=None and
    # fill in the full set only when the flag was never given.
    parser.add_argument("--backends",
                        action="append",
                        help="backend (repeatable); defaults to all backends")
    args = parser.parse_args()
    if args.backends is None:
        args.backends = ["webgpu", "webassembly", "fallback"]

    session_path = os.path.join(args.out, "session")
    sample_path = os.path.join(args.out, "test_samples.json")
    data_path = os.path.join(args.out, "data")

    # Single-layer softmax regression: y = softmax(xW + b), cross-entropy loss.
    x = tf.placeholder(tf.float32, [None, 784])
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.nn.softmax(tf.matmul(x, W) + b)
    t = tf.placeholder(tf.float32, [None, 10])
    loss = tf.reduce_mean(-tf.reduce_sum(t * tf.log(y), reduction_indices=[1]))
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(t, 1)), tf.float32))
    optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)

    sess = tf.Session()

    if os.path.exists(session_path):
        # -------------------------------------------------------------------------------
        # Load pretrained model

        saver = tf.train.Saver()
        saver.restore(sess, os.path.join(session_path, "session"))

    else:
        # -------------------------------------------------------------------------------
        # Train model

        mnist = input_data.read_data_sets(data_path, one_hot=True)

        # BUG FIX: the original called tf.global_variables_initializer().run()
        # with no default session registered (no `with sess.as_default():`),
        # which raises in TF1. Run the init op through the session explicitly.
        sess.run(tf.global_variables_initializer())

        for step in range(1000):
            batch_xs, batch_ys = mnist.train.next_batch(100)
            _, loss_val = sess.run([optimizer, loss],
                                   feed_dict={
                                       x: batch_xs,
                                       t: batch_ys
                                   })

            if step % 100 == 0:
                print(f"Step {step}: loss = {loss_val}")

        print(
            f"accuracy: {sess.run(accuracy, feed_dict={x: mnist.test.images, t: mnist.test.labels})}"
        )

        saver = tf.train.Saver()
        saver.save(sess, os.path.join(session_path, "session"))

        # Dump ten test samples for browser-side verification of the graph.
        with open(sample_path, "w") as f:
            json.dump([{
                "x": mnist.test.images[i].flatten().tolist(),
                "y": int(mnist.test.labels[i].argmax())
            } for i in range(10)], f)

    # -------------------------------------------------------------------------------
    # Convert

    webdnn_graph = TensorFlowConverter(sess, batch_size=1).convert([x], [y])

    # # When you try to convert more complex model, maybe WebDNN failed to infer the data order.
    # # In this case, you can give "data-order hints" to WebDNN graph converter.
    #
    # webdnn_graph = TensorFlowConverter(sess, batch_size=1).convert([x], [y], order_hints={
    #     x: OrderNC,
    #     W: OrderCN,
    #     b: OrderC,
    #     y: OrderNC
    # })

    for backend in args.backends:
        desc = generate_descriptor(backend, webdnn_graph)
        desc.save(args.out)