def main():
    if os.path.exists(model_path):
        model = keras.models.load_model(model_path)

    else:
        model = train_model()

    graph = KerasConverter(batch_size=1).convert(model)
    generate_descriptor('webgl', graph).save('./output')
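train_model() and model_path are defined elsewhere in the original script. A minimal sketch of what such a function might look like, assuming a small Keras MNIST classifier (hypothetical layer sizes; the real model may differ):

def train_model():
    # Hypothetical stand-in for the script's train_model(): a small MNIST MLP.
    (x_train, y_train), _ = keras.datasets.mnist.load_data()
    x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
    model = keras.models.Sequential([
        keras.layers.Dense(100, activation="relu", input_shape=(784,)),
        keras.layers.Dense(10, activation="softmax"),
    ])
    model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
    model.fit(x_train, y_train, epochs=1)
    model.save(model_path)  # so the os.path.exists branch above finds it next time
    return model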
Example #2
def main():
    parser = argparse.ArgumentParser(description='Chainer example: MNIST')
    parser.add_argument("--model", default="fc", choices=["fc", "conv"])
    parser.add_argument(
        '--out',
        '-o',
        default='output_chainer',
        help='Directory to output the graph descriptor and sample test data')
    parser.add_argument("--backend",
                        default="webgpu,webgl,webassembly,fallback")

    args = parser.parse_args()

    output_dir = os.path.join(args.out, f"./chainer_model")
    os.makedirs(output_dir, exist_ok=True)

    model, test, graph = generate_graph(args.model, output_dir)

    for backend in args.backend.split(","):
        exec_info = generate_descriptor(backend, graph)
        exec_info.save(args.out)

    test_samples_json = []
    for i in range(10):
        image, label = test[i]
        test_samples_json.append({
            'x': image.flatten().tolist(),
            'y': int(label)
        })
    with open(os.path.join(args.out, 'test_samples.json'), 'w') as f:
        json.dump(test_samples_json, f)
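generate_graph() is defined elsewhere in the original script and returns the trained model, the test set, and the converted WebDNN graph. A minimal hypothetical sketch for the "fc" case (assumes Chainer >= 4 for chainer.Sequential; the real helper also handles "conv" and actual training):

import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
from webdnn.frontend.chainer import ChainerConverter

def generate_graph(model_name, output_dir):
    # Hypothetical stand-in: a small fully-connected MNIST model.
    assert model_name == "fc"
    model = chainer.Sequential(
        L.Linear(784, 100), F.relu,
        L.Linear(100, 10), F.softmax)
    _, test = chainer.datasets.get_mnist(ndim=1)
    x = chainer.Variable(np.zeros((1, 784), dtype=np.float32))
    with chainer.using_config("train", False):
        y = model(x)
    graph = ChainerConverter().convert([x], [y])
    return model, test, graph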
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--backend",
                        default="webgpu,webgl,webassembly,fallback")
    parser.add_argument("--encoding")
    parser.add_argument('--out',
                        '-o',
                        default='output_chainer',
                        help='Directory to output the graph descriptor')

    args = parser.parse_args()
    os.makedirs(args.out, exist_ok=True)

    _, _, _, graph = generate_graph()

    any_backend_failed = False
    last_backend_exception = None
    for backend in args.backend.split(","):
        try:
            graph_exec_data = generate_descriptor(
                backend, graph, constant_encoder_name=args.encoding)
            graph_exec_data.save(args.out)
        except Exception as ex:
            any_backend_failed = True
            last_backend_exception = ex
            console.error(
                f"Failed generating descriptor for backend {backend}: {str(ex)}\n"
            )

    if any_backend_failed:
        raise last_backend_exception
Example #4
def main():
    sys.setrecursionlimit(10000)  # workaround for deep copying large graph

    parser = argparse.ArgumentParser()
    parser.add_argument("--model",
                        default="resnet50",
                        choices=["vgg16", "resnet50"])
    parser.add_argument("--backend", default="webgpu,webassembly,fallback")
    parser.add_argument("--encoding")
    parser.add_argument('--out',
                        '-o',
                        default='output_chainer',
                        help='Directory to output the graph descriptor')

    args = parser.parse_args()

    os.makedirs(args.out, exist_ok=True)

    sample_image = np.zeros((224, 224, 3),
                            dtype=np.uint8)  # placeholder; a real image loaded via PIL.Image.open(...) also works
    if args.model == "vgg16":
        link = chainer.links.model.vision.vgg.VGG16Layers()
        prepared_image = chainer.links.model.vision.vgg.prepare(
            sample_image)  # BGR, CHW

    elif args.model == "resnet50":
        link = chainer.links.model.vision.resnet.ResNet50Layers()
        prepared_image = chainer.links.model.vision.resnet.prepare(
            sample_image)

    else:
        raise NotImplementedError

    nn_input = chainer.Variable(np.array([prepared_image], dtype=np.float32))

    if chainer.__version__ >= "2.":
        with chainer.using_config('train', False):
            nn_output = link(nn_input, layers=['prob'])['prob']

    else:
        nn_output = link(nn_input, layers=['prob'], test=True)['prob']

    graph = ChainerConverter().convert([nn_input], [nn_output])  # type: Graph

    any_backend_failed = False
    last_backend_exception = None
    for backend in args.backend.split(","):
        try:
            graph_exec_data = generate_descriptor(
                backend, graph, constant_encoder_name=args.encoding)
            graph_exec_data.save(args.out)
        except Exception as ex:
            any_backend_failed = True
            last_backend_exception = ex
            console.error(
                f"Failed generating descriptor for backend {backend}: {str(ex)}\n"
            )

    if any_backend_failed:
        raise last_backend_exception
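The chainer.__version__ >= "2." check above compares version strings lexicographically, which misorders releases such as "10.0" vs "2.0". A more robust sketch, assuming the packaging package is installed:

import chainer
from packaging.version import Version

is_v2_or_later = Version(chainer.__version__) >= Version("2")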
Example #5
def main():
    sys.setrecursionlimit(10000)

    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default="resnet50", choices=["resnet50"])
    parser.add_argument('--out',
                        '-o',
                        default='output_tensorflow',
                        help='Directory to output the graph descriptor')
    parser.add_argument("--encoding", help="name of weight encoder")
    parser.add_argument("--backend",
                        default="webgpu,webgl,webassembly,fallback",
                        help="backend")
    args = parser.parse_args()

    os.makedirs(args.out, exist_ok=True)
    slim_dir = os.path.join(args.out, "models/slim")
    if not os.path.exists(slim_dir):
        clone_slim(args.out)

    model_path = download_model(args.out)

    sys.path.append(slim_dir)
    from nets import resnet_v1
    image_size = resnet_v1.resnet_v1.default_image_size

    sess = tf.Session()
    processed_images = tf.placeholder(tf.float32,
                                      [1, image_size, image_size, 3])

    # Create the model, use the default arg scope to configure the batch norm parameters.
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        logits, _ = resnet_v1.resnet_v1_50(processed_images,
                                           num_classes=1000,
                                           is_training=False)
    probabilities = tf.nn.softmax(logits)

    init_fn = slim.assign_from_checkpoint_fn(model_path,
                                             slim.get_model_variables())

    init_fn(sess)

    graph = TensorFlowConverter(sess, batch_size=1).convert([processed_images],
                                                            [probabilities])

    from webdnn.graph import traverse
    traverse.dump(graph)

    for backend in args.backend.split(","):
        graph_exec_data = generate_descriptor(
            backend, graph, constant_encoder_name=args.encoding)
        graph_exec_data.save(args.out)

    console.stderr("Done.")
Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default="fc", choices=["fc", "conv"])
    parser.add_argument("--out", default="output_tensorflow")
    parser.add_argument("--backend", default=",".join(backend_names))
    args = parser.parse_args()

    _, _, _, graph = generate_graph(args.model, args.out)

    for backend in args.backend.split(","):
        desc = generate_descriptor(backend, graph)
        desc.save(args.out)
Example #7
def main():
    args = arg()
    model = AutoEncoder(F.mean_squared_error)
    chainer.serializers.load_npz(args.model, model)

    example_input = np.zeros((1, 1, 28, 28)).astype("f")
    x = chainer.Variable(example_input)
    y = model.predict(x)
    graph = ChainerConverter().convert([x], [y])
    for backend in ["webgpu", "webassembly"]:
        exec_info = generate_descriptor(backend, graph)
        exec_info.save(args.out)
Example #8
def main():
    model_descriptions = []
    for model_config in model_configs:
        model = load_model(model_config)
        graph = get_graph(model)
        exec_info = generate_descriptor("webassembly", graph, constant_encoder_name="eightbit")
        output_dir = f"../docs/webdnn_model/{model_config['name']}"
        exec_info.save(output_dir)
        data_size = os.path.getsize(f"{output_dir}/weight_webassembly.bin") + 65536  # add rough size of the related files other than the weights

        model_descriptions.append({"name": model_config["name"],
                                   "description": f"{model_config['name']} {data_size // 1024}kB"})
    export_model_metadata(model_descriptions, "../src/models.ts")
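export_model_metadata() is defined elsewhere in the original project. A minimal hypothetical sketch that writes the collected descriptions as a TypeScript module the frontend can import:

import json

def export_model_metadata(descriptions, out_path):
    # Hypothetical: emit the model list as a TypeScript constant.
    with open(out_path, "w", encoding="utf-8") as f:
        f.write("export const models = ")
        f.write(json.dumps(descriptions, ensure_ascii=False, indent=2))
        f.write(";\n")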
Example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default="fc", choices=["fc", "conv"])
    parser.add_argument("--out", default="output_tensorflow")
    parser.add_argument("--backends",
                        action="append",
                        default=["webgpu", "webgl", "webassembly", "fallback"])
    args = parser.parse_args()

    _, _, _, graph = generate_graph(args.model, args.out)

    for backend in args.backends:
        desc = generate_descriptor(backend, graph)
        desc.save(args.out)
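As noted in the comment above, argparse appends command-line values to a non-None default list when action="append" is used, so the default backends can never be overridden. A common workaround:

parser.add_argument("--backends", action="append", default=None)
args = parser.parse_args()
backends = args.backends or ["webgpu", "webgl", "webassembly", "fallback"]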
Example #10
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--out', '-o', default='output_keras', help='Directory to output the graph descriptor')
    parser.add_argument("--encoding", help="name of weight encoder")
    parser.add_argument("--backend", default="webgpu,webgl,webassembly,fallback", help="backend")
    args = parser.parse_args()

    _, graph = generate_graph()

    for backend in args.backend.split(","):
        graph_exec_data = generate_descriptor(backend, graph, constant_encoder_name=args.encoding)
        graph_exec_data.save(args.out)

    console.stderr("Done.")
Example #11
def export_feature_extractor(output_dir: str):
    # model = chainercv.links.model.resnet.ResNet50(pretrained_model="imagenet", arch="he")
    # model.pick = "pool5"  # =>2048dim
    model = SqueezeNetFeatureExtactor()
    with chainer.using_config("train", False):
        with chainer.using_config("enable_backprop", True):
            nn_input = chainer.Variable(
                np.zeros((1, 3, 227, 227), dtype=np.float32))
            nn_output = model(nn_input)
            graph = ChainerConverter().convert([nn_input], [nn_output])
    for backend in ["webgpu", "webgl", "webassembly"]:
        graph_exec_data = generate_descriptor(backend,
                                              graph,
                                              constant_encoder_name="eightbit")
        graph_exec_data.save(output_dir)
Example #12
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        default="fc",
        choices=["fc", "conv", "dilated_conv", "residual", "complex"])
    parser.add_argument("--out", default="output_keras")
    parser.add_argument("--backend", default=",".join(backend_names))
    args = parser.parse_args()

    model, graph = generate_graph(args.model, args.out)

    for backend in args.backend.split(","):
        exec_info = generate_descriptor(backend, graph)
        exec_info.save(args.out)
Example #13
def main():
    z_dim = 100
    device = -1  # CPU
    batch_size = 1
    model = Generator(z_dim)

    model.to_gpu()
    chainer.serializers.load_npz('result-dcgan/gen_snapshot_epoch-200.npz',
                                 model)
    model.to_cpu()

    x, _ = model.generate_noise(device, batch_size)
    y = model(x)

    graph = ChainerConverter().convert([x], [y])
    exec_info = generate_descriptor("webassembly", graph)
    exec_info.save("./model")
Example #14
def main():
    # construct model object and load weights
    model = chainer.links.Classifier(CNN())
    chainer.serializers.load_npz('chainer_output/chainer_model.npz', model)

    # run model with dummy variable
    input_variable = chainer.Variable(np.zeros((1, 1, 28, 28), dtype=np.float32))
    prediction_raw_variable = model.predictor(input_variable)  # raw activation before softmax
    prediction_with_softmax_variable = chainer.functions.softmax(prediction_raw_variable)

    # convert graph to intermediate representation
    graph = ChainerConverter().convert([input_variable], [
        prediction_with_softmax_variable])

    # generate graph descriptor
    backend = 'webgl'
    exec_info = generate_descriptor(backend, graph)
    exec_info.save('webdnn_graph_descriptor')
Example #15
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default="resnet50", choices=["resnet50"])
    parser.add_argument('--out', '-o', default='output_keras',
                        help='Directory to output the graph descriptor')
    parser.add_argument("--encoding", help="name of weight encoder")
    parser.add_argument("--backend", default="webgpu,webassembly,fallback", help="backend")
    args = parser.parse_args()

    model = resnet50.ResNet50(include_top=True, weights='imagenet')

    sys.setrecursionlimit(10000)
    graph = KerasConverter(batch_size=1).convert(model)
    for backend in args.backend.split(","):
        graph_exec_data = generate_descriptor(backend, graph, constant_encoder_name=args.encoding)
        graph_exec_data.save(args.out)

    console.stderr("Done.")
Example #16
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--backend',
                        choices=('webgl', 'webassembly'),
                        nargs='+',
                        required=True)  # without this, args.backend is None and the loop below fails
    parser.add_argument('--out', default='model')
    args = parser.parse_args()

    model = SSD(chainercv.links.SSD300(pretrained_model='voc0712'))
    x = chainer.Variable(
        np.empty((1, 3, model.insize, model.insize), dtype=np.float32))
    ys = model(x)
    print(x.shape, '->', ', '.join('{}'.format(y.shape) for y in ys))

    graph = ChainerConverter().convert([x], ys)

    for backend in args.backend:
        print('backend:', backend)
        desc = generate_descriptor(backend, graph)
        desc.save(args.out)
Example #17
def main():
    parser = argparse.ArgumentParser(
        description='chainer implementation of pix2pix')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=1,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=200,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--enc-npz', required=True)
    parser.add_argument('--dec-npz', required=True)
    args = parser.parse_args()

    # Set up a neural network to train
    enc = Encoder(in_ch=3)
    dec = Decoder(out_ch=3)

    chainer.serializers.load_npz(args.enc_npz, enc)
    chainer.serializers.load_npz(args.dec_npz, dec)

    # graph
    input_array = np.zeros((1, 3, 256, 256), dtype=np.float32)  # avoid shadowing the input() builtin
    x = chainer.Variable(input_array)
    z = enc(x)
    y = dec(z)
    graph = ChainerConverter().convert([x], [y])
    exec_info = generate_descriptor("webassembly", graph)
    exec_info.save(args.out)
Example #18
def main():
    FLAGS(sys.argv)

    # 0. load dataset
    with open(FLAGS.vocab_file) as f:
        char_list = [line.strip().split('\t')[0] for line in f]
    charset_size = len(char_list) + 1

    # 1. build model
    assert FLAGS.model in ('rnnlm',)  # trailing comma makes a tuple; ('rnnlm') is just a string

    if FLAGS.model == 'rnnlm':
        model = Decoder(charset_size=charset_size,
                        hidden_size=FLAGS.hidden_size,
                        n_layers=FLAGS.n_layers,
                        dropout=FLAGS.dropout)

    ins, outs = model.webdnn_anchor()
    graph = ChainerConverter().convert(ins, outs)
    exec_info = generate_descriptor("webgpu", graph)
    exec_info.save("./output")
Example #19
def main():
    sys.setrecursionlimit(10000)  # workaround for deep copying large graph
    parser = argparse.ArgumentParser()
    # default is Caffenet of Caffe example
    parser.add_argument("caffemodel")
    parser.add_argument("--backend", default="webgpu,webassembly,fallback",
                        help="comma-separated list of backends")
    parser.add_argument("--input_name",
                        help="blob name for input (mandatory)")
    parser.add_argument("--input_shape",
                        help="shape of blobs for inputs (example: '(1,3,224,224)')")
    parser.add_argument("--input_npy",
                        help="npy file containing sample inputs")
    parser.add_argument("--output_names", required=True,
                        help="comma-separated blob name for output (mandatory)")
    parser.add_argument("--out",
                        help="output directory (default: <model>/webdnn_graph_descriptor)")
    parser.add_argument("--encoding", help="name of weight encoder")
    args = parser.parse_args()

    # Multiple blob inputs could easily be supported, but the command-line interface would become complicated.
    input_blob, input_filled = parse_input_blob(args)
    output_names = args.output_names.split(",")

    console.stderr("[convert_caffe] Loading caffe model... (usually takes several minutes)")
    link = chainer.links.caffe.CaffeFunction(args.caffemodel)

    console.stderr("[convert_caffe] Generating feedforward graph")
    if chainer.__version__ >= "2.":
        chainer.using_config("train", False)
        output_blobs = list(
            link(inputs={args.input_name: input_blob}, outputs=output_names))  # list of Variable
    else:
        output_blobs = list(
            link(inputs={args.input_name: input_blob}, outputs=output_names, train=False))  # list of Variable
    chainer_cg = chainer.computational_graph.build_computational_graph(output_blobs)
    converter = ChainerConverter()
    graph = converter.convert(chainer_cg, [input_blob], output_blobs)  # type: Graph

    if args.out:
        output_dir = args.out
    else:
        output_dir = path.join(path.dirname(args.caffemodel), "webdnn_graph_descriptor")
    os.makedirs(output_dir, exist_ok=True)

    if input_filled:
        # save output of Caffe Network (not required for inference)
        output_arrays = {output_name: output_blob.data for output_name, output_blob in zip(output_names, output_blobs)}
        np.savez(path.join(output_dir, "example_output.npz"), **output_arrays)

    console.stderr("[convert_caffe] Generating descriptors")
    any_backend_failed = False
    for backend in args.backend.split(","):
        try:
            graph_exec_data = generate_descriptor(backend, graph, constant_encoder_name=args.encoding)
            graph_exec_data.save(output_dir)
        except Exception as ex:
            any_backend_failed = True
            console.error(f"[convert_caffe] Failed generating descriptor for backend {backend}: {str(ex)}")

    if any_backend_failed:
        sys.exit(1)
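A typical invocation of this converter might look like the following (script name, file path, and blob names are illustrative; CaffeNet uses 'data' for input and 'prob' for output):

python convert_caffe.py bvlc_reference_caffenet.caffemodel --input_name data --input_shape '(1,3,227,227)' --output_names prob --backend webassembly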
Example #20
def save_generated_image(image, name):
    combined = combine_images(image)
    save_images(combined, name)


z_size = 128

# model setup
g = generator(512, 512, z_size)
serializers.load_npz("generator.model", g)
x = chainer.Variable(np.zeros((1, z_size), dtype=np.float32))
y = g(x, np.zeros(NUMBER_OF_TAG), 5, 1)

# sample generation with random noise
noise = np.random.normal(0, 0.5, [1, z_size]).astype(np.float32)
image = g(noise, np.zeros(NUMBER_OF_TAG), 5, 1)
image = image.data[0]
image = image.transpose(1, 2, 0)
save_images((image * 127.5) + 127.5, "test")

from webdnn.frontend.chainer import ChainerConverter
graph = ChainerConverter().convert([x], [y])

from webdnn.backend import generate_descriptor

exec_info = generate_descriptor(
    "webgpu", graph)  # also "webassembly", "webgl", "fallback" are available.
exec_info.save("./output")
Example #21
def main():
    parser = argparse.ArgumentParser(description='Chainer example: MNIST')
    parser.add_argument("--model", default="mlp", choices=["mlp", "conv"])
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=100,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=5,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--frequency',
                        '-f',
                        type=int,
                        default=-1,
                        help='Frequency of taking a snapshot')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument(
        '--out',
        '-o',
        default='output_chainer',
        help='Directory to output the graph descriptor and sample test data')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    os.makedirs(args.out, exist_ok=True)

    # Set up a neural network to train
    # Classifier reports softmax cross entropy loss and accuracy at every
    # iteration, which will be used by the PrintReport extension below.
    model = L.Classifier(models[args.model](10))
    if args.gpu >= 0:
        # Make a specified GPU current
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()  # Copy the model to the GPU

    # Setup an optimizer
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    # Load the MNIST dataset
    train, test = chainer.datasets.get_mnist(ndim=3)

    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test,
                                                 args.batchsize,
                                                 repeat=False,
                                                 shuffle=False)

    # Set up a trainer
    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'),
                               out=os.path.join(args.out, 'chainer_model'))

    # Evaluate the model with the test dataset for each epoch
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))

    # Dump a computational graph from 'loss' variable at the first iteration
    # The "main" refers to the target link of the "main" optimizer.
    trainer.extend(extensions.dump_graph('main/loss'))

    # Take a snapshot for each specified epoch
    frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
    trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch'))

    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport())

    # Save two plot images to the result dir
    if extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(['main/loss', 'validation/main/loss'],
                                  'epoch',
                                  file_name='loss.png'))
        trainer.extend(
            extensions.PlotReport(
                ['main/accuracy', 'validation/main/accuracy'],
                'epoch',
                file_name='accuracy.png'))

    # Print selected entries of the log to stdout
    # Here "main" refers to the target link of the "main" optimizer again, and
    # "validation" refers to the default name of the Evaluator extension.
    # Entries other than 'epoch' are reported by the Classifier link, called by
    # either the updater or the evaluator.
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
            'validation/main/accuracy', 'elapsed_time'
        ]))

    # Print a progress bar to stdout
    trainer.extend(extensions.ProgressBar())

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()

    # conversion

    print('Transpiling model to WebDNN graph descriptor')

    example_input = numpy.expand_dims(
        train[0][0], axis=0)  # example input (any sample works; shape (1, 1, 28, 28))
    x = chainer.Variable(example_input)
    y = F.softmax(model.predictor(x))  # run model
    graph = ChainerConverter().convert_from_inout_vars(
        [x], [y])  # convert graph to intermediate representation
    for backend in ["webgpu", "webassembly", "fallback"]:
        try:
            exec_info = generate_descriptor(backend, graph)
            exec_info.save(args.out)
        except Exception as ex:
            print(
                f"Failed generating descriptor for backend {backend}: {str(ex)}\n"
            )
        else:
            print(f"Backend {backend} ok\n")

    print('Exporting test samples (for demo purpose)')
    test_samples_json = []
    for i in range(10):
        image, label = test[i]
        test_samples_json.append({
            'x': image.flatten().tolist(),
            'y': int(label)
        })
    with open(os.path.join(args.out, 'test_samples.json'), 'w') as f:
        json.dump(test_samples_json, f)
Example #22
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--out", default="output_tensorflow")
    parser.add_argument("--backends",
                        action="append",
                        default=["webgpu", "webassembly", "fallback"])
    args = parser.parse_args()

    session_path = os.path.join(args.out, "session")
    sample_path = os.path.join(args.out, "test_samples.json")
    data_path = os.path.join(args.out, "data")

    x = tf.placeholder(tf.float32, [None, 784])
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.nn.softmax(tf.matmul(x, W) + b)
    t = tf.placeholder(tf.float32, [None, 10])
    loss = tf.reduce_mean(-tf.reduce_sum(t * tf.log(y), reduction_indices=[1]))
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(t, 1)), tf.float32))
    optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)

    sess = tf.Session()

    if os.path.exists(session_path):
        # -------------------------------------------------------------------------------
        # Load pretrained model

        saver = tf.train.Saver()
        saver.restore(sess, os.path.join(session_path, "session"))

    else:
        # -------------------------------------------------------------------------------
        # Train model

        mnist = input_data.read_data_sets(data_path, one_hot=True)

        sess.run(tf.global_variables_initializer())  # run via sess explicitly; no default session is active here

        for step in range(1000):
            batch_xs, batch_ys = mnist.train.next_batch(100)
            _, loss_val = sess.run([optimizer, loss],
                                   feed_dict={
                                       x: batch_xs,
                                       t: batch_ys
                                   })

            if step % 100 == 0:
                print(f"Step {step}: loss = {loss_val}")

        print(
            f"accuracy: {sess.run(accuracy, feed_dict={x: mnist.test.images, t: mnist.test.labels})}"
        )

        saver = tf.train.Saver()
        saver.save(sess, os.path.join(session_path, "session"))

        with open(sample_path, "w") as f:
            json.dump([{
                "x": mnist.test.images[i].flatten().tolist(),
                "y": int(mnist.test.labels[i].argmax())
            } for i in range(10)], f)

    # -------------------------------------------------------------------------------
    # Convert

    webdnn_graph = TensorFlowConverter(sess, batch_size=1).convert([x], [y])

    # # When converting a more complex model, WebDNN may fail to infer the data order.
    # # In that case, you can give "data-order hints" to the WebDNN graph converter.
    #
    # webdnn_graph = TensorFlowConverter(sess, batch_size=1).convert([x], [y], order_hints={
    #     x: OrderNC,
    #     W: OrderCN,
    #     b: OrderC,
    #     y: OrderNC
    # })

    for backend in args.backends:
        desc = generate_descriptor(backend, webdnn_graph)
        desc.save(args.out)
Example #23
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default="fc", choices=["fc", "conv"])
    parser.add_argument("--out", default="output_tensorflow")
    parser.add_argument("--backends",
                        action="append",
                        default=["webgpu", "webgl", "webassembly", "fallback"])
    args = parser.parse_args()

    session_path = os.path.join(args.out, "session")
    sample_path = os.path.join(args.out, "test_samples.json")
    data_path = os.path.join(args.out, "data")

    x, y, t, loss, accuracy, optimizer = setup_model(args.model)

    sess = tf.Session()

    if os.path.exists(session_path):
        # -------------------------------------------------------------------------------
        # Load pretrained model

        saver = tf.train.Saver()
        saver.restore(sess, os.path.join(session_path,
                                         f"session_{args.model}"))

    else:
        # -------------------------------------------------------------------------------
        # Train model

        mnist = input_data.read_data_sets(data_path, one_hot=True)

        with sess.as_default():
            tf.global_variables_initializer().run()

            for step in range(1000):
                batch_xs, batch_ys = mnist.train.next_batch(100)
                _, loss_val = sess.run([optimizer, loss],
                                       feed_dict={
                                           x: batch_xs,
                                           t: batch_ys
                                       })

                if step % 100 == 0:
                    print(f"Step {step}: loss = {loss_val}")

            print(
                f"accuracy: {sess.run(accuracy, feed_dict={x: mnist.test.images, t: mnist.test.labels})}"
            )

            saver = tf.train.Saver()
            saver.save(sess, os.path.join(session_path,
                                          f"session_{args.model}"))

        with open(sample_path, "w") as f:
            json.dump([{
                "x": mnist.test.images[i].flatten().tolist(),
                "y": int(mnist.test.labels[i].argmax())
            } for i in range(10)], f)

    # -------------------------------------------------------------------------------
    # Convert

    webdnn_graph = TensorFlowConverter(sess, batch_size=1).convert([x], [y])

    # # When converting a more complex model, WebDNN may fail to infer the data order.
    # # In that case, you can give "data-order hints" to the WebDNN graph converter.
    #
    # webdnn_graph = TensorFlowConverter(sess, batch_size=1).convert([x], [y], order_hints={
    #     x: OrderNC,
    #     W: OrderCN,
    #     b: OrderC,
    #     y: OrderNC
    # })

    for backend in args.backends:
        desc = generate_descriptor(backend, webdnn_graph)
        desc.save(args.out)
Example #24
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument(
        '--out',
        '-o',
        default='output_pytorch',
        help='Directory to output the graph descriptor and sample test data')
    parser.add_argument("--backend",
                        default="webgpu,webgl,webassembly,fallback")
    args = parser.parse_args()

    training_dir = os.path.join(args.out, "pytorch_model")
    os.makedirs(training_dir, exist_ok=True)

    model_path = os.path.join(training_dir, "model.proto")

    if not os.path.exists(model_path):
        # model training part (as usual)
        torch.manual_seed(1)

        device = torch.device("cpu")

        train_loader = torch.utils.data.DataLoader(
            datasets.MNIST(
                os.path.join(args.out, 'data'),
                train=True,
                download=True,
                transform=transforms.Compose([
                    transforms.ToTensor(),
                    # If the input is normalized here, the input to WebDNN also
                    # has to be normalized with the same parameters.
                    # The default of datasets.MNIST is 0=black, 1=white.
                    # transforms.Normalize((0.1307,), (0.3081,))
                ])),
            batch_size=64,
            shuffle=True)
        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST(
                os.path.join(args.out, 'data'),
                train=False,
                transform=transforms.Compose([
                    transforms.ToTensor(),
                    # transforms.Normalize((0.1307,), (0.3081,))
                ])),
            batch_size=1000,
            shuffle=True)

        model = Net().to(device)
        optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

        for epoch in range(1, 4):
            train(args, model, device, train_loader, optimizer, epoch)
            test(args, model, device, test_loader)

        # export model as ONNX format
        dummy_input = torch.autograd.Variable(torch.randn(1, 1, 28, 28))
        torch.onnx.export(model, dummy_input, model_path, verbose=True)

    # model conversion using WebDNN
    onnx_model = onnx.load(model_path)
    graph = ONNXConverter().convert(onnx_model)

    for backend in args.backend.split(","):
        exec_info = generate_descriptor(backend, graph)
        exec_info.save(args.out)

    # test data for demo
    output_test_samples(
        datasets.MNIST(
            os.path.join(args.out, 'data'),
            train=False,
            transform=transforms.Compose([
                transforms.ToTensor(),
                # transforms.Normalize((0.1307,), (0.3081,))
            ])),
        os.path.join(args.out, "test_samples.json"))
Example #25
print(f"model: {args.model}")
print(f"backend: {args.backend}")
print(f"encoding: {args.encoding}")

# Load chainer pre-trained model
model = FastStyleNet()

model_path = NSTModelPath[args.model].value
if not path.exists(model_path):
    raise FileNotFoundError(f"Model data ({model_path}) is not found. Please clone " +
                            "'https://github.com/gafr/chainer-fast-neuralstyle-models' under the resource directory. " +
                            "Cloning takes a few minutes; the repository is about 200MB.")

chainer.serializers.load_npz(model_path, model)

# Execute forward propagation to construct computation graph
if chainer.__version__ >= "2.":
    with chainer.using_config("train", False):  # fixes batch normalization
        x = chainer.Variable(np.zeros((1, 3, 144, 192), dtype=np.float32))
        y = model(x)
else:
    x = chainer.Variable(np.zeros((1, 3, 144, 192), dtype=np.float32))
    y = model(x)

# Convert chainer computation graph into IR
graph = ChainerConverter().convert_from_inout_vars([x], [y])

# Generate graph descriptor
generate_descriptor(args.backend, graph, constant_encoder_name=args.encoding).save(path.join(path.dirname(__file__), "./output"))
Example #26
    version, blocks, filters, weights = read_net(sys.argv[1])

    if data_format == 'NHWC':
        planes = tf.placeholder(tf.float32, [None, BOARD_SIZE, BOARD_SIZE, FEATURES])
        probs = tf.placeholder(tf.float32, [None, BOARD_SIZE * BOARD_SIZE + 1])
        winner = tf.placeholder(tf.float32, [None, 1])
    else:
        planes = tf.placeholder(tf.float32, [None, FEATURES, BOARD_SIZE, BOARD_SIZE])
        probs = tf.placeholder(tf.float32, [None, BOARD_SIZE * BOARD_SIZE + 1])
        winner = tf.placeholder(tf.float32, [None, 1])

    tfprocess = TFProcess()
    tfprocess.INPUT_DIM = 2
    tfprocess.DATA_FORMAT = data_format
    tfprocess.BOARD_SIZE = BOARD_SIZE
    tfprocess.FEATURES = FEATURES
    tfprocess.RESIDUAL_FILTERS = filters
    tfprocess.RESIDUAL_BLOCKS = blocks
    training = tfprocess.training
    tfprocess.training = False  # use inference mode so that batch normalization layers are converted
    tfprocess.init_net(planes, probs, winner)
    tfprocess.replace_weights(weights)
    graph = TensorFlowConverter(tfprocess.session).convert([planes], [tfprocess.y_conv, tfprocess.z_conv])
    print("generating webgpu...")
    exec_info = generate_descriptor("webgpu", graph)
    exec_info.save("./ELF_OpenGo")
    print("done")
    print("generating webgl...")
    exec_info = generate_descriptor("webgl", graph)
    exec_info.save("./ELF_OpenGo")
Example #27
def main():
    sys.setrecursionlimit(10000)  # workaround for deep copying large graph
    parser = argparse.ArgumentParser()
    parser.add_argument("kerasmodel")
    parser.add_argument("--backend",
                        default="webgpu,webgl,webassembly,fallback",
                        help="comma-separated list of backends")
    parser.add_argument(
        "--input_shape",
        required=True,
        action="append",
        help=
        "shape of blobs for inputs (example: '(1,3,224,224)'), can be specified multiple times"
    )
    # parser.add_argument("--input_data_format", choices=["channels_first", "channels_last"])
    parser.add_argument(
        "--out",
        help="output directory (default: <model>/webdnn_graph_descriptor)")
    parser.add_argument("--encoding", help="name of weight encoder")
    parser.add_argument("--visualize_ir", action="store_true")
    parser.add_argument(
        "--plugin",
        action="append",
        help="plugin python files which are imported before transpiling")
    args = parser.parse_args()

    console.stderr(f"[{path.basename(__file__)}] Generating feedforward graph")
    class_list = []
    if args.plugin:
        for plugin_path in args.plugin:
            class_list += _load_plugin(plugin_path)
    # custom_objects maps names to user-defined custom layers for load_model
    custom_objects = dict(class_list)

    input_shapes = [
        Shape.parse(input_shape)[0] for input_shape in args.input_shape
    ]

    model = keras.models.load_model(args.kerasmodel,
                                    custom_objects=custom_objects,
                                    compile=False)
    model.build(input_shape=None)
    converter = KerasConverter(batch_size=Placeholder(label='N'))
    graph = converter.convert(model)
    traverse.dump(graph)

    for graph_input, input_shape in zip(graph.inputs, input_shapes):
        for p1, p2 in zip(graph_input.shape, input_shape):
            if not Placeholder.check_resolved(
                    p1) and Placeholder.check_resolved(p2):
                p1.value = Placeholder.force_int(p2)

            elif Placeholder.check_resolved(
                    p1) and not Placeholder.check_resolved(p2):
                raise ValueError(
                    f'Shape mismatch: expected:{input_shape}, real:{graph_input.shape}, {p1} != {p2}'
                )

            elif Placeholder.check_resolved(p1) and Placeholder.check_resolved(
                    p2):
                assert p1 == p2, f'Shape mismatch: expected:{input_shape}, real:{graph_input.shape}, {p1} != {p2}'

    if args.out:
        output_dir = args.out
    else:
        output_dir = path.join(path.dirname(args.kerasmodel),
                               "webdnn_graph_descriptor")
    os.makedirs(output_dir, exist_ok=True)

    if args.visualize_ir:
        ir_dot_path = path.join(output_dir, "ir.dot")
        with open(ir_dot_path, "w") as f:
            f.write(dump_dot(graph))
        console.stderr(
            f"IR graph can be visualized with graphviz command: 'dot {ir_dot_path} -T png -o output.png'"
        )

    console.stderr(f"[{path.basename(__file__)}] Generating graph descriptor")

    any_backend_failed = False
    backends = args.backend.split(",")
    for backend in backends:
        console.stderr(
            f"[{path.basename(__file__)}] BackendName: {console.colorize(backend, console.Color.Cyan)}"
        )
        try:
            graph_exec_data = generate_descriptor(
                backend, graph, constant_encoder_name=args.encoding)
            graph_exec_data.save(output_dir)
        except Exception as ex:
            if flags.DEBUG:
                raise

            any_backend_failed = True
            console.error(
                f"[{path.basename(__file__)}] Failed generating descriptor for {backend} backend"
            )
            console.stderr(traceback.format_exc())
            continue

    if any_backend_failed:
        exit(1)
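A typical invocation of this converter might be (script and model file names are illustrative):

python convert_keras.py resnet50.h5 --input_shape '(1,224,224,3)' --backend webgl,webassembly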
Example #28
def main():
    parser = argparse.ArgumentParser(description='Chainer example: MNIST')
    parser.add_argument("--model", default="fc", choices=["fc", "conv"])
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument(
        '--out',
        '-o',
        default='output_chainer',
        help='Directory to output the graph descriptor and sample test data')
    parser.add_argument("--backend",
                        default="webgpu,webgl,webassembly,fallback")

    args = parser.parse_args()

    output_dir = os.path.join(args.out, f"./chainer_model")
    os.makedirs(output_dir, exist_ok=True)

    # Set up a neural network to train
    # Classifier reports softmax cross entropy loss and accuracy at every
    # iteration, which will be used by the PrintReport extension below.
    model = L.Classifier(models[args.model](10))
    if args.gpu >= 0:
        # Make a specified GPU current
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()  # Copy the model to the GPU

    # Setup an optimizer
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    # Load the MNIST dataset
    train, test = chainer.datasets.get_mnist(ndim=3)

    train_iter = chainer.iterators.SerialIterator(train, 128)
    test_iter = chainer.iterators.SerialIterator(test,
                                                 128,
                                                 repeat=False,
                                                 shuffle=False)

    # Set up a trainer
    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (2, 'epoch'), out=output_dir)

    # Evaluate the model with the test dataset for each epoch
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))

    # Take a snapshot for each specified epoch
    trainer.extend(extensions.snapshot(filename=args.model),
                   trigger=(2, 'epoch'))

    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport())

    # Print selected entries of the log to stdout
    # Here "main" refers to the target link of the "main" optimizer again, and
    # "validation" refers to the default name of the Evaluator extension.
    # Entries other than 'epoch' are reported by the Classifier link, called by
    # either the updater or the evaluator.
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
            'validation/main/accuracy', 'elapsed_time'
        ]))

    # Print a progress bar to stdout
    trainer.extend(extensions.ProgressBar())

    snapshot_path = os.path.join(output_dir, args.model)
    if os.path.exists(snapshot_path):
        # Resume from a snapshot
        chainer.serializers.load_npz(snapshot_path, trainer)
    else:
        # Run the training
        trainer.run()

    # conversion
    print('Transpiling model to WebDNN graph descriptor')

    if args.gpu >= 0:
        model.to_cpu()

    example_input = numpy.expand_dims(
        train[0][0], axis=0)  # example input (any sample works; shape (1, 1, 28, 28))

    x = chainer.Variable(example_input)
    y = model.predictor(x)
    graph = ChainerConverter().convert(
        [x], [y])  # convert graph to intermediate representation
    for backend in args.backend.split(","):
        exec_info = generate_descriptor(backend, graph)
        exec_info.save(args.out)

    print('Exporting test samples (for demo purpose)')
    test_samples_json = []
    for i in range(10):
        image, label = test[i]
        test_samples_json.append({
            'x': image.flatten().tolist(),
            'y': int(label)
        })
    with open(os.path.join(args.out, 'test_samples.json'), 'w') as f:
        json.dump(test_samples_json, f)