def main():
    sys.setrecursionlimit(10000)  # workaround for deep copying large graph
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default="resnet50", choices=["vgg16", "resnet50"])
    parser.add_argument("--backend", default="webgpu,webassembly,fallback")
    parser.add_argument("--encoding")
    parser.add_argument('--out', '-o', default='output_chainer',
                        help='Directory to output the graph descriptor')
    args = parser.parse_args()

    os.makedirs(args.out, exist_ok=True)

    sample_image = np.zeros((224, 224, 3), dtype=np.uint8)  # or a real image via PIL.Image.open("")
    if args.model == "vgg16":
        link = chainer.links.model.vision.vgg.VGG16Layers()
        prepared_image = chainer.links.model.vision.vgg.prepare(sample_image)  # BGR, CHW
        out_layer_name = "fc8"
    elif args.model == "resnet50":
        link = chainer.links.model.vision.resnet.ResNet50Layers()
        prepared_image = chainer.links.model.vision.resnet.prepare(sample_image)
        out_layer_name = "fc6"

    nn_input = chainer.Variable(np.array([prepared_image], dtype=np.float32))
    nn_output = link(nn_input, layers=[out_layer_name])[out_layer_name]  # 'prob' (applies softmax) is also possible
    chainer_cg = chainer.computational_graph.build_computational_graph([nn_output])
    converter = ChainerConverter()
    graph = converter.convert(chainer_cg, [nn_input], [nn_output])  # type: Graph

    any_backend_failed = False
    last_backend_exception = None
    for backend in args.backend.split(","):
        try:
            graph_exec_data = generate_descriptor(backend, graph, constant_encoder_name=args.encoding)
            graph_exec_data.save(args.out)
        except Exception as ex:
            any_backend_failed = True
            last_backend_exception = ex
            console.error(f"Failed generating descriptor for backend {backend}: {str(ex)}\n")

    if any_backend_failed:
        raise last_backend_exception
def get_graph(model):
    dummy_input = np.zeros((1, 86, 9, 9), dtype=np.float32)
    x = chainer.Variable(dummy_input)
    with chainer.using_config("train", False):  # put batch normalization into inference mode
        move, value = model.forward(x)
    graph = ChainerConverter().convert([x], [move, value])
    return graph
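# --- Illustrative usage sketch for get_graph(), not part of the original code.
# `PolicyValueNet` and the snapshot path are hypothetical placeholders; only
# APIs already used in these examples (load_npz, generate_descriptor, save)
# are relied on.
def export_policy_value_graph():
    model = PolicyValueNet()  # hypothetical link whose forward(x) returns (move, value)
    chainer.serializers.load_npz("policy_value.npz", model)  # hypothetical snapshot path
    graph = get_graph(model)
    for backend in ["webgpu", "webassembly"]:
        exec_info = generate_descriptor(backend, graph)
        exec_info.save("./output")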
def main():
    args = arg()
    model = AutoEncoder(F.mean_squared_error)
    chainer.serializers.load_npz(args.model, model)

    example_input = np.zeros((1, 1, 28, 28)).astype("f")
    x = chainer.Variable(example_input)
    y = model.predict(x)

    graph = ChainerConverter().convert([x], [y])
    for backend in ["webgpu", "webassembly"]:
        exec_info = generate_descriptor(backend, graph)
        exec_info.save(args.out)
def export_feature_extractor(output_dir: str):
    # Alternative backbone:
    # model = chainercv.links.model.resnet.ResNet50(pretrained_model="imagenet", arch="he")
    # model.pick = "pool5"  # => 2048-dim feature
    model = SqueezeNetFeatureExtactor()
    with chainer.using_config("train", False):
        with chainer.using_config("enable_backprop", True):
            nn_input = chainer.Variable(np.zeros((1, 3, 227, 227), dtype=np.float32))
            nn_output = model(nn_input)
    graph = ChainerConverter().convert([nn_input], [nn_output])
    for backend in ["webgpu", "webgl", "webassembly"]:
        graph_exec_data = generate_descriptor(backend, graph, constant_encoder_name="eightbit")
        graph_exec_data.save(output_dir)
def generate_graph():
    sample_image = np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)
    model = chainer.links.model.vision.resnet.ResNet50Layers()
    prepared_image = chainer.links.model.vision.resnet.prepare(sample_image)
    nn_input = chainer.Variable(np.array([prepared_image], dtype=np.float32))
    if chainer.__version__ >= "2.":
        with chainer.using_config('train', False):
            nn_output = model(nn_input, layers=['prob'])['prob']
    else:
        nn_output = model(nn_input, layers=['prob'], test=True)['prob']
    graph = ChainerConverter().convert([nn_input], [nn_output])
    return model, nn_input, nn_output, graph
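# --- Illustrative usage sketch for generate_graph(), not part of the original
# code. It assumes the same imports as the surrounding examples (os, numpy as
# np, generate_descriptor). Descriptors are saved, and Chainer's own softmax
# output is kept so the browser-side result can be checked against it later.
def export_with_expected_output(out_dir="./output"):
    model, nn_input, nn_output, graph = generate_graph()
    expected = nn_output.data  # (1, 1000) class probabilities computed by Chainer
    os.makedirs(out_dir, exist_ok=True)
    for backend in ["webgpu", "webassembly"]:
        generate_descriptor(backend, graph).save(out_dir)
    np.save(os.path.join(out_dir, "expected_output.npy"), expected)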
def main():
    sys.setrecursionlimit(10000)  # workaround for deep copying large graph
    parser = argparse.ArgumentParser()
    parser.add_argument("--backend", default="webgpu,webgl,webassembly,fallback")
    parser.add_argument("--encoding")
    parser.add_argument('--out', '-o', default='output_chainer',
                        help='Directory to output the graph descriptor')
    args = parser.parse_args()

    os.makedirs(args.out, exist_ok=True)

    sample_image = np.zeros((224, 224, 3), dtype=np.uint8)  # or a real image via PIL.Image.open("")
    link = chainer.links.model.vision.resnet.ResNet50Layers()
    prepared_image = chainer.links.model.vision.resnet.prepare(sample_image)
    nn_input = chainer.Variable(np.array([prepared_image], dtype=np.float32))
    if chainer.__version__ >= "2.":
        with chainer.using_config('train', False):
            nn_output = link(nn_input, layers=['prob'])['prob']
    else:
        nn_output = link(nn_input, layers=['prob'], test=True)['prob']
    graph = ChainerConverter().convert([nn_input], [nn_output])  # type: Graph

    any_backend_failed = False
    last_backend_exception = None
    for backend in args.backend.split(","):
        try:
            graph_exec_data = generate_descriptor(backend, graph, constant_encoder_name=args.encoding)
            graph_exec_data.save(args.out)
        except Exception as ex:
            any_backend_failed = True
            last_backend_exception = ex
            console.error(f"Failed generating descriptor for backend {backend}: {str(ex)}\n")

    if any_backend_failed:
        raise last_backend_exception
def main():
    z_dim = 100
    device = -1  # CPU
    batch_size = 1

    model = Generator(z_dim)
    # npz weights load directly onto the CPU model; a to_gpu()/to_cpu()
    # round-trip is unnecessary and fails on machines without CUDA
    chainer.serializers.load_npz('result-dcgan/gen_snapshot_epoch-200.npz', model)

    x, _ = model.generate_noise(device, batch_size)
    y = model(x)
    graph = ChainerConverter().convert([x], [y])
    exec_info = generate_descriptor("webassembly", graph)
    exec_info.save("./model")
def main():
    # construct model object and load weights
    model = chainer.links.Classifier(CNN())
    chainer.serializers.load_npz('chainer_output/chainer_model.npz', model)

    # run the model with a dummy variable to build the computation graph
    input_variable = chainer.Variable(np.zeros((1, 1, 28, 28), dtype=np.float32))
    prediction_raw_variable = model.predictor(input_variable)  # raw activation before softmax
    prediction_with_softmax_variable = chainer.functions.softmax(prediction_raw_variable)

    # convert graph to intermediate representation
    graph = ChainerConverter().convert([input_variable], [prediction_with_softmax_variable])

    # generate graph descriptor
    backend = 'webgl'
    exec_info = generate_descriptor(backend, graph)
    exec_info.save('webdnn_graph_descriptor')
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--backend', choices=('webgl', 'webassembly'),
                        nargs='+', required=True)  # without required, args.backend is None and the loop below fails
    parser.add_argument('--out', default='model')
    args = parser.parse_args()

    model = SSD(chainercv.links.SSD300(pretrained_model='voc0712'))
    x = chainer.Variable(
        np.empty((1, 3, model.insize, model.insize), dtype=np.float32))
    ys = model(x)
    print(x.shape, '->', ', '.join('{}'.format(y.shape) for y in ys))

    graph = ChainerConverter().convert([x], ys)
    for backend in args.backend:
        print('backend:', backend)
        desc = generate_descriptor(backend, graph)
        desc.save(args.out)
def main():
    parser = argparse.ArgumentParser(description='chainer implementation of pix2pix')
    parser.add_argument('--batchsize', '-b', type=int, default=1,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=200,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--enc-npz', required=True)
    parser.add_argument('--dec-npz', required=True)
    args = parser.parse_args()

    # Set up the trained encoder/decoder pair
    enc = Encoder(in_ch=3)
    dec = Decoder(out_ch=3)
    chainer.serializers.load_npz(args.enc_npz, enc)
    chainer.serializers.load_npz(args.dec_npz, dec)

    # build the computation graph
    dummy_input = np.zeros((1, 3, 256, 256), dtype=np.float32)
    x = chainer.Variable(dummy_input)
    z = enc(x)
    y = dec(z)

    graph = ChainerConverter().convert([x], [y])
    exec_info = generate_descriptor("webassembly", graph)
    exec_info.save(args.out)
def main():
    FLAGS(sys.argv)

    # 0. load dataset
    char_list = [line.strip().split('\t')[0] for line in open(FLAGS.vocab_file)]
    charset_size = len(char_list) + 1

    # 1. build model
    assert FLAGS.model in ('rnnlm',)  # one-element tuple; a bare ('rnnlm') would be a substring test
    if FLAGS.model == 'rnnlm':
        model = Decoder(charset_size=charset_size,
                        hidden_size=FLAGS.hidden_size,
                        n_layers=FLAGS.n_layers,
                        dropout=FLAGS.dropout)

    ins, outs = model.webdnn_anchor()
    graph = ChainerConverter().convert(ins, outs)
    exec_info = generate_descriptor("webgpu", graph)
    exec_info.save("./output")
def main():
    width = 448
    height = 448
    predictor = CocoPredictor()
    image = Variable(np.zeros((1, 3, height, width), dtype=np.float32))
    with chainer.using_config('train', False):
        x, y, w, h, conf, prob, x_offset, y_offset, w_anchor, h_anchor = predictor(image)
    graph = ChainerConverter().convert_from_inout_vars(
        [image, x_offset, y_offset, w_anchor, h_anchor],
        [x, y, w, h, conf, prob])
    for backend in ["webgpu", "webassembly"]:
        desc = webdnn.backend.generate_descriptor(backend, graph, constant_encoder_name="eightbit")
        desc.save("../docs/model")
def main():
    sys.setrecursionlimit(10000)  # workaround for deep copying large graph
    parser = argparse.ArgumentParser()
    # default is CaffeNet from the Caffe examples
    parser.add_argument("caffemodel")
    parser.add_argument("--backend", default="webgpu,webassembly,fallback",
                        help="comma-separated list of backends")
    parser.add_argument("--input_name",
                        help="blob name for input (mandatory)")
    parser.add_argument("--input_shape",
                        help="shape of blobs for inputs (example: '(1,3,224,224)')")
    parser.add_argument("--input_npy",
                        help="npy file containing sample inputs")
    parser.add_argument("--output_names", required=True,
                        help="comma-separated blob names for output (mandatory)")
    parser.add_argument("--out",
                        help="output directory (default: <model>/webdnn_graph_descriptor)")
    parser.add_argument("--encoding", help="name of weight encoder")
    args = parser.parse_args()

    # multiple blob inputs could easily be supported, but the command-line arguments would become complicated
    input_blob, input_filled = parse_input_blob(args)
    output_names = args.output_names.split(",")

    console.stderr("[convert_caffe] Loading caffe model... (usually takes several minutes)")
    link = chainer.links.caffe.CaffeFunction(args.caffemodel)

    console.stderr("[convert_caffe] Generating feedforward graph")
    if chainer.__version__ >= "2.":
        # using_config must be entered as a context manager; calling it bare has no effect
        with chainer.using_config("train", False):
            output_blobs = list(link(inputs={args.input_name: input_blob},
                                     outputs=output_names))  # list of Variable
    else:
        output_blobs = list(link(inputs={args.input_name: input_blob},
                                 outputs=output_names, train=False))  # list of Variable
    chainer_cg = chainer.computational_graph.build_computational_graph(output_blobs)
    converter = ChainerConverter()
    graph = converter.convert(chainer_cg, [input_blob], output_blobs)  # type: Graph

    if args.out:
        output_dir = args.out
    else:
        output_dir = path.join(path.dirname(args.caffemodel), "webdnn_graph_descriptor")
    os.makedirs(output_dir, exist_ok=True)

    if input_filled:
        # save the output of the Caffe network (not required for inference)
        output_arrays = {output_name: output_blob.data
                         for output_name, output_blob in zip(output_names, output_blobs)}
        np.savez(path.join(output_dir, "example_output.npz"), **output_arrays)

    console.stderr("[convert_caffe] Generating descriptors")
    any_backend_failed = False
    for backend in args.backend.split(","):
        try:
            graph_exec_data = generate_descriptor(backend, graph, constant_encoder_name=args.encoding)
            graph_exec_data.save(output_dir)
        except Exception as ex:
            any_backend_failed = True
            console.error(f"[convert_caffe] Failed generating descriptor for backend {backend}: {str(ex)}")

    if any_backend_failed:
        sys.exit(1)
def save_generated_image(image, name):
    combined = combine_images(image)
    save_images(combined, name)


z_size = 128

# model setup
g = generator(512, 512, z_size)
serializers.load_npz("generator.model", g)

x = chainer.Variable(np.zeros((1, z_size), dtype=np.float32))
y = g(x, np.zeros(NUMBER_OF_TAG), 5, 1)

noise = np.random.normal(0, 0.5, [1, z_size]).astype(np.float32)
image = g(noise, np.zeros(NUMBER_OF_TAG), 5, 1)
image = image.data[0]
image = image.transpose(1, 2, 0)
save_images((image * 127.5) + 127.5, "test")

from webdnn.frontend.chainer import ChainerConverter

graph = ChainerConverter().convert([x], [y])

from webdnn.backend import generate_descriptor

exec_info = generate_descriptor("webgpu", graph)  # "webassembly", "webgl", and "fallback" are also available
exec_info.save("./output")
def main(): parser = argparse.ArgumentParser(description='Chainer example: MNIST') parser.add_argument("--model", default="mlp", choices=["mlp", "conv"]) parser.add_argument('--batchsize', '-b', type=int, default=100, help='Number of images in each mini-batch') parser.add_argument('--epoch', '-e', type=int, default=5, help='Number of sweeps over the dataset to train') parser.add_argument('--frequency', '-f', type=int, default=-1, help='Frequency of taking a snapshot') parser.add_argument('--gpu', '-g', type=int, default=-1, help='GPU ID (negative value indicates CPU)') parser.add_argument( '--out', '-o', default='output_chainer', help='Directory to output the graph descriptor and sample test data') parser.add_argument('--resume', '-r', default='', help='Resume the training from snapshot') args = parser.parse_args() print('GPU: {}'.format(args.gpu)) print('# Minibatch-size: {}'.format(args.batchsize)) print('# epoch: {}'.format(args.epoch)) print('') os.makedirs(args.out, exist_ok=True) # Set up a neural network to train # Classifier reports softmax cross entropy loss and accuracy at every # iteration, which will be used by the PrintReport extension below. model = L.Classifier(models[args.model](10)) if args.gpu >= 0: # Make a specified GPU current chainer.cuda.get_device_from_id(args.gpu).use() model.to_gpu() # Copy the model to the GPU # Setup an optimizer optimizer = chainer.optimizers.Adam() optimizer.setup(model) # Load the MNIST dataset train, test = chainer.datasets.get_mnist(ndim=3) train_iter = chainer.iterators.SerialIterator(train, args.batchsize) test_iter = chainer.iterators.SerialIterator(test, args.batchsize, repeat=False, shuffle=False) # Set up a trainer updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu) trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=os.path.join(args.out, 'chainer_model')) # Evaluate the model with the test dataset for each epoch trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu)) # Dump a computational graph from 'loss' variable at the first iteration # The "main" refers to the target link of the "main" optimizer. trainer.extend(extensions.dump_graph('main/loss')) # Take a snapshot for each specified epoch frequency = args.epoch if args.frequency == -1 else max(1, args.frequency) trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch')) # Write a log of evaluation statistics for each epoch trainer.extend(extensions.LogReport()) # Save two plot images to the result dir if extensions.PlotReport.available(): trainer.extend( extensions.PlotReport(['main/loss', 'validation/main/loss'], 'epoch', file_name='loss.png')) trainer.extend( extensions.PlotReport( ['main/accuracy', 'validation/main/accuracy'], 'epoch', file_name='accuracy.png')) # Print selected entries of the log to stdout # Here "main" refers to the target link of the "main" optimizer again, and # "validation" refers to the default name of the Evaluator extension. # Entries other than 'epoch' are reported by the Classifier link, called by # either the updater or the evaluator. 
trainer.extend( extensions.PrintReport([ 'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy', 'validation/main/accuracy', 'elapsed_time' ])) # Print a progress bar to stdout trainer.extend(extensions.ProgressBar()) if args.resume: # Resume from a snapshot chainer.serializers.load_npz(args.resume, trainer) # Run the training trainer.run() # conversion print('Transpiling model to WebDNN graph descriptor') example_input = numpy.expand_dims( train[0][0], axis=0) # example input (anything ok, (batch_size, 784)) x = chainer.Variable(example_input) y = F.softmax(model.predictor(x)) # run model graph = ChainerConverter().convert_from_inout_vars( [x], [y]) # convert graph to intermediate representation for backend in ["webgpu", "webassembly", "fallback"]: try: exec_info = generate_descriptor(backend, graph) exec_info.save(args.out) except Exception as ex: print( f"Failed generating descriptor for backend {backend}: {str(ex)}\n" ) else: print(f"Backend {backend} ok\n") print('Exporting test samples (for demo purpose)') test_samples_json = [] for i in range(10): image, label = test[i] test_samples_json.append({ 'x': image.flatten().tolist(), 'y': int(label) }) with open(os.path.join(args.out, 'test_samples.json'), 'w') as f: json.dump(test_samples_json, f)
def main(): parser = argparse.ArgumentParser(description='Chainer example: MNIST') parser.add_argument("--model", default="fc", choices=["fc", "conv"]) parser.add_argument('--gpu', '-g', type=int, default=-1, help='GPU ID (negative value indicates CPU)') parser.add_argument( '--out', '-o', default='output_chainer', help='Directory to output the graph descriptor and sample test data') parser.add_argument("--backend", default="webgpu,webgl,webassembly,fallback") args = parser.parse_args() output_dir = os.path.join(args.out, f"./chainer_model") os.makedirs(output_dir, exist_ok=True) # Set up a neural network to train # Classifier reports softmax cross entropy loss and accuracy at every # iteration, which will be used by the PrintReport extension below. model = L.Classifier(models[args.model](10)) if args.gpu >= 0: # Make a specified GPU current chainer.cuda.get_device_from_id(args.gpu).use() model.to_gpu() # Copy the model to the GPU # Setup an optimizer optimizer = chainer.optimizers.Adam() optimizer.setup(model) # Load the MNIST dataset train, test = chainer.datasets.get_mnist(ndim=3) train_iter = chainer.iterators.SerialIterator(train, 128) test_iter = chainer.iterators.SerialIterator(test, 128, repeat=False, shuffle=False) # Set up a trainer updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu) trainer = training.Trainer(updater, (2, 'epoch'), out=output_dir) # Evaluate the model with the test dataset for each epoch trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu)) # Take a snapshot for each specified epoch trainer.extend(extensions.snapshot(filename=args.model), trigger=(2, 'epoch')) # Write a log of evaluation statistics for each epoch trainer.extend(extensions.LogReport()) # Print selected entries of the log to stdout # Here "main" refers to the target link of the "main" optimizer again, and # "validation" refers to the default name of the Evaluator extension. # Entries other than 'epoch' are reported by the Classifier link, called by # either the updater or the evaluator. trainer.extend( extensions.PrintReport([ 'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy', 'validation/main/accuracy', 'elapsed_time' ])) # Print a progress bar to stdout trainer.extend(extensions.ProgressBar()) snapshot_path = os.path.join(output_dir, args.model) if os.path.exists(snapshot_path): # Resume from a snapshot chainer.serializers.load_npz(snapshot_path, trainer) else: # Run the training trainer.run() # conversion print('Transpiling model to WebDNN graph descriptor') if args.gpu >= 0: model.to_cpu() example_input = numpy.expand_dims( train[0][0], axis=0) # example input (anything ok, (batch_size, 784)) x = chainer.Variable(example_input) y = model.predictor(x) graph = ChainerConverter().convert( [x], [y]) # convert graph to intermediate representation for backend in args.backend.split(","): exec_info = generate_descriptor(backend, graph) exec_info.save(args.out) print('Exporting test samples (for demo purpose)') test_samples_json = [] for i in range(10): image, label = test[i] test_samples_json.append({ 'x': image.flatten().tolist(), 'y': int(label) }) with open(os.path.join(args.out, 'test_samples.json'), 'w') as f: json.dump(test_samples_json, f)
def generate_graph(model_type, output_dir):
    # Set up a neural network to train.
    # Classifier reports softmax cross entropy loss and accuracy at every
    # iteration, which will be used by the PrintReport extension below.
    model = L.Classifier(models[model_type](10))

    # Set up an optimizer
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    # Load the MNIST dataset
    train, test = chainer.datasets.get_mnist(ndim=3)
    train_iter = chainer.iterators.SerialIterator(train, 128)
    test_iter = chainer.iterators.SerialIterator(test, 128, repeat=False, shuffle=False)

    # Set up a trainer
    updater = training.StandardUpdater(train_iter, optimizer, device=-1)
    trainer = training.Trainer(updater, (2, 'epoch'), out=output_dir)

    # Evaluate the model with the test dataset for each epoch
    trainer.extend(extensions.Evaluator(test_iter, model, device=-1))

    # Take a snapshot at each specified epoch
    trainer.extend(extensions.snapshot(filename=model_type), trigger=(2, 'epoch'))

    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport())

    # Print selected entries of the log to stdout.
    # "main" refers to the target link of the "main" optimizer, and
    # "validation" refers to the default name of the Evaluator extension.
    # Entries other than 'epoch' are reported by the Classifier link, called by
    # either the updater or the evaluator.
    trainer.extend(extensions.PrintReport([
        'epoch', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy', 'elapsed_time'
    ]))

    # Print a progress bar to stdout
    trainer.extend(extensions.ProgressBar())

    snapshot_path = os.path.join(output_dir, model_type)
    if os.path.exists(snapshot_path):
        # Resume from a snapshot
        chainer.serializers.load_npz(snapshot_path, trainer)
    else:
        # Run the training
        trainer.run()

    example_input = numpy.expand_dims(train[0][0], axis=0)  # example input; any values work, shape (1, 1, 28, 28)
    x = chainer.Variable(example_input)
    y = model.predictor(x)
    graph = ChainerConverter().convert([x], [y])  # convert graph to intermediate representation
    return model, test, graph
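# --- Illustrative consumer of generate_graph(), not part of the original code;
# it mirrors the descriptor-generation and test-sample export steps of the
# main() functions above. The function name and output layout are assumptions,
# and it relies on os, json, and generate_descriptor being imported as in the
# surrounding examples.
def export_all(model_type="conv", out_dir="output_chainer"):
    model, test, graph = generate_graph(model_type, os.path.join(out_dir, "chainer_model"))
    for backend in ["webgpu", "webassembly", "fallback"]:
        generate_descriptor(backend, graph).save(out_dir)
    test_samples_json = []
    for i in range(10):
        image, label = test[i]
        test_samples_json.append({'x': image.flatten().tolist(), 'y': int(label)})
    with open(os.path.join(out_dir, 'test_samples.json'), 'w') as f:
        json.dump(test_samples_json, f)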
print(f"model: {args.model}") print(f"backend: {args.backend}") print(f"encoding: {args.encoding}") # Load chainer pre-trained model model = FastStyleNet() model_path = NSTModelPath[args.model].value if not path.exists(model_path): raise FileNotFoundError(f"Model data ({model_path}) is not found. Please clone " + "'https://github.com/gafr/chainer-fast-neuralstyle-models' under the resource directory. " + "Clone command takes about a few minute, the repository size is about 200MB.") chainer.serializers.load_npz(model_path, model) # Execute forward propagation to construct computation graph if chainer.__version__ >= "2.": with chainer.using_config("train", False): # fixes batch normalization x = chainer.Variable(np.zeros((1, 3, 144, 192), dtype=np.float32)) y = model(x) else: x = chainer.Variable(np.zeros((1, 3, 144, 192), dtype=np.float32)) y = model(x) # Convert chainer computation graph into IR graph = ChainerConverter().convert_from_inout_vars([x], [y]) # Generate graph descriptor generate_descriptor(args.backend, graph, constant_encoder_name=args.encoding).save(path.join(path.dirname(__file__), "./output"))