from pyeddl import eddl
from pyeddl.tensor import Tensor


def resnet_block(l0, nf, bn, reps, downsample):
    # Bottleneck residual block: 1x1 -> 3x3 -> 1x1 convolutions, with an
    # optional stride-2 entry and a projection shortcut on the first rep.
    for i in range(reps):
        stri = 2 if (downsample and i == 0) else 1
        l1 = eddl.GlorotUniform(
            eddl.Conv(l0, nf, [1, 1], [stri, stri], "same", False))
        if bn:
            l1 = eddl.BatchNormalization(l1, 0.99, 0.001, True, "")
        l1 = eddl.ReLu(l1)
        l1 = eddl.GlorotUniform(
            eddl.Conv(l1, nf, [3, 3], [1, 1], "same", False))
        if bn:
            l1 = eddl.BatchNormalization(l1, 0.99, 0.001, True, "")
        l1 = eddl.ReLu(l1)
        l1 = eddl.GlorotUniform(
            eddl.Conv(l1, nf * 4, [1, 1], [1, 1], "same", False))
        if bn:
            l1 = eddl.BatchNormalization(l1, 0.99, 0.001, True, "")
        if i == 0:
            # Project the shortcut so channels and stride match the branch.
            l0 = eddl.GlorotUniform(
                eddl.Conv(l0, nf * 4, [1, 1], [stri, stri], "same", False))
        l0 = eddl.Add([l0, l1])
        l0 = eddl.ReLu(l0)
    return l0
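# Usage sketch for resnet_block: a minimal CIFAR-10-sized classifier.
# The input shape, stage widths, optimizer, and the _resnet_block_demo
# name are assumptions for illustration, not part of the original example.
def _resnet_block_demo():
    in_ = eddl.Input([3, 32, 32])
    l = eddl.ReLu(eddl.GlorotUniform(
        eddl.Conv(in_, 64, [3, 3], [1, 1], "same", False)))
    l = resnet_block(l, 64, True, 2, False)   # stage 1: keep resolution
    l = resnet_block(l, 128, True, 2, True)   # stage 2: stride-2 entry
    l = resnet_block(l, 256, True, 2, True)   # stage 3: stride-2 entry
    l = eddl.Reshape(l, [-1])
    out = eddl.Softmax(eddl.Dense(l, 10))
    net = eddl.Model([in_], [out])
    eddl.build(net, eddl.sgd(0.01, 0.9),
               ["soft_cross_entropy"], ["categorical_accuracy"],
               eddl.CS_CPU())
    return net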
def main(args):
    eddl.download_mnist()

    num_classes = 10

    in_ = eddl.Input([784])
    layer = in_
    # Augmentation layers need a spatial shape, so reshape to CHW,
    # crop-scale, then flatten back for the dense stack.
    layer = eddl.Reshape(layer, [1, 28, 28])
    layer = eddl.RandomCropScale(layer, [0.9, 1.0])
    layer = eddl.Reshape(layer, [-1])
    layer = eddl.ReLu(
        eddl.GaussianNoise(
            eddl.BatchNormalization(eddl.Dense(layer, 1024), True), 0.3))
    layer = eddl.ReLu(
        eddl.GaussianNoise(
            eddl.BatchNormalization(eddl.Dense(layer, 1024), True), 0.3))
    layer = eddl.ReLu(
        eddl.GaussianNoise(
            eddl.BatchNormalization(eddl.Dense(layer, 1024), True), 0.3))
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])

    eddl.build(
        net,
        eddl.sgd(0.01, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf")

    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    if args.small:
        x_train = x_train.select([":6000"])
        y_train = y_train.select([":6000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    x_train.div_(255.0)
    x_test.div_(255.0)

    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)

    # LR annealing: retrain with progressively smaller learning rates.
    if args.epochs < 4:
        return
    eddl.setlr(net, [0.005, 0.9])
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs // 2)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    eddl.setlr(net, [0.001, 0.9])
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs // 2)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    eddl.setlr(net, [0.0001, 0.9])
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs // 4)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def Block3_2(layer, filters):
    layer = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(layer, filters, [3, 3], [1, 1]), True
    ))
    layer = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(layer, filters, [3, 3], [1, 1]), True
    ))
    return layer
def DoubleConv(x, out_channels, mid_channels=None):
    # Two Conv-BN-ReLU stages; the first can use a narrower "mid" width.
    if not mid_channels:
        mid_channels = out_channels
    x = eddl.Conv2D(x, mid_channels, kernel_size=[3, 3])
    x = eddl.BatchNormalization(x, True)
    x = eddl.ReLu(x)
    x = eddl.Conv2D(x, out_channels, kernel_size=[3, 3])
    x = eddl.BatchNormalization(x, True)
    x = eddl.ReLu(x)
    return x
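# Usage sketch for DoubleConv: a tiny encoder alternating DoubleConv and
# pooling. The input shape, widths, and the _double_conv_demo name are
# assumptions for illustration.
def _double_conv_demo():
    in_ = eddl.Input([3, 128, 128])
    x = DoubleConv(in_, 64)
    x = eddl.MaxPool2D(x, [2, 2])
    x = DoubleConv(x, 128)
    return eddl.Model([in_], [x])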
def UNetWithPadding(layer):
    # U-Net encoder/decoder with "same" padding; skip connections merge
    # via Concat or Add depending on the USE_CONCAT global.
    x = layer
    depth = 32

    # Encoder
    x = LBC(x, depth, [3, 3], [1, 1], "same")
    x = LBC(x, depth, [3, 3], [1, 1], "same")
    x2 = eddl.MaxPool(x, [2, 2], [2, 2])
    x2 = LBC(x2, 2*depth, [3, 3], [1, 1], "same")
    x2 = LBC(x2, 2*depth, [3, 3], [1, 1], "same")
    x3 = eddl.MaxPool(x2, [2, 2], [2, 2])
    x3 = LBC(x3, 4*depth, [3, 3], [1, 1], "same")
    x3 = LBC(x3, 4*depth, [3, 3], [1, 1], "same")
    x4 = eddl.MaxPool(x3, [2, 2], [2, 2])
    x4 = LBC(x4, 8*depth, [3, 3], [1, 1], "same")
    x4 = LBC(x4, 8*depth, [3, 3], [1, 1], "same")
    x5 = eddl.MaxPool(x4, [2, 2], [2, 2])
    x5 = LBC(x5, 8*depth, [3, 3], [1, 1], "same")
    x5 = LBC(x5, 8*depth, [3, 3], [1, 1], "same")

    # Decoder
    x5 = eddl.BatchNormalization(eddl.Conv(
        eddl.UpSampling(x5, [2, 2]), 8*depth, [3, 3], [1, 1], "same"
    ), True)
    x4 = eddl.Concat([x4, x5]) if USE_CONCAT else eddl.Add([x4, x5])
    x4 = LBC(x4, 8*depth, [3, 3], [1, 1], "same")
    x4 = LBC(x4, 8*depth, [3, 3], [1, 1], "same")
    x4 = eddl.BatchNormalization(eddl.Conv(
        eddl.UpSampling(x4, [2, 2]), 4*depth, [3, 3], [1, 1], "same"
    ), True)
    x3 = eddl.Concat([x3, x4]) if USE_CONCAT else eddl.Add([x3, x4])
    x3 = LBC(x3, 4*depth, [3, 3], [1, 1], "same")
    x3 = LBC(x3, 4*depth, [3, 3], [1, 1], "same")
    x3 = eddl.BatchNormalization(eddl.Conv(
        eddl.UpSampling(x3, [2, 2]), 2*depth, [3, 3], [1, 1], "same"
    ), True)
    x2 = eddl.Concat([x2, x3]) if USE_CONCAT else eddl.Add([x2, x3])
    x2 = LBC(x2, 2*depth, [3, 3], [1, 1], "same")
    x2 = LBC(x2, 2*depth, [3, 3], [1, 1], "same")
    x2 = eddl.BatchNormalization(eddl.Conv(
        eddl.UpSampling(x2, [2, 2]), depth, [3, 3], [1, 1], "same"
    ), True)
    x = eddl.Concat([x, x2]) if USE_CONCAT else eddl.Add([x, x2])
    x = LBC(x, depth, [3, 3], [1, 1], "same")
    x = LBC(x, depth, [3, 3], [1, 1], "same")
    x = eddl.BatchNormalization(eddl.Conv(x, 1, [1, 1]), True)
    return x
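# Usage sketch for UNetWithPadding: single-channel mask prediction.
# The 3x256x256 input, mse loss/metric, learning rate, and the
# _unet_demo name are assumptions (the side length just has to be
# divisible by 16 to survive the four pool/upsample stages); LBC and
# USE_CONCAT must be defined as elsewhere in this section.
def _unet_demo():
    in_ = eddl.Input([3, 256, 256])
    out = eddl.Sigmoid(UNetWithPadding(in_))
    net = eddl.Model([in_], [out])
    eddl.build(net, eddl.adam(0.00001), ["mse"], ["mse"], eddl.CS_CPU())
    eddl.summary(net)
    return net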
def main(args):
    eddl.download_mnist()

    num_classes = 10

    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.Reshape(layer, [-1])
    layer = eddl.ReLu(eddl.Dense(layer, 1024))
    layer = eddl.BatchNormalization(layer, True)
    layer = eddl.ReLu(eddl.Dense(layer, 1024))
    layer = eddl.BatchNormalization(layer, True)
    layer = eddl.ReLu(eddl.Dense(layer, 1024))
    layer = eddl.BatchNormalization(layer, True)
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])

    eddl.build(
        net,
        eddl.rmsprop(0.01),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem),
        True  # initialize weights to random values
    )
    eddl.summary(net)

    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    if args.small:
        x_train = x_train.select([":6000"])
        y_train = y_train.select([":6000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    x_train.div_(255.0)
    x_test.div_(255.0)

    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    eddl.save_net_to_onnx_file(net, args.output)
    print("saved net to", args.output)
    print("All done")
def defblock(l, bn, nf, reps, initializer):
    # VGG-style block: "reps" 3x3 convolutions followed by 2x2 max pooling.
    for i in range(reps):
        l = initializer(eddl.Conv(l, nf, [3, 3]))
        if bn:
            l = eddl.BatchNormalization(l, 0.99, 0.001, True, "")
        l = eddl.ReLu(l)
    l = eddl.MaxPool(l, [2, 2], [2, 2], "valid")
    return l
def main(args):
    eddl.download_mnist()

    num_classes = 10

    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.BatchNormalization(
        eddl.Activation(eddl.L2(eddl.Dense(layer, 1024), 0.0001), "relu"),
        True
    )
    layer = eddl.BatchNormalization(
        eddl.Activation(eddl.L2(eddl.Dense(layer, 1024), 0.0001), "relu"),
        True
    )
    layer = eddl.BatchNormalization(
        eddl.Activation(eddl.L2(eddl.Dense(layer, 1024), 0.0001), "relu"),
        True
    )
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])

    # CategoricalAccuracy is assumed to be imported or defined elsewhere
    # (e.g., a pyeddl Metric); the model is built through the low-level
    # Net.build method here rather than eddl.build.
    acc = CategoricalAccuracy()
    net.build(
        eddl.sgd(0.01, 0.9),
        [eddl.getLoss("soft_cross_entropy")],
        [acc],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf")

    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    x_train.div_(255.0)
    x_test.div_(255.0)

    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def main(args):
    eddl.download_cifar10()

    num_classes = 10

    in_ = eddl.Input([3, 32, 32])
    layer = in_
    layer = eddl.RandomCropScale(layer, [0.8, 1.0])
    layer = eddl.RandomFlip(layer, 1)
    layer = eddl.MaxPool(Block3_2(layer, 64))
    layer = eddl.MaxPool(Block3_2(layer, 128))
    layer = eddl.MaxPool(Block1(Block3_2(layer, 256), 256))
    layer = eddl.MaxPool(Block1(Block3_2(layer, 512), 512))
    layer = eddl.MaxPool(Block1(Block3_2(layer, 512), 512))
    layer = eddl.Reshape(layer, [-1])
    layer = eddl.ReLu(eddl.BatchNormalization(eddl.Dense(layer, 512), True))
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])

    eddl.build(
        net,
        eddl.sgd(0.01, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf", "TB")

    x_train = Tensor.load("cifar_trX.bin")
    y_train = Tensor.load("cifar_trY.bin")
    x_train.div_(255.0)
    x_test = Tensor.load("cifar_tsX.bin")
    y_test = Tensor.load("cifar_tsY.bin")
    x_test.div_(255.0)
    if args.small:
        x_train = x_train.select([":5000"])
        y_train = y_train.select([":5000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])

    for i in range(args.epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
        eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def main(args):
    eddl.download_cifar10()

    num_classes = 10

    in_ = eddl.Input([3, 32, 32])
    layer = in_
    layer = eddl.RandomHorizontalFlip(layer)
    layer = eddl.RandomCropScale(layer, [0.8, 1.0])
    layer = eddl.RandomCutout(layer, [0.1, 0.5], [0.1, 0.5])
    layer = eddl.MaxPool(eddl.ReLu(eddl.BatchNormalization(
        eddl.HeUniform(eddl.Conv(layer, 32, [3, 3], [1, 1], "same", False)),
        True)), [2, 2])
    layer = eddl.MaxPool(eddl.ReLu(eddl.BatchNormalization(
        eddl.HeUniform(eddl.Conv(layer, 64, [3, 3], [1, 1], "same", False)),
        True)), [2, 2])
    layer = eddl.MaxPool(eddl.ReLu(eddl.BatchNormalization(
        eddl.HeUniform(eddl.Conv(layer, 128, [3, 3], [1, 1], "same", False)),
        True)), [2, 2])
    layer = eddl.MaxPool(eddl.ReLu(eddl.BatchNormalization(
        eddl.HeUniform(eddl.Conv(layer, 256, [3, 3], [1, 1], "same", False)),
        True)), [2, 2])
    layer = eddl.Reshape(layer, [-1])
    layer = eddl.Activation(eddl.BatchNormalization(
        eddl.Dense(layer, 128), True
    ), "relu")
    out = eddl.Softmax(eddl.BatchNormalization(
        eddl.Dense(layer, num_classes), True
    ))
    net = eddl.Model([in_], [out])

    eddl.build(
        net,
        eddl.adam(0.001),
        ["softmax_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf")

    x_train = Tensor.load("cifar_trX.bin")
    y_train = Tensor.load("cifar_trY.bin")
    x_train.div_(255.0)
    x_test = Tensor.load("cifar_tsX.bin")
    y_test = Tensor.load("cifar_tsY.bin")
    x_test.div_(255.0)
    if args.small:
        x_train = x_train.select([":5000"])
        y_train = y_train.select([":5000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])

    for i in range(args.epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
        eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)

    # Drop the learning rate and continue training.
    eddl.setlr(net, [0.0001])
    for i in range(args.epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
        eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def BG(layer):
    return eddl.GaussianNoise(eddl.BatchNormalization(layer, True), 0.3)
# Script-level driver. The snippet loads CIFAR-10, so num_classes = 10;
# the "gpu" flag is not defined in the original snippet and is stubbed
# here so the code runs as-is.
import sys

num_classes = 10
gpu = False

bn = int(sys.argv[1]) == 1
initializer = eddl.GlorotUniform if bn else eddl.HeUniform

inp = eddl.Input([3, 32, 32])
l = inp
l = defblock(l, bn, 64, 2, initializer)
l = defblock(l, bn, 128, 2, initializer)
l = defblock(l, bn, 256, 4, initializer)
l = defblock(l, bn, 512, 4, initializer)
l = defblock(l, bn, 512, 4, initializer)
l = eddl.Flatten(l)
for i in range(2):
    l = initializer(eddl.Dense(l, 4096))
    if bn:
        l = eddl.BatchNormalization(l, 0.99, 0.001, True, "")
    l = eddl.ReLu(l)
out = eddl.Softmax(initializer(eddl.Dense(l, num_classes)))
net = eddl.Model([inp], [out])
eddl.plot(net, "model.pdf")
eddl.build(net, eddl.adam(0.00001), ["soft_cross_entropy"],
           ["categorical_accuracy"],
           eddl.CS_GPU() if gpu else eddl.CS_CPU())
eddl.summary(net)

x_train = Tensor.load("cifar_trX.bin")
y_train = Tensor.load("cifar_trY.bin")
def SegNetBN(x, num_classes):
    # Encoder: Conv-BN-ReLU stages separated by five 2x2 max poolings,
    # so input sides must be divisible by 32.
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 64, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 64, [3, 3], [1, 1], "same"), True))
    x = eddl.MaxPool(x, [2, 2], [2, 2])
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 128, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 128, [3, 3], [1, 1], "same"), True))
    x = eddl.MaxPool(x, [2, 2], [2, 2])
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 256, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 256, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 256, [3, 3], [1, 1], "same"), True))
    x = eddl.MaxPool(x, [2, 2], [2, 2])
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.MaxPool(x, [2, 2], [2, 2])
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.MaxPool(x, [2, 2], [2, 2])

    # Decoder: mirror the encoder with 2x2 upsampling between stages.
    x = eddl.UpSampling(x, [2, 2])
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.UpSampling(x, [2, 2])
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 256, [3, 3], [1, 1], "same"), True))
    x = eddl.UpSampling(x, [2, 2])
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 256, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 256, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 128, [3, 3], [1, 1], "same"), True))
    x = eddl.UpSampling(x, [2, 2])
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 128, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 64, [3, 3], [1, 1], "same"), True))
    x = eddl.UpSampling(x, [2, 2])
    x = eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(x, 64, [3, 3], [1, 1], "same"), True))
    x = eddl.Conv(x, num_classes, [3, 3], [1, 1], "same")
    return x
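# Usage sketch for SegNetBN: the 3x224x224 input, single output class,
# loss/metric choice, and the _segnet_demo name are assumptions for
# illustration (224 is divisible by 32, which the five pool/upsample
# stages require).
def _segnet_demo():
    in_ = eddl.Input([3, 224, 224])
    out = eddl.Sigmoid(SegNetBN(in_, 1))
    net = eddl.Model([in_], [out])
    eddl.build(net, eddl.adam(0.0001), ["mse"], ["mse"], eddl.CS_CPU())
    return net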
def Normalization(layer):
    return eddl.BatchNormalization(layer, True)
def Block1(layer, filters):
    return eddl.ReLu(eddl.BatchNormalization(
        eddl.Conv(layer, filters, [1, 1], [1, 1]), True
    ))
def LBC(layer, *args, **kwargs):
    return eddl.LeakyReLu(eddl.BatchNormalization(eddl.Conv(
        layer, *args, **kwargs
    ), True))