def resnet_block(l0, nf, bn, reps, downsample):
    """Stack `reps` bottleneck residual blocks (1x1 -> 3x3 -> 1x1 convs).

    Each block expands to ``nf * 4`` channels; if `downsample` is true the
    first block uses stride 2.  `bn` toggles BatchNormalization after each
    conv.  Returns the output layer of the stage.
    """
    for i in range(reps):
        # only the first block of the stage downsamples
        stri = 2 if (downsample and i == 0) else 1
        l1 = eddl.GlorotUniform(
            eddl.Conv(l0, nf, [1, 1], [stri, stri], "same", False))
        if (bn):
            l1 = eddl.BatchNormalization(l1, 0.99, 0.001, True, "")
        l1 = eddl.ReLu(l1)
        l1 = eddl.GlorotUniform(
            eddl.Conv(l1, nf, [3, 3], [1, 1], "same", False))
        if (bn):
            l1 = eddl.BatchNormalization(l1, 0.99, 0.001, True, "")
        l1 = eddl.ReLu(l1)
        l1 = eddl.GlorotUniform(
            eddl.Conv(l1, nf * 4, [1, 1], [1, 1], "same", False))
        if (bn):
            l1 = eddl.BatchNormalization(l1, 0.99, 0.001, True, "")
        if (i == 0):
            # project the shortcut to nf*4 channels (and stride) so it can be added
            l0 = eddl.GlorotUniform(
                eddl.Conv(l0, nf * 4, [1, 1], [stri, stri], "same", False))
        l0 = eddl.Add([l0, l1])
        l0 = eddl.ReLu(l0)
    return l0
def main(args):
    """Train an MLP with data augmentation on MNIST, then anneal the LR.

    Expects `args` with: gpu, mem, small, batch_size, epochs.
    """
    eddl.download_mnist()
    num_classes = 10
    in_ = eddl.Input([784])
    layer = in_
    # reshape to an image so crop-scale augmentation can be applied, then flatten back
    layer = eddl.Reshape(layer, [1, 28, 28])
    layer = eddl.RandomCropScale(layer, [0.9, 1.0])
    layer = eddl.Reshape(layer, [-1])
    layer = eddl.ReLu(
        eddl.GaussianNoise(
            eddl.BatchNormalization(eddl.Dense(layer, 1024), True), 0.3))
    layer = eddl.ReLu(
        eddl.GaussianNoise(
            eddl.BatchNormalization(eddl.Dense(layer, 1024), True), 0.3))
    layer = eddl.ReLu(
        eddl.GaussianNoise(
            eddl.BatchNormalization(eddl.Dense(layer, 1024), True), 0.3))
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.sgd(0.01, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem))
    eddl.summary(net)
    eddl.plot(net, "model.pdf")
    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    if args.small:
        x_train = x_train.select([":6000"])
        y_train = y_train.select([":6000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    # scale pixel values to [0, 1]
    x_train.div_(255.0)
    x_test.div_(255.0)
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    # LR annealing
    if args.epochs < 4:
        return
    eddl.setlr(net, [0.005, 0.9])
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs // 2)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    eddl.setlr(net, [0.001, 0.9])
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs // 2)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    eddl.setlr(net, [0.0001, 0.9])
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs // 4)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def Block3_2(layer, filters):
    """Two stacked 3x3 conv -> Normalization -> ReLU stages.

    Stride 1, "same" padding, no bias on either conv.
    """
    x = layer
    for _ in range(2):
        conv = eddl.Conv(x, filters, [3, 3], [1, 1], "same", False)
        x = eddl.ReLu(Normalization(conv))
    return x
def Block3_2(layer, filters):
    """Two stacked 3x3 conv -> batch-norm -> ReLU stages (stride 1)."""
    x = layer
    for _ in range(2):
        conv = eddl.Conv(x, filters, [3, 3], [1, 1])
        x = eddl.ReLu(eddl.BatchNormalization(conv, True))
    return x
def LeNet(in_layer, num_classes):
    """Classic LeNet: two conv/pool stages, one hidden dense layer, softmax out."""
    net = eddl.Conv(in_layer, 20, [5, 5])
    net = eddl.MaxPool(eddl.ReLu(net), [2, 2], [2, 2])
    net = eddl.Conv(net, 50, [5, 5])
    net = eddl.MaxPool(eddl.ReLu(net), [2, 2], [2, 2])
    net = eddl.Reshape(net, [-1])
    net = eddl.ReLu(eddl.Dense(net, 500))
    return eddl.Softmax(eddl.Dense(net, num_classes))
def main(args):
    """Train a small conv net on CIFAR-10, evaluating after every epoch.

    Expects `args` with: gpu, mem, small, batch_size, epochs.
    Uses the sibling `Normalization` helper after each conv.
    """
    eddl.download_cifar10()
    num_classes = 10
    in_ = eddl.Input([3, 32, 32])
    layer = in_
    layer = eddl.MaxPool(eddl.ReLu(Normalization(
        eddl.Conv(layer, 32, [3, 3], [1, 1])
    )), [2, 2])
    layer = eddl.MaxPool(eddl.ReLu(Normalization(
        eddl.Conv(layer, 64, [3, 3], [1, 1])
    )), [2, 2])
    layer = eddl.MaxPool(eddl.ReLu(Normalization(
        eddl.Conv(layer, 128, [3, 3], [1, 1])
    )), [2, 2])
    layer = eddl.MaxPool(eddl.ReLu(Normalization(
        eddl.Conv(layer, 256, [3, 3], [1, 1])
    )), [2, 2])
    layer = eddl.GlobalMaxPool(layer)
    layer = eddl.Flatten(layer)
    layer = eddl.Activation(eddl.Dense(layer, 128), "relu")
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.adam(0.001),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf")
    x_train = Tensor.load("cifar_trX.bin")
    y_train = Tensor.load("cifar_trY.bin")
    x_train.div_(255.0)  # scale pixels to [0, 1]
    x_test = Tensor.load("cifar_tsX.bin")
    y_test = Tensor.load("cifar_tsY.bin")
    x_test.div_(255.0)
    if args.small:
        x_train = x_train.select([":5000"])
        y_train = y_train.select([":5000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    # one epoch at a time so we can evaluate between epochs
    for i in range(args.epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
        eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def ResBlock(layer, filters, nconv, half):
    """Residual block of `nconv` 3x3 convs wrapped with the sibling `BG` helper.

    If `half`, the first conv uses stride 2 and the shortcut is projected with
    a strided 1x1 conv; otherwise the input is added back unchanged.
    """
    in_ = layer
    strides = [2, 2] if half else [1, 1]
    layer = eddl.ReLu(BG(eddl.Conv(layer, filters, [3, 3], strides)))
    for i in range(nconv - 1):
        layer = eddl.ReLu(BG(eddl.Conv(layer, filters, [3, 3], [1, 1])))
    if (half):
        # NOTE(review): two-argument eddl.Add here, while other blocks in this
        # file use eddl.Add([a, b]) with a list -- confirm both forms are valid.
        return eddl.Add(BG(eddl.Conv(in_, filters, [1, 1], [2, 2])), layer)
    else:
        return eddl.Add(layer, in_)
def DoubleConv(x, out_channels, mid_channels=None):
    """Apply (3x3 Conv2D -> batch-norm -> ReLU) twice.

    The first conv produces `mid_channels` (defaults to `out_channels`),
    the second produces `out_channels`.
    """
    mid = mid_channels if mid_channels else out_channels
    for channels in (mid, out_channels):
        x = eddl.Conv2D(x, channels, kernel_size=[3, 3])
        x = eddl.BatchNormalization(x, True)
        x = eddl.ReLu(x)
    return x
def main(args):
    """Train a ResNet-style CNN with data augmentation on CIFAR-10.

    Expects `args` with: gpu, mem, small, batch_size, epochs.
    Relies on the sibling `BG` and `ResBlock` helpers.
    """
    eddl.download_cifar10()
    num_classes = 10
    in_ = eddl.Input([3, 32, 32])
    layer = in_
    # on-graph data augmentation
    layer = eddl.RandomCropScale(layer, [0.8, 1.0])
    layer = eddl.RandomFlip(layer, 1)
    layer = eddl.ReLu(BG(eddl.Conv(layer, 64, [3, 3], [1, 1])))
    layer = eddl.Pad(layer, [0, 1, 1, 0])
    layer = ResBlock(layer, 64, 2, True)
    layer = ResBlock(layer, 64, 2, False)
    layer = ResBlock(layer, 128, 2, True)
    layer = ResBlock(layer, 128, 2, False)
    layer = ResBlock(layer, 256, 2, True)
    layer = ResBlock(layer, 256, 2, False)
    layer = ResBlock(layer, 256, 2, True)
    layer = ResBlock(layer, 256, 2, False)
    layer = eddl.Reshape(layer, [-1])
    layer = eddl.ReLu(BG(eddl.Dense(layer, 512)))
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.sgd(0.01, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf", "TB")
    x_train = Tensor.load("cifar_trX.bin")
    y_train = Tensor.load("cifar_trY.bin")
    x_train.div_(255.0)  # scale pixels to [0, 1]
    x_test = Tensor.load("cifar_tsX.bin")
    y_test = Tensor.load("cifar_tsY.bin")
    x_test.div_(255.0)
    if args.small:
        x_train = x_train.select([":5000"])
        y_train = y_train.select([":5000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    for i in range(args.epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
        eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def tissue_detector_DNN():
    """Build a 3-layer MLP (50 ReLU units each) over a 3-value input,
    with a 2-class softmax output.  Returns the unbuilt eddl Model."""
    in_ = eddl.Input([3])
    x = in_
    for _ in range(3):
        x = eddl.ReLu(eddl.Dense(x, 50))
    out = eddl.Softmax(eddl.Dense(x, 2))
    return eddl.Model([in_], [out])
def main(args):
    """Train a conv autoencoder-style net on MNIST with user-defined losses.

    The model is built with no declared outputs/losses; instead three custom
    losses (mse, image dice, pixel dice) are created via `eddl.newloss` and a
    manual forward/optimize/update loop is run.  Only the pixel-dice loss
    drives the optimization; the others are just monitored.
    Expects `args` with: gpu, mem, small, batch_size, epochs.
    """
    eddl.download_mnist()
    in_ = eddl.Input([784])
    # the target is the input image itself, reshaped to (1, 28, 28)
    target = eddl.Reshape(in_, [1, 28, 28])
    layer = in_
    layer = eddl.Reshape(layer, [1, 28, 28])
    layer = eddl.ReLu(eddl.Conv(layer, 8, [3, 3]))
    layer = eddl.ReLu(eddl.Conv(layer, 16, [3, 3]))
    layer = eddl.ReLu(eddl.Conv(layer, 8, [3, 3]))
    out = eddl.Sigmoid(eddl.Conv(layer, 1, [3, 3]))
    # no outputs declared: losses are attached manually below
    net = eddl.Model([in_], [])
    eddl.build(
        net,
        eddl.adam(0.001),
        [],
        [],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    x_train = Tensor.load("mnist_trX.bin")
    if args.small:
        x_train = x_train.select([":6000"])
    x_train.div_(255.0)
    # custom losses defined by sibling functions mse_loss / dice_loss_*
    mse = eddl.newloss(mse_loss, [out, target], "mse_loss")
    dicei = eddl.newloss(dice_loss_img, [out, target], "dice_loss_img")
    dicep = eddl.newloss(dice_loss_pixel, [out, target], "dice_loss_pixel")
    batch = Tensor([args.batch_size, 784])
    num_batches = x_train.shape[0] // args.batch_size
    for i in range(args.epochs):
        print("Epoch %d/%d (%d batches)" % (i + 1, args.epochs, num_batches))
        diceploss = 0.0
        diceiloss = 0.0
        mseloss = 0
        for j in range(num_batches):
            print("Batch %d " % j, end="", flush=True)
            eddl.next_batch([x_train], [batch])
            eddl.zeroGrads(net)
            eddl.forward(net, [batch])
            # accumulate running per-sample averages of each monitored loss
            diceploss += eddl.compute_loss(dicep) / args.batch_size
            print("diceploss = %.6f " % (diceploss / (j + 1)), end="")
            diceiloss += eddl.compute_loss(dicei) / args.batch_size
            print("diceiloss = %.6f " % (diceiloss / (j + 1)), end="")
            mseloss += eddl.compute_loss(mse) / args.batch_size
            print("mseloss = %.6f\r" % (mseloss / (j + 1)), end="")
            # backprop through the pixel-dice loss only
            eddl.optimize(dicep)
            eddl.update(net)
        print()
    print("All done")
def main(args):
    """Train a 1D conv net on MNIST, treating each image as a 784-sample signal.

    Expects `args` with: gpu, mem, small, batch_size, epochs.
    """
    eddl.download_mnist()
    num_classes = 10
    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.Reshape(layer, [1, 784])  # image as a 1D signal with depth 1
    layer = eddl.MaxPool1D(eddl.ReLu(eddl.Conv1D(layer, 16, [3], [1])), [4], [4])
    layer = eddl.MaxPool1D(
        eddl.ReLu(eddl.Conv1D(layer, 32, [3], [1])),
        [4],
        [4],
    )
    layer = eddl.MaxPool1D(
        eddl.ReLu(eddl.Conv1D(layer, 64, [3], [1])),
        [4],
        [4],
    )
    layer = eddl.MaxPool1D(
        eddl.ReLu(eddl.Conv1D(layer, 64, [3], [1])),
        [4],
        [4],
    )
    layer = eddl.Reshape(layer, [-1])
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.rmsprop(0.01),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem))
    eddl.summary(net)
    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    if args.small:
        x_train = x_train.select([":6000"])
        y_train = y_train.select([":6000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    # scale pixel values to [0, 1]
    x_train.div_(255.0)
    x_test.div_(255.0)
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def main(args):
    """Train an MLP with batch-norm on MNIST and export it to ONNX.

    Expects `args` with: gpu, mem, small, batch_size, epochs, output
    (path for the ONNX file).
    """
    eddl.download_mnist()
    num_classes = 10
    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.Reshape(layer, [-1])
    layer = eddl.ReLu(eddl.Dense(layer, 1024))
    layer = eddl.BatchNormalization(layer, True)
    layer = eddl.ReLu(eddl.Dense(layer, 1024))
    layer = eddl.BatchNormalization(layer, True)
    layer = eddl.ReLu(eddl.Dense(layer, 1024))
    layer = eddl.BatchNormalization(layer, True)
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.rmsprop(0.01),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem),
        True  # initialize weights to random values
    )
    eddl.summary(net)
    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    if args.small:
        x_train = x_train.select([":6000"])
        y_train = y_train.select([":6000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    # scale pixel values to [0, 1]
    x_train.div_(255.0)
    x_test.div_(255.0)
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    eddl.save_net_to_onnx_file(net, args.output)
    print("saved net to", args.output)
    print("All done")
def defblock(l, bn, nf, reps, initializer):
    """`reps` times (3x3 conv -> optional batch-norm -> ReLU), then 2x2 max-pool."""
    x = l
    for _ in range(reps):
        x = initializer(eddl.Conv(x, nf, [3, 3]))
        if bn:
            x = eddl.BatchNormalization(x, 0.99, 0.001, True, "")
        x = eddl.ReLu(x)
    return eddl.MaxPool(x, [2, 2], [2, 2], "valid")
def main(args):
    """Train a deep ResNet (3-4-6-3 block layout) on CIFAR-10 with a decaying LR.

    Expects `args` with: gpu, mem, small, batch_size, epochs.
    Relies on the sibling `BG` and `ResBlock` helpers.
    """
    eddl.download_cifar10()
    num_classes = 10
    in_ = eddl.Input([3, 32, 32])
    layer = in_
    layer = eddl.RandomCropScale(layer, [0.8, 1.0])
    layer = eddl.RandomHorizontalFlip(layer)
    layer = eddl.ReLu(BG(eddl.Conv(layer, 64, [3, 3], [1, 1], "same", False)))
    layer = eddl.Pad(layer, [0, 1, 1, 0])
    # NOTE(review): the first loop passes four args (half=0, expand=i==0) while
    # the later loops pass three (half=i==0) -- confirm against the ResBlock
    # signature (layer, filters, half, expand=0).
    for i in range(3):
        layer = ResBlock(layer, 64, 0, i == 0)
    for i in range(4):
        layer = ResBlock(layer, 128, i == 0)
    for i in range(6):
        layer = ResBlock(layer, 256, i == 0)
    for i in range(3):
        layer = ResBlock(layer, 512, i == 0)
    layer = eddl.MaxPool(layer, [4, 4])
    layer = eddl.Reshape(layer, [-1])
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.sgd(0.001, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem))
    eddl.summary(net)
    eddl.plot(net, "model.pdf", "TB")
    x_train = Tensor.load("cifar_trX.bin")
    y_train = Tensor.load("cifar_trY.bin")
    x_train.div_(255.0)
    x_test = Tensor.load("cifar_tsX.bin")
    y_test = Tensor.load("cifar_tsY.bin")
    x_test.div_(255.0)
    if args.small:  # this is slow, make it really small
        x_train = x_train.select([":500"])
        y_train = y_train.select([":500"])
        x_test = x_test.select([":100"])
        y_test = y_test.select([":100"])
    # three rounds of training, dividing the learning rate by 10 each round
    lr = 0.01
    for j in range(3):
        lr /= 10.0
        eddl.setlr(net, [lr, 0.9])
        for i in range(args.epochs):
            eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
            eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def main(args):
    """Train an MNIST MLP demonstrating L2, L1 and L1L2 weight regularizers.

    Expects `args` with: gpu, mem, small, batch_size, epochs.
    """
    eddl.download_mnist()
    num_classes = 10
    in_ = eddl.Input([784])
    layer = in_
    # one dense layer per regularizer flavor
    layer = eddl.ReLu(eddl.L2(eddl.Dense(layer, 1024), 0.0001))
    layer = eddl.ReLu(eddl.L1(eddl.Dense(layer, 1024), 0.0001))
    layer = eddl.ReLu(eddl.L1L2(eddl.Dense(layer, 1024), 0.00001, 0.0001))
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.sgd(0.01, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf")
    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    if args.small:
        x_train = x_train.select([":6000"])
        y_train = y_train.select([":6000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    # scale pixel values to [0, 1]
    x_train.div_(255.0)
    x_test.div_(255.0)
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def main(args): size = 256 // 2 # Conv3D expects (B, C, dim1, dim2, dim3) in_ = eddl.Input([3, 10, size, size]) layer = in_ layer = eddl.MaxPool3D(eddl.ReLu(eddl.Conv3D( layer, 4, [1, 3, 3], [1, 1, 1], "same" )), [1, 2, 2], [1, 2, 2], "same") layer = eddl.MaxPool3D(eddl.ReLu(eddl.Conv3D( layer, 8, [1, 3, 3], [1, 1, 1], "same" )), [1, 2, 2], [1, 2, 2], "same") layer = eddl.MaxPool3D(eddl.ReLu(eddl.Conv3D( layer, 16, [1, 3, 3], [1, 1, 1], "same" )), [1, 2, 2], [1, 2, 2], "same") layer = eddl.GlobalMaxPool3D(layer) layer = eddl.Reshape(layer, [-1]) layer = eddl.LSTM(layer, 128) layer = eddl.Dense(layer, 100) layer = eddl.ReLu(layer) layer = eddl.Dense(layer, 2) out = eddl.ReLu(layer) net = eddl.Model([in_], [out]) eddl.build( net, eddl.adam(), ["mse"], ["mse"], eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem) ) eddl.summary(net) seqImages = Tensor.randu([32, 10, 3, 10, size, size]) seqLabels = Tensor.randu([32, 7, 2]) eddl.fit(net, [seqImages], [seqLabels], 4, 2 if args.small else 10)
def main(args):
    """Train a GRU sentiment classifier on the IMDB-2000 dataset.

    Expects `args` with: gpu, mem, small, batch_size.
    """
    eddl.download_imdb_2000()
    epochs = 2 if args.small else 10
    length = 250   # sequence length (timesteps)
    embdim = 33    # embedding dimension
    vocsize = 2000  # vocabulary size
    in_ = eddl.Input([1])  # 1 word
    layer = in_
    layer = eddl.RandomUniform(eddl.Embedding(layer, vocsize, 1, embdim), -0.05, 0.05)
    layer = eddl.GRU(layer, 37)
    layer = eddl.ReLu(eddl.Dense(layer, 256))
    out = eddl.Sigmoid(eddl.Dense(layer, 1))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.adam(0.001),
        ["cross_entropy"],
        ["binary_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem))
    eddl.summary(net)
    x_train = Tensor.load("imdb_2000_trX.bin")
    y_train = Tensor.load("imdb_2000_trY.bin")
    x_test = Tensor.load("imdb_2000_tsX.bin")
    y_test = Tensor.load("imdb_2000_tsY.bin")
    # batch x timesteps x input_dim
    x_train.reshape_([x_train.shape[0], length, 1])
    x_test.reshape_([x_test.shape[0], length, 1])
    y_train.reshape_([y_train.shape[0], 1, 1])
    y_test.reshape_([y_test.shape[0], 1, 1])
    if args.small:
        x_train = x_train.select([":64", ":", ":"])
        y_train = y_train.select([":64", ":", ":"])
        x_test = x_test.select([":64", ":", ":"])
        y_test = y_test.select([":64", ":", ":"])
    for i in range(epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
        eddl.evaluate(net, [x_test], [y_test])
    print("All done")
def ResBlock(layer, filters, half, expand=0):
    """Bottleneck residual block: 1x1 -> 3x3 -> 1x1 convs expanding to 4*filters.

    If `half`, the 3x3 conv uses stride 2 and the shortcut is a strided 1x1
    projection.  Otherwise, if `expand` is truthy the shortcut is a stride-1
    1x1 projection (for channel-count changes); if not, the identity shortcut
    is used.  Uses the sibling `BG` helper around each conv.
    """
    in_ = layer
    layer = eddl.ReLu(
        BG(eddl.Conv(layer, filters, [1, 1], [1, 1], "same", False)))
    strides = [2, 2] if half else [1, 1]
    layer = eddl.ReLu(
        BG(eddl.Conv(layer, filters, [3, 3], strides, "same", False)))
    layer = eddl.ReLu(
        BG(eddl.Conv(layer, 4 * filters, [1, 1], [1, 1], "same", False)))
    if (half):
        # downsampling shortcut: strided 1x1 projection to 4*filters channels
        return eddl.ReLu(
            eddl.Add(
                BG(eddl.Conv(in_, 4 * filters, [1, 1], [2, 2], "same", False)),
                layer))
    else:
        if expand:
            # channel-expanding shortcut (first block of a stage)
            return eddl.ReLu(
                eddl.Add(
                    BG(
                        eddl.Conv(in_, 4 * filters, [1, 1], [1, 1], "same",
                                  False)),
                    layer))
        else:
            # identity shortcut
            return eddl.ReLu(eddl.Add(in_, layer))
def main(args):
    """Round-trip an MNIST MLP through an in-memory ONNX string.

    Serializes the untrained net, trains and evaluates the original, then
    imports the serialized copy, rebuilds it without re-initializing weights,
    and evaluates it for comparison.
    Expects `args` with: gpu, mem, small, batch_size, epochs.
    """
    eddl.download_mnist()
    num_classes = 10
    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.Reshape(layer, [-1])
    layer = eddl.ReLu(eddl.Dense(layer, 1024))
    layer = eddl.ReLu(eddl.Dense(layer, 1024))
    layer = eddl.ReLu(eddl.Dense(layer, 1024))
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.rmsprop(0.01),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem),
        True  # initialize weights to random values
    )
    # snapshot the net (pre-training) as an ONNX string
    serialized_net = eddl.serialize_net_to_onnx_string(net, False)
    eddl.summary(net)
    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    if args.small:
        x_train = x_train.select([":6000"])
        y_train = y_train.select([":6000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    x_train.div_(255.0)
    x_test.div_(255.0)
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
    print("evaluating before import")
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    imported_net = eddl.import_net_from_onnx_string(serialized_net)
    eddl.build(
        imported_net,
        eddl.rmsprop(0.01),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem),
        False  # do not initialize weights to random values
    )
    eddl.summary(imported_net)
    print("net layers:", len(net.layers))
    print("imported_net layers:", len(imported_net.layers))
    print("evaluating imported net")
    eddl.evaluate(imported_net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def main(args):
    """Train a batch-normalized CNN with heavy augmentation on CIFAR-10.

    Runs `args.epochs` epochs at lr=0.001, then another `args.epochs` epochs
    after dropping the learning rate to 0.0001.
    Expects `args` with: gpu, mem, small, batch_size, epochs.
    """
    eddl.download_cifar10()
    num_classes = 10
    in_ = eddl.Input([3, 32, 32])
    layer = in_
    # on-graph data augmentation
    layer = eddl.RandomHorizontalFlip(layer)
    layer = eddl.RandomCropScale(layer, [0.8, 1.0])
    layer = eddl.RandomCutout(layer, [0.1, 0.5], [0.1, 0.5])
    layer = eddl.MaxPool(eddl.ReLu(eddl.BatchNormalization(
        eddl.HeUniform(eddl.Conv(layer, 32, [3, 3], [1, 1], "same", False)),
        True)), [2, 2])
    layer = eddl.MaxPool(eddl.ReLu(eddl.BatchNormalization(
        eddl.HeUniform(eddl.Conv(layer, 64, [3, 3], [1, 1], "same", False)),
        True)), [2, 2])
    layer = eddl.MaxPool(eddl.ReLu(eddl.BatchNormalization(
        eddl.HeUniform(eddl.Conv(layer, 128, [3, 3], [1, 1], "same", False)),
        True)), [2, 2])
    layer = eddl.MaxPool(eddl.ReLu(eddl.BatchNormalization(
        eddl.HeUniform(eddl.Conv(layer, 256, [3, 3], [1, 1], "same", False)),
        True)), [2, 2])
    layer = eddl.Reshape(layer, [-1])
    layer = eddl.Activation(eddl.BatchNormalization(
        eddl.Dense(layer, 128), True
    ), "relu")
    out = eddl.Softmax(eddl.BatchNormalization(
        eddl.Dense(layer, num_classes), True
    ))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.adam(0.001),
        ["softmax_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf")
    x_train = Tensor.load("cifar_trX.bin")
    y_train = Tensor.load("cifar_trY.bin")
    x_train.div_(255.0)
    x_test = Tensor.load("cifar_tsX.bin")
    y_test = Tensor.load("cifar_tsY.bin")
    x_test.div_(255.0)
    if args.small:
        x_train = x_train.select([":5000"])
        y_train = y_train.select([":5000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    for i in range(args.epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
        eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    # fine-tune with a 10x smaller learning rate
    eddl.setlr(net, [0.0001])
    for i in range(args.epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
        eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def Block1(layer, filters):
    """1x1 conv (stride 1) -> batch-norm -> ReLU."""
    conv = eddl.Conv(layer, filters, [1, 1], [1, 1])
    return eddl.ReLu(eddl.BatchNormalization(conv, True))
def SegNet(x, num_classes):
    """VGG-style SegNet: five conv stages with 2x2 pooling on the way down,
    five upsampling conv stages on the way up, and a final per-pixel
    `num_classes`-channel conv (no activation)."""

    def conv_relu(t, filters):
        # 3x3 conv, stride 1, "same" padding, ReLU activation
        return eddl.ReLu(eddl.Conv(t, filters, [3, 3], [1, 1], "same"))

    # encoder: conv stacks, each followed by a 2x2 max-pool
    encoder = ([64, 64], [128, 128], [256, 256, 256],
               [512, 512, 512], [512, 512, 512])
    for widths in encoder:
        for f in widths:
            x = conv_relu(x, f)
        x = eddl.MaxPool(x, [2, 2], [2, 2])

    # decoder: 2x2 upsampling, each followed by a conv stack
    decoder = ([512, 512, 512], [512, 512, 256], [256, 256, 128],
               [128, 64], [64])
    for widths in decoder:
        x = eddl.UpSampling(x, [2, 2])
        for f in widths:
            x = conv_relu(x, f)

    # per-pixel class scores
    return eddl.Conv(x, num_classes, [3, 3], [1, 1], "same")
def Block1(layer, filters):
    """1x1 conv (stride 1, "same" padding, no bias) -> Normalization -> ReLU."""
    conv = eddl.Conv(layer, filters, [1, 1], [1, 1], "same", False)
    return eddl.ReLu(Normalization(conv))
def VGG16(in_layer, num_classes, seed=1234, init=eddl.HeNormal,
          l2_reg=None, dropout=None):
    """VGG-16 with configurable initializer, optional dropout and L2 on the
    fully connected layers.  Returns the softmax output layer."""

    def conv(t, filters):
        # 3x3 conv with the chosen initializer, ReLU activation
        return eddl.ReLu(init(eddl.Conv(t, filters, [3, 3]), seed))

    def fc(t):
        # 4096-unit dense layer with optional dropout / L2, initialized ReLU
        t = eddl.Dense(t, 4096)
        if dropout:
            t = eddl.Dropout(t, dropout, iw=False)
        if l2_reg:
            t = eddl.L2(t, l2_reg)
        return eddl.ReLu(init(t, seed))

    x = in_layer
    # five conv stages, each ending in a 2x2 max-pool
    for filters, reps in ((64, 2), (128, 2), (256, 3), (512, 3), (512, 3)):
        for _ in range(reps):
            x = conv(x, filters)
        x = eddl.MaxPool(x, [2, 2], [2, 2])
    x = eddl.Reshape(x, [-1])
    x = fc(x)
    x = fc(x)
    return eddl.Softmax(eddl.Dense(x, num_classes))
def SegNetBN(x, num_classes):
    """Batch-normalized SegNet: VGG-style encoder (five conv/pool stages),
    symmetric upsampling decoder, and a final per-pixel `num_classes`-channel
    conv with no activation."""
    # --- encoder ---
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 64, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 64, [3, 3], [1, 1], "same"), True))
    x = eddl.MaxPool(x, [2, 2], [2, 2])
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 128, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 128, [3, 3], [1, 1], "same"), True))
    x = eddl.MaxPool(x, [2, 2], [2, 2])
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 256, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 256, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 256, [3, 3], [1, 1], "same"), True))
    x = eddl.MaxPool(x, [2, 2], [2, 2])
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.MaxPool(x, [2, 2], [2, 2])
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.MaxPool(x, [2, 2], [2, 2])
    # --- decoder ---
    x = eddl.UpSampling(x, [2, 2])
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.UpSampling(x, [2, 2])
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 512, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 256, [3, 3], [1, 1], "same"), True))
    x = eddl.UpSampling(x, [2, 2])
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 256, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 256, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 128, [3, 3], [1, 1], "same"), True))
    x = eddl.UpSampling(x, [2, 2])
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 128, [3, 3], [1, 1], "same"), True))
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 64, [3, 3], [1, 1], "same"), True))
    x = eddl.UpSampling(x, [2, 2])
    x = eddl.ReLu(
        eddl.BatchNormalization(eddl.Conv(x, 64, [3, 3], [1, 1], "same"), True))
    # per-pixel class scores
    x = eddl.Conv(x, num_classes, [3, 3], [1, 1], "same")
    return x
def VGG16(in_layer, num_classes):
    """Plain VGG-16: five conv stages with 2x2 pooling, two 4096-unit dense
    layers, and a softmax classification head."""
    x = in_layer
    for filters, reps in ((64, 2), (128, 2), (256, 3), (512, 3), (512, 3)):
        for _ in range(reps):
            x = eddl.ReLu(eddl.Conv(x, filters, [3, 3]))
        x = eddl.MaxPool(x, [2, 2], [2, 2])
    x = eddl.Reshape(x, [-1])
    x = eddl.ReLu(eddl.Dense(x, 4096))
    x = eddl.ReLu(eddl.Dense(x, 4096))
    return eddl.Softmax(eddl.Dense(x, num_classes))
# Build and train a VGG-like net on CIFAR-10 using the sibling `defblock` helper.
# NOTE(review): script fragment -- `num_classes` and `gpu` are defined elsewhere
# in the file; verify before running in isolation.
bn = int(sys.argv[1]) == 1  # first CLI arg toggles batch normalization
initializer = eddl.GlorotUniform if bn else eddl.HeUniform
inp = eddl.Input([3, 32, 32])
l = inp
l = defblock(l, bn, 64, 2, initializer)
l = defblock(l, bn, 128, 2, initializer)
l = defblock(l, bn, 256, 4, initializer)
l = defblock(l, bn, 512, 4, initializer)
l = defblock(l, bn, 512, 4, initializer)
l = eddl.Flatten(l)
# two 4096-unit dense layers with optional batch-norm
for i in range(2):
    l = initializer(eddl.Dense(l, 4096))
    if (bn):
        l = eddl.BatchNormalization(l, 0.99, 0.001, True, "")
    l = eddl.ReLu(l)
out = eddl.Softmax(initializer(eddl.Dense(l, num_classes)))
net = eddl.Model([inp], [out])
eddl.plot(net, "model.pdf")
eddl.build(net, eddl.adam(0.00001), ["soft_cross_entropy"],
           ["categorical_accuracy"], eddl.CS_GPU() if gpu else eddl.CS_CPU())
eddl.summary(net)
x_train = Tensor.load("cifar_trX.bin")
y_train = Tensor.load("cifar_trY.bin")
x_train.div_(255)  # scale pixel values to [0, 1]
def Block1(layer, filters):
    """1x1 conv (stride 1) -> group norm (4 groups) -> ReLU."""
    conv = eddl.Conv(layer, filters, [1, 1], [1, 1])
    return eddl.ReLu(eddl.GroupNormalization(conv, 4))
def Block3_2(layer, filters):
    """Two stacked 3x3 conv -> group-norm (4 groups) -> ReLU stages (stride 1)."""
    x = layer
    for _ in range(2):
        conv = eddl.Conv(x, filters, [3, 3], [1, 1])
        x = eddl.ReLu(eddl.GroupNormalization(conv, 4))
    return x