def main(args):
    """Train an MLP with batch-norm + Gaussian noise on MNIST, then anneal the LR."""
    eddl.download_mnist()

    num_classes = 10
    in_ = eddl.Input([784])

    # Augmentation happens in image space, so reshape to 1x28x28 and back.
    x = eddl.Reshape(in_, [1, 28, 28])
    x = eddl.RandomCropScale(x, [0.9, 1.0])
    x = eddl.Reshape(x, [-1])
    # Three identical Dense -> BN -> GaussianNoise -> ReLu blocks.
    for _ in range(3):
        x = eddl.ReLu(
            eddl.GaussianNoise(
                eddl.BatchNormalization(eddl.Dense(x, 1024), True), 0.3
            )
        )
    out = eddl.Softmax(eddl.Dense(x, num_classes))
    net = eddl.Model([in_], [out])

    cs = eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    eddl.build(
        net,
        eddl.sgd(0.01, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        cs)
    eddl.summary(net)
    eddl.plot(net, "model.pdf")

    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    if args.small:
        x_train = x_train.select([":6000"])
        y_train = y_train.select([":6000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    x_train.div_(255.0)
    x_test.div_(255.0)

    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)

    # LR annealing: only worthwhile with enough epochs.
    if args.epochs < 4:
        return
    for lr, n_epochs in ((0.005, args.epochs // 2),
                         (0.001, args.epochs // 2),
                         (0.0001, args.epochs // 4)):
        eddl.setlr(net, [lr, 0.9])
        eddl.fit(net, [x_train], [y_train], args.batch_size, n_epochs)
        eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def VGG16(in_layer, num_classes, seed=1234, init=eddl.HeNormal, l2_reg=None, dropout=None):
    """Build a VGG16 graph on top of ``in_layer``.

    :param in_layer: layer the convolutional stack is attached to
    :param num_classes: size of the final softmax layer
    :param seed: RNG seed passed to ``init`` for every initialized layer
    :param init: weight-initializer wrapper applied to each layer (e.g. eddl.HeNormal)
    :param l2_reg: optional L2 regularization factor for the two 4096 dense layers
    :param dropout: optional dropout rate for the two 4096 dense layers
    :return: the output (softmax) layer
    """
    x = in_layer
    # Stage 1: 2 x conv64
    x = eddl.ReLu(init(eddl.Conv(x, 64, [3, 3]), seed))
    x = eddl.MaxPool(eddl.ReLu(init(eddl.Conv(x, 64, [3, 3]), seed)), [2, 2], [2, 2])
    # Stage 2: 2 x conv128
    x = eddl.ReLu(init(eddl.Conv(x, 128, [3, 3]), seed))
    x = eddl.MaxPool(eddl.ReLu(init(eddl.Conv(x, 128, [3, 3]), seed)), [2, 2], [2, 2])
    # Stage 3: 3 x conv256
    x = eddl.ReLu(init(eddl.Conv(x, 256, [3, 3]), seed))
    x = eddl.ReLu(init(eddl.Conv(x, 256, [3, 3]), seed))
    x = eddl.MaxPool(eddl.ReLu(init(eddl.Conv(x, 256, [3, 3]), seed)), [2, 2], [2, 2])
    # Stage 4: 3 x conv512
    x = eddl.ReLu(init(eddl.Conv(x, 512, [3, 3]), seed))
    x = eddl.ReLu(init(eddl.Conv(x, 512, [3, 3]), seed))
    x = eddl.MaxPool(eddl.ReLu(init(eddl.Conv(x, 512, [3, 3]), seed)), [2, 2], [2, 2])
    # Stage 5: 3 x conv512
    x = eddl.ReLu(init(eddl.Conv(x, 512, [3, 3]), seed))
    x = eddl.ReLu(init(eddl.Conv(x, 512, [3, 3]), seed))
    x = eddl.MaxPool(eddl.ReLu(init(eddl.Conv(x, 512, [3, 3]), seed)), [2, 2], [2, 2])
    # Classifier head
    x = eddl.Reshape(x, [-1])
    x = eddl.Dense(x, 4096)
    if dropout:
        x = eddl.Dropout(x, dropout, iw=False)
    if l2_reg:
        x = eddl.L2(x, l2_reg)
    # NOTE(review): here ``init`` is applied to the Dropout/L2 wrapper output,
    # not directly to the Dense layer as in the conv stack above -- confirm
    # this is the intended target of the initializer.
    x = eddl.ReLu(init(x,seed))
    x = eddl.Dense(x, 4096)
    if dropout:
        x = eddl.Dropout(x, dropout, iw=False)
    if l2_reg:
        x = eddl.L2(x, l2_reg)
    x = eddl.ReLu(init(x,seed))
    x = eddl.Softmax(eddl.Dense(x, num_classes))
    return x
def LeNet(in_layer, num_classes):
    """Classic LeNet: two conv/pool stages followed by a 500-unit dense layer."""
    x = in_layer
    for filters in (20, 50):
        x = eddl.MaxPool(eddl.ReLu(eddl.Conv(x, filters, [5, 5])), [2, 2], [2, 2])
    x = eddl.Reshape(x, [-1])
    x = eddl.ReLu(eddl.Dense(x, 500))
    return eddl.Softmax(eddl.Dense(x, num_classes))
def main(args):
    """Train a small conv net (with the project's Normalization wrapper) on CIFAR-10."""
    eddl.download_cifar10()

    num_classes = 10
    in_ = eddl.Input([3, 32, 32])

    # Four conv/norm/pool stages with doubling filter counts.
    x = in_
    for filters in (32, 64, 128, 256):
        x = eddl.MaxPool(
            eddl.ReLu(Normalization(eddl.Conv(x, filters, [3, 3], [1, 1]))),
            [2, 2])
    x = eddl.GlobalMaxPool(x)
    x = eddl.Flatten(x)
    x = eddl.Activation(eddl.Dense(x, 128), "relu")
    out = eddl.Softmax(eddl.Dense(x, num_classes))
    net = eddl.Model([in_], [out])

    eddl.build(
        net,
        eddl.adam(0.001),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf")

    x_train = Tensor.load("cifar_trX.bin")
    y_train = Tensor.load("cifar_trY.bin")
    x_train.div_(255.0)
    x_test = Tensor.load("cifar_tsX.bin")
    y_test = Tensor.load("cifar_tsY.bin")
    x_test.div_(255.0)
    if args.small:
        x_train = x_train.select([":5000"])
        y_train = y_train.select([":5000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])

    # Evaluate after every epoch, hence fit with epochs=1 inside the loop.
    for _ in range(args.epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
        eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def main(args):
    """Train a residual CNN (project ResBlock/BG helpers) on CIFAR-10."""
    eddl.download_cifar10()

    num_classes = 10
    in_ = eddl.Input([3, 32, 32])

    # Augmentation layers (active only in training mode).
    x = eddl.RandomCropScale(in_, [0.8, 1.0])
    x = eddl.RandomFlip(x, 1)
    x = eddl.ReLu(BG(eddl.Conv(x, 64, [3, 3], [1, 1])))
    x = eddl.Pad(x, [0, 1, 1, 0])
    # Paired residual blocks per stage, mirroring the original call pattern
    # (second arg: filters; third: 2; fourth: True for the first of each pair).
    for filters in (64, 128, 256, 256):
        x = ResBlock(x, filters, 2, True)
        x = ResBlock(x, filters, 2, False)
    x = eddl.Reshape(x, [-1])
    x = eddl.ReLu(BG(eddl.Dense(x, 512)))
    out = eddl.Softmax(eddl.Dense(x, num_classes))
    net = eddl.Model([in_], [out])

    eddl.build(
        net,
        eddl.sgd(0.01, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf", "TB")

    x_train = Tensor.load("cifar_trX.bin")
    y_train = Tensor.load("cifar_trY.bin")
    x_train.div_(255.0)
    x_test = Tensor.load("cifar_tsX.bin")
    y_test = Tensor.load("cifar_tsY.bin")
    x_test.div_(255.0)
    if args.small:
        x_train = x_train.select([":5000"])
        y_train = y_train.select([":5000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])

    for _ in range(args.epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
        eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def main(args):
    """Train a VGG16-style net (project Block3_2/Block1 helpers) on CIFAR-10."""
    eddl.download_cifar10()

    num_classes = 10
    in_ = eddl.Input([3, 32, 32])

    # Augmentation layers (active only in training mode).
    x = eddl.RandomCropScale(in_, [0.8, 1.0])
    x = eddl.RandomFlip(x, 1)
    x = eddl.RandomCutout(x, [0.1, 0.3], [0.1, 0.3])
    # Two 2-conv stages, then three stages with an extra 1x1 block each.
    for filters in (64, 128):
        x = eddl.MaxPool(Block3_2(x, filters))
    for filters in (256, 512, 512):
        x = eddl.MaxPool(Block1(Block3_2(x, filters), filters))
    x = eddl.Reshape(x, [-1])
    x = eddl.Activation(eddl.Dense(x, 512), "relu")
    out = eddl.Softmax(eddl.Dense(x, num_classes))
    net = eddl.Model([in_], [out])

    eddl.build(
        net,
        eddl.sgd(0.001, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.setlogfile(net, "vgg16")
    eddl.summary(net)
    eddl.plot(net, "model.pdf")

    x_train = Tensor.load("cifar_trX.bin")
    y_train = Tensor.load("cifar_trY.bin")
    x_train.div_(255.0)
    x_test = Tensor.load("cifar_tsX.bin")
    y_test = Tensor.load("cifar_tsY.bin")
    x_test.div_(255.0)
    if args.small:
        x_train = x_train.select([":5000"])
        y_train = y_train.select([":5000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])

    for _ in range(args.epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
        eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def tissue_detector_DNN():
    """Build a 3-input, 2-class MLP (three 50-unit ReLu layers) for tissue detection."""
    in_ = eddl.Input([3])
    x = in_
    for _ in range(3):
        x = eddl.ReLu(eddl.Dense(x, 50))
    out = eddl.Softmax(eddl.Dense(x, 2))
    return eddl.Model([in_], [out])
def test_build_net(opt_cls):
    """Smoke-test building a 3-layer LeakyReLu MLP with the given optimizer class."""
    num_classes = 10
    in_ = eddl.Input([784])
    x = in_
    for _ in range(3):
        x = eddl.LeakyReLu(eddl.Dense(x, 1024))
    out = eddl.Softmax(eddl.Dense(x, num_classes), -1)
    net = eddl.Model([in_], [out])
    eddl.build(net, opt_cls(0.01), ["soft_cross_entropy"],
               ["categorical_accuracy"], eddl.CS_CPU(mem="low_mem"))
def main(args):
    """Train an MLP on MNIST with manual batching via train_batch/eval_batch.

    The model is built through ``net.build`` with a loss object and a
    ``CategoricalAccuracy`` metric instance instead of the usual string API.
    """
    eddl.download_mnist()
    num_classes = 10
    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.Activation(eddl.Dense(layer, 1024), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 1024), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 1024), "relu")
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    acc = CategoricalAccuracy()
    net.build(
        eddl.sgd(0.01, 0.9),
        [eddl.getLoss("soft_cross_entropy")],
        [acc],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf")
    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    # y_test = Tensor.load("mnist_tsY.bin")
    x_train.div_(255.0)
    x_test.div_(255.0)
    num_samples = x_train.shape[0]
    num_batches = num_samples // args.batch_size
    test_samples = x_test.shape[0]
    test_batches = test_samples // args.batch_size
    eddl.set_mode(net, TRMODE)
    for i in range(args.epochs):
        # Training: random batches (with replacement) from the train set.
        for j in range(num_batches):
            print("Epoch %d/%d (batch %d/%d)"
                  % (i + 1, args.epochs, j + 1, num_batches))
            indices = np.random.randint(0, num_samples, args.batch_size)
            eddl.train_batch(net, [x_train], [y_train], indices)
        # NOTE(review): this "evaluation" loop also draws random batches from
        # the TRAINING tensors (x_train/y_train, indices over num_samples),
        # even though test_batches was computed and x_test is loaded; y_test's
        # load is commented out above, so this may be deliberate -- confirm.
        for j in range(test_batches):
            print("Epoch %d/%d (batch %d/%d)"
                  % (i + 1, args.epochs, j + 1, test_batches))
            indices = np.random.randint(0, num_samples, args.batch_size)
            eddl.eval_batch(net, [x_train], [y_train], indices)
    print("All done")
def main(args):
    """Train an LSTM classifier on MNIST rows treated as 28-step sequences."""
    eddl.download_mnist()

    num_classes = 10
    in_ = eddl.Input([28])

    x = eddl.LeakyReLu(eddl.Dense(in_, 32))
    x = eddl.L2(eddl.LSTM(x, 128), 0.001)
    lstm_layer = x  # kept to inspect its input/output tensors after training
    out = eddl.Softmax(eddl.Dense(x, num_classes))
    net = eddl.Model([in_], [out])

    eddl.build(
        net,
        eddl.rmsprop(0.001),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem))
    eddl.summary(net)
    eddl.plot(net, "model.pdf")

    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    if args.small:
        x_train = x_train.select([":6000"])
        y_train = y_train.select([":6000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    # x: batch x timesteps x input_dim; y: a single output timestep.
    x_train.reshape_([x_train.shape[0], 28, 28])
    x_test.reshape_([x_test.shape[0], 28, 28])
    y_train.reshape_([y_train.shape[0], 1, 10])
    y_test.reshape_([y_test.shape[0], 1, 10])
    x_train.div_(255.0)
    x_test.div_(255.0)

    for _ in range(args.epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
        eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)

    # Dump info about the LSTM layer's input and output tensors.
    ls_in = eddl.getInput(lstm_layer)
    ls_in.info()
    ls_out = eddl.getOutput(lstm_layer)
    ls_out.info()
    print("All done")
def main(args):
    """Train a batch-normalized MLP on MNIST and export the trained net to ONNX."""
    eddl.download_mnist()

    num_classes = 10
    in_ = eddl.Input([784])

    x = eddl.Reshape(in_, [-1])
    for _ in range(3):
        x = eddl.ReLu(eddl.Dense(x, 1024))
        x = eddl.BatchNormalization(x, True)
    out = eddl.Softmax(eddl.Dense(x, num_classes))
    net = eddl.Model([in_], [out])

    eddl.build(
        net,
        eddl.rmsprop(0.01),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem),
        True  # initialize weights to random values
    )
    eddl.summary(net)

    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    if args.small:
        x_train = x_train.select([":6000"])
        y_train = y_train.select([":6000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    x_train.div_(255.0)
    x_test.div_(255.0)

    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)

    eddl.save_net_to_onnx_file(net, args.output)
    print("saved net to", args.output)
    print("All done")
def main(args):
    """Train a ResNet-50-style network on CIFAR-10 with a decaying LR schedule."""
    eddl.download_cifar10()
    num_classes = 10
    in_ = eddl.Input([3, 32, 32])
    layer = in_
    # Augmentation layers (active only in training mode).
    layer = eddl.RandomCropScale(layer, [0.8, 1.0])
    layer = eddl.RandomHorizontalFlip(layer)
    layer = eddl.ReLu(BG(eddl.Conv(layer, 64, [3, 3], [1, 1], "same", False)))
    layer = eddl.Pad(layer, [0, 1, 1, 0])
    # ResNet-50-like stage layout: 3, 4, 6, 3 blocks.
    # NOTE(review): the first loop passes four args (64, 0, i == 0) while the
    # later loops pass three (filters, i == 0) -- confirm ResBlock's signature
    # and that the differing argument counts are intentional.
    for i in range(3):
        layer = ResBlock(layer, 64, 0, i == 0)
    for i in range(4):
        layer = ResBlock(layer, 128, i == 0)
    for i in range(6):
        layer = ResBlock(layer, 256, i == 0)
    for i in range(3):
        layer = ResBlock(layer, 512, i == 0)
    layer = eddl.MaxPool(layer, [4, 4])
    layer = eddl.Reshape(layer, [-1])
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.sgd(0.001, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem))
    eddl.summary(net)
    eddl.plot(net, "model.pdf", "TB")
    x_train = Tensor.load("cifar_trX.bin")
    y_train = Tensor.load("cifar_trY.bin")
    x_train.div_(255.0)
    x_test = Tensor.load("cifar_tsX.bin")
    y_test = Tensor.load("cifar_tsY.bin")
    x_test.div_(255.0)
    if args.small:
        # this is slow, make it really small
        x_train = x_train.select([":500"])
        y_train = y_train.select([":500"])
        x_test = x_test.select([":100"])
        y_test = y_test.select([":100"])
    # LR schedule: lr is divided by 10 *before* each round, so the effective
    # rates are 0.001, 0.0001, 0.00001 (the 0.01 seed value is never used).
    lr = 0.01
    for j in range(3):
        lr /= 10.0
        eddl.setlr(net, [lr, 0.9])
        for i in range(args.epochs):
            eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
            eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def main(args):
    """Train a GRU sentiment classifier on the IMDB-2000 dataset."""
    eddl.download_imdb_2000()
    epochs = 2 if args.small else 10
    length = 250   # tokens per review
    embdim = 33
    vocsize = 2000

    in_ = eddl.Input([1])  # 1 word
    x = eddl.RandomUniform(
        eddl.Embedding(in_, vocsize, 1, embdim), -0.05, 0.05)
    x = eddl.GRU(x, 37)
    x = eddl.ReLu(eddl.Dense(x, 256))
    out = eddl.Sigmoid(eddl.Dense(x, 1))
    net = eddl.Model([in_], [out])

    eddl.build(
        net,
        eddl.adam(0.001),
        ["cross_entropy"],
        ["binary_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem))
    eddl.summary(net)

    x_train = Tensor.load("imdb_2000_trX.bin")
    y_train = Tensor.load("imdb_2000_trY.bin")
    x_test = Tensor.load("imdb_2000_tsX.bin")
    y_test = Tensor.load("imdb_2000_tsY.bin")
    # batch x timesteps x input_dim
    x_train.reshape_([x_train.shape[0], length, 1])
    x_test.reshape_([x_test.shape[0], length, 1])
    y_train.reshape_([y_train.shape[0], 1, 1])
    y_test.reshape_([y_test.shape[0], 1, 1])
    if args.small:
        sel = [":64", ":", ":"]
        x_train = x_train.select(sel)
        y_train = y_train.select(sel)
        x_test = x_test.select(sel)
        y_test = y_test.select(sel)

    for _ in range(epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
        eddl.evaluate(net, [x_test], [y_test])
    print("All done")
def VGG16(in_layer, num_classes):
    """Plain VGG16 conv stack with a 256-unit head and softmax classifier."""
    x = in_layer
    # (filters, conv count) per stage; each stage ends with a 2x2/stride-2 pool.
    for filters, reps in ((64, 2), (128, 2), (256, 3), (512, 3), (512, 3)):
        for _ in range(reps - 1):
            x = eddl.ReLu(eddl.Conv(x, filters, [3, 3]))
        x = eddl.MaxPool(eddl.ReLu(eddl.Conv(x, filters, [3, 3])), [2, 2], [2, 2])
    x = eddl.Reshape(x, [-1])
    x = eddl.ReLu(eddl.Dense(x, 256))
    return eddl.Softmax(eddl.Dense(x, num_classes))
def main(args):
    """Train an MLP with L2-regularized dense layers and post-activation BN on MNIST."""
    eddl.download_mnist()

    num_classes = 10
    in_ = eddl.Input([784])

    x = in_
    # Three identical Dense(L2) -> relu -> BatchNormalization blocks.
    for _ in range(3):
        x = eddl.BatchNormalization(
            eddl.Activation(eddl.L2(eddl.Dense(x, 1024), 0.0001), "relu"), True
        )
    out = eddl.Softmax(eddl.Dense(x, num_classes))
    net = eddl.Model([in_], [out])

    # Build through the Net method directly, with loss/metric objects.
    acc = CategoricalAccuracy()
    net.build(
        eddl.sgd(0.01, 0.9),
        [eddl.getLoss("soft_cross_entropy")],
        [acc],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf")

    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    x_train.div_(255.0)
    x_test.div_(255.0)

    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def main(args):
    """Train an MNIST MLP demonstrating L2, L1 and combined L1L2 regularizers."""
    eddl.download_mnist()

    num_classes = 10
    in_ = eddl.Input([784])

    # One 1024-unit dense layer per regularizer flavor.
    regularizers = (
        lambda l: eddl.L2(l, 0.0001),
        lambda l: eddl.L1(l, 0.0001),
        lambda l: eddl.L1L2(l, 0.00001, 0.0001),
    )
    x = in_
    for reg in regularizers:
        x = eddl.ReLu(reg(eddl.Dense(x, 1024)))
    out = eddl.Softmax(eddl.Dense(x, num_classes))
    net = eddl.Model([in_], [out])

    eddl.build(
        net,
        eddl.sgd(0.01, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf")

    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    if args.small:
        x_train = x_train.select([":6000"])
        y_train = y_train.select([":6000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    x_train.div_(255.0)
    x_test.div_(255.0)

    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def main(args):
    """Train a dense autoencoder on MNIST using explicit loss/metric objects."""
    eddl.download_mnist()

    in_ = eddl.Input([784])
    # Symmetric encoder/decoder widths.
    x = in_
    for width in (256, 128, 64, 128, 256):
        x = eddl.Activation(eddl.Dense(x, width), "relu")
    out = eddl.Dense(x, 784)
    net = eddl.Model([in_], [out])

    mse_loss = MSELoss()
    mse_metric = MSEMetric()
    net.build(
        eddl.sgd(0.001, 0.9),
        [mse_loss],
        [mse_metric],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem))
    eddl.summary(net)
    eddl.plot(net, "model.pdf")

    x_train = Tensor.load("mnist_trX.bin")
    x_train.div_(255.0)
    # Input doubles as target: reconstruction objective.
    eddl.fit(net, [x_train], [x_train], args.batch_size, args.epochs)
    print("All done")
def main(args):
    """Train a dense autoencoder on MNIST and run prediction on the train set."""
    eddl.download_mnist()

    in_ = eddl.Input([784])
    # Symmetric encoder/decoder widths.
    x = in_
    for width in (256, 128, 64, 128, 256):
        x = eddl.Activation(eddl.Dense(x, width), "relu")
    out = eddl.Dense(x, 784)
    net = eddl.Model([in_], [out])

    eddl.build(
        net,
        eddl.sgd(0.001, 0.9),
        ["mean_squared_error"],
        ["mean_squared_error"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem))
    eddl.summary(net)
    eddl.plot(net, "model.pdf")

    x_train = Tensor.load("mnist_trX.bin")
    if args.small:
        x_train = x_train.select([":6000"])
    x_train.div_(255.0)

    # Input doubles as target: reconstruction objective.
    eddl.fit(net, [x_train], [x_train], args.batch_size, args.epochs)
    tout = eddl.predict(net, [x_train])
    tout[0].info()
    print("All done")
def main(args):
    """Train a 1D CNN on MNIST, treating each image as a 784-sample signal."""
    eddl.download_mnist()

    num_classes = 10
    in_ = eddl.Input([784])

    x = eddl.Reshape(in_, [1, 784])  # image as a 1D signal with depth 1
    # Four Conv1D/MaxPool1D stages with increasing filter counts.
    for filters in (16, 32, 64, 64):
        x = eddl.MaxPool1D(
            eddl.ReLu(eddl.Conv1D(x, filters, [3], [1])), [4], [4])
    x = eddl.Reshape(x, [-1])
    out = eddl.Softmax(eddl.Dense(x, num_classes))
    net = eddl.Model([in_], [out])

    eddl.build(
        net,
        eddl.rmsprop(0.01),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem))
    eddl.summary(net)

    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    if args.small:
        x_train = x_train.select([":6000"])
        y_train = y_train.select([":6000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    x_train.div_(255.0)
    x_test.div_(255.0)

    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def main(args): size = 256 // 2 # Conv3D expects (B, C, dim1, dim2, dim3) in_ = eddl.Input([3, 10, size, size]) layer = in_ layer = eddl.MaxPool3D(eddl.ReLu(eddl.Conv3D( layer, 4, [1, 3, 3], [1, 1, 1], "same" )), [1, 2, 2], [1, 2, 2], "same") layer = eddl.MaxPool3D(eddl.ReLu(eddl.Conv3D( layer, 8, [1, 3, 3], [1, 1, 1], "same" )), [1, 2, 2], [1, 2, 2], "same") layer = eddl.MaxPool3D(eddl.ReLu(eddl.Conv3D( layer, 16, [1, 3, 3], [1, 1, 1], "same" )), [1, 2, 2], [1, 2, 2], "same") layer = eddl.GlobalMaxPool3D(layer) layer = eddl.Reshape(layer, [-1]) layer = eddl.LSTM(layer, 128) layer = eddl.Dense(layer, 100) layer = eddl.ReLu(layer) layer = eddl.Dense(layer, 2) out = eddl.ReLu(layer) net = eddl.Model([in_], [out]) eddl.build( net, eddl.adam(), ["mse"], ["mse"], eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem) ) eddl.summary(net) seqImages = Tensor.randu([32, 10, 3, 10, size, size]) seqLabels = Tensor.randu([32, 7, 2]) eddl.fit(net, [seqImages], [seqLabels], 4, 2 if args.small else 10)
def main(args):
    """Train an encoder/decoder LSTM (seq2seq) model on the EuTrans dataset."""
    eddl.download_eutrans()
    epochs = 1 if args.small else 5
    ilength = 30   # input sentence length (timesteps)
    olength = 30   # output sentence length (timesteps)
    invs = 687     # input vocabulary size
    outvs = 514    # output vocabulary size
    embedding = 64
    # Encoder
    in_ = eddl.Input([1])  # 1 word
    layer = in_
    lE = eddl.RandomUniform(
        eddl.Embedding(layer, invs, 1, embedding, True), -0.05, 0.05
    )
    enc = eddl.LSTM(lE, 128, True)
    # Encoder states are wired into the decoder LSTM below.
    cps = eddl.GetStates(enc)
    # Decoder
    ldin = eddl.Input([outvs])
    ld = eddl.ReduceArgMax(ldin, [0])
    ld = eddl.RandomUniform(
        eddl.Embedding(ld, outvs, 1, embedding), -0.05, 0.05
    )
    layer = eddl.LSTM([ld, cps], 128)
    out = eddl.Softmax(eddl.Dense(layer, outvs))
    eddl.setDecoder(ldin)
    net = eddl.Model([in_], [out])
    # Build model
    eddl.build(
        net,
        eddl.adam(0.01),
        ["softmax_cross_entropy"],
        ["accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    # Load dataset
    x_train = Tensor.load("eutrans_trX.bin")
    y_train = Tensor.load("eutrans_trY.bin")
    y_train = Tensor.onehot(y_train, outvs)
    # batch x timesteps x input_dim
    x_train.reshape_([x_train.shape[0], ilength, 1])
    # batch x timesteps x output_dim
    y_train.reshape_([y_train.shape[0], olength, outvs])
    x_test = Tensor.load("eutrans_tsX.bin")
    y_test = Tensor.load("eutrans_tsY.bin")
    y_test = Tensor.onehot(y_test, outvs)
    # batch x timesteps x input_dim
    x_test.reshape_([x_test.shape[0], ilength, 1])
    # batch x timesteps x output_dim
    y_test.reshape_([y_test.shape[0], olength, outvs])
    if args.small:
        sel = [f":{3 * args.batch_size}", ":", ":"]
        x_train = x_train.select(sel)
        y_train = y_train.select(sel)
        x_test = x_test.select(sel)
        y_test = y_test.select(sel)
    # Train model
    # NOTE(review): ybatch is filled by next_batch but never used afterwards,
    # and the test tensors loaded above are never evaluated -- these look like
    # leftovers; confirm before removing.
    ybatch = Tensor([args.batch_size, olength, outvs])
    eddl.next_batch([y_train], [ybatch])
    for i in range(epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
    print("All done")
def main(args):
    """Fine-tune a model-zoo classifier (ONNX) for skin lesion classification.

    Builds ECVL augmentation pipelines, loads the DLDataset, imports the base
    network from ONNX (either a checkpoint or a downloaded zoo model with a
    fresh 'classifier' head), then either trains with per-epoch validation or
    runs a test pass, optionally saving per-image predictions as PNGs.
    """
    batch_size = args.batch_size
    image_size = args.size, args.size
    if args.weights:
        os.makedirs(args.weights, exist_ok=True)
    # Training-time augmentations; validation/test only resize + rescale.
    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(image_size, ecvl.InterpolationType.cubic),
        ecvl.AugMirror(.5),
        ecvl.AugFlip(.5),
        ecvl.AugRotate([-180, 180]),
        ecvl.AugAdditivePoissonNoise([0, 10]),
        ecvl.AugGammaContrast([0.5, 1.5]),
        ecvl.AugGaussianBlur([0, 0.8]),
        ecvl.AugCoarseDropout([0, 0.03], [0.02, 0.05], 0.25),
        ecvl.AugToFloat32(255),
    ])
    validation_test_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(image_size),
        ecvl.AugToFloat32(255),
    ])
    # One augmentation container per split: training, validation, test.
    dataset_augs = ecvl.DatasetAugmentations(
        [training_augs, validation_test_augs, validation_test_augs])
    print('Reading dataset')
    d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs,
                       ctype=ecvl.ColorType.RGB)
    num_classes = len(d.classes_)
    size = d.n_channels_, args.size, args.size
    if args.ckpts:
        # Resume from a previously saved ONNX checkpoint.
        net = eddl.import_net_from_onnx_file(args.ckpts, size)
    else:
        # Download the zoo model and replace its head with a new classifier.
        model_path = utils.DownloadModel(
            classification_zoo[args.model]['url'],
            f'{args.model}.onnx', 'model_onnx')
        net = eddl.import_net_from_onnx_file(model_path, size)
        eddl.removeLayer(
            net,
            classification_zoo[args.model]['to_remove'])  # remove last Linear of resnet
        top = eddl.getLayer(
            net, classification_zoo[args.model]['top'])  # get flatten of resnet
        out = eddl.Softmax(
            eddl.Dense(top, num_classes, True, 'classifier'))  # true is for the bias
        data_input = eddl.getLayer(
            net, classification_zoo[args.model]['input'])  # input of the onnx
        net = eddl.Model([data_input], [out])
    eddl.build(
        net,
        eddl.adam(args.learning_rate),
        ['softmax_cross_entropy'],
        ['accuracy'],
        eddl.CS_GPU(args.gpu, mem="low_mem") if args.gpu else eddl.CS_CPU(),
        False)  # False: keep the imported ONNX weights (no re-initialization)
    out = eddl.getOut(net)[0]
    if not args.ckpts:
        # Only the freshly added head needs random weights.
        eddl.initializeLayer(net, "classifier")
    eddl.summary(net)
    eddl.setlogfile(net, 'skin_lesion_classification')
    # Reusable input/target tensors filled by DLDataset.LoadBatch.
    x = Tensor([batch_size, *size])
    y = Tensor([batch_size, num_classes])
    metric_fn = eddl.getMetric('accuracy')
    best_accuracy = 0.
    if args.train:
        num_samples_train = len(d.GetSplit())
        num_batches_train = num_samples_train // args.batch_size
        num_samples_val = len(d.GetSplit(ecvl.SplitType.validation))
        num_batches_val = num_samples_val // args.batch_size
        print('Starting training')
        for e in range(args.epochs):
            if args.out_dir:
                # One output directory per epoch, with one subdir per class.
                current_path = os.path.join(args.out_dir, f'Epoch_{e}')
                for c in d.classes_:
                    c_dir = os.path.join(current_path, c)
                    os.makedirs(c_dir, exist_ok=True)
            d.SetSplit(ecvl.SplitType.training)
            eddl.reset_loss(net)
            # Reshuffle the training split in place each epoch.
            s = d.GetSplit()
            random.shuffle(s)
            d.split_.training_ = s
            d.ResetAllBatches()
            for b in range(num_batches_train):
                d.LoadBatch(x, y)
                eddl.train_batch(net, [x], [y])
                losses = eddl.get_losses(net)
                metrics = eddl.get_metrics(net)
                print(
                    f'Train - epoch [{e + 1}/{args.epochs}] - batch [{b + 1}/{num_batches_train}]'
                    f' - loss={losses[0]:.3f} - accuracy={metrics[0]:.3f}',
                    flush=True)
            # Validation pass.
            d.SetSplit(ecvl.SplitType.validation)
            values = np.zeros(num_batches_val)
            eddl.reset_loss(net)
            for b in range(num_batches_val):
                # NOTE(review): n is reset for every batch, so the filename
                # lookup below (d.GetSplit()[n]) only ever indexes the first
                # batch_size samples of the split -- saved image names repeat
                # across batches. Should this be b * args.batch_size + k?
                n = 0
                d.LoadBatch(x, y)
                eddl.forward(net, [x])
                output = eddl.getOutput(out)
                value = metric_fn.value(y, output)
                values[b] = value
                if args.out_dir:
                    # Save each validation image under its predicted class.
                    for k in range(args.batch_size):
                        result = output.select([str(k)])
                        target = y.select([str(k)])
                        result_a = np.array(result, copy=False)
                        target_a = np.array(target, copy=False)
                        classe = np.argmax(result_a).item()
                        gt_class = np.argmax(target_a).item()
                        single_image = x.select([str(k)])
                        img_t = ecvl.TensorToView(single_image)
                        img_t.colortype_ = ecvl.ColorType.BGR
                        single_image.mult_(255.)
                        filename = d.samples_[d.GetSplit()[n]].location_[0]
                        head, tail = os.path.splitext(
                            os.path.basename(filename))
                        bname = '{}_gt_class_{}.png'.format(head, gt_class)
                        cur_path = os.path.join(
                            current_path, d.classes_[classe], bname)
                        ecvl.ImWrite(cur_path, img_t)
                        n += 1
                print(
                    f'Validation - epoch [{e + 1}/{args.epochs}] - batch [{b + 1}/{num_batches_val}] -'
                    f' accuracy={np.mean(values[:b + 1] / batch_size):.3f}')
            last_accuracy = np.mean(values / batch_size)
            print(
                f'Validation - epoch [{e + 1}/{args.epochs}] - total accuracy={last_accuracy:.3f}'
            )
            # Checkpoint only when validation accuracy improves.
            if last_accuracy > best_accuracy:
                best_accuracy = last_accuracy
                print('Saving weights')
                eddl.save_net_to_onnx_file(
                    net,
                    f'isic_classification_{args.model}_epoch_{e + 1}.onnx')
    elif args.test:
        d.SetSplit(ecvl.SplitType.test)
        num_samples_test = len(d.GetSplit())
        num_batches_test = num_samples_test // batch_size
        values = np.zeros(num_batches_test)
        eddl.reset_loss(net)
        for b in range(num_batches_test):
            d.LoadBatch(x, y)
            eddl.forward(net, [x])
            output = eddl.getOutput(out)
            value = metric_fn.value(y, output)
            values[b] = value
            if args.out_dir:
                # NOTE(review): same per-batch reset of n as in validation --
                # filenames only index the first batch of the split; confirm.
                n = 0
                for k in range(args.batch_size):
                    result = output.select([str(k)])
                    target = y.select([str(k)])
                    result_a = np.array(result, copy=False)
                    target_a = np.array(target, copy=False)
                    classe = np.argmax(result_a).item()
                    gt_class = np.argmax(target_a).item()
                    single_image = x.select([str(k)])
                    img_t = ecvl.TensorToView(single_image)
                    img_t.colortype_ = ecvl.ColorType.BGR
                    single_image.mult_(255.)
                    filename = d.samples_[d.GetSplit()[n]].location_[0]
                    head, tail = os.path.splitext(os.path.basename(filename))
                    bname = "%s_gt_class_%s.png" % (head, gt_class)
                    cur_path = os.path.join(
                        args.out_dir, d.classes_[classe], bname)
                    ecvl.ImWrite(cur_path, img_t)
                    n += 1
            print(
                f'Test - batch [{b + 1}/{num_batches_test}] - accuracy={np.mean(values[:b + 1] / batch_size):.3f}'
            )
        print(f'Test - total accuracy={np.mean(values / batch_size):.3f}')
def main(args):
    """Train an image-to-text model (ResNet18 encoder + LSTM decoder) on Flickr.

    After training, rebuilds a non-recurrent copy of the decoder, copies the
    trained parameters into it, and runs word-by-word inference for one sample.
    """
    eddl.download_flickr()
    epochs = 2 if args.small else 50
    olength = 20   # caption length (decoder timesteps)
    outvs = 2000   # output vocabulary size
    embdim = 32
    # True: remove last layers and set new top = flatten
    # new input_size: [3, 256, 256] (from [224, 224, 3])
    net = eddl.download_resnet18(True, [3, 256, 256])
    lreshape = eddl.getLayer(net, "top")
    # create a new model from input output
    image_in = eddl.getLayer(net, "input")
    # Decoder
    ldecin = eddl.Input([outvs])
    ldec = eddl.ReduceArgMax(ldecin, [0])
    ldec = eddl.RandomUniform(
        eddl.Embedding(ldec, outvs, 1, embdim, True), -0.05, 0.05)
    ldec = eddl.Concat([ldec, lreshape])
    layer = eddl.LSTM(ldec, 512, True)
    out = eddl.Softmax(eddl.Dense(layer, outvs))
    eddl.setDecoder(ldecin)
    net = eddl.Model([image_in], [out])
    # Build model
    eddl.build(
        net,
        eddl.adam(0.01),
        ["softmax_cross_entropy"],
        ["accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem))
    eddl.summary(net)
    # Load dataset
    x_train = Tensor.load("flickr_trX.bin", "bin")
    y_train = Tensor.load("flickr_trY.bin", "bin")
    if args.small:
        x_train = x_train.select([f"0:{2 * args.batch_size}", ":", ":", ":"])
        y_train = y_train.select([f"0:{2 * args.batch_size}", ":"])
    # Channels-last -> channels-first for the CNN.
    xtrain = Tensor.permute(x_train, [0, 3, 1, 2])
    y_train = Tensor.onehot(y_train, outvs)
    # batch x timesteps x input_dim
    y_train.reshape_([y_train.shape[0], olength, outvs])
    eddl.fit(net, [xtrain], [y_train], args.batch_size, epochs)
    eddl.save(net, "img2text.bin", "bin")

    print("\n === INFERENCE ===\n")
    # Get all the reshapes of the images. Only use the CNN.
    timage = Tensor([x_train.shape[0], 512])  # images reshape
    cnn = eddl.Model([image_in], [lreshape])
    eddl.build(
        cnn,
        eddl.adam(0.001),  # not relevant
        ["mse"],  # not relevant
        ["mse"],  # not relevant
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem))
    eddl.summary(cnn)
    # forward images
    xbatch = Tensor([args.batch_size, 3, 256, 256])
    # numbatches = x_train.shape[0] / args.batch_size
    # NOTE(review): only the first batch (j = 0) is forwarded, so only the
    # first batch_size rows of timage are ever filled -- presumably fine for
    # the single-sample demo below, but confirm.
    j = 0
    eddl.next_batch([x_train], [xbatch])
    eddl.forward(cnn, [xbatch])
    ybatch = eddl.getOutput(lreshape)
    sample = str(j * args.batch_size) + ":" + str((j + 1) * args.batch_size)
    timage.set_select([sample, ":"], ybatch)
    # Create Decoder non recurrent for n-best
    ldecin = eddl.Input([outvs])
    image = eddl.Input([512])
    lstate = eddl.States([2, 512])
    ldec = eddl.ReduceArgMax(ldecin, [0])
    ldec = eddl.RandomUniform(
        eddl.Embedding(ldec, outvs, 1, embdim), -0.05, 0.05)
    ldec = eddl.Concat([ldec, image])
    lstm = eddl.LSTM([ldec, lstate], 512, True)
    lstm.isrecurrent = False  # Important
    out = eddl.Softmax(eddl.Dense(lstm, outvs))
    decoder = eddl.Model([ldecin, image, lstate], [out])
    eddl.build(
        decoder,
        eddl.adam(0.001),  # not relevant
        ["softmax_cross_entropy"],  # not relevant
        ["accuracy"],  # not relevant
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem))
    eddl.summary(decoder)
    # Copy params from trained net
    eddl.copyParam(eddl.getLayer(net, "LSTM1"),
                   eddl.getLayer(decoder, "LSTM2"))
    eddl.copyParam(eddl.getLayer(net, "dense1"),
                   eddl.getLayer(decoder, "dense2"))
    eddl.copyParam(eddl.getLayer(net, "embedding1"),
                   eddl.getLayer(decoder, "embedding2"))
    # N-best for sample s
    s = 1 if args.small else 100  # sample 100
    # three input tensors with batch_size = 1 (one sentence)
    treshape = timage.select([str(s), ":"])
    text = y_train.select([str(s), ":", ":"])  # 1 x olength x outvs
    for j in range(olength):
        print(f"Word: {j}")
        word = None
        if j == 0:
            # Start token: all-zeros word vector.
            word = Tensor.zeros([1, outvs])
        else:
            # Teacher forcing: feed the ground-truth previous word.
            word = text.select(["0", str(j - 1), ":"])
            word.reshape_([1, outvs])  # batch = 1
        treshape.reshape_([1, 512])  # batch = 1
        state = Tensor.zeros([1, 2, 512])  # batch = 1
        input_ = [word, treshape, state]
        eddl.forward(decoder, input_)
        # outword = eddl.getOutput(out)
        # Carry the LSTM states forward into the next step's state tensor.
        vstates = eddl.getStates(lstm)
        for i in range(len(vstates)):
            vstates[i].reshape_([1, 1, 512])
            state.set_select([":", str(i), ":"], vstates[i])
    print("All done")
def main(args):
    """Train an MLP on MNIST using explicit per-batch loops.

    Training draws random batches each epoch; evaluation walks the test
    set sequentially in full batches and then evaluates the trailing
    partial batch (if any).
    """
    eddl.download_mnist()

    num_classes = 10

    # 3 x (Dense 1024 + ReLU) classifier over flattened 28x28 images.
    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.ReLu(eddl.Dense(layer, 1024))
    layer = eddl.ReLu(eddl.Dense(layer, 1024))
    layer = eddl.ReLu(eddl.Dense(layer, 1024))
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.sgd(0.001, 0.9),
        ["softmax_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf")
    eddl.setlogfile(net, "mnist")

    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    if args.small:
        # shrink the dataset for quick smoke runs
        x_train = x_train.select([":6000"])
        y_train = y_train.select([":6000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    x_train.div_(255.0)
    x_test.div_(255.0)

    # --- training: random batches; losses/metrics printed per epoch ---
    s = x_train.shape
    num_batches = s[0] // args.batch_size
    for i in range(args.epochs):
        eddl.reset_loss(net)
        print("Epoch %d/%d (%d batches)" % (i + 1, args.epochs, num_batches))
        for j in range(num_batches):
            indices = np.random.randint(0, s[0], args.batch_size)
            eddl.train_batch(net, [x_train], [y_train], indices)
        losses1 = eddl.get_losses(net)
        metrics1 = eddl.get_metrics(net)
        for l, m in zip(losses1, metrics1):
            print("Loss: %.6f\tMetric: %.6f" % (l, m))

    # --- evaluation: sequential full batches over the test set ---
    s = x_test.shape
    num_batches = s[0] // args.batch_size
    for j in range(num_batches):
        indices = np.arange(j * args.batch_size,
                            j * args.batch_size + args.batch_size)
        eddl.eval_batch(net, [x_test], [y_test], indices)
    losses2 = eddl.get_losses(net)
    metrics2 = eddl.get_metrics(net)
    for l, m in zip(losses2, metrics2):
        print("Loss: %.6f\tMetric: %.6f" % (l, m))

    # Evaluate the trailing partial batch. The previous code re-used the
    # indices of the last *full* batch here, so the remainder samples in
    # [num_batches * batch_size, s[0]) were never evaluated (and the last
    # full batch was scored twice); index from the end of the full batches.
    last_batch_size = s[0] % args.batch_size
    if last_batch_size:
        indices = np.arange(num_batches * args.batch_size, s[0])
        eddl.eval_batch(net, [x_test], [y_test], indices)
        losses3 = eddl.get_losses(net)
        metrics3 = eddl.get_metrics(net)
        for l, m in zip(losses3, metrics3):
            print("Loss: %.6f\tMetric: %.6f" % (l, m))
    print("All done")
batch_size = 50 num_classes = 10 bn = int(sys.argv[1]) == 1 initializer = eddl.GlorotUniform if bn else eddl.HeUniform inp = eddl.Input([3, 32, 32]) l = inp l = defblock(l, bn, 64, 2, initializer) l = defblock(l, bn, 128, 2, initializer) l = defblock(l, bn, 256, 4, initializer) l = defblock(l, bn, 512, 4, initializer) l = defblock(l, bn, 512, 4, initializer) l = eddl.Flatten(l) for i in range(2): l = initializer(eddl.Dense(l, 4096)) if (bn): l = eddl.BatchNormalization(l, 0.99, 0.001, True, "") l = eddl.ReLu(l) out = eddl.Softmax(initializer(eddl.Dense(l, num_classes))) net = eddl.Model([inp], [out]) eddl.plot(net, "model.pdf") eddl.build(net, eddl.adam(0.00001), ["soft_cross_entropy"], ["categorical_accuracy"], eddl.CS_GPU() if gpu else eddl.CS_CPU()) eddl.summary(net)
def main(args): eddl.download_mnist() num_classes = 10 in_ = eddl.Input([784]) layer = in_ layer = eddl.Reshape(layer, [-1]) layer = eddl.ReLu(eddl.Dense(layer, 1024)) layer = eddl.ReLu(eddl.Dense(layer, 1024)) layer = eddl.ReLu(eddl.Dense(layer, 1024)) out = eddl.Softmax(eddl.Dense(layer, num_classes)) net = eddl.Model([in_], [out]) eddl.build( net, eddl.rmsprop(0.01), ["soft_cross_entropy"], ["categorical_accuracy"], eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem), True # initialize weights to random values ) serialized_net = eddl.serialize_net_to_onnx_string(net, False) eddl.summary(net) x_train = Tensor.load("mnist_trX.bin") y_train = Tensor.load("mnist_trY.bin") x_test = Tensor.load("mnist_tsX.bin") y_test = Tensor.load("mnist_tsY.bin") if args.small: x_train = x_train.select([":6000"]) y_train = y_train.select([":6000"]) x_test = x_test.select([":1000"]) y_test = y_test.select([":1000"]) x_train.div_(255.0) x_test.div_(255.0) eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs) print("evaluating before import") eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size) imported_net = eddl.import_net_from_onnx_string(serialized_net) eddl.build( imported_net, eddl.rmsprop(0.01), ["soft_cross_entropy"], ["categorical_accuracy"], eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem), False # do not initialize weights to random values ) eddl.summary(imported_net) print("net layers:", len(net.layers)) print("imported_net layers:", len(imported_net.layers)) print("evaluating imported net") eddl.evaluate(imported_net, [x_test], [y_test], bs=args.batch_size) print("All done")
batch_size = 100 num_classes = 10 bn = int(sys.argv[1]) == 1 inp = eddl.Input([3, 32, 32]) l = inp l = defblock(l, bn, 64, 2) l = defblock(l, bn, 128, 2) l = defblock(l, bn, 256, 3) l = defblock(l, bn, 512, 3) l = defblock(l, bn, 512, 3) l = eddl.Flatten(l) for i in range(2): l = eddl.GlorotUniform(eddl.Dense(l, 4096)) if(bn): l = eddl.BatchNormalization(l, 0.99, 0.001, True, "") l = eddl.ReLu(l) out = eddl.Softmax(eddl.GlorotUniform(eddl.Dense(l, num_classes))) net = eddl.Model([inp], [out]) eddl.plot(net, "model.pdf") eddl.build(net, eddl.adam(0.0001), ["soft_cross_entropy"], ["categorical_accuracy"], eddl.CS_GPU() if gpu else eddl.CS_CPU() )
def main(args): eddl.download_cifar10() num_classes = 10 in_ = eddl.Input([3, 32, 32]) layer = in_ layer = eddl.RandomHorizontalFlip(layer) layer = eddl.RandomCropScale(layer, [0.8, 1.0]) layer = eddl.RandomCutout(layer, [0.1, 0.5], [0.1, 0.5]) layer = eddl.MaxPool(eddl.ReLu(eddl.BatchNormalization( eddl.HeUniform(eddl.Conv(layer, 32, [3, 3], [1, 1], "same", False)), True)), [2, 2]) layer = eddl.MaxPool(eddl.ReLu(eddl.BatchNormalization( eddl.HeUniform(eddl.Conv(layer, 64, [3, 3], [1, 1], "same", False)), True)), [2, 2]) layer = eddl.MaxPool(eddl.ReLu(eddl.BatchNormalization( eddl.HeUniform(eddl.Conv(layer, 128, [3, 3], [1, 1], "same", False)), True)), [2, 2]) layer = eddl.MaxPool(eddl.ReLu(eddl.BatchNormalization( eddl.HeUniform(eddl.Conv(layer, 256, [3, 3], [1, 1], "same", False)), True)), [2, 2]) layer = eddl.Reshape(layer, [-1]) layer = eddl.Activation(eddl.BatchNormalization( eddl.Dense(layer, 128), True ), "relu") out = eddl.Softmax(eddl.BatchNormalization( eddl.Dense(layer, num_classes), True )) net = eddl.Model([in_], [out]) eddl.build( net, eddl.adam(0.001), ["softmax_cross_entropy"], ["categorical_accuracy"], eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem) ) eddl.summary(net) eddl.plot(net, "model.pdf") x_train = Tensor.load("cifar_trX.bin") y_train = Tensor.load("cifar_trY.bin") x_train.div_(255.0) x_test = Tensor.load("cifar_tsX.bin") y_test = Tensor.load("cifar_tsY.bin") x_test.div_(255.0) if args.small: x_train = x_train.select([":5000"]) y_train = y_train.select([":5000"]) x_test = x_test.select([":1000"]) y_test = y_test.select([":1000"]) for i in range(args.epochs): eddl.fit(net, [x_train], [y_train], args.batch_size, 1) eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size) eddl.setlr(net, [0.0001]) for i in range(args.epochs): eddl.fit(net, [x_train], [y_train], args.batch_size, 1) eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size) print("All done")
def main(args): freeze_epochs = 2 unfreeze_epochs = 5 num_classes = 10 # 10 labels in cifar10 eddl.download_cifar10() eddl.download_model("resnet18.onnx", "re7jodd12srksd7") net = eddl.import_net_from_onnx_file("resnet18.onnx", [3, 32, 32], DEV_CPU) names = [_.name for _ in net.layers] # Remove dense output layer eddl.removeLayer(net, "resnetv15_dense0_fwd") # Get last layer to connect the new dense layer = eddl.getLayer(net, "flatten_170") out = eddl.Softmax(eddl.Dense(layer, num_classes, True, "new_dense")) # Get input layer in_ = eddl.getLayer(net, "data") # Create a new model net = eddl.Model([in_], [out]) eddl.build( net, eddl.adam(0.0001), ["softmax_cross_entropy"], ["categorical_accuracy"], eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem), False # do not initialize weights to random values ) eddl.summary(net) # Force initialization of new layers eddl.initializeLayer(net, "new_dense") x_train = Tensor.load("cifar_trX.bin") y_train = Tensor.load("cifar_trY.bin") x_test = Tensor.load("cifar_tsX.bin") y_test = Tensor.load("cifar_tsY.bin") if args.small: sel = [f":{2 * args.batch_size}"] x_train = x_train.select(sel) y_train = y_train.select(sel) x_test = x_test.select(sel) y_test = y_test.select(sel) x_train.div_(255.0) x_test.div_(255.0) # Freeze pretrained weights for n in names: eddl.setTrainable(net, n, False) # Train new layers eddl.fit(net, [x_train], [y_train], args.batch_size, freeze_epochs) # Unfreeze weights for n in names: eddl.setTrainable(net, n, True) # Train all layers eddl.fit(net, [x_train], [y_train], args.batch_size, unfreeze_epochs) # Evaluate eddl.evaluate(net, [x_test], [y_test], args.batch_size) print("All done")