def reset_eddl_net_params(net, weight='zeros', bias='zeros'):
    layers = net.layers
    for l in layers:
        name = l.name
        params = l.params
        w_np = None
        b_np = None
        for index, p in enumerate(params):
            if index == 0:
                if weight == 'zeros':
                    w_np = np.zeros_like(p.getdata())
                else:
                    w_np = np.ones_like(p.getdata())
            if index == 1:
                if bias == 'zeros':
                    b_np = np.zeros_like(p.getdata())
                else:
                    b_np = np.ones_like(p.getdata())
        w_np_t = Tensor.fromarray(w_np)
        b_np_t = Tensor.fromarray(b_np)
        # Update of the parameters
        l.update_weights(w_np_t, b_np_t)
        eddl.distributeParams(l)
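# Hedged usage sketch (illustrative, not part of the original code): reset all
# weights and biases of an imported model to zeros, e.g. before distributing
# externally trained parameters. "model.onnx" and the build arguments are
# assumptions for the sake of the example.
def example_reset_params(onnx_path="model.onnx"):
    net = eddl.import_net_from_onnx_file(onnx_path)
    eddl.build(
        net,
        eddl.sgd(0.01),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_CPU(),
        False  # keep the imported weights; they are reset explicitly below
    )
    reset_eddl_net_params(net, weight='zeros', bias='zeros')
    return net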
def main(args):
    if not os.path.isfile(args.input):
        raise RuntimeError("input file '%s' not found" % args.input)
    eddl.download_mnist()
    print("importing net from", args.input)
    net = eddl.import_net_from_onnx_file(args.input)
    print("input.shape:", net.layers[0].input.shape)
    print("output size =", len(net.lout))
    eddl.build(
        net,
        eddl.rmsprop(0.01),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem),
        False  # do not initialize weights to random values
    )
    net.resize(args.batch_size)  # resize manually since we don't use "fit"
    eddl.summary(net)
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    x_test.div_(255.0)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def eddl_load_data_cifar(path=None, download=False, load_subset=False,
                         subset_size=4096):
    """
    :param path: path to the CIFAR-10 dataset
    :param download: default False -- download the dataset to the current
        directory
    :param load_subset: default False -- use a subset of the dataset for
        testing purposes
    :param subset_size: default 4096; the test subset size is subset_size / 4
    :return: two tuples, (x_train, y_train) and (x_test, y_test)
    """
    (x_train, y_train), (x_test, y_test) = resnet.load_cifar(path, download)
    if load_subset:
        x_subset_train = Tensor([subset_size, 3, 32, 32])
        y_subset_train = Tensor([subset_size, 10])
        eddl.next_batch([x_train], [x_subset_train])
        eddl.next_batch([y_train], [y_subset_train])
        x_subset_test = Tensor([math.ceil(subset_size / 4), 3, 32, 32])
        y_subset_test = Tensor([math.ceil(subset_size / 4), 10])
        eddl.next_batch([x_test], [x_subset_test])
        eddl.next_batch([y_test], [y_subset_test])
        return (x_subset_train, y_subset_train), (x_subset_test, y_subset_test)
    return (x_train, y_train), (x_test, y_test)
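# Hedged usage sketch (illustrative, not part of the original code): load a
# small CIFAR-10 subset for a quick smoke test. It assumes the cifar_*.bin
# files are already available under the given path (or pass download=True).
def example_load_cifar_subset():
    (x_train, y_train), (x_test, y_test) = eddl_load_data_cifar(
        path=".", download=False, load_subset=True, subset_size=1024)
    print("train:", x_train.getShape(), "test:", x_test.getShape())
    return (x_train, y_train), (x_test, y_test)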
def main(args):
    img = ecvl.ImRead(args.in_img)
    augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugRotate([-5, 5]),
        ecvl.AugMirror(.5),
        ecvl.AugFlip(.5),
        ecvl.AugGammaContrast([3, 5]),
        ecvl.AugAdditiveLaplaceNoise([0, 0.2 * 255]),
        ecvl.AugCoarseDropout([0, 0.55], [0.02, 0.1], 0.5),
        ecvl.AugAdditivePoissonNoise([0, 40]),
        ecvl.AugResizeDim([500, 500]),
    ])
    ecvl.AugmentationParam.SetSeed(0)
    augs.Apply(img)
    print("Executing ImageToTensor")
    t = ecvl.ImageToTensor(img)
    t.div_(128)
    t.mult_(128)
    print("Executing TensorToImage")
    img = ecvl.TensorToImage(t)
    print("Executing TensorToView")
    ecvl.TensorToView(t)
    _ = ecvl.AugmentationFactory.create(AUG_TXT)
    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugRotate([-5, 5]),
        ecvl.AugAdditiveLaplaceNoise([0, 0.2 * 255]),
        ecvl.AugCoarseDropout([0, 0.55], [0.02, 0.1], 0),
        ecvl.AugAdditivePoissonNoise([0, 40]),
        ecvl.AugResizeDim([30, 30]),
    ])
    test_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim([30, 30]),
    ])
    ds_augs = ecvl.DatasetAugmentations([training_augs, None, test_augs])
    batch_size = 64
    print("Creating a DLDataset")
    d = ecvl.DLDataset(args.in_ds, batch_size, ds_augs, ecvl.ColorType.GRAY)
    print("Create x and y")
    x = Tensor(
        [batch_size, d.n_channels_, d.resize_dims_[0], d.resize_dims_[1]])
    y = Tensor([batch_size, len(d.classes_)])
    # Load a batch of d.batch_size_ images into x and the corresponding labels
    # into y. Images are resized to the dimensions specified in the
    # augmentations chain.
    print("Executing LoadBatch on training set")
    d.LoadBatch(x, y)
    # Change color type and channels
    img = ecvl.TensorToImage(x)
    img.colortype_ = ecvl.ColorType.GRAY
    img.channels_ = "xyc"
    # Switch to the test split and load a batch of images
    print("Executing LoadBatch on test set")
    d.SetSplit(ecvl.SplitType.test)
    d.LoadBatch(x, y)
def load_cifar(path=None, download=False):
    if path is None:
        path = "./"
    if download:
        eddl.download_cifar10()
    print(path)
    try:
        x_train = Tensor.load(path + "/cifar_trX.bin")
        y_train = Tensor.load(path + "/cifar_trY.bin")
    except Exception:
        print(
            "Failed to load the training set; make sure you supplied the "
            "correct path"
        )
        exit()
    try:
        x_test = Tensor.load(path + "/cifar_tsX.bin")
        y_test = Tensor.load(path + "/cifar_tsY.bin")
    except Exception:
        print(
            "Failed to load the test set; make sure you supplied the correct "
            "path"
        )
        exit()
    x_train.div_(255.0)
    x_test.div_(255.0)
    return (x_train, y_train), (x_test, y_test)
def main(args):
    eddl.download_mnist()
    num_classes = 10
    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.Reshape(layer, [1, 28, 28])
    layer = eddl.RandomCropScale(layer, [0.9, 1.0])
    layer = eddl.Reshape(layer, [-1])
    layer = eddl.ReLu(
        eddl.GaussianNoise(
            eddl.BatchNormalization(eddl.Dense(layer, 1024), True), 0.3))
    layer = eddl.ReLu(
        eddl.GaussianNoise(
            eddl.BatchNormalization(eddl.Dense(layer, 1024), True), 0.3))
    layer = eddl.ReLu(
        eddl.GaussianNoise(
            eddl.BatchNormalization(eddl.Dense(layer, 1024), True), 0.3))
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.sgd(0.01, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf")
    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    if args.small:
        x_train = x_train.select([":6000"])
        y_train = y_train.select([":6000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    x_train.div_(255.0)
    x_test.div_(255.0)
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    # LR annealing
    if args.epochs < 4:
        return
    eddl.setlr(net, [0.005, 0.9])
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs // 2)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    eddl.setlr(net, [0.001, 0.9])
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs // 2)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    eddl.setlr(net, [0.0001, 0.9])
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs // 4)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def read_input(filename, split_ratio=0.7):
    data = np.load(filename)['d']
    # Shuffle data
    sel_index = np.arange(data.shape[0])
    np.random.shuffle(sel_index)
    shuffled_data = data[sel_index]
    # Add a column for two-class (one-hot) labels
    shuffled_data = np.c_[shuffled_data, np.zeros(shuffled_data.shape[0])]
    shuffled_data[:, 4][shuffled_data[:, 3] == 0] = 1.  # [1] -> [1 0], [0] -> [0 1]
    # Swap the last two columns: [1] -> [0 1], [0] -> [1 0]
    shuffled_data[:, [3, 4]] = shuffled_data[:, [4, 3]]
    # Train/test split
    n_train = int(shuffled_data.shape[0] * split_ratio)
    train = shuffled_data[0:n_train]
    test = shuffled_data[n_train:]
    x_trn = train[:, :3]
    y_trn = train[:, 3:]
    x_test = test[:, :3]
    y_test = test[:, 3:]
    # Tensor creation
    x_train_t = Tensor.fromarray(x_trn.astype(np.float32))
    y_train_t = Tensor.fromarray(y_trn.astype(np.float32))
    x_test_t = Tensor.fromarray(x_test.astype(np.float32))
    y_test_t = Tensor.fromarray(y_test.astype(np.float32))
    return x_train_t, y_train_t, x_test_t, y_test_t
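# Hedged sketch (illustrative, not part of the original code): read_input above
# assumes an .npz file whose 'd' array has one sample per row, three feature
# columns and a binary label in the fourth column. The helper below builds a
# tiny synthetic file of that shape for testing; the file name is an assumption.
def make_dummy_input(filename="dummy_input.npz", n=100):
    features = np.random.random((n, 3)).astype(np.float32)
    labels = np.random.randint(0, 2, size=(n, 1)).astype(np.float32)
    np.savez(filename, d=np.hstack([features, labels]))
    return filename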
def main(args):
    eddl.download_cifar10()
    num_classes = 10
    in_ = eddl.Input([3, 32, 32])
    layer = in_
    layer = eddl.RandomCropScale(layer, [0.8, 1.0])
    layer = eddl.RandomHorizontalFlip(layer)
    layer = eddl.ReLu(BG(eddl.Conv(layer, 64, [3, 3], [1, 1], "same", False)))
    layer = eddl.Pad(layer, [0, 1, 1, 0])
    for i in range(3):
        layer = ResBlock(layer, 64, 0, i == 0)
    for i in range(4):
        layer = ResBlock(layer, 128, i == 0)
    for i in range(6):
        layer = ResBlock(layer, 256, i == 0)
    for i in range(3):
        layer = ResBlock(layer, 512, i == 0)
    layer = eddl.MaxPool(layer, [4, 4])
    layer = eddl.Reshape(layer, [-1])
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.sgd(0.001, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf", "TB")
    x_train = Tensor.load("cifar_trX.bin")
    y_train = Tensor.load("cifar_trY.bin")
    x_train.div_(255.0)
    x_test = Tensor.load("cifar_tsX.bin")
    y_test = Tensor.load("cifar_tsY.bin")
    x_test.div_(255.0)
    if args.small:
        # this is slow, make it really small
        x_train = x_train.select([":500"])
        y_train = y_train.select([":500"])
        x_test = x_test.select([":100"])
        y_test = y_test.select([":100"])
    lr = 0.01
    for j in range(3):
        lr /= 10.0
        eddl.setlr(net, [lr, 0.9])
        for i in range(args.epochs):
            eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
            eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def update_eddl_net_params(keras_params_d, net, include_top=True):
    if include_top:
        layers_l = [(l.name, l.params[0].getShape(), l.params[1].getShape(), l)
                    for l in net.layers
                    if 'conv' in l.name or 'dense' in l.name]
        keras_l = [k for k in sorted(keras_params_d.keys())]
    else:
        layers_l = [(l.name, l.params[0].getShape(), l.params[1].getShape(), l)
                    for l in net.layers if 'conv' in l.name]
        keras_l = [k for k in sorted(keras_params_d.keys()) if 'conv' in k]
    for index, k in enumerate(keras_l):
        w_np = keras_params_d[k]['w']
        b_np = keras_params_d[k]['b']
        l = layers_l[index]
        # Transpose to match the EDDL tensor shape for convolutional layers
        if 'conv' in l[0]:
            print("Conv before transpose", w_np.shape)
            w_np = np.transpose(w_np, (3, 2, 0, 1))
        # Dense layer immediately after the flattening. The weight order is
        # different because the previous feature maps are channels-last in
        # Keras, while EDDL expects them channels-first.
        if l[0] == 'dense1':
            x = keras_params_d[keras_l[index - 1]]['w'].shape[0]
            y = keras_params_d[keras_l[index - 1]]['w'].shape[1]
            n_ch = keras_params_d[keras_l[index - 1]]['w'].shape[3]
            print('After flattening. #Channels of previous layer is: %d' % n_ch)
            # Convert w_np as if the previous feature maps were channels-first
            outputs = w_np.shape[1]
            print(w_np.shape)
            w_np_ch_f = np.zeros_like(w_np)
            for o in range(outputs):
                for offset in range(n_ch):
                    lll = w_np[offset::n_ch, o].shape[0]
                    w_np_ch_f[offset:offset + lll, o] = w_np[offset::n_ch, o]
        # Shape checks
        eddl_w_shape = np.array(l[1])
        eddl_b_shape = np.array(l[2])
        print(l[0], k)
        print(eddl_w_shape, w_np.shape)
        print(eddl_b_shape, b_np.shape)
        # Convert the numpy arrays to tensors
        w_np_t = Tensor.fromarray(w_np)
        b_np_t = Tensor.fromarray(b_np)
        # Update the parameters
        l[3].update_weights(w_np_t, b_np_t)
        eddl.distributeParams(l[3])
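# Hedged sketch (illustrative, not part of the original code):
# update_eddl_net_params expects keras_params_d to map layer names to
# {'w': kernel ndarray, 'b': bias ndarray}. One way to build such a dict from a
# loaded Keras model might look like this; the helper name is an assumption.
def keras_params_to_dict(keras_model):
    params_d = {}
    for keras_layer in keras_model.layers:
        weights = keras_layer.get_weights()
        if len(weights) == 2:  # keep only layers with a kernel and a bias
            params_d[keras_layer.name] = {'w': weights[0], 'b': weights[1]}
    return params_d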
def main(args):
    eddl.download_cifar10()
    num_classes = 10
    in_ = eddl.Input([3, 32, 32])
    layer = in_
    layer = eddl.MaxPool(eddl.ReLu(Normalization(
        eddl.Conv(layer, 32, [3, 3], [1, 1])
    )), [2, 2])
    layer = eddl.MaxPool(eddl.ReLu(Normalization(
        eddl.Conv(layer, 64, [3, 3], [1, 1])
    )), [2, 2])
    layer = eddl.MaxPool(eddl.ReLu(Normalization(
        eddl.Conv(layer, 128, [3, 3], [1, 1])
    )), [2, 2])
    layer = eddl.MaxPool(eddl.ReLu(Normalization(
        eddl.Conv(layer, 256, [3, 3], [1, 1])
    )), [2, 2])
    layer = eddl.GlobalMaxPool(layer)
    layer = eddl.Flatten(layer)
    layer = eddl.Activation(eddl.Dense(layer, 128), "relu")
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.adam(0.001),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf")
    x_train = Tensor.load("cifar_trX.bin")
    y_train = Tensor.load("cifar_trY.bin")
    x_train.div_(255.0)
    x_test = Tensor.load("cifar_tsX.bin")
    y_test = Tensor.load("cifar_tsY.bin")
    x_test.div_(255.0)
    if args.small:
        x_train = x_train.select([":5000"])
        y_train = y_train.select([":5000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    for i in range(args.epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
        eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def main(args):
    eddl.download_cifar10()
    num_classes = 10
    in_ = eddl.Input([3, 32, 32])
    layer = in_
    layer = eddl.RandomCropScale(layer, [0.8, 1.0])
    layer = eddl.RandomFlip(layer, 1)
    layer = eddl.ReLu(BG(eddl.Conv(layer, 64, [3, 3], [1, 1])))
    layer = eddl.Pad(layer, [0, 1, 1, 0])
    layer = ResBlock(layer, 64, 2, True)
    layer = ResBlock(layer, 64, 2, False)
    layer = ResBlock(layer, 128, 2, True)
    layer = ResBlock(layer, 128, 2, False)
    layer = ResBlock(layer, 256, 2, True)
    layer = ResBlock(layer, 256, 2, False)
    layer = ResBlock(layer, 256, 2, True)
    layer = ResBlock(layer, 256, 2, False)
    layer = eddl.Reshape(layer, [-1])
    layer = eddl.ReLu(BG(eddl.Dense(layer, 512)))
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.sgd(0.01, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf", "TB")
    x_train = Tensor.load("cifar_trX.bin")
    y_train = Tensor.load("cifar_trY.bin")
    x_train.div_(255.0)
    x_test = Tensor.load("cifar_tsX.bin")
    y_test = Tensor.load("cifar_tsY.bin")
    x_test.div_(255.0)
    if args.small:
        x_train = x_train.select([":5000"])
        y_train = y_train.select([":5000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    for i in range(args.epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
        eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def test_py_metric():
    shape = [8, 10]
    a = np.random.random(shape).astype(np.float32)
    b = np.random.random(shape).astype(np.float32)
    t, y = Tensor.fromarray(a), Tensor.fromarray(b)
    v = MSEMetric().value(t, y)
    exp_v = eddl.getMetric("mse").value(t, y)
    assert v == pytest.approx(exp_v)
    v = CategoricalAccuracy().value(t, y)
    exp_v = eddl.getMetric("categorical_accuracy").value(t, y)
    assert v == pytest.approx(exp_v)
def main(args):
    eddl.download_cifar10()
    num_classes = 10
    in_ = eddl.Input([3, 32, 32])
    layer = in_
    layer = eddl.RandomCropScale(layer, [0.8, 1.0])
    layer = eddl.RandomFlip(layer, 1)
    layer = eddl.RandomCutout(layer, [0.1, 0.3], [0.1, 0.3])
    layer = eddl.MaxPool(Block3_2(layer, 64))
    layer = eddl.MaxPool(Block3_2(layer, 128))
    layer = eddl.MaxPool(Block1(Block3_2(layer, 256), 256))
    layer = eddl.MaxPool(Block1(Block3_2(layer, 512), 512))
    layer = eddl.MaxPool(Block1(Block3_2(layer, 512), 512))
    layer = eddl.Reshape(layer, [-1])
    layer = eddl.Activation(eddl.Dense(layer, 512), "relu")
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.sgd(0.001, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.setlogfile(net, "vgg16")
    eddl.summary(net)
    eddl.plot(net, "model.pdf")
    x_train = Tensor.load("cifar_trX.bin")
    y_train = Tensor.load("cifar_trY.bin")
    x_train.div_(255.0)
    x_test = Tensor.load("cifar_tsX.bin")
    y_test = Tensor.load("cifar_tsY.bin")
    x_test.div_(255.0)
    if args.small:
        x_train = x_train.select([":5000"])
        y_train = y_train.select([":5000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    for i in range(args.epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
        eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def main(args):
    eddl.download_mnist()
    in_ = eddl.Input([784])
    target = eddl.Reshape(in_, [1, 28, 28])
    layer = in_
    layer = eddl.Reshape(layer, [1, 28, 28])
    layer = eddl.ReLu(eddl.Conv(layer, 8, [3, 3]))
    layer = eddl.ReLu(eddl.Conv(layer, 16, [3, 3]))
    layer = eddl.ReLu(eddl.Conv(layer, 8, [3, 3]))
    out = eddl.Sigmoid(eddl.Conv(layer, 1, [3, 3]))
    net = eddl.Model([in_], [])
    eddl.build(
        net,
        eddl.adam(0.001),
        [],
        [],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    x_train = Tensor.load("mnist_trX.bin")
    if args.small:
        x_train = x_train.select([":6000"])
    x_train.div_(255.0)
    mse = eddl.newloss(mse_loss, [out, target], "mse_loss")
    dicei = eddl.newloss(dice_loss_img, [out, target], "dice_loss_img")
    dicep = eddl.newloss(dice_loss_pixel, [out, target], "dice_loss_pixel")
    batch = Tensor([args.batch_size, 784])
    num_batches = x_train.shape[0] // args.batch_size
    for i in range(args.epochs):
        print("Epoch %d/%d (%d batches)" % (i + 1, args.epochs, num_batches))
        diceploss = 0.0
        diceiloss = 0.0
        mseloss = 0
        for j in range(num_batches):
            print("Batch %d " % j, end="", flush=True)
            eddl.next_batch([x_train], [batch])
            eddl.zeroGrads(net)
            eddl.forward(net, [batch])
            diceploss += eddl.compute_loss(dicep) / args.batch_size
            print("diceploss = %.6f " % (diceploss / (j + 1)), end="")
            diceiloss += eddl.compute_loss(dicei) / args.batch_size
            print("diceiloss = %.6f " % (diceiloss / (j + 1)), end="")
            mseloss += eddl.compute_loss(mse) / args.batch_size
            print("mseloss = %.6f\r" % (mseloss / (j + 1)), end="")
            eddl.optimize(dicep)
            eddl.update(net)
        print()
    print("All done")
def main(args):
    eddl.download_mnist()
    num_classes = 10
    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.Reshape(layer, [1, 784])  # image as a 1D signal with depth 1
    layer = eddl.MaxPool1D(
        eddl.ReLu(eddl.Conv1D(layer, 16, [3], [1])), [4], [4],
    )
    layer = eddl.MaxPool1D(
        eddl.ReLu(eddl.Conv1D(layer, 32, [3], [1])), [4], [4],
    )
    layer = eddl.MaxPool1D(
        eddl.ReLu(eddl.Conv1D(layer, 64, [3], [1])), [4], [4],
    )
    layer = eddl.MaxPool1D(
        eddl.ReLu(eddl.Conv1D(layer, 64, [3], [1])), [4], [4],
    )
    layer = eddl.Reshape(layer, [-1])
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.rmsprop(0.01),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    if args.small:
        x_train = x_train.select([":6000"])
        y_train = y_train.select([":6000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    x_train.div_(255.0)
    x_test.div_(255.0)
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def main(args):
    eddl.download_mnist()
    num_classes = 10
    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.Activation(eddl.Dense(layer, 1024), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 1024), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 1024), "relu")
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    acc = CategoricalAccuracy()
    net.build(
        eddl.sgd(0.01, 0.9),
        [eddl.getLoss("soft_cross_entropy")],
        [acc],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf")
    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    # y_test = Tensor.load("mnist_tsY.bin")
    x_train.div_(255.0)
    x_test.div_(255.0)
    num_samples = x_train.shape[0]
    num_batches = num_samples // args.batch_size
    test_samples = x_test.shape[0]
    test_batches = test_samples // args.batch_size
    eddl.set_mode(net, TRMODE)
    for i in range(args.epochs):
        for j in range(num_batches):
            print("Epoch %d/%d (batch %d/%d)" %
                  (i + 1, args.epochs, j + 1, num_batches))
            indices = np.random.randint(0, num_samples, args.batch_size)
            eddl.train_batch(net, [x_train], [y_train], indices)
        for j in range(test_batches):
            print("Epoch %d/%d (batch %d/%d)" %
                  (i + 1, args.epochs, j + 1, test_batches))
            indices = np.random.randint(0, num_samples, args.batch_size)
            eddl.eval_batch(net, [x_train], [y_train], indices)
    print("All done")
def main(args):
    eddl.download_mnist()
    num_classes = 10
    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.Reshape(layer, [-1])
    layer = eddl.ReLu(eddl.Dense(layer, 1024))
    layer = eddl.BatchNormalization(layer, True)
    layer = eddl.ReLu(eddl.Dense(layer, 1024))
    layer = eddl.BatchNormalization(layer, True)
    layer = eddl.ReLu(eddl.Dense(layer, 1024))
    layer = eddl.BatchNormalization(layer, True)
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.rmsprop(0.01),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem),
        True  # initialize weights to random values
    )
    eddl.summary(net)
    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    if args.small:
        x_train = x_train.select([":6000"])
        y_train = y_train.select([":6000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    x_train.div_(255.0)
    x_test.div_(255.0)
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    eddl.save_net_to_onnx_file(net, args.output)
    print("saved net to", args.output)
    print("All done")
def main(args):
    eddl.download_mnist()
    num_classes = 10
    in_ = eddl.Input([28])
    layer = in_
    layer = eddl.LeakyReLu(eddl.Dense(layer, 32))
    layer = eddl.L2(eddl.LSTM(layer, 128), 0.001)
    ls = layer
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.rmsprop(0.001),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf")
    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    if args.small:
        x_train = x_train.select([":6000"])
        y_train = y_train.select([":6000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    x_train.reshape_([x_train.shape[0], 28, 28])
    x_test.reshape_([x_test.shape[0], 28, 28])
    y_train.reshape_([y_train.shape[0], 1, 10])
    y_test.reshape_([y_test.shape[0], 1, 10])
    x_train.div_(255.0)
    x_test.div_(255.0)
    for i in range(args.epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
        eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    ls_in = eddl.getInput(ls)
    ls_in.info()
    ls_out = eddl.getOutput(ls)
    ls_out.info()
    print("All done")
def main(args):
    eddl.download_mnist()
    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.Activation(eddl.Dense(layer, 256), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 128), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 64), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 128), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 256), "relu")
    out = eddl.Dense(layer, 784)
    net = eddl.Model([in_], [out])
    mse_loss = MSELoss()
    mse_metric = MSEMetric()
    net.build(
        eddl.sgd(0.001, 0.9),
        [mse_loss],
        [mse_metric],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf")
    x_train = Tensor.load("mnist_trX.bin")
    x_train.div_(255.0)
    eddl.fit(net, [x_train], [x_train], args.batch_size, args.epochs)
    print("All done")
def main(args):
    in_channels = 3
    in_height = 224
    in_width = 224
    print("Importing ONNX model")
    net = eddl.import_net_from_onnx_file(args.model_fn,
                                         [in_channels, in_height, in_width])
    # Add a softmax layer to get probabilities directly from the model
    input_ = net.lin[0]   # getLayer(net, "input_layer_name")
    output = net.lout[0]  # getLayer(net, "output_layer_name")
    new_output = eddl.Softmax(output)
    net = eddl.Model([input_], [new_output])
    eddl.build(
        net,
        eddl.adam(0.001),           # not used for prediction
        ["softmax_cross_entropy"],  # not used for prediction
        ["categorical_accuracy"],   # not used for prediction
        eddl.CS_GPU() if args.gpu else eddl.CS_CPU(),
        False  # disable model initialization, we want to use the ONNX weights
    )
    eddl.summary(net)
    image = Tensor.load(args.img_fn)
    image_preprocessed = preprocess_input_resnet34(image,
                                                   [in_height, in_width])
    outputs = eddl.predict(net, [image_preprocessed])
    print("Reading class names...")
    with open(args.classes_fn, "rt") as f:
        class_names = [_.strip() for _ in f]
    print("Top 5 predictions:")
    print(eddl.get_topk_predictions(outputs[0], class_names, 5))
def main(args):
    eddl.download_mnist()
    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.Activation(eddl.Dense(layer, 256), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 128), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 64), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 128), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 256), "relu")
    out = eddl.Dense(layer, 784)
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.sgd(0.001, 0.9),
        ["mean_squared_error"],
        ["mean_squared_error"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf")
    x_train = Tensor.load("mnist_trX.bin")
    if args.small:
        x_train = x_train.select([":6000"])
    x_train.div_(255.0)
    eddl.fit(net, [x_train], [x_train], args.batch_size, args.epochs)
    tout = eddl.predict(net, [x_train])
    tout[0].info()
    print("All done")
def main(args):
    if not os.path.isfile(args.input):
        raise RuntimeError("input file '%s' not found" % args.input)
    eddl.download_mnist()
    net = eddl.import_net_from_onnx_file(args.input)
    eddl.build(
        net,
        eddl.rmsprop(0.01),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem),
        False  # do not initialize weights to random values
    )
    net.resize(args.batch_size)  # resize manually since we don't use "fit"
    eddl.summary(net)
    x_test = Tensor.load("mnist_tsX.bin")
    x_test.div_(255.0)
    sys.stderr.write("forward...\n")
    eddl.forward(net, [x_test])
    sys.stderr.write("forward done\n")
    sys.stderr.write("lout: %r\n" % (net.lout, ))
    out = eddl.getOut(net)
    sys.stderr.write("getOut done\n")
    sys.stderr.write("out: %r\n" % (out, ))
    print("All done")
def test_py_loss():
    shape = [8, 10]
    a = np.random.random(shape).astype(np.float32)
    b = np.random.random(shape).astype(np.float32)
    t, y = Tensor.fromarray(a), Tensor.fromarray(b)
    z = Tensor(shape)
    exp_z = Tensor(shape)
    py_mse_loss = MSELoss()
    mse_loss = eddl.getLoss("mse")
    mse_loss.delta(t, y, exp_z)
    py_mse_loss.delta(t, y, z)
    c = np.array(z, copy=False)
    exp_c = np.array(exp_z, copy=False)
    assert np.array_equal(c, exp_c)
    v = py_mse_loss.value(t, y)
    exp_v = mse_loss.value(t, y)
    assert v == pytest.approx(exp_v)
def preprocess_input_resnet34(input_, target_size):
    mean_vec = Tensor.fromarray(
        np.array([0.485, 0.456, 0.406], dtype=np.float32), input_.device)
    std_vec = Tensor.fromarray(
        np.array([0.229, 0.224, 0.225], dtype=np.float32), input_.device)
    if input_.ndim not in {3, 4}:
        raise RuntimeError("Input tensor must be 3D or 4D")
    if input_.ndim == 3:
        input_.unsqueeze_(0)  # convert to 4D
    new_input = input_.scale(target_size)  # (height, width)
    # Normalization [0..1]
    new_input.mult_(1 / 255.0)
    # Standardization: (X - mean) / std
    mean = Tensor.broadcast(mean_vec, new_input)
    std = Tensor.broadcast(std_vec, new_input)
    new_input.sub_(mean)
    new_input.div_(std)
    return new_input
def main(args):
    eddl.download_imdb_2000()
    epochs = 2 if args.small else 10
    length = 250
    embdim = 33
    vocsize = 2000
    in_ = eddl.Input([1])  # 1 word
    layer = in_
    layer = eddl.RandomUniform(eddl.Embedding(layer, vocsize, 1, embdim),
                               -0.05, 0.05)
    layer = eddl.GRU(layer, 37)
    layer = eddl.ReLu(eddl.Dense(layer, 256))
    out = eddl.Sigmoid(eddl.Dense(layer, 1))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.adam(0.001),
        ["cross_entropy"],
        ["binary_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    x_train = Tensor.load("imdb_2000_trX.bin")
    y_train = Tensor.load("imdb_2000_trY.bin")
    x_test = Tensor.load("imdb_2000_tsX.bin")
    y_test = Tensor.load("imdb_2000_tsY.bin")
    # batch x timesteps x input_dim
    x_train.reshape_([x_train.shape[0], length, 1])
    x_test.reshape_([x_test.shape[0], length, 1])
    y_train.reshape_([y_train.shape[0], 1, 1])
    y_test.reshape_([y_test.shape[0], 1, 1])
    if args.small:
        x_train = x_train.select([":64", ":", ":"])
        y_train = y_train.select([":64", ":", ":"])
        x_test = x_test.select([":64", ":", ":"])
        y_test = y_test.select([":64", ":", ":"])
    for i in range(epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
        eddl.evaluate(net, [x_test], [y_test])
    print("All done")
def main(args):
    eddl.download_mnist()
    num_classes = 10
    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.BatchNormalization(
        eddl.Activation(eddl.L2(eddl.Dense(layer, 1024), 0.0001), "relu"), True
    )
    layer = eddl.BatchNormalization(
        eddl.Activation(eddl.L2(eddl.Dense(layer, 1024), 0.0001), "relu"), True
    )
    layer = eddl.BatchNormalization(
        eddl.Activation(eddl.L2(eddl.Dense(layer, 1024), 0.0001), "relu"), True
    )
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    acc = CategoricalAccuracy()
    net.build(
        eddl.sgd(0.01, 0.9),
        [eddl.getLoss("soft_cross_entropy")],
        [acc],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf")
    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    x_train.div_(255.0)
    x_test.div_(255.0)
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def main(args):
    eddl.download_mnist()
    num_classes = 10
    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.ReLu(eddl.L2(eddl.Dense(layer, 1024), 0.0001))
    layer = eddl.ReLu(eddl.L1(eddl.Dense(layer, 1024), 0.0001))
    layer = eddl.ReLu(eddl.L1L2(eddl.Dense(layer, 1024), 0.00001, 0.0001))
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.sgd(0.01, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)
    eddl.plot(net, "model.pdf")
    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    if args.small:
        x_train = x_train.select([":6000"])
        y_train = y_train.select([":6000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    x_train.div_(255.0)
    x_test.div_(255.0)
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def read_slide(slide_fn, level=4):
    levels = ecvl.OpenSlideGetLevels(slide_fn)
    dims = [0, 0] + levels[level]
    img = ecvl.OpenSlideRead(slide_fn, level, dims)
    t = ecvl.ImageToTensor(img)
    t_np = t.getdata()
    s = t_np.shape
    # Channels last, then reshape to one row per pixel
    t_np = t_np.transpose((1, 2, 0)).reshape((s[1] * s[2], 3))
    t_eval = Tensor.fromarray(t_np)
    print(t_eval.getShape())
    return t_eval, s
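# Hedged usage sketch (illustrative, not part of the original code): read a
# low-resolution level of a whole-slide image and keep the original shape so
# per-pixel predictions can later be reshaped back. "slide.svs" is an assumed
# file name and requires an OpenSlide-enabled pyecvl build.
def example_read_slide(slide_fn="slide.svs"):
    t_eval, orig_shape = read_slide(slide_fn, level=4)
    # t_eval has one row per pixel with 3 (RGB) columns; orig_shape holds the
    # original (channels, height, width) of the selected level.
    print(t_eval.getShape(), orig_shape)
    return t_eval, orig_shape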
def fun(rows):
    assert len(rows) == 1
    item = rows[0]
    feat, lab = self._get_img(item)
    with self.lock:
        self.feats.append(feat)
        self.labels.append(lab)
        self.perm.append(idx)
        self.cow += 1
        self.onair -= 1
        if self.cow == self.tot:  # last patch
            # Recover the original order of the images
            sh = []
            for (i, x) in enumerate(self.perm):
                sh.append([x, i])
            sh = np.array(sorted(sh))[:, 1]
            # Reorder the data and conclude
            feats = np.array(self.feats)[sh]
            labels = np.array(self.labels)[sh]
            self.bb = (Tensor(feats.transpose(0, 3, 1, 2)), Tensor(labels))
            self.finished_event.set()
def read_input(filename, split_ratio=0.7):
    data = np.load(filename)['d']
    # Shuffle data
    sel_index = np.arange(data.shape[0])
    np.random.shuffle(sel_index)
    shuffled_data = data[sel_index]
    # Add a column for two-class (one-hot) labels
    shuffled_data = np.c_[shuffled_data, np.zeros(shuffled_data.shape[0])]
    shuffled_data[:, 4][shuffled_data[:, 3] == 0] = 1.  # [1] -> [1 0], [0] -> [0 1]
    # Swap the last two columns: [1] -> [0 1], [0] -> [1 0]
    shuffled_data[:, [3, 4]] = shuffled_data[:, [4, 3]]
    # Train/test split
    n_train = int(shuffled_data.shape[0] * split_ratio)
    train = shuffled_data[0:n_train]
    test = shuffled_data[n_train:]
    x_trn = train[:, :3]
    y_trn = train[:, 3:]
    # Class balancing of the validation set
    test_0 = test[test[:, 3] == 1]
    test_1 = test[test[:, 4] == 1]
    c0_size = test_0.shape[0]
    c1_size = test_1.shape[0]
    c_size = min(c0_size, c1_size)
    test_bal = np.concatenate((test_0[:c_size], test_1[:c_size]), axis=0)
    x_test = test_bal[:, :3]
    y_test = test_bal[:, 3:]
    # Tensor creation
    x_train_t = Tensor.fromarray(x_trn.astype(np.float32))
    y_train_t = Tensor.fromarray(y_trn.astype(np.int32))
    x_test_t = Tensor.fromarray(x_test.astype(np.float32))
    y_test_t = Tensor.fromarray(y_test.astype(np.int32))
    return x_train_t, y_train_t, x_test_t, y_test_t