def main(args):
    eddl.download_mnist()

    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.Activation(eddl.Dense(layer, 256), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 128), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 64), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 128), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 256), "relu")
    out = eddl.Dense(layer, 784)
    net = eddl.Model([in_], [out])

    mse_loss = MSELoss()
    mse_metric = MSEMetric()
    net.build(
        eddl.sgd(0.001, 0.9),
        [mse_loss],
        [mse_metric],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )

    eddl.summary(net)
    eddl.plot(net, "model.pdf")

    x_train = Tensor.load("mnist_trX.bin")
    x_train.div_(255.0)
    eddl.fit(net, [x_train], [x_train], args.batch_size, args.epochs)
    print("All done")
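# MSELoss and MSEMetric, passed to net.build above, are not defined in this
# file. The sketch below is an assumption: it relies on pyeddl's support for
# Python-defined losses and metrics (subclasses of the Loss/Metric bases that
# implement value(), plus delta() for losses). The class bodies and the
# numpy-based math are illustrative, not the project's actual definitions.

import numpy as np
from pyeddl._core import Loss, Metric  # import path assumed


class MSELoss(Loss):
    def __init__(self):
        Loss.__init__(self, "py_mean_squared_error")

    def delta(self, t, y, d):
        # write the gradient of the loss w.r.t. the prediction y into d
        np.array(d, copy=False)[:] = \
            (np.array(y, copy=False) - np.array(t, copy=False)) / t.shape[0]

    def value(self, t, y):
        # sum of squared errors, averaged over the non-batch dimensions
        err = np.array(y, copy=False) - np.array(t, copy=False)
        return float((err ** 2).sum() / (t.size / t.shape[0]))


class MSEMetric(Metric):
    def __init__(self):
        Metric.__init__(self, "py_mean_squared_error")

    def value(self, t, y):
        err = np.array(y, copy=False) - np.array(t, copy=False)
        return float((err ** 2).sum() / (t.size / t.shape[0]))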
def main(args):
    eddl.download_mnist()

    num_classes = 10

    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.Reshape(layer, [1, 28, 28])
    layer = eddl.RandomCropScale(layer, [0.9, 1.0])
    layer = eddl.Reshape(layer, [-1])
    layer = eddl.ReLu(
        eddl.GaussianNoise(
            eddl.BatchNormalization(eddl.Dense(layer, 1024), True), 0.3
        )
    )
    layer = eddl.ReLu(
        eddl.GaussianNoise(
            eddl.BatchNormalization(eddl.Dense(layer, 1024), True), 0.3
        )
    )
    layer = eddl.ReLu(
        eddl.GaussianNoise(
            eddl.BatchNormalization(eddl.Dense(layer, 1024), True), 0.3
        )
    )
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])

    eddl.build(
        net,
        eddl.sgd(0.01, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )

    eddl.summary(net)
    eddl.plot(net, "model.pdf")

    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    if args.small:
        x_train = x_train.select([":6000"])
        y_train = y_train.select([":6000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    x_train.div_(255.0)
    x_test.div_(255.0)

    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)

    # LR annealing
    if args.epochs < 4:
        return
    eddl.setlr(net, [0.005, 0.9])
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs // 2)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    eddl.setlr(net, [0.001, 0.9])
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs // 2)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    eddl.setlr(net, [0.0001, 0.9])
    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs // 4)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def main(args):
    eddl.download_mnist()

    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.Activation(eddl.Dense(layer, 256), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 128), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 64), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 128), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 256), "relu")
    out = eddl.Dense(layer, 784)
    net = eddl.Model([in_], [out])

    eddl.build(
        net,
        eddl.sgd(0.001, 0.9),
        ["mean_squared_error"],
        ["mean_squared_error"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )

    eddl.summary(net)
    eddl.plot(net, "model.pdf")

    x_train = Tensor.load("mnist_trX.bin")
    if args.small:
        x_train = x_train.select([":6000"])
    x_train.div_(255.0)
    eddl.fit(net, [x_train], [x_train], args.batch_size, args.epochs)
    tout = eddl.predict(net, [x_train])
    tout[0].info()
    print("All done")
def main(args):
    eddl.download_cifar10()

    num_classes = 10

    in_ = eddl.Input([3, 32, 32])
    layer = in_
    layer = eddl.RandomCropScale(layer, [0.8, 1.0])
    layer = eddl.RandomHorizontalFlip(layer)
    layer = eddl.ReLu(BG(eddl.Conv(layer, 64, [3, 3], [1, 1], "same", False)))
    layer = eddl.Pad(layer, [0, 1, 1, 0])
    for i in range(3):
        layer = ResBlock(layer, 64, 0, i == 0)
    for i in range(4):
        layer = ResBlock(layer, 128, i == 0)
    for i in range(6):
        layer = ResBlock(layer, 256, i == 0)
    for i in range(3):
        layer = ResBlock(layer, 512, i == 0)
    layer = eddl.MaxPool(layer, [4, 4])
    layer = eddl.Reshape(layer, [-1])
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])

    eddl.build(
        net,
        eddl.sgd(0.001, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )

    eddl.summary(net)
    eddl.plot(net, "model.pdf", "TB")

    x_train = Tensor.load("cifar_trX.bin")
    y_train = Tensor.load("cifar_trY.bin")
    x_train.div_(255.0)
    x_test = Tensor.load("cifar_tsX.bin")
    y_test = Tensor.load("cifar_tsY.bin")
    x_test.div_(255.0)
    if args.small:
        # this is slow, make it really small
        x_train = x_train.select([":500"])
        y_train = y_train.select([":500"])
        x_test = x_test.select([":100"])
        y_test = y_test.select([":100"])

    lr = 0.01
    for j in range(3):
        lr /= 10.0
        eddl.setlr(net, [lr, 0.9])
        for i in range(args.epochs):
            eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
            eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
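# BG and ResBlock are used above but defined elsewhere. A plausible sketch,
# assuming BG wraps a layer in BatchNormalization plus GaussianNoise (as in
# the MNIST example above) and ResBlock is a standard two-convolution
# residual block with a projection shortcut when the spatial size or width
# changes. The filter/stride details are assumptions, not the original code.


def BG(layer):
    return eddl.GaussianNoise(eddl.BatchNormalization(layer, True), 0.3)


def ResBlock(layer, filters, half, expand=0):
    in_ = layer
    strides = [2, 2] if half else [1, 1]
    layer = eddl.ReLu(BG(eddl.Conv(layer, filters, [3, 3], strides)))
    layer = BG(eddl.Conv(layer, filters, [3, 3], [1, 1]))
    if half or expand:
        # project the shortcut so shapes match before the addition
        in_ = BG(eddl.Conv(in_, filters, [1, 1], strides))
    return eddl.ReLu(eddl.Add([layer, in_]))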
def main(args):
    eddl.download_cifar10()

    num_classes = 10

    in_ = eddl.Input([3, 32, 32])
    layer = in_
    layer = eddl.RandomCropScale(layer, [0.8, 1.0])
    layer = eddl.RandomFlip(layer, 1)
    layer = eddl.ReLu(BG(eddl.Conv(layer, 64, [3, 3], [1, 1])))
    layer = eddl.Pad(layer, [0, 1, 1, 0])
    layer = ResBlock(layer, 64, 2, True)
    layer = ResBlock(layer, 64, 2, False)
    layer = ResBlock(layer, 128, 2, True)
    layer = ResBlock(layer, 128, 2, False)
    layer = ResBlock(layer, 256, 2, True)
    layer = ResBlock(layer, 256, 2, False)
    layer = ResBlock(layer, 256, 2, True)
    layer = ResBlock(layer, 256, 2, False)
    layer = eddl.Reshape(layer, [-1])
    layer = eddl.ReLu(BG(eddl.Dense(layer, 512)))
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])

    eddl.build(
        net,
        eddl.sgd(0.01, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )

    eddl.summary(net)
    eddl.plot(net, "model.pdf", "TB")

    x_train = Tensor.load("cifar_trX.bin")
    y_train = Tensor.load("cifar_trY.bin")
    x_train.div_(255.0)
    x_test = Tensor.load("cifar_tsX.bin")
    y_test = Tensor.load("cifar_tsY.bin")
    x_test.div_(255.0)
    if args.small:
        x_train = x_train.select([":5000"])
        y_train = y_train.select([":5000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])

    for i in range(args.epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
        eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
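# Here ResBlock takes four positional arguments, e.g. ResBlock(layer, 64, 2,
# True), so it is a different helper from the ResNet-50-style block sketched
# earlier. One guess at its shape, assuming the third argument is the number
# of 3x3 convolutions in the block and the fourth whether the block halves
# the spatial resolution (both assumptions, for illustration only):


def ResBlock(layer, filters, nconv, half):
    in_ = layer
    layer = eddl.ReLu(BG(eddl.Conv(layer, filters, [3, 3],
                                   [2, 2] if half else [1, 1])))
    for _ in range(nconv - 1):
        layer = BG(eddl.Conv(layer, filters, [3, 3], [1, 1]))
    if half:
        in_ = BG(eddl.Conv(in_, filters, [1, 1], [2, 2]))
    return eddl.ReLu(eddl.Add([layer, in_]))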
def main(args):
    eddl.download_cifar10()

    num_classes = 10

    in_ = eddl.Input([3, 32, 32])
    layer = in_
    layer = eddl.RandomCropScale(layer, [0.8, 1.0])
    layer = eddl.RandomFlip(layer, 1)
    layer = eddl.RandomCutout(layer, [0.1, 0.3], [0.1, 0.3])
    layer = eddl.MaxPool(Block3_2(layer, 64))
    layer = eddl.MaxPool(Block3_2(layer, 128))
    layer = eddl.MaxPool(Block1(Block3_2(layer, 256), 256))
    layer = eddl.MaxPool(Block1(Block3_2(layer, 512), 512))
    layer = eddl.MaxPool(Block1(Block3_2(layer, 512), 512))
    layer = eddl.Reshape(layer, [-1])
    layer = eddl.Activation(eddl.Dense(layer, 512), "relu")
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])

    eddl.build(
        net,
        eddl.sgd(0.001, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )

    eddl.setlogfile(net, "vgg16")
    eddl.summary(net)
    eddl.plot(net, "model.pdf")

    x_train = Tensor.load("cifar_trX.bin")
    y_train = Tensor.load("cifar_trY.bin")
    x_train.div_(255.0)
    x_test = Tensor.load("cifar_tsX.bin")
    y_test = Tensor.load("cifar_tsY.bin")
    x_test.div_(255.0)
    if args.small:
        x_train = x_train.select([":5000"])
        y_train = y_train.select([":5000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])

    for i in range(args.epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)
        eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
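# Block1 and Block3_2 are VGG-style helpers not defined in this file. A
# minimal sketch, assuming Block1 is a single 1x1 convolution and Block3_2 a
# pair of 3x3 convolutions, each followed by ReLu (the names and exact
# layout are assumptions):


def Block1(layer, filters):
    return eddl.ReLu(eddl.Conv(layer, filters, [1, 1], [1, 1]))


def Block3_2(layer, filters):
    layer = eddl.ReLu(eddl.Conv(layer, filters, [3, 3], [1, 1]))
    return eddl.ReLu(eddl.Conv(layer, filters, [3, 3], [1, 1]))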
def main(args):
    eddl.download_mnist()

    num_classes = 10

    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.Activation(eddl.Dense(layer, 1024), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 1024), "relu")
    layer = eddl.Activation(eddl.Dense(layer, 1024), "relu")
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])

    acc = CategoricalAccuracy()
    net.build(
        eddl.sgd(0.01, 0.9),
        [eddl.getLoss("soft_cross_entropy")],
        [acc],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )

    eddl.summary(net)
    eddl.plot(net, "model.pdf")

    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    x_train.div_(255.0)
    x_test.div_(255.0)

    num_samples = x_train.shape[0]
    num_batches = num_samples // args.batch_size
    test_samples = x_test.shape[0]
    test_batches = test_samples // args.batch_size

    eddl.set_mode(net, TRMODE)
    for i in range(args.epochs):
        for j in range(num_batches):
            print("Epoch %d/%d (batch %d/%d)" %
                  (i + 1, args.epochs, j + 1, num_batches))
            indices = np.random.randint(0, num_samples, args.batch_size)
            eddl.train_batch(net, [x_train], [y_train], indices)
        for j in range(test_batches):
            print("Epoch %d/%d (batch %d/%d)" %
                  (i + 1, args.epochs, j + 1, test_batches))
            # evaluate on the test split
            indices = np.random.randint(0, test_samples, args.batch_size)
            eddl.eval_batch(net, [x_test], [y_test], indices)
    print("All done")
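# CategoricalAccuracy is instantiated above but not defined here. Under the
# same assumption as the MSE sketch earlier (a Python-side Metric subclass
# whose value(t, y) returns the metric summed over the batch), it could look
# roughly like this; the numpy-based body is illustrative:

import numpy as np
from pyeddl._core import Metric  # import path assumed


class CategoricalAccuracy(Metric):
    def __init__(self):
        Metric.__init__(self, "py_categorical_accuracy")

    def value(self, t, y):
        # count the samples whose argmax prediction matches the one-hot target
        t_a = np.array(t, copy=False)
        y_a = np.array(y, copy=False)
        return float((np.argmax(y_a, axis=-1) == np.argmax(t_a, axis=-1)).sum())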
def main(args):
    eddl.download_mnist()

    num_classes = 10

    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.BatchNormalization(
        eddl.Activation(eddl.L2(eddl.Dense(layer, 1024), 0.0001), "relu"), True
    )
    layer = eddl.BatchNormalization(
        eddl.Activation(eddl.L2(eddl.Dense(layer, 1024), 0.0001), "relu"), True
    )
    layer = eddl.BatchNormalization(
        eddl.Activation(eddl.L2(eddl.Dense(layer, 1024), 0.0001), "relu"), True
    )
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])

    acc = CategoricalAccuracy()
    net.build(
        eddl.sgd(0.01, 0.9),
        [eddl.getLoss("soft_cross_entropy")],
        [acc],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )

    eddl.summary(net)
    eddl.plot(net, "model.pdf")

    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    x_train.div_(255.0)
    x_test.div_(255.0)

    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def main(args):
    eddl.download_mnist()

    num_classes = 10

    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.ReLu(eddl.L2(eddl.Dense(layer, 1024), 0.0001))
    layer = eddl.ReLu(eddl.L1(eddl.Dense(layer, 1024), 0.0001))
    layer = eddl.ReLu(eddl.L1L2(eddl.Dense(layer, 1024), 0.00001, 0.0001))
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])

    eddl.build(
        net,
        eddl.sgd(0.01, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )

    eddl.summary(net)
    eddl.plot(net, "model.pdf")

    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    if args.small:
        x_train = x_train.select([":6000"])
        y_train = y_train.select([":6000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    x_train.div_(255.0)
    x_test.div_(255.0)

    eddl.fit(net, [x_train], [y_train], args.batch_size, args.epochs)
    eddl.evaluate(net, [x_test], [y_test], bs=args.batch_size)
    print("All done")
def classificate(args):
    args = dotdict(args)
    ckpts_dir = opjoin(settings.TRAINING_DIR, 'ckpts')
    outputfile = None
    inference = None
    train = args.mode == 'training'
    batch_size = args.batch_size if train else args.test_batch_size

    weight_id = args.weight_id
    weight = dj_models.ModelWeights.objects.get(id=weight_id)
    if train:
        pretrained = None
        if weight.pretrained_on:
            pretrained = weight.pretrained_on.location
    else:
        inference_id = args.inference_id
        inference = dj_models.Inference.objects.get(id=inference_id)
        pretrained = weight.location

    save_stdout = sys.stdout
    size = [args.input_h, args.input_w]  # Height, width
    try:
        model = bindings.models_binding[args.model_id]
    except KeyError:
        raise Exception(
            f'Model with id: {args.model_id} not found in bindings.py')
    try:
        dataset_path = str(
            dj_models.Dataset.objects.get(id=args.dataset_id).path)
    except dj_models.Dataset.DoesNotExist:
        raise Exception(f'Dataset with id: {args.dataset_id} not found')
    dataset = bindings.dataset_binding.get(args.dataset_id)

    if dataset is None and not train:
        # Binding does not exist: it's a single-image dataset.
        # Use as dataset "stub" the dataset on which the model was trained.
        dataset = bindings.dataset_binding.get(weight.dataset_id.id)
    elif dataset is None and train:
        raise Exception(
            f'Dataset with id: {args.dataset_id} not found in bindings.py')

    basic_augs = ecvl.SequentialAugmentationContainer(
        [ecvl.AugResizeDim(size)])
    train_augs = basic_augs
    val_augs = basic_augs
    test_augs = basic_augs
    if args.train_augs:
        train_augs = ecvl.SequentialAugmentationContainer([
            ecvl.AugResizeDim(size),
            ecvl.AugmentationFactory.create(args.train_augs)
        ])
    if args.val_augs:
        val_augs = ecvl.SequentialAugmentationContainer([
            ecvl.AugResizeDim(size),
            ecvl.AugmentationFactory.create(args.val_augs)
        ])
    if args.test_augs:
        test_augs = ecvl.SequentialAugmentationContainer([
            ecvl.AugResizeDim(size),
            ecvl.AugmentationFactory.create(args.test_augs)
        ])

    logging.info('Reading dataset')
    print('Reading dataset', flush=True)
    dataset = dataset(
        dataset_path, batch_size,
        ecvl.DatasetAugmentations([train_augs, val_augs, test_augs]))
    d = dataset.d
    num_classes = dataset.num_classes
    in_ = eddl.Input([d.n_channels_, size[0], size[1]])
    out = model(in_, num_classes)  # out is already softmaxed in classific models
    net = eddl.Model([in_], [out])

    if train:
        logfile = open(Path(weight.logfile), 'w')
    else:
        logfile = open(inference.logfile, 'w')
        outputfile = open(inference.outputfile, 'w')
    with redirect_stdout(logfile):
        # Save args to file
        print('args: ' + json.dumps(args, indent=2, sort_keys=True),
              flush=True)
        logging.info('args: ' + json.dumps(args, indent=2, sort_keys=True))

        eddl.build(
            net,
            eddl.sgd(args.lr, 0.9),
            [bindings.losses_binding.get(args.loss)],
            [bindings.metrics_binding.get(args.metric)],
            eddl.CS_GPU([1], mem='low_mem') if args.gpu else eddl.CS_CPU())
        eddl.summary(net)

        if pretrained and os.path.exists(pretrained):
            eddl.load(net, pretrained)
            logging.info('Weights loaded')

        # Create tensors for images and labels
        images = eddlT.create([batch_size, d.n_channels_, size[0], size[1]])
        labels = eddlT.create([batch_size, num_classes])

        logging.info(f'Starting {args.mode}')
        print(f'Starting {args.mode}', flush=True)
        if train:
            num_samples_train = len(d.GetSplit(ecvl.SplitType.training))
            num_batches_train = num_samples_train // batch_size
            num_samples_val = len(d.GetSplit(ecvl.SplitType.validation))
            num_batches_val = num_samples_val // batch_size
            indices = list(range(batch_size))

            for e in range(args.epochs):
                eddl.reset_loss(net)
                d.SetSplit(ecvl.SplitType.training)
                s = d.GetSplit()
                random.shuffle(s)
                d.split_.training_ = s
                d.ResetCurrentBatch()
                # total_loss = 0.
                # total_metric = 0.
                for i in range(num_batches_train):
                    d.LoadBatch(images, labels)
                    images.div_(255.0)
                    eddl.train_batch(net, [images], [labels], indices)
                    total_loss = net.fiterr[0]
                    total_metric = net.fiterr[1]
                    print(
                        f'Train Epoch: {e + 1}/{args.epochs} '
                        f'[{i + 1}/{num_batches_train}] {net.lout[0].name}'
                        f'({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
                        f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})',
                        flush=True)
                    logging.info(
                        f'Train Epoch: {e + 1}/{args.epochs} '
                        f'[{i + 1}/{num_batches_train}] {net.lout[0].name}'
                        f'({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
                        f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})')

                eddl.save(net, opjoin(ckpts_dir, f'{weight_id}.bin'))
                logging.info('Weights saved')
                print('Weights saved', flush=True)

                if len(d.split_.validation_) > 0:
                    logging.info(f'Validation {e + 1}/{args.epochs}')
                    print(f'Validation {e + 1}/{args.epochs}', flush=True)
                    d.SetSplit(ecvl.SplitType.validation)
                    d.ResetCurrentBatch()
                    for i in range(num_batches_val):
                        d.LoadBatch(images, labels)
                        images.div_(255.0)
                        eddl.eval_batch(net, [images], [labels], indices)
                        # eddl.evaluate(net, [images], [labels])
                        total_loss = net.fiterr[0]
                        total_metric = net.fiterr[1]
                        print(
                            f'Val Epoch: {e + 1}/{args.epochs} '
                            f'[{i + 1}/{num_batches_val}] {net.lout[0].name}'
                            f'({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
                            f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})',
                            flush=True)
                        logging.info(
                            f'Val Epoch: {e + 1}/{args.epochs} '
                            f'[{i + 1}/{num_batches_val}] {net.lout[0].name}'
                            f'({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
                            f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})')
        else:
            d.SetSplit(ecvl.SplitType.test)
            num_samples_test = len(d.GetSplit())
            num_batches_test = num_samples_test // batch_size
            preds = np.empty((0, num_classes), np.float64)
            for b in range(num_batches_test):
                d.LoadBatch(images)
                images.div_(255.0)
                eddl.forward(net, [images])
                print(f'Infer Batch {b + 1}/{num_batches_test}', flush=True)
                logging.info(f'Infer Batch {b + 1}/{num_batches_test}')
                # print(
                #     f'Evaluation {b + 1}/{num_batches} {net.lout[0].name}'
                #     f'({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
                #     f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})')
                # logging.info(
                #     f'Evaluation {b + 1}/{num_batches} {net.lout[0].name}'
                #     f'({net.losses[0].name}={total_loss / net.inferenced_samples:1.3f},'
                #     f'{net.metrics[0].name}={total_metric / net.inferenced_samples:1.3f})')
                # Save network predictions
                for i in range(batch_size):
                    pred = np.array(
                        eddlT.select(eddl.getTensor(out), i), copy=False)
                    # gt = np.argmax(np.array(labels)[indices])
                    # pred = np.append(pred, gt).reshape((1, num_classes + 1))
                    preds = np.append(preds, pred, axis=0)
                    pred_name = d.samples_[
                        d.GetSplit()[b * batch_size + i]].location_
                    # print(f'{pred_name};{pred}')
                    outputfile.write(f'{pred_name};{pred.tolist()}\n')
            outputfile.close()
        print('<done>')
    logfile.close()
    del net
    del out
    del in_
    return
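# dotdict, used at the top of classificate, is a small helper that exposes
# dict keys as attributes (args.mode instead of args['mode']). A common
# minimal implementation, assumed here since the project's own definition is
# not part of this file:


class dotdict(dict):
    __getattr__ = dict.get
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__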
def main(args): num_classes = 8 size = [224, 224] # size of images in_ = eddl.Input([3, size[0], size[1]]) out = VGG16(in_, num_classes) net = eddl.Model([in_], [out]) eddl.build(net, eddl.sgd(0.001, 0.9), ["soft_cross_entropy"], ["categorical_accuracy"], eddl.CS_GPU([1]) if args.gpu else eddl.CS_CPU()) eddl.summary(net) eddl.setlogfile(net, "skin_lesion_classification") training_augs = ecvl.SequentialAugmentationContainer([ ecvl.AugResizeDim(size), ecvl.AugMirror(.5), ecvl.AugFlip(.5), ecvl.AugRotate([-180, 180]), ecvl.AugAdditivePoissonNoise([0, 10]), ecvl.AugGammaContrast([0.5, 1.5]), ecvl.AugGaussianBlur([0, 0.8]), ecvl.AugCoarseDropout([0, 0.3], [0.02, 0.05], 0.5) ]) validation_augs = ecvl.SequentialAugmentationContainer([ ecvl.AugResizeDim(size), ]) dataset_augs = ecvl.DatasetAugmentations( [training_augs, validation_augs, None]) print("Reading dataset") d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs) x = Tensor([args.batch_size, d.n_channels_, size[0], size[1]]) y = Tensor([args.batch_size, len(d.classes_)]) num_samples_train = len(d.GetSplit()) num_batches_train = num_samples_train // args.batch_size d.SetSplit(ecvl.SplitType.validation) num_samples_val = len(d.GetSplit()) num_batches_val = num_samples_val // args.batch_size indices = list(range(args.batch_size)) metric = eddl.getMetric("categorical_accuracy") print("Starting training") for e in range(args.epochs): print("Epoch {:d}/{:d} - Training".format(e + 1, args.epochs), flush=True) if args.out_dir: current_path = os.path.join(args.out_dir, "Epoch_%d" % e) for c in d.classes_: c_dir = os.path.join(current_path, c) os.makedirs(c_dir, exist_ok=True) d.SetSplit(ecvl.SplitType.training) eddl.reset_loss(net) total_metric = [] s = d.GetSplit() random.shuffle(s) d.split_.training_ = s d.ResetAllBatches() for b in range(num_batches_train): print("Epoch {:d}/{:d} (batch {:d}/{:d}) - ".format( e + 1, args.epochs, b + 1, num_batches_train), end="", flush=True) d.LoadBatch(x, y) x.div_(255.0) tx, ty = [x], [y] eddl.train_batch(net, tx, ty, indices) eddl.print_loss(net, b) print() print("Saving weights") eddl.save(net, "isic_classification_checkpoint_epoch_%s.bin" % e, "bin") print("Epoch %d/%d - Evaluation" % (e + 1, args.epochs), flush=True) d.SetSplit(ecvl.SplitType.validation) for b in range(num_batches_val): n = 0 print("Epoch {:d}/{:d} (batch {:d}/{:d}) - ".format( e + 1, args.epochs, b + 1, num_batches_val), end="", flush=True) d.LoadBatch(x, y) x.div_(255.0) eddl.forward(net, [x]) output = eddl.getOutput(out) sum_ = 0.0 for k in range(args.batch_size): result = output.select([str(k)]) target = y.select([str(k)]) ca = metric.value(target, result) total_metric.append(ca) sum_ += ca if args.out_dir: result_a = np.array(result, copy=False) target_a = np.array(target, copy=False) classe = np.argmax(result_a).item() gt_class = np.argmax(target_a).item() single_image = x.select([str(k)]) img_t = ecvl.TensorToView(single_image) img_t.colortype_ = ecvl.ColorType.BGR single_image.mult_(255.) filename = d.samples_[d.GetSplit()[n]].location_[0] head, tail = os.path.splitext(os.path.basename(filename)) bname = "%s_gt_class_%s.png" % (head, gt_class) cur_path = os.path.join(current_path, d.classes_[classe], bname) ecvl.ImWrite(cur_path, img_t) n += 1 print("categorical_accuracy:", sum_ / args.batch_size) total_avg = sum(total_metric) / len(total_metric) print("Total categorical accuracy:", total_avg)
def main(args):
    num_classes = 10
    size = [28, 28]  # size of images
    ctype = ecvl.ColorType.GRAY

    in_ = eddl.Input([1, size[0], size[1]])
    out = LeNet(in_, num_classes)
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.sgd(0.001, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU([1]) if args.gpu else eddl.CS_CPU()
    )
    eddl.summary(net)
    eddl.setlogfile(net, "mnist")

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugRotate([-5, 5]),
        ecvl.AugAdditivePoissonNoise([0, 10]),
        ecvl.AugGaussianBlur([0, 0.8]),
        ecvl.AugCoarseDropout([0, 0.3], [0.02, 0.05], 0),
    ])
    dataset_augs = ecvl.DatasetAugmentations([training_augs, None, None])

    print("Reading dataset")
    d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs, ctype)
    x_train = Tensor([args.batch_size, d.n_channels_, size[0], size[1]])
    y_train = Tensor([args.batch_size, len(d.classes_)])
    num_samples = len(d.GetSplit())
    num_batches = num_samples // args.batch_size
    indices = list(range(args.batch_size))

    print("Training")
    for i in range(args.epochs):
        eddl.reset_loss(net)
        s = d.GetSplit()
        random.shuffle(s)
        d.split_.training_ = s
        d.ResetCurrentBatch()
        for j in range(num_batches):
            print("Epoch %d/%d (batch %d/%d) - " %
                  (i + 1, args.epochs, j + 1, num_batches),
                  end="", flush=True)
            d.LoadBatch(x_train, y_train)
            x_train.div_(255.0)
            tx, ty = [x_train], [y_train]
            eddl.train_batch(net, tx, ty, indices)
            eddl.print_loss(net, j)
            print()

    print("Saving weights")
    eddl.save(net, "mnist_checkpoint.bin", "bin")

    print("Evaluation")
    d.SetSplit(ecvl.SplitType.test)
    num_samples = len(d.GetSplit())
    num_batches = num_samples // args.batch_size
    for i in range(num_batches):
        print("batch %d/%d - " % (i + 1, num_batches), end="", flush=True)
        d.LoadBatch(x_train, y_train)
        x_train.div_(255.0)
        eddl.evaluate(net, [x_train], [y_train])
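# LeNet is referenced above but defined elsewhere. A sketch following the
# classic LeNet layout (two conv+pool stages, then dense layers); the exact
# sizes are assumptions, and the final Softmax matches how `out` is consumed
# by the soft_cross_entropy loss above:


def LeNet(layer, num_classes):
    layer = eddl.MaxPool(eddl.ReLu(eddl.Conv(layer, 20, [5, 5])),
                         [2, 2], [2, 2])
    layer = eddl.MaxPool(eddl.ReLu(eddl.Conv(layer, 50, [5, 5])),
                         [2, 2], [2, 2])
    layer = eddl.Reshape(layer, [-1])
    layer = eddl.ReLu(eddl.Dense(layer, 500))
    return eddl.Softmax(eddl.Dense(layer, num_classes))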
def main(args):
    eddl.download_mnist()

    num_classes = 10

    in_ = eddl.Input([784])
    layer = in_
    layer = eddl.ReLu(eddl.Dense(layer, 1024))
    layer = eddl.ReLu(eddl.Dense(layer, 1024))
    layer = eddl.ReLu(eddl.Dense(layer, 1024))
    out = eddl.Softmax(eddl.Dense(layer, num_classes))
    net = eddl.Model([in_], [out])

    eddl.build(
        net,
        eddl.sgd(0.001, 0.9),
        ["softmax_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )

    eddl.summary(net)
    eddl.plot(net, "model.pdf")
    eddl.setlogfile(net, "mnist")

    x_train = Tensor.load("mnist_trX.bin")
    y_train = Tensor.load("mnist_trY.bin")
    x_test = Tensor.load("mnist_tsX.bin")
    y_test = Tensor.load("mnist_tsY.bin")
    if args.small:
        x_train = x_train.select([":6000"])
        y_train = y_train.select([":6000"])
        x_test = x_test.select([":1000"])
        y_test = y_test.select([":1000"])
    x_train.div_(255.0)
    x_test.div_(255.0)

    s = x_train.shape
    num_batches = s[0] // args.batch_size
    for i in range(args.epochs):
        eddl.reset_loss(net)
        print("Epoch %d/%d (%d batches)" % (i + 1, args.epochs, num_batches))
        for j in range(num_batches):
            indices = np.random.randint(0, s[0], args.batch_size)
            eddl.train_batch(net, [x_train], [y_train], indices)
        losses1 = eddl.get_losses(net)
        metrics1 = eddl.get_metrics(net)
        for l, m in zip(losses1, metrics1):
            print("Loss: %.6f\tMetric: %.6f" % (l, m))

    s = x_test.shape
    num_batches = s[0] // args.batch_size
    for j in range(num_batches):
        indices = np.arange(j * args.batch_size,
                            j * args.batch_size + args.batch_size)
        eddl.eval_batch(net, [x_test], [y_test], indices)
    losses2 = eddl.get_losses(net)
    metrics2 = eddl.get_metrics(net)
    for l, m in zip(losses2, metrics2):
        print("Loss: %.6f\tMetric: %.6f" % (l, m))

    last_batch_size = s[0] % args.batch_size
    if last_batch_size:
        # evaluate the leftover samples that did not fill a whole batch
        indices = np.arange(num_batches * args.batch_size, s[0])
        eddl.eval_batch(net, [x_test], [y_test], indices)
        losses3 = eddl.get_losses(net)
        metrics3 = eddl.get_metrics(net)
        for l, m in zip(losses3, metrics3):
            print("Loss: %.6f\tMetric: %.6f" % (l, m))
    print("All done")
def main(args): num_classes = 8 size = [224, 224] # size of images in_ = eddl.Input([3, size[0], size[1]]) out = VGG16(in_, num_classes) net = eddl.Model([in_], [out]) eddl.build(net, eddl.sgd(0.001, 0.9), ["soft_cross_entropy"], ["categorical_accuracy"], eddl.CS_GPU([1]) if args.gpu else eddl.CS_CPU()) eddl.summary(net) eddl.setlogfile(net, "skin_lesion_classification_inference") training_augs = ecvl.SequentialAugmentationContainer([ ecvl.AugResizeDim(size), ]) test_augs = ecvl.SequentialAugmentationContainer([ ecvl.AugResizeDim(size), ]) dataset_augs = ecvl.DatasetAugmentations([training_augs, None, test_augs]) print("Reading dataset") d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs) if args.out_dir: for c in d.classes_: os.makedirs(os.path.join(args.out_dir, c), exist_ok=True) x = Tensor([args.batch_size, d.n_channels_, size[0], size[1]]) y = Tensor([args.batch_size, len(d.classes_)]) d.SetSplit(ecvl.SplitType.test) num_samples = len(d.GetSplit()) num_batches = num_samples // args.batch_size metric = eddl.getMetric("categorical_accuracy") total_metric = [] if not os.path.exists(args.ckpts): raise RuntimeError('Checkpoint "{}" not found'.format(args.ckpts)) eddl.load(net, args.ckpts, "bin") print("Testing") for b in range(num_batches): n = 0 print("Batch {:d}/{:d}".format(b + 1, num_batches)) d.LoadBatch(x, y) x.div_(255.0) eddl.forward(net, [x]) output = eddl.getOutput(out) sum_ = 0.0 for j in range(args.batch_size): result = output.select([str(j)]) target = y.select([str(j)]) ca = metric.value(target, result) total_metric.append(ca) sum_ += ca if args.out_dir: result_a = np.array(result, copy=False) target_a = np.array(target, copy=False) classe = np.argmax(result_a).item() gt_class = np.argmax(target_a).item() single_image = x.select([str(j)]) img_t = ecvl.TensorToView(single_image) img_t.colortype_ = ecvl.ColorType.BGR single_image.mult_(255.) filename = d.samples_[d.GetSplit()[n]].location_[0] head, tail = os.path.splitext(os.path.basename(filename)) bname = "%s_gt_class_%s.png" % (head, gt_class) cur_path = os.path.join(args.out_dir, d.classes_[classe], bname) ecvl.ImWrite(cur_path, img_t) n += 1 print("categorical_accuracy:", sum_ / args.batch_size) total_avg = sum(total_metric) / len(total_metric) print("Total categorical accuracy:", total_avg)