def train(cl, dataset):
    # Snapshot the weight tensors before training.
    state = dict(
        (key, val) for key, val in cl.model.state_dict().items()
        if key.endswith(".weight"))
    test_history, train_history = cl.training(
        manager=manager,
        nb_epochs=5,
        checkpointdir=None,
        fold_index=0,
        with_validation=False)
    train_state = dict(
        (key, val) for key, val in cl.model.state_dict().items()
        if key.endswith(".weight"))
    # Display the layers whose weights changed during training.
    for key, val in state.items():
        if not np.allclose(val, train_state[key]):
            print("--", key)
    idx = 0
    y_pred_prob, X, y_true, loss, values = cl.testing(
        manager=manager,
        with_logit=True,
        predict=False)
    y_pred = np.argmax(y_pred_prob, axis=1)
    print(" ** true label : ", y_true[idx])
    print(" ** predicted label : ", y_pred[idx])
    titles = ["{0}-{1}".format(data.labels[it1], data.labels[it2])
              for it1, it2 in zip(y_pred, y_true)]
    plot_data(X, labels=titles, nb_samples=5)
    plot_history(train_history)
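#############################################################################
# A hypothetical call, assuming cl is one of the pynet classifier interfaces
# created above (note that the dataset argument is currently unused: the
# function reads the global manager directly):

train(cl, dataset=manager["test"])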
    metadata_path=data.metadata_path,
    number_of_folds=10,
    batch_size=1000,
    stratify_label="label",
    test_size=0.1)

#############################################################################
# Displaying some images of the test dataset.

from pynet.plotting import plot_data

dataset = manager["test"]
sample = dataset.inputs.reshape(-1, data.height, data.width)
sample = np.expand_dims(sample, axis=1)
plot_data(sample, nb_samples=5)

#############################################################################
# Simple neural network
# ---------------------
#
# The simplest way to create, train and test a network is to use a
# Sequential container. With a sequential container you can quickly design a
# linear stack of layers, and thus many kinds of models (LSTM, CNN, ...).
# Here we create a simple Multilayer Perceptron (MLP) for multi-class
# softmax classification.

import collections
import torch
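#############################################################################
# As a point of reference, a sequential container is just an ordered stack
# of layers (a minimal sketch with illustrative sizes, not necessarily the
# ones used in this example):

sketch = torch.nn.Sequential(collections.OrderedDict([
    ("linear1", torch.nn.Linear(784, 128)),   # flattened image -> hidden
    ("relu1", torch.nn.ReLU()),
    ("linear2", torch.nn.Linear(128, 10))]))  # hidden -> class scores
print(sketch)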
merge_mode="concat", batchnorm=True) plot_net_rescue(model, shape=(1, 1, 64, 64, 64), outfileroot=None) ############################################################################ # Inspect a network # ----------------- # # The module propose utilities to inspect easyly some layers of the network. from pynet.utils import test_model from pprint import pprint import numpy as np from pynet.utils import get_named_layers from pynet.utils import layer_at from pynet.plotting import plot_data out = test_model(model, shape=(1, 1, 64, 64, 64)) layers = get_named_layers(model) pprint(layers) hook_x, weight = layer_at(model=model, layer_name="down.1.doubleconv.conv1-8.16", x=torch.FloatTensor( np.random.random((1, 1, 64, 64, 64)))) print(hook_x.shape) print(weight.shape) plot_data(hook_x[:, :1]) # import matplotlib.pyplot as plt # plt.show()
Load some images and apply the ImageNet transformation.
You may need to change the 'datasetdir' parameter.
"""

from pynet.datasets import DataManager, fetch_gradcam
from pynet.plotting import plot_data

data = fetch_gradcam(datasetdir="/tmp/gradcam")
manager = DataManager(input_path=data.input_path,
                      metadata_path=data.metadata_path,
                      number_of_folds=2,
                      batch_size=5,
                      test_size=1)
dataset = manager["test"]
print(dataset.inputs.shape)
plot_data(dataset.inputs, nb_samples=5, random=False, rgb=True)

#############################################################################
# Explore different architectures
# -------------------------------
#
# Let's automate this procedure for different networks.
# We need to reload the data for the inception network.
# You may need to change the 'datasetdir' parameter.

import os
from pynet.models.cam import get_cam_network
from pynet.cam import GradCam
import matplotlib.pyplot as plt

data = fetch_gradcam(datasetdir="/tmp/gradcam")
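#############################################################################
# For reference, the classical ImageNet preprocessing resizes the image and
# normalizes it with the usual ImageNet statistics (a torchvision sketch;
# the exact pipeline applied by fetch_gradcam may differ):

import torchvision.transforms as transforms

imagenet_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])])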
dataset = split_dataset(
    path=dataset_desc,
    dataloader=LoadDataset,
    batch_size=1,
    inputs=["t1"],
    outputs=["mask"],
    label="label",
    number_of_folds=1,
    transforms=[ZeroPadding(shape=(256, 256, 256)), Downsample(scale=2)],
    verbose=0)

#############################################################################
# We now have a test set, and multiple folds with train-validation datasets
# that can be used to train our network using cross-validation:

from pprint import pprint
import numpy as np
from pynet.plotting import plot_data

pprint(dataset)
for batch_data in dataset["test"]:
    print("Inputs: ", batch_data["inputs"].shape)
    print("Outputs: ", batch_data["outputs"].shape)
    print("Labels: ", batch_data["labels"].shape)
    print(dataset["test"].dataset.iloc[0].values)
    plot_data(batch_data["inputs"][0, 0].numpy(),
              extradata=[np.sum(batch_data["outputs"][0].numpy(), axis=0)])
    break
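#############################################################################
# Note on the transforms above: ZeroPadding first brings every volume to
# (256, 256, 256), then Downsample(scale=2) halves each axis, so the network
# receives (128, 128, 128) volumes. A quick check, assuming the batch layout
# is (batch, channels, depth, height, width):

for batch_data in dataset["test"]:
    assert tuple(batch_data["inputs"].shape[-3:]) == (128, 128, 128)
    break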
    metadata_path=data.metadata_path,
    output_path=data.output_path,
    projection_labels=None,
    number_of_folds=10,
    batch_size=1,
    stratify_label="grade",
    # input_transforms=[
    #     RandomFlipDimensions(ndims=3, proba=0.5, with_channels=True),
    #     Offset(nb_channels=4, factor=0.1)],
    sampler="random",
    add_input=True,
    test_size=0.1,
    pin_memory=True)
dataset = manager["test"][:1]
print(dataset.inputs.shape, dataset.outputs.shape)
plot_data(dataset.inputs, channel=1, nb_samples=5)
plot_data(dataset.outputs, channel=1, nb_samples=5)

#############################################################################
# Training
# --------
#
# From the available models load the 3D NvNet, and start the training.

import os
from torch.optim import lr_scheduler
from pynet.losses import NvNetCombinedLoss
from pynet.interfaces import NvNetSegmenter
from pynet.plotting import plot_history
from pynet.history import History
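#############################################################################
# A typical learning-rate schedule for this kind of segmentation training
# (a standalone sketch with illustrative milestones, independent of the
# NvNetSegmenter configuration below):

import torch
from torch import optim

dummy = torch.nn.Linear(4, 2)  # stand-in for the NvNet parameters
optimizer = optim.Adam(dummy.parameters(), lr=1e-4)
scheduler = lr_scheduler.MultiStepLR(
    optimizer, milestones=[150, 250], gamma=0.5)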
setup_logging(level="info") data = fetch_echocardiography(datasetdir="/tmp/echocardiography") manager = DataManager(input_path=data.input_path, metadata_path=data.metadata_path, output_path=data.output_path, number_of_folds=2, stratify_label="label", sampler="random", batch_size=10, test_size=0.1, sample_size=0.2) dataset = manager["test"] print(dataset.inputs.shape, dataset.outputs.shape) data = np.concatenate((dataset.inputs, dataset.outputs), axis=1) plot_data(data, nb_samples=5) ############################################################################# # Optimisation # ------------ # # From the available models load the UNet, and start the training. # You may need to change the 'outdir' parameter. import torch import torch.nn as nn from pynet import NetParameters from pynet.interfaces import DeepLabNetSegmenter, PSPNetSegmenter from pynet.plotting import plot_history from pynet.history import History
    metadata_path=data.metadata_path,
    number_of_folds=10,
    batch_size=50,
    stratify_label="category",
    test_size=0.1)

#############################################################################
# We now have a test set, and multiple folds with train-validation datasets
# that can be used to train our network using cross-validation.

import numpy as np
from pynet.plotting import plot_data

print("Nb folds: ", manager.number_of_folds)
dataloader = manager.get_dataloader(train=True, validation=False,
                                    test=False, fold_index=0)
print(dataloader)
for trainloader in dataloader.train:
    print("Inputs: ", trainloader.inputs.shape)
    print("Outputs: ", trainloader.outputs)
    print("Labels: ", trainloader.labels.shape)
    plot_data(trainloader.inputs, nb_samples=5)
    break

import os
if "CI_MODE" not in os.environ:
    import matplotlib.pyplot as plt
    plt.show()
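#############################################################################
# A full cross-validation simply requests the same loader for every fold
# (a sketch reusing the call above; each fold_index serves a different
# train-validation split):

for fold in range(manager.number_of_folds):
    fold_loader = manager.get_dataloader(train=True, validation=False,
                                         test=False, fold_index=fold)
    print("Fold {0}: {1}".format(fold, fold_loader))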
    batch_size=1000,
    stratify_label="label",
    test_size=0.1,
    sample_size=0.1,
    input_transforms=[prepare])

#############################################################################
# Displaying some images of the test dataset.

from pynet.plotting import plot_data
import numpy as np

dataset = manager["test"]
sample = dataset.inputs.reshape(-1, data.height, data.width)
sample = np.expand_dims(sample, axis=1)
plot_data(sample, nb_samples=5)

#############################################################################
# Load the model
# --------------
#
# Load the model and freeze all its weights, then replace the last linear
# layer.

import pynet.interfaces as interfaces
from pynet import NetParameters
from pynet.utils import get_named_layers, freeze_layers, reset_weights
import torch.nn as nn

net_params = NetParameters(
# You may need to change the 'datasetdir' parameter.

import numpy as np
from pynet.datasets import DataManager, fetch_echocardiography
from pynet.plotting import plot_data

data = fetch_echocardiography(datasetdir="/tmp/echocardiography")
manager = DataManager(input_path=data.input_path,
                      metadata_path=data.metadata_path,
                      output_path=data.output_path,
                      number_of_folds=10,
                      batch_size=10,
                      test_size=0.1)
dataset = manager["test"]
data = np.concatenate((dataset.inputs, dataset.outputs), axis=1)
plot_data(data, nb_samples=5)

#############################################################################
# Optimisation
# ------------
#
# From the available models load the UNet, and start the training.
# You may need to change the 'outdir' parameter.

import os
import torch
import torch.nn as nn
from pynet.encoder import UNetEncoder
from pynet.plotting import plot_history
from pynet.history import History
test_history, train_history, valid_history = training(
    net=net,
    dataset=dataset,
    optimizer=optim.Adam(net.parameters(), lr=0.01),
    criterion=my_loss,
    nb_epochs=3,
    metrics={"mse": my_loss},
    use_cuda=False,
    outdir="/tmp/pynet",
    verbose=1)

#############################################################################
# You can reload the optimization history at any time and from any step.

from pprint import pprint
from pynet.history import History

valid_history = History.load("/tmp/pynet/history/valid_1_epoch_3.pkl")
pprint(valid_history.history)
pprint(valid_history["loss"])

#############################################################################
# You can finally display the optimization cost.

from pynet.plotting import plot_data

x, y = valid_history["loss"]
plot_data(y)
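#############################################################################
# The file name encodes the fold and the epoch, so earlier steps can be
# reloaded the same way (a sketch, assuming a history file was written at
# each of the three epochs above):

for epoch in (1, 2, 3):
    history = History.load(
        "/tmp/pynet/history/valid_1_epoch_{0}.pkl".format(epoch))
    pprint(history["loss"])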
    depth=3,
    start_filts=8,
    up_mode="upsample",
    merge_mode="concat",
    batchnorm=True)
plot_net(model, shape=(1, 1, 64, 64, 64), static=True, outfileroot=None)

############################################################################
# Inspect a network
# -----------------
#
# Inspect some layers of the UNet:

from pynet.utils import test_model
from pprint import pprint
import numpy as np
from pynet.utils import get_named_layers
from pynet.utils import layer_at
from pynet.plotting import plot_data

out = test_model(model, shape=(1, 1, 64, 64, 64))
layers = get_named_layers(model)
pprint(layers)
hook_x, weight = layer_at(
    model=model,
    layer_name="down.1.doubleconv.conv1-8.16",
    x=torch.FloatTensor(np.random.random((1, 1, 64, 64, 64))))
print(hook_x.shape)
print(weight.shape)
plot_data(hook_x[0, 0], extradata=[im for im in hook_x[0, 1:]])
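############################################################################
# Basic statistics of the hooked activation can help spot channels that
# never fire (a sketch; mean and std are cast to float so it works whether
# layer_at returns a numpy array or a torch tensor):

for channel, fmap in enumerate(hook_x[0]):
    print("channel {0}: mean={1:.4f}, std={2:.4f}".format(
        channel, float(fmap.mean()), float(fmap.std())))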