# Imports required by this snippet (the pynet module paths are assumed).
import unittest
import torch.nn as nn
import torch.nn.functional as func
from pynet.datasets import DataManager, fetch_cifar
from pynet.interfaces import DeepLearningInterface


class TestCore(unittest.TestCase):
    """ Test the core of pynet. """

    def setUp(self):
        """ Setup test. """
        data = fetch_cifar(datasetdir="/tmp/cifar")
        self.manager = DataManager(
            input_path=data.input_path,
            labels=["label"],
            metadata_path=data.metadata_path,
            number_of_folds=10,
            batch_size=10,
            stratify_label="category",
            test_size=0.1,
            sample_size=0.01)

        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.conv1 = nn.Conv2d(3, 6, 5)
                self.pool = nn.MaxPool2d(2, 2)
                self.conv2 = nn.Conv2d(6, 16, 5)
                self.fc1 = nn.Linear(16 * 5 * 5, 120)
                self.fc2 = nn.Linear(120, 84)
                self.fc3 = nn.Linear(84, 10)

            def forward(self, x):
                x = self.pool(func.relu(self.conv1(x)))
                x = self.pool(func.relu(self.conv2(x)))
                x = x.view(-1, 16 * 5 * 5)
                x = func.relu(self.fc1(x))
                x = func.relu(self.fc2(x))
                x = self.fc3(x)
                return x

        self.cl = DeepLearningInterface(
            model=Net(),
            optimizer_name="SGD",
            momentum=0.9,
            learning_rate=0.001,
            loss_name="CrossEntropyLoss",
            metrics=["accuracy"])

    def tearDown(self):
        """ Run after each test. """
        pass

    def test_core(self):
        """ Test the core. """
        test_history, train_history = self.cl.training(
            manager=self.manager,
            nb_epochs=3,
            checkpointdir="/tmp/pynet",
            fold_index=0,
            with_validation=True)
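# The DataManager arguments above (number_of_folds, stratify_label,
# test_size) describe a stratified hold-out split followed by K folds.
# Below is a minimal sketch of that splitting strategy with scikit-learn on
# toy arrays; it illustrates the idea only, not pynet's actual
# implementation.
import numpy as np
from sklearn.model_selection import StratifiedKFold, train_test_split

X = np.random.rand(1000, 64)             # toy feature matrix
y = np.random.randint(0, 10, size=1000)  # toy stratification label

# test_size=0.1 -> hold out 10% for testing, stratified on the label.
X_dev, X_test, y_dev, y_test = train_test_split(
    X, y, test_size=0.1, stratify=y, random_state=0)

# number_of_folds=10 -> 10 stratified folds on the remaining data.
skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)
for fold_index, (train_idx, val_idx) in enumerate(skf.split(X_dev, y_dev)):
    pass  # each fold yields train/validation indices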
def breast_cancer(x_train, y_train, x_val, y_val, params):
    print("Iteration parameters: ", params)

    def weights_init_uniform_rule(m):
        classname = m.__class__.__name__
        if classname.find('Linear') != -1:
            n = m.in_features
            y = 1.0 / np.sqrt(n)
            m.weight.data.uniform_(-y, y)
            m.bias.data.fill_(0)

    manager = DataManager.from_numpy(
        train_inputs=x_train,
        train_labels=y_train,
        batch_size=params["batch_size"],
        validation_inputs=x_val,
        validation_labels=y_val)
    net = BreastCancerNet(
        n_feature=x_train.shape[1],
        first_neuron=params["first_neuron"],
        second_neuron=params["second_neuron"],
        dropout=params["dropout"])
    net.apply(weights_init_uniform_rule)
    net.init_history()
    model = DeepLearningInterface(
        model=net,
        optimizer_name=params["optimizer_name"],
        learning_rate=params["learning_rate"],
        loss_name=params["loss_name"],
        metrics=["accuracy"])
    model.add_observer("after_epoch", update_talos_history)
    model.training(
        manager=manager,
        nb_epochs=params["epochs"],
        checkpointdir=None,
        fold_index=0,
        with_validation=True)
    return net, net.parameters()
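# weights_init_uniform_rule above relies on nn.Module.apply, which walks
# every submodule and calls the given function on each. A minimal
# standalone sketch of the same pattern in plain PyTorch (toy model):
import numpy as np
import torch.nn as nn

def init_uniform(m):
    # Initialize every Linear layer from U(-1/sqrt(n), 1/sqrt(n)),
    # where n is the number of input features.
    if isinstance(m, nn.Linear):
        bound = 1.0 / np.sqrt(m.in_features)
        m.weight.data.uniform_(-bound, bound)
        m.bias.data.fill_(0)

mlp = nn.Sequential(nn.Linear(30, 16), nn.ReLU(), nn.Linear(16, 2))
mlp.apply(init_uniform)  # apply() recurses into every submodule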
# display result
img = ndimage.zoom(img, 5, order=0)
img = np.expand_dims(img, axis=0)
img = np.expand_dims(img, axis=0)
img = (img / img.max()) * 255
board.viewer.images(
    img,
    opts={
        "title": "sampling",
        "caption": "sampling"},
    win="sampling")

model = VAE(input_dim=(28 * 28), hidden_dim=128, latent_dim=20)
interface = DeepLearningInterface(
    model=model,
    optimizer_name="Adam",
    learning_rate=0.001,
    loss=DecodeLoss())
interface.board = Board(
    port=8097,
    host="http://localhost",
    env="vae",
    display_pred=True,
    prepare_pred=prepare_pred)
interface.add_observer("after_epoch", update_board)
interface.add_observer("after_epoch", sampling)
test_history, train_history = interface.training(
    manager=manager,
    nb_epochs=10,
    checkpointdir=None,
    fold_index=0,
    with_validation=True)
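# The sampling observer above draws new images from the model after each
# epoch. A minimal sketch of prior sampling with a plain PyTorch decoder;
# the decoder architecture here is hypothetical, just to show the
# z -> image path:
import torch
import torch.nn as nn

latent_dim, hidden_dim, image_dim = 20, 128, 28 * 28
decoder = nn.Sequential(  # hypothetical stand-in for the VAE decoder
    nn.Linear(latent_dim, hidden_dim), nn.ReLU(),
    nn.Linear(hidden_dim, image_dim), nn.Sigmoid())

with torch.no_grad():
    z = torch.randn(16, latent_dim)    # sample from the N(0, I) prior
    img = decoder(z).view(16, 28, 28)  # decode to 16 images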
    return l2_regularization

def linear1_l1_activity_regularizer(signal):
    lambda1 = 0.01
    # Penalize the first layer activations (L1 activity regularization).
    layer1_out = signal.layer_outputs["layer1"]
    l1_regularization = lambda1 * torch.norm(layer1_out, 1)
    return l1_regularization

nb_snps = X_train.shape[1]
model = TwoLayersMLP(nb_snps, nb_neurons=[64, 32], nb_classes=1)
print(model)
cl = DeepLearningInterface(
    optimizer_name="SGD",
    learning_rate=5e-4,
    loss_name="MSELoss",
    metrics=["pearson_correlation"],
    model=model)
cl.add_observer("regularizer", linear1_l2_kernel_regularizer)
cl.add_observer("regularizer", linear1_l1_activity_regularizer)
test_history, train_history = cl.training(
    manager=manager,
    nb_epochs=(100 if "CI_MODE" not in os.environ else 10),
    checkpointdir="/tmp/genomic_pred",
    fold_index=0,
    with_validation=True)
y_hat, X, y_true, loss, values = cl.testing(
    manager=manager, with_logit=False, predict=False)
print(y_hat.shape, y_true.shape)
print(y_hat)
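# In plain PyTorch, these observer-based regularizers amount to extra
# penalty terms added to the loss before backward(). A minimal
# self-contained sketch on toy data (not the genomic example above):
import torch
import torch.nn as nn

net = nn.Sequential(nn.Linear(100, 64), nn.ReLU(), nn.Linear(64, 1))
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(net.parameters(), lr=5e-4)
lambda1, lambda2 = 0.01, 0.01

x, y = torch.randn(32, 100), torch.randn(32, 1)
optimizer.zero_grad()
activations = net[0](x)  # first-layer outputs
loss = criterion(net(x), y)
loss = loss + lambda1 * torch.norm(activations, 1)    # L1 activity penalty
loss = loss + lambda2 * torch.norm(net[0].weight, 2)  # L2 kernel penalty
loss.backward()
optimizer.step()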
    def forward(self, x):
        x = self.layers(x)
        return x

model = OneLayerMLP(image_size, nb_neurons, 9)
print(model)

#############################################################################
# Then we configure the parameters of the training step and train the model.

from pynet.interfaces import DeepLearningInterface

cl = DeepLearningInterface(
    optimizer_name="Adam",
    learning_rate=1e-4,
    loss_name="NLLLoss",
    metrics=["accuracy"],
    model=model)
test_history, train_history = cl.training(
    manager=manager,
    nb_epochs=10,
    checkpointdir="/tmp/orientation",
    fold_index=0,
    with_validation=True)

#############################################################################
# We focus now on test predictions.

import numpy as np
from pprint import pprint
from sklearn.metrics import classification_report
from pynet.plotting import plot_data
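# A minimal sketch of scoring class predictions with scikit-learn, assuming
# the test step yields per-class scores (y_pred) and integer labels
# (y_true); the arrays below are toy stand-ins:
import numpy as np
from sklearn.metrics import classification_report

y_true = np.array([0, 1, 2, 1])
y_pred = np.array([[0.8, 0.1, 0.1],
                   [0.2, 0.7, 0.1],
                   [0.1, 0.2, 0.7],
                   [0.6, 0.3, 0.1]])
print(classification_report(y_true, np.argmax(y_pred, axis=1)))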
labels = metrics.columns
sns.heatmap(spearmanr(metrics)[0], annot=True, cmap=plt.get_cmap("Blues"),
            xticklabels=labels, yticklabels=labels, ax=ax)

model = DenseFeedForwardNet(nb_features)
print(model)
extra_metric = METRICS["binary_accuracy"]
extra_metric.thr = 0.4
cl = DeepLearningInterface(
    optimizer_name="Adam",
    learning_rate=1e-4,
    weight_decay=1.1e-4,
    metrics=["binary_accuracy", "sk_roc_auc"],
    loss=my_loss,
    model=model)
cl.board = Board(port=8097, host="http://localhost", env="main")
cl.add_observer("after_epoch", update_board)
outdir = "/tmp/impac"
if not os.path.isdir(outdir):
    os.mkdir(outdir)
scheduler = lr_scheduler.ReduceLROnPlateau(
    optimizer=cl.optimizer,
    mode="min",
    factor=0.1,
    patience=5,
    verbose=True,
    eps=1e-8)
test_history, train_history = cl.training(
    manager=manager,
    nb_epochs=200,
    # the source snippet is truncated after nb_epochs; the remaining
    # arguments below are assumed from the surrounding context
    checkpointdir=outdir,
    fold_index=0,
    scheduler=scheduler,
    with_validation=True)
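# ReduceLROnPlateau shrinks the learning rate when a monitored value stops
# improving. A minimal sketch of the standard PyTorch usage on a toy model;
# when a scheduler is passed to training(), pynet presumably steps it in
# the same way:
import torch
from torch.optim import lr_scheduler

net = torch.nn.Linear(10, 1)
optimizer = torch.optim.Adam(net.parameters(), lr=1e-4)
scheduler = lr_scheduler.ReduceLROnPlateau(
    optimizer, mode="min", factor=0.1, patience=5)

for epoch in range(20):
    val_loss = torch.rand(1).item()  # stand-in for the real validation loss
    scheduler.step(val_loss)         # lr *= 0.1 after 5 stagnant epochs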
test_history, train_history = model.training(
    manager=manager,
    nb_epochs=N_EPOCHS,
    checkpointdir=None,
    fold_index=0,
    scheduler=None,
    with_validation=False)

# Test model
manager = DataManager.from_numpy(
    test_inputs=data, test_labels=labels, batch_size=BATCH_SIZE)
test_model = DeepLearningInterface(
    model=model.model.network,
    optimizer_name="SGD",
    learning_rate=0.01,
    momentum=0.9,
    weight_decay=10**-5,
    loss_name="CrossEntropyLoss")
y_pred, X, y_true, loss, values = test_model.testing(
    manager=manager,
    with_logit=True,
    # logit_function="sigmoid",
    predict=False)
print(y_pred.shape, X.shape, y_true.shape)

# Inspect results
result = pd.DataFrame.from_dict(collections.OrderedDict([
    ("pred", (np.argmax(y_pred, axis=1)).astype(int)),
    ("truth", y_true.squeeze())]))
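# With predictions and ground truth in one DataFrame, a confusion table is
# one crosstab away. A small self-contained sketch on toy labels:
import numpy as np
import pandas as pd

result = pd.DataFrame({
    "pred": np.array([0, 1, 1, 0, 2]),
    "truth": np.array([0, 1, 2, 0, 2])})
print(pd.crosstab(result["truth"], result["pred"]))  # rows: truth
print("accuracy:", (result["pred"] == result["truth"]).mean())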
        x = func.relu(self.fc2(x))
        x = self.fc3(x)
        return x

net = Net()

#############################################################################
# Now start the optimisation.

import torch
from pynet.interfaces import DeepLearningInterface

cl = DeepLearningInterface(
    model=net,
    optimizer_name="SGD",
    momentum=0.9,
    learning_rate=0.001,
    loss_name="CrossEntropyLoss",
    metrics=["accuracy"])
if "CI_MODE" not in os.environ:
    from pynet.plotting import Board

    def update_board(signal):
        """ Callback to update visdom board visualizer.

        Parameters
        ----------
        signal: SignalObject
            an object with the trained model 'object', the emitted signal
            'signal', the epoch number 'epoch' and the fold index 'fold'.
        """
        net = signal.object.model
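# add_observer / signals implement an observer pattern: callbacks
# registered under an event name are fired with a signal object when that
# event occurs. A minimal generic sketch of the idea (not pynet's actual
# implementation):
class Observable:
    def __init__(self, events):
        self._observers = {event: [] for event in events}

    def add_observer(self, event, callback):
        self._observers[event].append(callback)

    def notify(self, event, signal):
        for callback in self._observers[event]:
            callback(signal)

trainer = Observable(events=["after_epoch"])
trainer.add_observer("after_epoch", lambda signal: print("epoch", signal))
trainer.notify("after_epoch", 1)  # fires every "after_epoch" callback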
board.viewer.images(
    images,
    opts={
        "title": "sampling",
        "caption": "sampling"},
    win="sampling")

latent_dim = 20
experts = [
    VAE(input_dim=(28 * 28), hidden_dim=128, latent_dim=latent_dim)
    for idx in range(10)]
model = Manager(
    input_dim=(28 * 28), hidden_dim=128, experts=experts,
    latent_dim=latent_dim)
interface = DeepLearningInterface(
    model=model,
    optimizer_name="Adam",
    learning_rate=0.001,
    loss=ManagerLoss(balancing_weight=0.1),
    use_cuda=True)
interface.board = Board(
    port=8097,
    host="http://localhost",
    env="vae")
interface.add_observer("after_epoch", update_board)
interface.add_observer("after_epoch", sampling)
test_history, train_history = interface.training(
    manager=manager,
    nb_epochs=100,
    checkpointdir=None,
    fold_index=0,
    with_validation=False)
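# use_cuda=True only helps when a GPU is available. The equivalent guard in
# plain PyTorch looks like this (toy module):
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = torch.nn.Linear(784, 128).to(device)  # move parameters to the device
batch = torch.randn(32, 784).to(device)     # inputs must follow the model
out = net(batch)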