def setUp(self):
        """Create a temp checkpoint directory, pre-train one epoch so that
        checkpoint files exist on disk, then build the experiment under test."""
        self.temp_dir_obj = TemporaryDirectory()
        self.test_checkpoints_path = self.temp_dir_obj.name
        base = self.test_checkpoints_path
        self.ckpt_1_path = os.path.join(base, "checkpoint_epoch_1.ckpt")
        self.ckpt_last_path = os.path.join(base, "checkpoint.ckpt")

        # Shared Experiment keyword arguments for both instances below.
        expt_kwargs = dict(
            optimizer='sgd',
            loss_function='mse',
            monitor_metric="loss",
            monitor_mode="min",
        )

        # Run a single training epoch so checkpoints get written.
        warmup_expt = Experiment(base, nn.Linear(1, 1), **expt_kwargs)
        warmup_expt.train(SomeDataGeneratorWithLen(2, 32, 0), epochs=1)

        # Fresh experiment pointing at the same checkpoint directory.
        self.test_experiment = Experiment(base, nn.Linear(1, 1), **expt_kwargs)
    def test_train_no_log(self):
        test_experiment = Experiment(
            self.test_checkpoints_path,
            nn.Linear(1, 1),
            optimizer='sgd',
            loss_function='mse',
            monitor_metric="loss",
            monitor_mode="min",
            logging=False,
        )
        train_generator = SomeDataGeneratorWithLen(32, 10, 0)
        valid_generator = SomeDataGeneratorWithLen(32, 10, 0)
        logs = test_experiment.train(train_generator,
                                     valid_generator,
                                     epochs=ExperimentTest.NUM_EPOCHS)

        self.assertFalse(os.path.isdir(self.test_checkpoints_path))
        self.assertFalse(os.path.isfile(self.ckpt_1_path))
        self.assertFalse(os.path.isfile(self.ckpt_last_path))
        self.assertFalse(os.path.isfile(self.optim_ckpt_path))
        self.assertFalse(os.path.isfile(self.tsv_log_path))
        self.assertFalse(os.path.isfile(self.epoch_file_path))
        self.assertFalse(os.path.isfile(self.tsv_test_log_path))
        self.assertFalse(os.path.isfile(self.time_metric_plot_png_file_path))
        self.assertFalse(os.path.isfile(self.time_metric_plot_pdf_file_path))
        self.assertFalse(os.path.isfile(self.loss_metric_plot_png_file_path))
        self.assertFalse(os.path.isfile(self.loss_metric_plot_pdf_file_path))

        self.assertEqual(len(logs), ExperimentTest.NUM_EPOCHS)
        for i, log in enumerate(logs, 1):
            self.assertIn('epoch', log)
            self.assertEqual(log['epoch'], i)
            self.assertIn('loss', log)
            self.assertIn('val_loss', log)
            self.assertIn('time', log)
    def setUp(self):
        self.temp_dir_obj = TemporaryDirectory()
        self.test_checkpoints_path = os.path.join(self.temp_dir_obj.name,
                                                  'expt')

        self.test_experiment = Experiment(
            self.test_checkpoints_path,
            nn.Linear(1, 1),
            optimizer='sgd',
            loss_function='mse',
            monitor_metric="loss",
            monitor_mode="min",
        )
        self.ckpt_1_path = os.path.join(self.test_checkpoints_path,
                                        "checkpoint_epoch_1.ckpt")
        self.ckpt_last_path = os.path.join(self.test_checkpoints_path,
                                           "checkpoint.ckpt")
        self.optim_ckpt_path = os.path.join(self.test_checkpoints_path,
                                            "checkpoint.optim")
        self.tsv_log_path = os.path.join(self.test_checkpoints_path, "log.tsv")
        self.tsv_test_log_path = os.path.join(self.test_checkpoints_path,
                                              "test_log.tsv")
        self.epoch_file_path = os.path.join(self.test_checkpoints_path,
                                            "last.epoch")
        self.time_metric_plot_png_file_path = os.path.join(
            self.test_checkpoints_path, "plots", 'time.png')
        self.time_metric_plot_pdf_file_path = os.path.join(
            self.test_checkpoints_path, "plots", 'time.pdf')
        self.loss_metric_plot_png_file_path = os.path.join(
            self.test_checkpoints_path, "plots", 'loss.png')
        self.loss_metric_plot_pdf_file_path = os.path.join(
            self.test_checkpoints_path, "plots", 'loss.pdf')
    def setUpTwoExperiment(self, a_params=None, b_params=None):
        self.test_checkpoints_path_b = os.path.join(self.temp_dir_obj.name,
                                                    'expt_b')
        self.test_checkpoints_path_a = os.path.join(self.temp_dir_obj.name,
                                                    'expt_a')

        if a_params is None:
            a_params = {}

        self.test_experiment_a = Experiment(self.test_checkpoints_path_a,
                                            nn.Linear(1, 1), **a_params)

        if b_params is None:
            b_params = {}

        self.test_experiment_b = Experiment(self.test_checkpoints_path_b,
                                            nn.Linear(1, 1), **b_params)
# NOTE(review): this script chunk starts mid-stream — `num_classes`,
# `num_valid_samples`, `num_features`, `hidden_state_size`, `train_x`,
# `train_y` and `valid_x` must be defined above this excerpt.
valid_y = np.random.randint(num_classes,
                            size=num_valid_samples).astype('int64')

# Random test split: 200 samples with integer class labels.
num_test_samples = 200
test_x = np.random.randn(num_test_samples, num_features).astype('float32')
test_y = np.random.randint(num_classes, size=num_test_samples).astype('int64')

# Select CUDA device 0 if available, otherwise fall back to CPU.
cuda_device = 0
device = torch.device("cuda:%d" %
                      cuda_device if torch.cuda.is_available() else "cpu")

# Define the network: a single-hidden-layer MLP classifier.
network = nn.Sequential(nn.Linear(num_features, hidden_state_size), nn.ReLU(),
                        nn.Linear(hidden_state_size, num_classes))

# We need to use dataloaders (i.e. an iterable of batches) with Experiment
train_loader = DataLoader(TensorDataset(train_x, train_y), batch_size=32)
valid_loader = DataLoader(TensorDataset(valid_x, valid_y), batch_size=32)
test_loader = DataLoader(TensorDataset(test_x, test_y), batch_size=32)

# Everything is saved in ./expt/my_classification_network
# (task='classif' makes Experiment pick classification defaults).
expt = Experiment('./expt/my_classification_network',
                  network,
                  device=device,
                  optimizer='sgd',
                  task='classif')

expt.train(train_loader, valid_loader, epochs=5)

# Evaluate on the held-out test loader using the best checkpoint.
expt.test(test_loader)
# --- Example #6 ---
import torch
import torch.nn as nn
from torch.utils.data import random_split
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor
from poutyne import Experiment

# Instantiate the MNIST dataset (downloaded to ./datasets on first run).
train_valid_dataset = MNIST('./datasets', train=True, download=True, transform=ToTensor())
test_dataset = MNIST('./datasets', train=False, download=True, transform=ToTensor())
# Fixed-seed 50k/10k train/validation split for reproducibility.
train_dataset, valid_dataset = random_split(train_valid_dataset, [50_000, 10_000],
                                            generator=torch.Generator().manual_seed(42))

# Select CUDA device if available
cuda_device = 0
device = torch.device('cuda:%d' % cuda_device if torch.cuda.is_available() else 'cpu')

# Define the network: flatten 28x28 images, one hidden layer, 10 class logits.
network = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 100), nn.ReLU(), nn.Linear(100, 10))
epochs = 5

# Define the Experiment and train
experiment = Experiment(
    './simple_model',  # Where to log
    network,
    optimizer='sgd',
    loss_function='cross_entropy',
    device=device
)
# train_dataset(...) wraps the datasets in dataloaders internally.
experiment.train_dataset(train_dataset, valid_dataset, epochs=epochs)
# NOTE(review): `num_features`, `hidden_state_size`, `train_x`, `train_y`,
# `valid_x`, `valid_y`, `test_x`, `test_y` and `r2_score` come from code
# above this excerpt — confirm against the full script.
# Select CUDA device 0 if available, otherwise fall back to CPU.
cuda_device = 0
device = torch.device("cuda:%d" %
                      cuda_device if torch.cuda.is_available() else "cpu")

# Define the network: single-hidden-layer MLP with a scalar output head.
network = nn.Sequential(
    nn.Linear(num_features, hidden_state_size),
    nn.ReLU(),
    nn.Linear(hidden_state_size, 1),
)

# We need to use dataloaders (i.e. an iterable of batches) with Experiment
train_loader = DataLoader(TensorDataset(train_x, train_y), batch_size=32)
valid_loader = DataLoader(TensorDataset(valid_x, valid_y), batch_size=32)
test_loader = DataLoader(TensorDataset(test_x, test_y), batch_size=32)

# Everything is saved in ./saves/my_regression_network
# (task='regression' selects regression defaults; L1 is tracked per batch
# and R^2 is computed per epoch via the scikit-learn metric wrapper).
expt = Experiment(
    './saves/my_regression_network',
    network,
    device=device,
    optimizer='sgd',
    task='regression',
    batch_metrics=['l1'],
    epoch_metrics=[SKLearnMetrics(r2_score)],
)

expt.train(train_loader, valid_loader, epochs=5)

expt.test(test_loader)
# --- Example #8 ---
from poutyne import Experiment

# NOTE(review): the original comment said "Instead of `task`, you can provide
# your own loss function and metrics" — yet the call below still passes
# task='classifier'. Presumably the snippet was meant to show the `task`
# shorthand; confirm against the original example text.
expt = Experiment('my_directory', network, task='classifier', optimizer='sgd')
# seed=42 makes the training run reproducible.
expt.train(train_loader,
           valid_loader,
           epochs=epochs,
           callbacks=callbacks,
           seed=42)
expt.test(test_loader)