Example #1
    def test_cifar(self):
        import time
        from os.path import join
        from inferno.trainers.basic import Trainer
        from inferno.io.box.cifar import get_cifar10_loaders
        # Build CIFAR-10 loaders
        trainloader, testloader = get_cifar10_loaders(
            root_directory=join(self.ROOT_DIR, 'data'),
            download=self.DOWNLOAD_CIFAR)
        # Make model
        net = self._make_test_model()
        tic = time.time()
        # Make trainer
        trainer = Trainer(model=net)\
            .build_optimizer('Adam')\
            .build_criterion('CrossEntropyLoss')\
            .build_metric('CategoricalError')\
            .validate_every((1, 'epochs'))\
            .save_every((1, 'epochs'), to_directory=join(self.ROOT_DIR, 'saves'))\
            .save_at_best_validation_score()\
            .set_max_num_epochs(2)
        # Bind trainer to datasets
        trainer.bind_loader('train', trainloader)\
               .bind_loader('validate', testloader)
        # Check device and fit
        if self.CUDA:
            if self.HALF_PRECISION:
                trainer.cuda().set_precision('half').fit()
            else:
                trainer.cuda().fit()
        else:
            trainer.fit()
        toc = time.time()
        print("[*] Elapsed time: {} seconds.".format(toc - tic))
Example #2
    def test_multi_gpu(self):
        import torch
        if not torch.cuda.is_available():
            return

        from inferno.trainers.basic import Trainer
        from inferno.io.box.cifar import get_cifar10_loaders
        import os

        # Make model
        net = self._make_test_model()
        # Make trainer
        trainer = Trainer(model=net) \
            .build_optimizer('Adam') \
            .build_criterion('CrossEntropyLoss') \
            .build_metric('CategoricalError') \
            .validate_every((1, 'epochs')) \
            .save_every((1, 'epochs'), to_directory=os.path.join(self.ROOT_DIR, 'saves')) \
            .save_at_best_validation_score() \
            .set_max_num_epochs(2)\
            .cuda(devices=[0, 1, 2, 3], base_device='cpu')

        train_loader, validate_loader = get_cifar10_loaders(
            root_directory=self.ROOT_DIR, download=True)
        trainer.bind_loader('train', train_loader)
        trainer.bind_loader('validate', validate_loader)

        trainer.fit()
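The devices=[0, 1, 2, 3] argument above assumes a machine with four GPUs. A small sketch of guarding the device list by the number of GPUs actually present, using only torch.cuda.device_count() and the cuda(...) calls already shown in these examples:

        num_gpus = torch.cuda.device_count()
        if num_gpus > 1:
            # Spread training over all visible GPUs, keeping the base copy on the CPU.
            trainer.cuda(devices=list(range(num_gpus)), base_device='cpu')
        else:
            # Fall back to single-device training.
            trainer.cuda()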
Example #3
    def setUp(self):
        import os
        from inferno.trainers.basic import Trainer
        from inferno.trainers.callbacks.logging.tensorboard import TensorboardLogger
        from inferno.io.box.cifar import get_cifar10_loaders

        # Build model
        net = self._make_test_model()

        # Build trainer
        self.trainer = Trainer(net)\
            .build_logger(TensorboardLogger(send_image_at_batch_indices=0,
                                            send_image_at_channel_indices='all',
                                            log_images_every=(20, 'iterations')),
                          log_directory=os.path.join(self.ROOT_DIR, 'logs'))\
            .build_criterion('CrossEntropyLoss')\
            .build_metric('CategoricalError')\
            .build_optimizer('Adam')\
            .validate_every((1, 'epochs'))\
            .save_every((2, 'epochs'), to_directory=os.path.join(self.ROOT_DIR, 'saves'))\
            .save_at_best_validation_score()\
            .set_max_num_epochs(2)\
            .cuda().set_precision(self.PRECISION)

        # Load CIFAR10 data
        train_loader, test_loader = \
            get_cifar10_loaders(root_directory=os.path.join(self.ROOT_DIR, 'data'),
                                download=self.DOWNLOAD_CIFAR)

        # Bind loaders
        self.trainer.bind_loader('train', train_loader).bind_loader(
            'validate', test_loader)
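The test methods refer to class attributes such as ROOT_DIR, DOWNLOAD_CIFAR, CUDA, HALF_PRECISION and PRECISION that are defined outside these excerpts. A minimal test-class skeleton with assumed placeholder values (the class name and the values are hypothetical):

    import os
    import unittest

    class TestTrainer(unittest.TestCase):
        # Placeholder configuration; the real test suite defines these elsewhere.
        ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
        DOWNLOAD_CIFAR = True
        CUDA = False
        HALF_PRECISION = False
        PRECISION = 'float'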
Example #4
    def test_serialization(self):
        import os
        from inferno.trainers.basic import Trainer
        from inferno.io.box.cifar import get_cifar10_loaders

        if not hasattr(self, 'trainer'):
            self.setUp()
        # Serialize
        self.trainer.save()
        # Deserialize
        trainer = Trainer().load(os.path.join(self.ROOT_DIR, 'saves'))
        train_loader, test_loader = \
            get_cifar10_loaders(root_directory=os.path.join(self.ROOT_DIR, 'data'),
                                download=self.DOWNLOAD_CIFAR)
        trainer.bind_loader('train', train_loader)\
               .bind_loader('validate', test_loader)
        trainer.fit()
        trainer.print("Inspect logs at: {}".format(self.trainer.log_directory))
Example #5
import torch.nn as nn
from inferno.extensions.layers import ConvELU2D
from inferno.extensions.layers import Flatten

model = nn.Sequential(
    ConvELU2D(in_channels=3, out_channels=256, kernel_size=3),
    nn.MaxPool2d(kernel_size=2, stride=2),
    ConvELU2D(in_channels=256, out_channels=256, kernel_size=3),
    nn.MaxPool2d(kernel_size=2, stride=2),
    ConvELU2D(in_channels=256, out_channels=256, kernel_size=3),
    nn.MaxPool2d(kernel_size=2, stride=2),
    Flatten(),
    nn.Linear(in_features=(256 * 4 * 4), out_features=10),
    nn.Softmax(dim=1))

##################################################
# Data loaders
from inferno.io.box.cifar import get_cifar10_loaders
# DATASET_DIRECTORY, DOWNLOAD_CIFAR and SAVE_DIRECTORY (used below) are
# placeholders assumed to be defined earlier in the example.
train_loader, validate_loader = get_cifar10_loaders(DATASET_DIRECTORY,
                                                    download=DOWNLOAD_CIFAR)

##################################################
# Build a Tensorboard logger
from inferno.trainers.callbacks.logging.tensorboard import TensorboardLogger

logger = TensorboardLogger(log_scalars_every=(1, 'iteration'),
                           log_images_every='never')

##################################################
# Build trainer
from inferno.trainers.basic import Trainer

trainer = Trainer(model)
trainer.build_criterion('CrossEntropyLoss')
trainer.build_metric('CategoricalError')
trainer.build_optimizer('Adam')
trainer.validate_every((2, 'epochs'))
trainer.save_every((5, 'epochs'))
trainer.save_to_directory(SAVE_DIRECTORY)
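Example #5 stops after configuring the trainer; the logger built above is never attached and no training is started. A sketch of finishing the walkthrough with the same calls used in Examples #1 through #4 (LOG_DIRECTORY is a placeholder like SAVE_DIRECTORY, and the epoch budget is an assumption):

# Attach the Tensorboard logger, bind the CIFAR-10 loaders and train.
trainer.build_logger(logger, log_directory=LOG_DIRECTORY)
trainer.set_max_num_epochs(10)  # assumed epoch budget
trainer.bind_loader('train', train_loader)
trainer.bind_loader('validate', validate_loader)
trainer.cuda()  # optional: move training to the GPU, as in the other examples
trainer.fit()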