Example #1
def main(args):
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    # Generate some random images
    input_images, target_masks = generate_random_data(args.im_size,
                                                      args.im_size,
                                                      count=3)

    print(f'=> image shape: {input_images.shape} in '
          f'range: [{input_images.min()}, {input_images.max()}]')
    print(f'=> target shape: {target_masks.shape} in '
          f'range: [{target_masks.min()}, {target_masks.max()}]')

    t_form = transforms.Compose([
        transforms.ToTensor(),
    ])
    # create datasets of simulated images
    train_set = SimDataset(args.t_size, args.im_size, transform=t_form)
    val_set = SimDataset(args.v_size, args.im_size, transform=t_form)

    # create dataloaders
    train_loader = DataLoader(train_set,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=0,
                              drop_last=True)
    val_loader = DataLoader(val_set,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=0)

    # train model
    model = main_worker(train_loader, val_loader, args=args)

    return model
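The `args` namespace used above is not defined in this snippet; a minimal argparse sketch that supplies the fields `main()` reads (`seed`, `im_size`, `t_size`, `v_size`, `batch_size`) could look like the following. Flag names and defaults are illustrative assumptions, not taken from the original script.

import argparse

def parse_args():
    # Hypothetical CLI exposing only the fields main() reads above.
    parser = argparse.ArgumentParser(description='Train a segmentation model on simulated data')
    parser.add_argument('--seed', type=int, default=None)
    parser.add_argument('--im-size', type=int, default=192)    # -> args.im_size
    parser.add_argument('--t-size', type=int, default=2000)    # -> args.t_size
    parser.add_argument('--v-size', type=int, default=200)     # -> args.v_size
    parser.add_argument('--batch-size', type=int, default=25)  # -> args.batch_size
    return parser.parse_args()

if __name__ == '__main__':
    main(parse_args())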
Example #2
    def __init__(self, count, image_path, mask_path=None, transform=None):
        self.input_images, self.target_masks = simulation.generate_random_data(
            192, 192, count=count)
        self.transform = transform

        image_names = os.listdir(image_path)
        self.image_names = [
            os.path.join(image_path, image_name) for image_name in image_names
            if image_name.endswith(('.jpg', '.png'))
        ]

        self.mask_names = None

        if mask_path:
            mask_names = os.listdir(mask_path)
            self.mask_names = [
                os.path.join(mask_path, mask_name) for mask_name in mask_names
                if mask_name.endswith(('.jpg', '.png'))
            ]
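Only the constructor is shown; to be usable with a DataLoader the class also needs `__len__` and `__getitem__`. A minimal sketch, assuming the simulated arrays are what the dataset actually serves (loading from the collected file paths would need extra logic):

    def __len__(self):
        return len(self.input_images)

    def __getitem__(self, idx):
        image = self.input_images[idx]
        mask = self.target_masks[idx]
        if self.transform:
            image = self.transform(image)
        return image, mask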
Example #3
    def __init__(self, count, transform=None):
        self.input_images, self.target_masks = simulation.generate_random_data(
            192, 192, count=count)
        self.transform = transform
Example #4
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    since = time.time()

    best_model_wts = copy.deepcopy(model.state_dict())
    best_loss = 1e10

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode

            running_loss = 0.0

            # Iterate over data.
            batch_size = 10
            epoch_steps = 10
            for i in range(epoch_steps):
                input_images, target_masks = simulation.generate_random_data(
                    192, 192, count=batch_size)

                inputs = torch.from_numpy(input_images)
                labels = torch.from_numpy(target_masks)
                inputs = inputs.to(device)
                labels = labels.to(device)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    loss = criterion(outputs, labels)

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # statistics
                running_loss += loss.item() * inputs.size(0)

            # Step the LR scheduler once per epoch, after the optimizer updates
            if phase == 'train':
                scheduler.step()

            epoch_loss = running_loss / (batch_size * epoch_steps)

            print('{} Loss: {:.4f}'.format(phase, epoch_loss))

            # deep copy the model
            if phase == 'val' and epoch_loss < best_loss:
                best_loss = epoch_loss
                best_model_wts = copy.deepcopy(model.state_dict())

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val loss: {:.4f}'.format(best_loss))

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
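A sketch of how `train_model` might be driven end to end. The `UNet` class, loss choice, and hyperparameters here are assumptions for illustration; the function itself only requires a model, a criterion that accepts raw outputs and float masks, an optimizer, and an LR scheduler (it also relies on the repo's `simulation` module plus `time`, `copy`, and a global `device`).

import copy
import time

import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = UNet(n_class=6).to(device)   # hypothetical UNet producing 6 mask channels
criterion = nn.BCEWithLogitsLoss()   # per-pixel loss on the multi-channel masks
optimizer = optim.Adam(model.parameters(), lr=1e-4)
scheduler = lr_scheduler.StepLR(optimizer, step_size=25, gamma=0.1)

model = train_model(model, criterion, optimizer, scheduler, num_epochs=50)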
Example #5
#%%
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')

import os, sys
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import helper
import simulation

# Generate some random images
input_images, target_masks = simulation.generate_random_data(192, 192, count=3)

print(input_images.shape, target_masks.shape)

# Change channel-order and make 3 channels for matplotlib
input_images_rgb = [
    (x.swapaxes(0, 2).swapaxes(0, 1) * -255 + 255).astype(np.uint8)
    for x in input_images
]

# Map each channel (i.e. class) to each color
target_masks_rgb = [helper.masks_to_colorimg(x) for x in target_masks]

# Left: Input image, Right: Target mask
helper.plot_side_by_side([input_images_rgb, target_masks_rgb])

#%%
from torchvision import models
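`helper.masks_to_colorimg` is used above to map each mask channel (i.e. class) to a colour, but its implementation is not shown. A rough re-implementation sketch, assuming six mask channels and an arbitrary colour palette (both are assumptions, not the helper's actual code):

import numpy as np

def masks_to_colorimg_sketch(masks, threshold=0.5):
    # masks: array of shape (channels, H, W) with values in [0, 1]
    colors = np.asarray([(201, 58, 64), (242, 207, 1), (0, 152, 75),
                         (101, 172, 228), (56, 34, 132), (160, 194, 56)])
    colorimg = np.full((masks.shape[1], masks.shape[2], 3), 255, dtype=np.float32)
    for y in range(masks.shape[1]):
        for x in range(masks.shape[2]):
            selected = masks[:, y, x] > threshold  # which classes are present at this pixel
            if selected.any():
                colorimg[y, x, :] = colors[selected].mean(axis=0)
    return colorimg.astype(np.uint8)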
Example #6
        z.load_model(checkpoint_file)

    # Prepare model and trainer
    lr = C.learning_rate_schedule(0.00001, C.UnitType.sample)
    momentum = C.learners.momentum_as_time_constant_schedule(0)
    trainer = C.Trainer(z, (-dice_coef, -dice_coef), C.learners.adam(z.parameters, lr=lr, momentum=momentum))

    # Get minibatches of training data and perform model training
    minibatch_size = 2
    num_epochs = 10
    num_mb_per_epoch = int(data_size / minibatch_size)

    for e in range(0, num_epochs):
        for i in range(0, num_mb_per_epoch):
            training_x = input_images[i * minibatch_size:(i + 1) * minibatch_size]
            training_y = target_masks[i * minibatch_size:(i + 1) * minibatch_size]

            trainer.train_minibatch({x: training_x, y: training_y})

        trainer.save_checkpoint(checkpoint_file)

    return trainer

if __name__ == '__main__':
    shape = (1, 128, 128)
    data_size = 500

    input_images, target_masks = simulation.generate_random_data(shape, data_size)

    train(input_images, target_masks, False)
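`dice_coef` above is used (negated) as both the loss and the evaluation criterion for the CNTK Trainer, but its definition is not part of this snippet. A minimal sketch of a soft Dice coefficient built from CNTK ops, assuming `z` is the model output and `y` is the label variable passed to train_minibatch:

import cntk as C

def dice_coefficient(prediction, target, smooth=1.0):
    # Soft Dice: 2*|X ∩ Y| / (|X| + |Y|), computed over the whole map.
    intersection = C.reduce_sum(prediction * target)
    return (2.0 * intersection + smooth) / (C.reduce_sum(prediction) + C.reduce_sum(target) + smooth)

# dice_coef = dice_coefficient(z, y)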