def _load_checkpoint(checkpoint_path, num_classes):
    """Load a saved Keras model with the project's custom objects.

    Both the resume path and the test path need the identical
    ``custom_objects`` mapping to deserialize the model, so the load is
    centralized here instead of being duplicated at each call site.
    """
    return load_model(checkpoint_path,
                      custom_objects={
                          'Conv2DTranspose': Conv2DTranspose,
                          'mean_iou': MeanIoU(num_classes).mean_iou
                      })


def main():
    """Train and/or evaluate a segmentation model, driven by CLI args.

    ``args.mode`` selects the phase: 'train', 'test', or 'full' (both).
    ``args.dataset`` selects which generator class is imported.

    Raises:
        RuntimeError: if ``args.dataset`` names an unsupported dataset.
    """
    # Get command line arguments
    args = get_arguments()

    # Normalize once instead of re-lowercasing at every comparison.
    mode = args.mode.lower()
    dataset = args.dataset.lower()

    # Import the desired dataset generator
    if dataset == 'camvid':
        from data import CamVidGenerator as DataGenerator
    elif dataset == 'cityscapes':
        from data import CityscapesGenerator as DataGenerator
    elif dataset == 'dissection':
        from data import DissectionGenerator as DataGenerator
    else:
        # Should never happen...but just in case it does
        raise RuntimeError("\"{0}\" is not a supported dataset.".format(
            args.dataset))

    # Initialize training and validation dataloaders
    if mode in ('train', 'full'):
        train_generator = DataGenerator(args.dataset_dir,
                                        batch_size=args.batch_size,
                                        mode='train')
        val_generator = DataGenerator(args.dataset_dir,
                                      batch_size=args.batch_size,
                                      mode='val')

        # Some information about the dataset
        image_batch, label_batch = train_generator[0]
        # Last axis of a label sample is the class count
        # (assumes one-hot encoded labels -- TODO confirm in data module).
        num_classes = label_batch[0].shape[-1]
        print("--> Training batches: {}".format(len(train_generator)))
        print("--> Validation batches: {}".format(len(val_generator)))
        print("--> Image size: {}".format(image_batch.shape))
        print("--> Label size: {}".format(label_batch.shape))
        print("--> No. of classes: {}".format(num_classes))

    # Initialize test dataloader
    if mode in ('test', 'full'):
        test_generator = DataGenerator(args.dataset_dir,
                                       batch_size=args.batch_size,
                                       mode='test')

        # Some information about the dataset
        image_batch, label_batch = test_generator[0]
        num_classes = label_batch[0].shape[-1]
        print("--> Testing batches: {}".format(len(test_generator)))
        print("--> Image size: {}".format(image_batch.shape))
        print("--> Label size: {}".format(label_batch.shape))
        print("--> No. of classes: {}".format(num_classes))

    checkpoint_path = os.path.join(args.checkpoint_dir, args.name,
                                   args.name + '.h5')
    print("--> Checkpoint path: {}".format(checkpoint_path))

    model = None

    if mode in ('train', 'full'):
        if args.resume:
            # Resume: reload the checkpointed model and continue training.
            print("--> Resuming model: {}".format(checkpoint_path))
            model = _load_checkpoint(checkpoint_path, num_classes)
        tensorboard_logdir = os.path.join(args.checkpoint_dir, args.name)
        model = train(
            args.epochs,
            args.initial_epoch,
            train_generator,
            val_generator,
            args.learning_rate,
            args.lr_decay,
            args.lr_decay_epochs,
            pretrained_encoder=args.pretrained_encoder,
            weights_path=args.weights_path,
            checkpoint_model=model,
            verbose=args.verbose,
            workers=args.workers,
            checkpoint_path=checkpoint_path,
            tensorboard_logdir=tensorboard_logdir,
        )

    if mode in ('test', 'full'):
        # Evaluate the (possibly just-trained) checkpoint from disk.
        print("--> Loading model: {}".format(checkpoint_path))
        model = _load_checkpoint(checkpoint_path, num_classes)
        model = test(model, test_generator, args.workers, args.verbose)
# ---------------------------------------------------------------------------
# Example #2 (separate snippet: PyTorch ENet training script)
# ---------------------------------------------------------------------------
import torch
import torch.utils.data as data
import torchvision.transforms as transforms

from PIL import Image

import transforms as ext_transforms
from models.enet import ENet
from train import Train
from test import Test
from metric.iou import IoU
from args import get_arguments
from data.utils import enet_weighing, median_freq_balancing
import utils

# Get the arguments
# Parsed once at import time; the functions below read this module-level
# namespace instead of receiving the options as parameters.
args = get_arguments()

# Compute device chosen on the command line (e.g. 'cuda' or 'cpu').
device = torch.device(args.device)


def load_dataset(dataset):
    """Build the input pipeline for the dataset selected on the CLI.

    ``dataset`` is presumably the dataset class to instantiate (the
    module-level ``args`` namespace supplies directories and image
    size) -- TODO confirm against the caller.
    NOTE(review): this definition appears truncated in this chunk; the
    visible body only prints the configuration and builds the image
    transform pipeline.
    """
    print("\nLoading dataset...\n")

    print("Selected dataset:", args.dataset)
    print("Dataset directory:", args.dataset_dir)
    print("Save directory:", args.save_dir)

    # Resize every image to the CLI-specified (height, width), then
    # convert the PIL image to a tensor.
    image_transform = transforms.Compose(
        [transforms.Resize((args.height, args.width)),
         transforms.ToTensor()])
# ---------------------------------------------------------------------------
# Example #3 (separate snippet: argument re-parsing helper)
# ---------------------------------------------------------------------------
def modify_arguments():
    """Re-parse the CLI options and refresh module-level state.

    Rebinds the module-level ``args`` namespace and recomputes
    ``use_cuda`` (CUDA requested *and* actually available), then returns
    the freshly parsed namespace to the caller.
    """
    global args, use_cuda

    args = get_arguments()
    use_cuda = args.cuda and torch.cuda.is_available()
    return args