Ejemplo n.º 1
0
 def __init__(self, args):
     """Build the full training harness: data pipeline, network, loss, optimizer.

     Parameters
     ----------
     args : namespace
         Parsed command-line options (dataset, model, backbone, lr,
         batch sizes, context list, etc.).
     """
     self.args = args
     # ImageNet-style normalization applied to every input image
     normalize = transforms.Compose([
         transforms.ToTensor(),
         transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
     ])
     # train/val splits of the requested segmentation dataset
     train_split = get_segmentation_dataset(
         args.dataset, split='train', transform=normalize)
     val_split = get_segmentation_dataset(
         args.dataset, split='val', transform=normalize)
     # 'rollover' keeps batch size constant across epochs for training;
     # 'keep' preserves the final partial batch for evaluation
     self.train_data = gluon.data.DataLoader(
         train_split, args.batch_size, shuffle=True, last_batch='rollover',
         num_workers=args.workers)
     self.eval_data = gluon.data.DataLoader(
         val_split, args.test_batch_size, last_batch='keep',
         num_workers=args.workers)
     # build the segmentation network
     net = get_segmentation_model(
         model=args.model, dataset=args.dataset, backbone=args.backbone,
         norm_layer=args.norm_layer, aux=args.aux,
         norm_kwargs=args.norm_kwargs)
     # net.hybridize(static_alloc=True, static_shape=True)
     print(net)
     # wrap the model for multi-GPU training and evaluation
     self.net = DataParallelModel(net, args.ctx, args.syncbn)
     self.evaluator = DataParallelModel(SegEvalModel(net), args.ctx)
     # optionally restore weights from a checkpoint file
     if args.resume is not None:
         if not os.path.isfile(args.resume):
             raise RuntimeError(
                 "=> no checkpoint found at '{}'".format(args.resume))
         net.load_params(args.resume, ctx=args.ctx)
     # per-pixel softmax cross-entropy, optionally with the auxiliary head
     loss = SoftmaxCrossEntropyLossWithAux(args.aux)
     self.criterion = DataParallelCriterion(loss, args.ctx, args.syncbn)
     # poly learning-rate decay scheduled over the full training run
     self.lr_scheduler = LRScheduler(
         mode='poly', baselr=args.lr, niters=len(self.train_data),
         nepochs=args.epochs)
     # SGD with momentum; the kvstore aggregates gradients across devices
     store = mx.kv.create(args.kvstore)
     self.optimizer = gluon.Trainer(
         self.net.module.collect_params(), 'sgd',
         {'lr_scheduler': self.lr_scheduler,
          'wd': args.weight_decay,
          'momentum': args.momentum,
          'multi_precision': True},
         kvstore=store)
Ejemplo n.º 2
0
# display the figure(s) built above in an interactive window
plt.show()

##############################################################################
# Training Details
# ----------------
#
# - Training Losses:
#
#     We apply a standard per-pixel Softmax Cross Entropy Loss to train FCN. For Pascal
#     VOC dataset, we ignore the loss from boundary class (number 22).
#     Additionally, an Auxiliary Loss as in PSPNet [Zhao17]_ at Stage 3 can be enabled when
#     training with command ``--aux``. This will create an additional FCN "head" after Stage 3.
#
from gluoncv.model_zoo.segbase import SoftmaxCrossEntropyLossWithAux
# aux=True enables the PSPNet-style auxiliary loss described above
criterion = SoftmaxCrossEntropyLossWithAux(aux=True)

##############################################################################
# - Learning Rate and Scheduling:
#
#     We use different learning rate for FCN "head" and the base network. For the FCN "head",
#     we use :math:`10\times` base learning rate, because those layers are learned from scratch.
#     We use a poly-like learning rate scheduler for FCN training, provided in :class:`gluoncv.utils.PolyLRScheduler`.
#     The learning rate is given by :math:`lr = baselr \times (1-\frac{iter}{maxiter})^{power}`
#
# base learning rate 0.001; niters is iterations per epoch, so the decay
# presumably spans niters * nepochs total steps — confirm against
# PolyLRScheduler's documentation
lr_scheduler = gluoncv.utils.PolyLRScheduler(0.001,
                                             niters=len(train_data),
                                             nepochs=50)

##############################################################################
# - Dataparallel for multi-gpu training