def train(args, imgs, labels, img_val, label_val, modelConst):
    #
    # Training-time image augmentation.
    t = transforms.Compose([
        DataUtil.ToPIL(),
        DataUtil.RandomFlips(),
        # DataUtil.RandomRotation(5),
        # DataUtil.ColourJitter(0.1, 0.1, 0.1, 0),
        DataUtil.RandomResizedCrop(args.cropSize, (0.5, 1.3)),
        DataUtil.ToTensor(),
        DataUtil.Normalize([0.59008044], np.sqrt([0.06342617])),
        # DataUtil.TenCrop(140, [0.59008044], np.sqrt([0.06342617])),
    ])
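    #
    # The constants passed to Normalize above are the dataset channel mean
    # and variance; Normalize expects a standard deviation, hence np.sqrt.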
    #
    # Evaluation-time transform.
    t_test = transforms.Compose([
        DataUtil.ToPIL(),
        DataUtil.RandomFlips(),
        DataUtil.TenCrop(args.cropSize, [0.59008044], np.sqrt([0.06342617])),
    ])
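    #
    # TenCrop expands each validation image into ten views (four corners +
    # centre, plus their mirrored versions); the validation loop below
    # averages the model's outputs over these views.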

    #
    # Wrap the arrays in datasets and per-stage loaders.
    trainSet = dataloader.npdataset(imgs, labels.view(-1), t)
    valSet = dataloader.npdataset(img_val, label_val.view(-1), t_test)
    stages = {
        'train':
        torch.utils.data.DataLoader(trainSet,
                                    batch_size=args.bSize,
                                    shuffle=True,
                                    num_workers=0,
                                    pin_memory=True),
        'val':
        torch.utils.data.DataLoader(valSet,
                                    batch_size=args.bSize,
                                    shuffle=False,
                                    num_workers=0,
                                    pin_memory=True),
    }
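    #
    # pin_memory=True keeps host batches in page-locked memory so the
    # non-blocking GPU copies in the training loop can overlap with compute.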
    model = modelConst()
    usegpu = torch.cuda.is_available()
    criteria = nn.CrossEntropyLoss()
    #
    # Move the model to the GPU when one is available.
    if usegpu:
        model.cuda()
    #
    # Optimizer configuration.
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=0.0001,
                                 betas=(0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=1e-2)
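    #
    # Note: torch.optim.Adam applies weight_decay as a classic L2 penalty
    # coupled to the adaptive step; torch.optim.AdamW implements the
    # decoupled form.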
    bestModel = copy.deepcopy(model.state_dict())
    bestAcc = 0
    #
    # Logging is a no-op unless TensorBoard is enabled.
    logger = None
    logEpoch = doNothing
    closeLogger = doNothing
    if args.useTB:
        logger = SummaryWriter()
        logEpoch = logEpochTensorboard
        closeLogger = closeTensorboard
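        #
        # SummaryWriter() writes event files under ./runs/ by default.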
    #
    # Iterate.
    for epoch in range(args.numEpochs):
        printColour('Epoch {}/{}'.format(epoch, args.numEpochs - 1),
                    colours.OKBLUE)
        for stage in stages:
            gc.collect()
            print('Stage: ', stage)
            #
            # model.train() toggles training-specific layers such as
            # dropout and batch norm; gradients are enabled only for the
            # training stage in the forward pass below.
            model.train(stage == 'train')
            #
            # The current loss.
            runningLoss = 0.0
            runningCorrect = 0.0
            loader = stages[stage]
            #
            # Progress bar.
            numMini = len(loader)
            pbar = progressbar.ProgressBar(max_value=numMini - 1)
            #
            # Train.
            for i, data in enumerate(loader):
                inputs, labels = data['img'], data['label']
                labels = labels.view(-1)
                #
                # Move the batch to the GPU when available; non_blocking
                # copies pair with the pin_memory=True loaders above.
                if usegpu:
                    inputs = inputs.cuda(non_blocking=True)
                    labels = labels.cuda(non_blocking=True)
                #
                # Forward through the network; gradients are only tracked
                # during training.
                with torch.set_grad_enabled(stage == 'train'):
                    if stage == 'train':
                        out = model(inputs)
                    else:
                        #
                        # TenCrop produced ten views per image (corners +
                        # centre, plus mirrors). Fold the views into the
                        # batch dimension, run the model once, and average
                        # the outputs over the views.
                        bs, ncrops, c, h, w = inputs.size()
                        result = model(inputs.view(-1, c, h, w))
                        out = result.view(bs, ncrops, -1).mean(1)
                    loss = criteria(out, labels)
                _, preds = torch.max(out, 1)
                #
                # Backward pass (training only).
                if stage == 'train':
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                #
                # Running stats; .item() extracts Python scalars.
                runningLoss += loss.item()
                runningCorrect += torch.sum(preds == labels).item()
                pbar.update(i)
            #
            # Per-epoch stats. Accuracy is normalised by the number of
            # samples actually seen; the final batch may be smaller than
            # args.bSize.
            epochLoss = runningLoss / len(loader)
            epochAcc = runningCorrect / len(loader.dataset)
            #
            # Track the best model on the validation set. state_dict()
            # returns references into the live model, so deep-copy it or
            # later epochs would overwrite the saved weights.
            if stage == 'val' and epochAcc > bestAcc:
                bestAcc = epochAcc
                bestModel = copy.deepcopy(model.state_dict())
            #
            # Print per epoch results.
            print('\n{} Loss: {:.4f} Acc: {:.4f}'.format(
                stage, epochLoss, epochAcc))
            #
            # Per-epoch summary for the TensorBoard logger (includes the
            # last minibatch and its predictions).
            summary = {
                'phase': stage,
                'epoch': epoch,
                'loss': epochLoss,
                'acc': epochAcc,
                'data': data,
                'pred': preds
            }
            logEpoch(logger, model, summary)
    printColour('Best validation performance: %f' % bestAcc, colours.OKGREEN)
    closeLogger(logger)
    #
    # Return a CPU copy of the best weights so they can be saved or loaded
    # without a GPU.
    retModel = copy.deepcopy(bestModel)
    for key, val in retModel.items():
        retModel[key] = val.cpu()
    return retModel, bestAcc
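

#
# Example invocation (illustrative sketch only: `MyNet` stands in for a
# real model constructor, and the image / label arrays must match whatever
# dataloader.npdataset expects; none of these names are defined here):
#
#     from types import SimpleNamespace
#
#     args = SimpleNamespace(cropSize=140, bSize=32, numEpochs=25,
#                            useTB=False)
#     weights, acc = train(args, imgs, labels, img_val, label_val, MyNet)
#     torch.save(weights, 'best.pth')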