def predict(dataloader, model, maskColors):
    """Run inference over ``dataloader`` and write result images to disk.

    For every sample this saves three files:
      * the visualized input image            -> baseline_run/results_color/
      * the color-coded class prediction      -> baseline_run/results_color/
      * the label-id prediction map, resized
        to 2048x1024 (nearest-neighbor)       -> results/   (for evaluation)

    Args:
        dataloader: yields ``(inputs, filepath)`` or
            ``(inputs, labels, filepath)`` batches; labels are ignored.
        model: network returning per-class logits of shape (N, C, H, W).
        maskColors: color palette handed to ``vislbl`` for rendering labels.

    Returns:
        None. All results are side effects on the filesystem.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    progress = ProgressMeter(len(dataloader), [batch_time, data_time],
                             prefix='Predict: ')

    # Set model in evaluation mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for epoch_step, batch in enumerate(dataloader):

            # Test loaders may or may not include labels; keep only what
            # we need (inputs and the originating file paths).
            if len(batch) == 2:
                inputs, filepath = batch
            else:
                inputs, _, filepath = batch

            data_time.update(time.time() - end)

            inputs = inputs.float().cuda()

            # forward
            outputs = model(inputs)
            # Per-pixel class index: argmax over the channel dimension.
            preds = torch.argmax(outputs, 1)

            # Save visualizations and prediction maps for every sample
            # in the current batch (not just the first batch).
            for i in range(inputs.size(0)):
                filename = os.path.splitext(os.path.basename(filepath[i]))[0]
                # Save input
                img = visim(inputs[i, :, :, :])
                img = Image.fromarray(img, 'RGB')
                img.save(
                    'baseline_run/results_color/{}_input.png'.format(filename))
                # Save prediction with color labels
                pred = preds[i, :, :].cpu()
                pred_color = vislbl(pred, maskColors)
                pred_color = Image.fromarray(pred_color.astype('uint8'))
                pred_color.save(
                    'baseline_run/results_color/{}_prediction.png'.format(
                        filename))
                # Save class id prediction (used for evaluation)
                # NOTE(review): presumably ``trainid2id`` is a numpy lookup
                # table mapping train ids -> label ids; indexing it with the
                # whole prediction tensor remaps every pixel at once — confirm.
                pred_id = MiniCity.trainid2id[pred]
                pred_id = Image.fromarray(pred_id)
                pred_id = pred_id.resize((2048, 1024), resample=Image.NEAREST)
                pred_id.save('results/{}.png'.format(filename))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # print progress info
            progress.display(epoch_step)
def validate_epoch(dataloader,
                   model,
                   criterion,
                   epoch,
                   classLabels,
                   validClasses,
                   void=-1,
                   maskColors=None):
    """Evaluate ``model`` for one epoch on ``dataloader``.

    Tracks running loss, pixel accuracy (ignoring void pixels) and per-class
    IoU; optionally dumps visualizations of the first batch to
    ``baseline_run/images/``.

    Args:
        dataloader: yields ``(inputs, labels, filepath)`` batches.
        model: network returning per-class logits of shape (N, C, H, W).
        criterion: loss taking ``(outputs, labels)``.
        epoch: current epoch index (used for logging and image filenames).
        classLabels: class names forwarded to ``iouCalc``.
        validClasses: class ids forwarded to ``iouCalc``.
        void: label value of ignore/void pixels, excluded from accuracy.
        maskColors: optional palette; when None, no images are saved.

    Returns:
        Tuple ``(mean_accuracy, mean_loss, miou)`` averaged over the epoch.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    loss_running = AverageMeter('Loss', ':.4e')
    acc_running = AverageMeter('Accuracy', ':.4e')
    iou = iouCalc(classLabels, validClasses, voidClass=void)
    progress = ProgressMeter(
        len(dataloader), [batch_time, data_time, loss_running, acc_running],
        prefix="Test, epoch: [{}]".format(epoch))

    # input resolution (pixels per image, used to normalize accuracy)
    res = args.test_size[0] * args.test_size[1]

    # Set model in evaluation mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for epoch_step, (inputs, labels, filepath) in enumerate(dataloader):
            data_time.update(time.time() - end)

            inputs = inputs.float().cuda()
            labels = labels.long().cuda()

            # forward
            outputs = model(inputs)
            preds = torch.argmax(outputs, 1)
            loss = criterion(outputs, labels)

            # Statistics
            bs = inputs.size(0)  # current batch size
            loss = loss.item()
            loss_running.update(loss, bs)
            corrects = torch.sum(preds == labels.data)
            # Void pixels are excluded from the accuracy denominator.
            nvoid = int((labels == void).sum())
            acc = corrects.double() / (
                bs * res - nvoid)  # correct/(batch_size*resolution-voids)
            acc_running.update(acc, bs)
            # Calculate IoU scores of current batch
            iou.evaluateBatch(preds, labels)

            # Save visualizations of first batch
            if epoch_step == 0 and maskColors is not None:
                for i in range(inputs.size(0)):
                    filename = os.path.splitext(os.path.basename(
                        filepath[i]))[0]
                    # Only save inputs and labels once
                    if epoch == 0:
                        img = visim(inputs[i, :, :, :])
                        label = vislbl(labels[i, :, :], maskColors)
                        # cv2 expects BGR, so reverse the channel axis for
                        # 3-channel images; grayscale is written as-is.
                        if len(img.shape) == 3:
                            cv2.imwrite(
                                'baseline_run/images/{}.png'.format(filename),
                                img[:, :, ::-1])
                        else:
                            cv2.imwrite(
                                'baseline_run/images/{}.png'.format(filename),
                                img)
                        cv2.imwrite(
                            'baseline_run/images/{}_gt.png'.format(filename),
                            label[:, :, ::-1])
                    # Save predictions
                    pred = vislbl(preds[i, :, :], maskColors)
                    cv2.imwrite(
                        'baseline_run/images/{}_epoch_{}.png'.format(
                            filename, epoch), pred[:, :, ::-1])

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # print progress info
            progress.display(epoch_step)

        miou = iou.outputScores()
        print('Accuracy      : {:5.3f}'.format(acc_running.avg))
        print('---------------------')

    return acc_running.avg, loss_running.avg, miou
# Beispiel #3
# 0
def train_epoch(trainset,
                model,
                criterion,
                optimizer,
                lr_scheduler,
                epoch,
                void=-1):
    """Train ``model`` for one epoch on a freshly sampled epoch of ``trainset``.

    Builds a new DataLoader each call (after ``trainset.create_an_epoch()``)
    and runs a standard forward/backward/step loop, tracking running loss and
    pixel accuracy (void pixels excluded from the accuracy denominator).
    The learning rate of every parameter group is divided by 10 at the start
    of epochs 200 and 400.

    Args:
        trainset: dataset exposing ``create_an_epoch()`` and yielding
            ``(inputs, labels)`` samples.
        model: network returning per-class logits of shape (N, C, H, W).
        criterion: loss taking ``(outputs, labels)``.
        optimizer: optimizer whose param-group LRs are decayed in-place.
        lr_scheduler: accepted for interface compatibility but NOT used here;
            the manual /10 decay at epochs 200/400 replaces it.
        epoch: current epoch index (used for logging and LR decay).
        void: label value of ignore/void pixels.

    Returns:
        Tuple ``(mean_loss, mean_accuracy)`` averaged over the epoch.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    loss_running = AverageMeter('Loss', ':.4e')
    acc_running = AverageMeter('Accuracy', ':.3f')

    # Resample the epoch's examples before constructing the loader.
    trainset.create_an_epoch()

    dataloader = torch.utils.data.DataLoader(trainset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             pin_memory=args.pin_memory,
                                             num_workers=args.num_workers)

    progress = ProgressMeter(
        len(dataloader), [batch_time, data_time, loss_running, acc_running],
        prefix="Train, epoch: [{}]".format(epoch))

    # input resolution (pixels per crop, used to normalize accuracy)
    res = args.crop_size[0] * args.crop_size[1]

    # Step-wise LR decay: divide every group's LR by 10 at these epochs.
    if epoch in [200, 400]:
        for param_group in optimizer.param_groups:
            param_group['lr'] = param_group['lr'] / 10

    # Set model in training mode
    model.train()

    end = time.time()

    with torch.set_grad_enabled(True):
        # Iterate over data.
        for epoch_step, (inputs, labels) in enumerate(dataloader):
            data_time.update(time.time() - end)

            inputs = inputs.float().cuda()
            labels = labels.long().cuda()

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward pass
            outputs = model(inputs)
            preds = torch.argmax(outputs, 1)

            loss = criterion(outputs, labels)

            # backward pass
            loss.backward()
            optimizer.step()

            # Statistics
            bs = inputs.size(0)  # current batch size
            loss = loss.item()
            loss_running.update(loss, bs)
            corrects = torch.sum(preds == labels.data)
            nvoid = int((labels == void).sum())
            acc = corrects.double() / (
                bs * res - nvoid)  # correct/(batch_size*resolution-voids)
            acc_running.update(acc, bs)

            # output training info
            progress.display(epoch_step)

            # Measure time
            batch_time.update(time.time() - end)
            end = time.time()

    return loss_running.avg, acc_running.avg
def train_epoch(dataloader,
                model,
                criterion,
                optimizer,
                lr_scheduler,
                epoch,
                void=-1):
    """Train ``model`` for one epoch over ``dataloader``.

    Runs the usual zero-grad / forward / backward / step cycle per batch,
    maintaining running averages of loss and of pixel accuracy computed
    over non-void pixels. After the epoch, the (plateau-style) scheduler
    is stepped with the epoch's mean loss.

    Args:
        dataloader: yields ``(inputs, labels, filepath)`` batches; the
            file path component is ignored.
        model: network returning per-class logits of shape (N, C, H, W).
        criterion: loss taking ``(outputs, labels)``.
        optimizer: optimizer updated once per batch.
        lr_scheduler: scheduler whose ``step`` accepts the mean loss.
        epoch: current epoch index, used only for progress logging.
        void: label value of ignore/void pixels.

    Returns:
        Tuple ``(mean_loss, mean_accuracy)`` averaged over the epoch.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    loss_running = AverageMeter('Loss', ':.4e')
    acc_running = AverageMeter('Accuracy', ':.3f')
    progress = ProgressMeter(
        len(dataloader), [batch_time, data_time, loss_running, acc_running],
        prefix="Train, epoch: [{}]".format(epoch))

    # Pixels per image: crop resolution when cropping, else full train size.
    size = args.crop_size if args.crop_size is not None else args.train_size
    res = size[0] * size[1]

    # Switch to training mode (enables dropout/batch-norm updates).
    model.train()

    end = time.time()

    with torch.enable_grad():
        for step, (images, targets, _) in enumerate(dataloader):
            data_time.update(time.time() - end)

            images = images.float().cuda()
            targets = targets.long().cuda()

            # Clear stale gradients before the new backward pass.
            optimizer.zero_grad()

            logits = model(images)
            predictions = torch.argmax(logits, 1)
            batch_loss = criterion(logits, targets)

            batch_loss.backward()
            optimizer.step()

            # Running statistics for this batch.
            n = images.size(0)
            loss_running.update(batch_loss.item(), n)
            hits = torch.sum(predictions == targets.data)
            void_count = int((targets == void).sum())
            # Accuracy over non-void pixels only.
            batch_acc = hits.double() / (n * res - void_count)
            acc_running.update(batch_acc, n)

            progress.display(step)

            batch_time.update(time.time() - end)
            end = time.time()

        # Reduce learning rate based on the epoch's mean training loss.
        lr_scheduler.step(loss_running.avg)

    return loss_running.avg, acc_running.avg
# Beispiel #5
# 0
def validate_epoch(dataloader,
                   model,
                   criterion,
                   epoch,
                   classLabels,
                   validClasses,
                   void=-1,
                   maskColors=None,
                   flip=False,
                   deg=None):
    """Evaluate ``model``, collecting per-image softmax scores for ensembling.

    Tracks running loss, pixel accuracy (void pixels excluded) and per-class
    IoU, and additionally stores the full softmax output and labels of every
    image so callers can ensemble multiple (e.g. flipped) passes.

    Args:
        dataloader: yields ``(inputs, labels, filepath)`` batches.
            NOTE(review): the filepath handling (``filepath[0]``) and buffer
            indexing by ``epoch_step`` assume a batch size of 1 — confirm.
        model: network returning per-class logits of shape (N, C, H, W).
        criterion: loss taking ``(outputs, labels)``.
        epoch: current epoch index, used for progress logging.
        classLabels: class names forwarded to ``iouCalc``.
        validClasses: class ids forwarded to ``iouCalc``.
        void: label value of ignore/void pixels.
        maskColors: unused here; kept for interface parity with the other
            ``validate_epoch``.
        flip: if True, mirror inputs horizontally before the forward pass
            and mirror the logits back (test-time augmentation).
        deg: accepted for interface compatibility but NOT used.

    Returns:
        ``(mean_accuracy, mean_loss, miou, all_predictions, all_labels,
        all_filepaths)`` where ``all_predictions`` has shape
        (len(dataloader), 20, H, W).
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    loss_running = AverageMeter('Loss', ':.4e')
    acc_running = AverageMeter('Accuracy', ':.4e')
    iou = iouCalc(classLabels, validClasses, voidClass=void)
    progress = ProgressMeter(
        len(dataloader), [batch_time, data_time, loss_running, acc_running],
        prefix="Test, epoch: [{}]".format(epoch))

    # input resolution (pixels per image, used to normalize accuracy)
    res = test_size[0] * test_size[1]

    # Per-image softmax scores and labels, kept for ensembling.
    # Sized to the actual dataloader length (was a hard-coded 200).
    # NOTE(review): the 20-channel dimension assumes a fixed 20-class
    # output head — confirm against the model.
    n_samples = len(dataloader)
    all_predictions = torch.zeros(
        (n_samples, 20, test_size[0], test_size[1])).float().cuda()
    all_labels = torch.zeros(
        (n_samples, test_size[0], test_size[1])).long().cuda()

    # Set model in evaluation mode
    model.eval()

    all_filepaths = []
    with torch.no_grad():
        end = time.time()
        for epoch_step, (inputs, labels, filepath) in enumerate(dataloader):

            # Keep only the basename of the (single) sample's path.
            filepath = filepath[0].split('/')[-1]

            data_time.update(time.time() - end)

            inputs = inputs.float().cuda()
            labels = labels.long().cuda()

            if flip:
                # Test-time augmentation: mirror along the width axis.
                inputs = torch.flip(inputs, dims=[3])

            # forward
            outputs = model(inputs)

            if flip:
                # Mirror the logits back so they align with the un-flipped
                # labels. Uses the actual output width (the previous code
                # hard-coded 1024, breaking any other resolution).
                outputs = torch.flip(outputs, dims=[3])

            all_predictions[epoch_step, :, :, :] = F.softmax(outputs, 1)
            all_labels[epoch_step, :, :] = labels

            preds = torch.argmax(outputs, 1)
            loss = criterion(outputs, labels)

            # Statistics
            bs = inputs.size(0)  # current batch size
            loss = loss.item()
            loss_running.update(loss, bs)
            corrects = torch.sum(preds == labels.data)
            nvoid = int((labels == void).sum())
            acc = corrects.double() / (
                bs * res - nvoid)  # correct/(batch_size*resolution-voids)
            acc_running.update(acc, bs)
            # Calculate IoU scores of current batch
            iou.evaluateBatch(preds, labels)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # print progress info
            progress.display(epoch_step)

            all_filepaths.append(filepath)

        miou = iou.outputScores()
        print('Accuracy      : {:5.3f}'.format(acc_running.avg))
        print('---------------------')

    return acc_running.avg, loss_running.avg, miou, all_predictions, all_labels, all_filepaths