Example #1
0
    def test(self, test_data):
        """Evaluate ``self.model`` on *test_data*.

        Accumulates per-class true/false positives and false negatives via
        ``utils.mIoU`` and returns ``(mean_iou, mean_loss)`` as Python floats.
        The model is restored to train mode before returning.
        """
        true_pos = torch.zeros(self.num_classes).cuda(self.gpu, non_blocking=True)
        false_pos = torch.zeros(self.num_classes).cuda(self.gpu, non_blocking=True)
        false_neg = torch.zeros(self.num_classes).cuda(self.gpu, non_blocking=True)
        batch_losses = []

        self.model.eval()
        with torch.no_grad():
            for inputs, targets in test_data:
                # Labels arrive as (n, c, h, w); drop the channel dim and
                # cast to long for the loss function.
                n, _, h, w = targets.shape
                targets = targets.view(n, h, w).type(torch.LongTensor)

                inputs = inputs.cuda(self.gpu, non_blocking=True)
                targets = targets.cuda(self.gpu, non_blocking=True)

                preds = self.model(inputs)

                batch_losses.append(self.loss_function(preds, targets).item())

                tp, fp, fn = utils.mIoU(preds, targets, self.num_classes, self.gpu)
                true_pos += tp
                false_pos += fp
                false_neg += fn
        self.model.train()

        # eps guards against 0/0 for classes absent from the test set.
        denom = self.eps + true_pos + false_pos + false_neg
        mean_iou = torch.sum(true_pos / denom) / self.num_classes
        return (mean_iou.item(), sum(batch_losses) / len(batch_losses))
Example #2
0
def lossandaccuracy(loader, model, factor):
    """Evaluate *model* over *loader*; returns (mean loss, mean IoU).

    The loss is a weighted sum of the surface loss and dice loss plus a
    boundary-weighted cross-entropy term, with *factor* balancing the
    surface/dice contributions.
    """
    batch_losses = []
    batch_ious = []
    model.eval()
    with torch.no_grad():
        for img, labels, index, spatialWeights, maxDist in loader:
            data = img.to(device)
            target = labels.to(device).long()
            output = model(data)

            # Pixel-wise cross entropy weighted by (1 + spatial weight map).
            ce = criterion(output, target)
            ones = torch.from_numpy(np.ones(
                spatialWeights.shape)).to(torch.float32).to(device)
            weight_map = (spatialWeights).to(torch.float32).to(device)
            ce_weighted = torch.mean(ce * (ones + weight_map)).to(torch.float32).to(device)

            dice = criterion_DICE(output, target)
            surface = torch.mean(
                criterion_SL(output.to(device), (maxDist).to(device)))

            # Total: weighted surface + dice losses plus the boundary-weighted CE.
            total = (1 - factor) * surface + factor * dice + ce_weighted

            batch_losses.append(total.item())
            batch_ious.append(mIoU(get_predictions(output), labels))
    return np.average(batch_losses), np.average(batch_ious)
def validate(loader, model, criterion, logger, epoch=0):
    """Evaluate ``model`` on ``loader``; returns (mean loss, mean IoU).

    Labels are rescaled by 255 (assumes masks stored in [0, 1] — TODO
    confirm against the dataset) and resized to the model's output
    resolution before computing the loss.
    """
    model.eval()
    running_loss = 0.0
    iou = 0
    total = 0
    for inputs, labels in loader:
        inputs = inputs.cuda()
        # Scale [0, 1] masks up to integer class-id range.
        labels = (labels.data*255)
        labels = labels.squeeze(1).cuda()
        with torch.no_grad():
            outputs = model(inputs)
            # Width mismatch: nearest-neighbour resample along the last dim.
            if outputs.size()[-1] != labels.size()[-1]:
                labels = torch.nn.functional.interpolate(labels,size=outputs.size()[-1],mode='nearest')
            # Height mismatch only: transpose so the mismatched dim is last,
            # resample, then transpose back.
            elif outputs.size()[-2] != labels.size()[-2]:
                labels = labels.permute(0,2,1)
                labels = torch.nn.functional.interpolate(labels, size=outputs.size()[-2], mode='nearest')
                labels = labels.permute(0,2,1)
            labels = labels.long()
            loss = criterion(outputs, labels)
            _, predicted = torch.max(outputs, 1)  # NOTE(review): `predicted` is never used
        total += labels.size(0)
        # NOTE(review): iou is accumulated once per *batch* but divided by the
        # number of *samples* (total) below — for batch size > 1 this
        # underestimates mean IoU; confirm whether batch size is always 1.
        iou += mIoU(outputs, labels).item()
        running_loss += loss.item() * inputs.size(0)
    mean_val_loss = running_loss / total
    mean_val_iou = iou / total
    return mean_val_loss, mean_val_iou
Example #4
0
def validate(loader, model, criterion, logger, device, epoch=0):
    """Run one validation pass; returns (mean loss, mean IoU).

    Each batch is squeezed along dim 0, labels are resized to the logits'
    spatial size and scaled by 256 to class ids, and loss/IoU are tracked
    with running-average meters. Timing and metrics are logged.
    """
    logger.info("Validating Epoch {}".format(epoch))
    model.eval()

    loss_avg = AverageValueMeter()
    iou_avg = AverageValueMeter()

    started = time.time()
    for img, v_class, label in loader:
        with torch.no_grad():
            inputs = img.squeeze(0).to(device)
            cls_vec = v_class.float().to(device).squeeze()
            logits, alphas = model(inputs, cls_vec, out_att=True)

            # Match the label map to the logits' spatial resolution,
            # then scale [0, 1] values to integer class ids.
            lbl = label.squeeze(0).unsqueeze(1)
            resized = torch.nn.functional.interpolate(
                lbl.to(device), size=logits.shape[-2:])
            targets = (resized.squeeze(1) * 256).long()

            loss_avg.add(criterion(logits, targets).item())
            iou_avg.add(mIoU(logits, targets))

    logger.info("Epoch {} Avg loss = {:.4f} mIoU = {:.4f} Time {:.2f}".format(
        epoch, loss_avg.mean, iou_avg.mean, time.time() - started))
    logger.info("val_loss: {:.2f} val_IoU : {:.2f}".format(
        loss_avg.mean, iou_avg.mean))
    return loss_avg.mean, iou_avg.mean
Example #5
0
    def test(self, test_data):
        """Evaluate the front + context networks on *test_data*.

        Only batch size 1 is considered for test_data. Returns
        ``(mean_iou, mean_loss)`` as Python floats; both sub-networks are
        restored to train mode before returning.
        """
        true_pos = torch.zeros(self.num_classes).cuda(self.gpu, non_blocking=True)
        false_pos = torch.zeros(self.num_classes).cuda(self.gpu, non_blocking=True)
        false_neg = torch.zeros(self.num_classes).cuda(self.gpu, non_blocking=True)
        batch_losses = []

        self.front.eval()
        self.context.eval()
        with torch.no_grad():
            for inputs, targets in test_data:
                # Labels arrive as (n, c, h, w); drop the channel dim and
                # cast to long for the loss function.
                n, _, h, w = targets.shape
                targets = targets.view(n, h, w).type(torch.LongTensor)

                inputs = inputs.cuda(self.gpu, non_blocking=True)
                targets = targets.cuda(self.gpu, non_blocking=True)

                # Front-end output is resized to label resolution before the
                # context network refines it.
                feats = self.front(inputs)
                feats = F.resize(feats, (h, w), Image.BILINEAR)
                preds = self.context(feats)

                batch_losses.append(self.loss_function(preds, targets).item())

                tp, fp, fn = utils.mIoU(preds, targets, self.num_classes, self.gpu)
                true_pos += tp
                false_pos += fp
                false_neg += fn

        self.front.train()
        self.context.train()

        # eps guards against 0/0 for classes absent from the test set.
        denom = self.eps + true_pos + false_pos + false_neg
        mean_iou = torch.sum(true_pos / denom) / self.num_classes
        return (mean_iou.item(), sum(batch_losses) / len(batch_losses))
Example #6
0
def validate(loader, model, criterion, logger, device, epoch=0):
    """Evaluate *model* on *loader*; returns (mean_val_loss, mean_val_iou).

    Labels are scaled by 255 (assumes masks stored in [0, 1] — TODO confirm)
    and resized to the model's output resolution before the loss. Both
    returned means are rounded to 5 decimals.

    Raises:
        ValueError: if *loader* yields no batches (the original code raised
        an obscure NameError on `batch_i` in that case).
    """
    model.eval()
    val_loss = 0.0
    val_iou = 0.0
    num_batches = 0
    for batch_i, (data, target) in enumerate(loader):
        with torch.no_grad():
            output = model(data.to(device))

            # Scale [0, 1] masks to class ids and match the network's
            # output spatial size before the (long-typed) CE loss.
            target = target * 255
            target = torch.nn.functional.interpolate(
                target, (output.shape[2], output.shape[3]))
            target = torch.squeeze(target, 1).long()

            loss = criterion(output, target.to(device))
            val_loss += loss.mean().item()
            val_iou += mIoU(output, target.to(device)).item()
            num_batches = batch_i + 1
            if (batch_i % 150) == 0:
                print('val batch ', batch_i, ' with loss: ',
                      round(val_loss / num_batches, 5), ' with iou: ',
                      round(val_iou / num_batches, 5))

    if num_batches == 0:
        raise ValueError("validate() received an empty loader")

    mean_val_loss = round(val_loss / num_batches, 5)
    mean_val_iou = round(val_iou / num_batches, 5)

    logger.info("epoch {} mean val_loss {} mean val_iou {}".format(
        epoch, mean_val_loss, mean_val_iou))
    return (mean_val_loss, mean_val_iou)
Example #7
0
def train(loader, model, criterion, optimizer, logger, device):
    """Run one training epoch; returns (mean_train_loss, mean_train_iou).

    Labels are scaled by 255 (assumes masks stored in [0, 1] — TODO confirm).
    Both returned means are rounded to 5 decimals.

    NOTE(review): model.train() is never called here — presumably the caller
    sets the mode after validate(); confirm.

    Raises:
        ValueError: if *loader* yields no batches (the original code raised
        an obscure NameError on `batch_i` in that case).
    """
    train_loss = 0.0
    train_iou = 0.0
    num_batches = 0
    for batch_i, (data, target) in enumerate(loader):
        # Scale [0, 1] masks to integer class ids for the CE loss.
        target = target * 255
        target = torch.squeeze(target, 1).long()

        optimizer.zero_grad()
        output = model(data.to(device))
        loss = criterion(output, target.to(device))
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        train_iou += mIoU(output, target.to(device)).item()
        num_batches = batch_i + 1

        if (batch_i % 150) == 0:
            print('train batch ', batch_i, ' with loss: ',
                  round(train_loss / num_batches, 5), ' with iou: ',
                  round(train_iou / num_batches, 5))

    if num_batches == 0:
        raise ValueError("train() received an empty loader")

    mean_train_loss = round(train_loss / num_batches, 5)
    mean_train_iou = round(train_iou / num_batches, 5)

    logger.info("mean train_loss {} mean train_iou {}".format(
        mean_train_loss, mean_train_iou))

    return (mean_train_loss, mean_train_iou)
def validate(loader, model, criterion, logger, epoch=0):
    """Evaluate *model* on *loader*; returns (mean loss, mean IoU).

    Labels are binarized: any value > 0 becomes class 1. `logger` and
    `epoch` are accepted for signature compatibility but unused here.
    """
    # Fix: switch to eval mode (consistent with the other validate()
    # routines in this file) so dropout/batchnorm behave deterministically.
    model.eval()
    losses, ious = [], []
    with torch.no_grad():
        for X_val, y_val in loader:
            X_val = X_val.to('cuda', non_blocking=True)
            y_val = y_val.to('cuda', non_blocking=True)
            # Collapse to a binary mask.
            y_val[y_val > 0] = 1
            y_preds = model(X_val)
            loss = criterion(y_preds, y_val.long())
            iou = mIoU(y_preds, y_val)
            losses.append(loss.item())
            ious.append(iou.item())
    return np.mean(losses), np.mean(ious)
Example #9
0
def train(loader, model, criterion, optimizer, logger, device, epoch=0):
    """Train *model* for one epoch over *loader*; returns (mean loss, mean IoU).

    Progress (running averages and timing) is logged every 200 batches.
    """
    logger.info("Training")
    model.train()

    loss_meter = AverageValueMeter()
    iou_meter = AverageValueMeter()
    time_meter = AverageValueMeter()
    # May be fractional when the last batch is smaller; cast to int for display.
    steps_per_epoch = len(loader.dataset) / loader.batch_size

    start_time = time.time()
    batch_time = time.time()
    for idx, (img, v_class, label) in enumerate(loader):
        img = img.to(device)
        v_class = v_class.float().to(device).squeeze()
        logits, alphas = model(img, v_class, out_att=True)
        # NOTE(review): squeeze() would also drop the batch dim when the
        # batch size is 1 — confirm loader batch size is always > 1.
        logits = logits.squeeze()
        # Resize labels to the logits' spatial size; the * 256 scaling
        # assumes masks stored in [0, 1]. NOTE(review): other routines in
        # this file use * 255 — confirm the intended label encoding.
        labels = (torch.nn.functional.interpolate(
            label.to(device), size=logits.shape[-2:]).squeeze(1) * 256).long()
        loss = criterion(logits, labels)
        iou = mIoU(logits, labels)

        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        loss_meter.add(loss.item())
        iou_meter.add(iou)
        time_meter.add(time.time() - batch_time)

        if idx % 200 == 0:  #or idx == len(loader)-1:
            text_print = "Epoch {} Avg loss = {:.4f} mIoU = {:.4f} Time {:.2f} (Total:{:.2f}) Progress {}/{}".format(
                epoch, loss_meter.mean, iou_meter.mean, time_meter.mean,
                time.time() - start_time, idx, int(steps_per_epoch))
            logger.info(text_print)
            # loss_meter.reset()
            # iou_meter.reset()

        batch_time = time.time()
    time_txt = "batch time: {:.2f} total time: {:.2f}".format(
        time_meter.mean,
        time.time() - start_time)
    logger.info(time_txt)
    train_txt = "train_loss: {:.2f} train_IoU : {:.2f}".format(
        loss_meter.mean, iou_meter.mean)
    logger.info(train_txt)
    return loss_meter.mean, iou_meter.mean
def train(loader, model, criterion, optimizer, logger):
    """Run one training epoch; returns (mean loss, mean IoU).

    Labels are scaled by 255 (assumes masks stored in [0, 1] — TODO
    confirm). `logger` is accepted for signature compatibility but unused.
    """
    model.train()
    running_loss = 0.0
    iou = 0
    total = 0
    for inputs, labels in loader:
        inputs = inputs.cuda()
        # Scale [0, 1] masks to integer class ids for the CE loss.
        labels = (labels.data*255)
        labels = labels.squeeze(1).cuda()
        labels = labels.long()
        optimizer.zero_grad()
        with torch.set_grad_enabled(True):
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            # Fix: removed the unused `_, predicted = torch.max(outputs, 1)`.
        total += inputs.size(0)
        # NOTE(review): iou is accumulated once per *batch* but divided by
        # the number of *samples* below — for batch size > 1 this
        # underestimates mean IoU; confirm intended.
        iou += mIoU(outputs, labels).item()
        loss.backward()
        optimizer.step()
        running_loss += loss.item() * inputs.size(0)
    mean_train_loss = running_loss / total
    mean_train_iou = iou / total
    return mean_train_loss, mean_train_iou
Example #11
0
            # NOTE(review): fragment of a training loop — the enclosing
            # function, the batch loop header, and (presumably) the
            # optimizer.zero_grad() call are outside this view; confirm.
            CE_loss = criterion(output, target)
            # Pixel-wise cross entropy weighted by (1 + spatial weight map).
            loss = CE_loss * (torch.from_numpy(np.ones(
                spatialWeights.shape)).to(torch.float32).to(device) +
                              (spatialWeights).to(torch.float32).to(device))

            loss = torch.mean(loss).to(torch.float32).to(device)
            loss_dice = criterion_DICE(output, target)
            loss_sl = torch.mean(
                criterion_SL(output.to(device), (maxDist).to(device)))

            ## Total loss is the weighted sum of surface loss and dice loss
            ## plus the boundary-weighted cross-entropy loss; alpha[epoch]
            ## schedules the surface/dice balance per epoch.
            loss = (1 -
                    alpha[epoch]) * loss_sl + alpha[epoch] * (loss_dice) + loss

            # Track training IoU on hard predictions.
            predict = get_predictions(output)
            iou = mIoU(predict, labels)
            ious.append(iou)

            # Periodic progress logging.
            if i % 10 == 0:
                logger.write('Epoch:{} [{}/{}], Loss: {:.3f}'.format(
                    epoch, i, len(trainloader), loss.item()))

            loss.backward()
            optimizer.step()

        # End-of-epoch reporting: train mIoU, then validation loss/IoU and
        # a combined performance metric weighted by model complexity.
        logger.write('Epoch:{}, Train mIoU: {}'.format(epoch,
                                                       np.average(ious)))
        lossvalid, miou = lossandaccuracy(validloader, model, alpha[epoch])
        totalperf = total_metric(nparams, miou)
        f = 'Epoch:{}, Valid Loss: {:.3f} mIoU: {} Complexity: {} total: {}'
        logger.write(f.format(epoch, lossvalid, miou, nparams, totalperf))