def validate(val_loader, model, epoch, logger):
    """Run one evaluation pass; log and return average pixel accuracy and mean IoU."""
    model.eval()

    accs = []
    mius = []

    for i, (image, label) in enumerate(val_loader):
        image, label = image.cuda(), label.cuda()

        torch.cuda.synchronize()

        with torch.no_grad():
            pred, _ = model(image)
            pred = torch.argmax(pred, dim=1)

        torch.cuda.synchronize()

        pred = torch.squeeze(pred)
        label = torch.squeeze(label)
        acc = metrics.pixel_accuracy(pred, label)
        miu = metrics.mean_IU(pred, label, args.max_classes)

        accs.append(acc)
        mius.append(miu)

        acc_avg = np.mean(accs)
        miu_avg = np.mean(mius)

        if (i + 1) % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  'Pixel Acc={acc:.2f}\t'
                  'Mean IOU={miu:.2f}'.format(i + 1,
                                              len(val_loader),
                                              acc=acc_avg,
                                              miu=miu_avg))

    acc_avg = np.mean(accs)
    miu_avg = np.mean(mius)

    print('\n*\n'
          'Pixel Acc={acc_avg:.2f}\n'
          'Mean IOU={miu_avg:.2f}\n'.format(acc_avg=acc_avg, miu_avg=miu_avg))

    logger.add_scalar('Test/Pixel Acc', acc_avg, epoch)
    logger.add_scalar('Test/Mean IoU', miu_avg, epoch)

    return acc_avg, miu_avg
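# validate() above assumes a `metrics` module providing pixel_accuracy and
# mean_IU, neither of which is shown in this snippet. A minimal NumPy sketch of
# what such helpers could look like (hypothetical, not the original code):
import numpy as np

def _to_numpy(t):
    """Accept either a torch tensor (possibly on GPU) or an array-like."""
    return t.detach().cpu().numpy() if hasattr(t, 'detach') else np.asarray(t)

def pixel_accuracy(pred, label):
    """Fraction of pixels whose predicted class matches the ground truth."""
    pred, label = _to_numpy(pred), _to_numpy(label)
    return float((pred == label).sum()) / label.size

def mean_IU(pred, label, num_classes):
    """Mean intersection-over-union over classes present in either map."""
    pred, label = _to_numpy(pred), _to_numpy(label)
    ious = []
    for c in range(num_classes):
        pred_c, label_c = pred == c, label == c
        union = np.logical_or(pred_c, label_c).sum()
        if union == 0:  # class absent from both maps, skip it
            continue
        ious.append(np.logical_and(pred_c, label_c).sum() / union)
    return float(np.mean(ious)) if ious else 0.0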
    def test_model(self, test_features, ground_truth):
        """Evaluate on a held-out split and return the averaged segmentation metrics."""
        results = self.infer(test_features)
        results = data.preprocess_for_metrics(results)
        ground_truth = data.preprocess_for_metrics(ground_truth)

        length = ground_truth.shape[0]
        p_acc, m_acc, m_IU, fw_IU = 0, 0, 0, 0
        for i in range(length):
            p_acc += met.pixel_accuracy(results[i], ground_truth[i])
            m_acc += met.mean_accuracy(results[i], ground_truth[i])
            m_IU += met.mean_IU(results[i], ground_truth[i])
            fw_IU += met.frequency_weighted_IU(results[i], ground_truth[i])
        p_acc, m_acc, m_IU, fw_IU = p_acc / length, m_acc / length, m_IU / length, fw_IU / length

        return p_acc, m_acc, m_IU, fw_IU
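# test_model() also relies on met.mean_accuracy and met.frequency_weighted_IU,
# which are not shown either. A hypothetical NumPy sketch of frequency-weighted
# IoU in the two-argument form used above:
import numpy as np

def frequency_weighted_IU(pred, label):
    """Per-class IoU weighted by how often each class appears in the label."""
    pred, label = np.asarray(pred), np.asarray(label)
    total = label.size
    fw_iou = 0.0
    for c in np.unique(label):  # only classes present in the ground truth
        label_c = label == c
        freq = label_c.sum() / total
        union = np.logical_or(pred == c, label_c).sum()
        inter = np.logical_and(pred == c, label_c).sum()
        fw_iou += freq * inter / union
    return float(fw_iou)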
    # Training pass over one epoch.
    train_loss, train_mIoU, train_pix_acc = 0.0, 0.0, 0.0
        for batch, (x, y) in enumerate(train_loader):
            if torch.cuda.is_available():
                x, y = x.cuda(), y.cuda()

            optimizer.zero_grad()
            y_pred = model(x)

            loss = criterion(y_pred, y)
            with amp_handle.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
            optimizer.step()

            train_loss += loss.item()
            train_mIoU += mean_iou(torch.argmax(y_pred, dim=1), y, num_classes)
            train_pix_acc += pixel_accuracy(torch.argmax(y_pred, dim=1), y,
                                            num_classes)

            if torch.cuda.is_available():
                torch.cuda.empty_cache()

        # Run validation loop
        val_pix_acc = 0.0
        val_loss, val_mIoU = 0.0, 0.0
        for val_batch, (x, y) in enumerate(val_loader):
            if torch.cuda.is_available():
                x, y = x.cuda(), y.cuda()

            with torch.no_grad():
                y_pred = model(x)
                loss = criterion(y_pred, y)
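# The fragment above uses an `amp_handle` that is never created here. A minimal
# sketch, assuming the legacy NVIDIA apex "amp_handle" API (current code would
# use torch.cuda.amp.GradScaler / autocast instead):
from apex import amp

amp_handle = amp.init(enabled=True)  # exposes amp_handle.scale_loss(loss, optimizer)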
Example #4
    while True:

        # Reload the newest checkpoint from the most recent training run.
        latest_run = 'train/' + sorted(listdir('train'), reverse=True)[0] + '/checkpoints'
        latest_epoch = sorted(listdir(latest_run), reverse=True)[0]
        state = torch.load(latest_run + '/' + latest_epoch, map_location='cpu')
        model.load_state_dict(state['model'])
        model.eval()

        miou, pix_acc = [], []
        for batch, (x, y) in enumerate(val_loader):
            with torch.no_grad():
                y_pred = model(x)
                y_pred = torch.argmax(y_pred, dim=1)

            # Metrics
            miou.append(mean_iou(y_pred, y, num_classes))
            pix_acc.append(pixel_accuracy(y_pred, y, num_classes))

            print("mean_iou", miou[-1], "pixel_acc", pix_acc[-1], end="\r")

            y, y_pred = bdd_palette(y), bdd_palette(y_pred)

            to_img = lambda t: np.moveaxis(t.cpu().numpy(), 1, -1)[0] # NCHW -> HWC
            x, y, y_pred = to_img(x), to_img(y), to_img(y_pred)
            
            mean = np.array([[[0.3518, 0.3932, 0.4011]]])
            std = np.array([[[0.2363, 0.2494, 0.2611]]])
            x = (x * std + mean).astype('float32')

            cv2.imshow('x', cv2.cvtColor(x, cv2.COLOR_BGR2RGB))
            cv2.imshow('y', cv2.cvtColor(y, cv2.COLOR_BGR2RGB))
            cv2.imshow('y_pred', cv2.cvtColor(y_pred, cv2.COLOR_BGR2RGB))
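# bdd_palette() above maps NHW class-id tensors to NCHW colour images so they can
# be shown next to the input. A rough sketch of such a lookup with stand-in
# colours (the real BDD100K palette assigns a fixed colour to each class):
import torch

def make_palette(num_classes, seed=0):
    """Reproducible stand-in colours, one RGB triple in [0, 1] per class."""
    g = torch.Generator().manual_seed(seed)
    return torch.rand(num_classes, 3, generator=g)

def apply_palette(labels, palette):
    """Map an NHW long tensor of class ids to an NCHW float colour image."""
    colours = palette.to(labels.device)[labels]       # NHW -> NHW3 lookup
    return colours.permute(0, 3, 1, 2).contiguous()   # NHW3 -> NCHW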
Example #5
# Accumulate per-image metrics for the U-Net and the GAN model over the validation set.
PA_unet = 0
PA_GAN = 0
IOU_unet = 0
IOU_GAN = 0
F1_unet = 0
F1_GAN = 0
with torch.no_grad():
    for batch in tqdm(val_dataloader):
        img, label, onehot = batch[0].to(device), batch[1].numpy(), batch[2].numpy()
        
        output_unet = net1(img).to('cpu').numpy()
        output_GAN = net2(img).to('cpu').numpy()
        
        for i in range(img.size()[0]):
            eval_unet = img2label(onehot2img(output_unet[i]))
            eval_GAN = img2label(onehot2img(output_GAN[i]))

            PA_unet += pixel_accuracy(eval_unet, label[i])
            PA_GAN += pixel_accuracy(eval_GAN, label[i])
            IOU_unet += mean_IOU(eval_unet, label[i])
            IOU_GAN += mean_IOU(eval_GAN, label[i])
            F1_unet += f1_score(eval_unet, label[i])
            F1_GAN += f1_score(eval_GAN, label[i])
        
PA_unet /= valSize
PA_GAN /= valSize
IOU_unet /= valSize
IOU_GAN /= valSize
F1_unet /= valSize
F1_GAN /= valSize

print("PA_unet: {}, PA_GAN: {}".format(PA_unet, PA_GAN))
print("IOU_unet: {}, IOU_GAN: {}".format(IOU_unet, IOU_GAN))