Example #1
def test(net, epoch, dataLoader, testF, config):
    net.eval()
    total_mask_loss = 0.0
    dataprocess = tqdm(dataLoader)
    result = {"TP": {i: 0 for i in range(8)}, "TA": {i: 0 for i in range(8)}}
    for batch_item in dataprocess:
        image, mask = batch_item['image'], batch_item['mask']
        if torch.cuda.is_available():
            image, mask = image.cuda(device=device_list[0]), mask.cuda(
                device=device_list[0])
        out = net(image)
        mask_loss = MySoftmaxCrossEntropyLoss(nbclasses=config.NUM_CLASSES)(
            out, mask)
        # detach() cuts the computation graph, so accumulating the loss keeps no gradients
        total_mask_loss += mask_loss.detach().item()
        pred = torch.argmax(F.softmax(out, dim=1), dim=1)
        # compute IoU statistics
        result = compute_iou(pred, mask, result)
        dataprocess.set_description_str("epoch:{}".format(epoch))
        dataprocess.set_postfix_str("mask_loss:{:.4f}".format(mask_loss))
    testF.write("Epoch:{} \n".format(epoch))
    # compute the IoU for each class
    for i in range(8):
        result_string = "{}: {:.4f} \n".format(
            i, result["TP"][i] / result["TA"][i])
        print(result_string)
        # write to the log file
        testF.write(result_string)
    testF.write("Epoch:{}, mask loss is {:.4f} \n".format(
        epoch, total_mask_loss / len(dataLoader)))
    testF.flush()
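
Example #1 (like most of the examples below) relies on a compute_iou helper that is not shown. Judging from the calling code, result["TP"] holds per-class intersection counts and result["TA"] per-class union counts, so TP/TA is the IoU reported above. A minimal sketch under that assumption (the real implementation in the source repo may differ):

def compute_iou(pred, mask, result, num_classes=8):
    # Sketch only: accumulate per-class intersection ("TP") and union ("TA").
    # `pred` and `mask` are assumed to be integer label tensors (or arrays)
    # of the same shape.
    for cls in range(num_classes):
        pred_cls = (pred == cls)
        mask_cls = (mask == cls)
        result["TP"][cls] += (pred_cls & mask_cls).sum().item()
        result["TA"][cls] += (pred_cls | mask_cls).sum().item()
    return result

Written this way, the elementwise &, | and .sum().item() calls behave the same for torch tensors and numpy arrays, which is consistent with Example #5 calling the helper on numpy label maps.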
Example #2
def test(net, epoch, dataLoader, testF, config):
    net.eval()
    total_mask_loss = 0.0
    dataprocess = tqdm(dataLoader)
    result = {"TP": {i:0 for i in range(8)}, "TA":{i:0 for i in range(8)}}
    for batch_item in dataprocess:
        image, mask = batch_item['image'], batch_item['mask']
        if torch.cuda.is_available():
            image, mask = image.cuda(device=device_list[0]), mask.cuda(device=device_list[0])
        out = net(image)
        mask_loss = loss_func(out, mask, config.NUM_CLASSES, epoch)
        total_mask_loss += mask_loss.detach().item()
        pred = torch.argmax(F.softmax(out, dim=1), dim=1)
        result = compute_iou(pred, mask, result)
        dataprocess.set_description_str("epoch:{}".format(epoch))
        dataprocess.set_postfix_str("mask_loss:{:.4f}".format(mask_loss))
    testF.write("Epoch:{} \n".format(epoch))
    miou = 0
    for i in range(8):
        iou_i = result["TP"][i]/result["TA"][i]
        result_string = "{}: {:.4f} \n".format(i, iou_i)
        print(result_string)
        testF.write(result_string)
        miou += iou_i
    miou /= 8
    miou_string = "{}: {:.4f} \n".format('miou', miou)
    print(miou_string)
    testF.write(miou_string)
    testF.write("Epoch:{}, mask loss is {:.4f} \n".format(epoch, total_mask_loss / len(dataLoader)))
    testF.flush()
Example #3
def val_epoch(net, epoch, dataLoader, valF, args):
    logger.info("======start val epoch step=======")
    net.eval()
    total_mask_loss = 0.0
    dataprocess = tqdm(dataLoader)
    result = {"TP": {i: 0 for i in range(8)}, "TA": {i: 0 for i in range(8)}}
    evaluator = Evaluator(args.number_class)
    for batch_item in dataprocess:
        image, mask = batch_item['image'], batch_item['mask']
        out = net(image)
        mask_loss = MySoftmaxCrossEntropyLoss(nbclasses=args.number_class)(
            out, mask)
        total_mask_loss += mask_loss.detach().item()
        pred = torch.argmax(F.softmax(out, dim=1), dim=1)
        result = compute_iou(pred, mask, result)
        # the Evaluator is never updated in the original snippet, so the metrics
        # below would stay at zero; assuming the usual add_batch(gt, pred) interface
        evaluator.add_batch(mask.cpu().numpy(), pred.cpu().numpy())
        dataprocess.set_description_str("epoch:{}".format(epoch))
        dataprocess.set_postfix_str("mask_loss:{:.4f}".format(mask_loss))
    Acc = evaluator.Pixel_Accuracy()
    Acc_class = evaluator.Pixel_Accuracy_Class()
    mIoU = evaluator.Mean_Intersection_over_Union()
    FWIoU = evaluator.Frequency_Weighted_Intersection_over_Union()
    valF.write("Epoch:{}, val/mIoU is {:.4f} \n".format(epoch, mIoU))
    valF.write("Epoch:{}, val/Acc is {:.4f} \n".format(epoch, Acc))
    valF.write("Epoch:{}, val/Acc_class is {:.4f} \n".format(epoch, Acc_class))
    valF.write("Epoch:{}, val/FWIoU is {:.4f} \n".format(epoch, FWIoU))
    for i in range(8):
        result_string = "{}: {:.4f} \n".format(
            i, result["TP"][i] / result["TA"][i])
        logger.info("val class result {}".format(result_string))
        valF.write(result_string)
    valF.write("Epoch:{}, mask loss is {:.4f} \n".format(
        epoch, total_mask_loss / len(dataLoader)))
    logger.info("Acc:{}, Acc_class:{}, mIoU:{}, fwIoU: {}".format(
        Acc, Acc_class, mIoU, FWIoU))
    valF.flush()
Example #4
def valid(epoch, dataloader, model, criterion, optimizer):
    model.eval()
    total_mask_loss = 0.0
    data_process = tqdm(dataloader)
    result = {"TP": {i: 0 for i in range(8)}, "TA": {i: 0 for i in range(8)}}
    for step, sample in enumerate(data_process):
        img = sample['image']
        mask = sample['label']
        img = img.type(torch.FloatTensor)
        if torch.cuda.is_available():
            img = img.to(device)
            mask = mask.to(device)
        output = model(img)
        loss = criterion(output, mask.long())
        total_mask_loss += loss.detach().item()
        pre = torch.argmax(func.softmax(output, dim=1), dim=1)
        result = compute_iou(pre, mask, result)
        data_process.set_description_str("epoch:{}".format(epoch))
        data_process.set_postfix_str("mask_loss:{:.4f}".format(loss.item()))
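
Example #4 stops after the accumulation loop. A hedged continuation, mirroring the per-class reporting in Examples #1 and #2 (the 1e-8 guard against empty classes is an addition, not in the original snippet), could be:

    # sketch of a reporting epilogue, not part of the original snippet
    for i in range(8):
        iou_i = result["TP"][i] / (result["TA"][i] + 1e-8)  # avoid division by zero
        print("class {} iou: {:.4f}".format(i, iou_i))
    print("epoch {}: mean mask loss {:.4f}".format(
        epoch, total_mask_loss / len(dataloader)))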
Example #5
from utils.label_preprocess import encode_labels, decode_label_color
from PIL import Image
import numpy as np
from utils.data_preprocess import ImageAug, DeformAug, ScaleAug
from torchvision import transforms
from utils.metric import compute_iou
'''
Quick sanity check that the label-preprocessing and metric helpers work.
'''

dir = r'E:/data/Label/Label_road02/Label/Record001/Camera 5/170927_063813228_Camera_5_bin.png'
color_dir = r'E:/data/ColorImage/road02/Record001/Camera 5/170927_063813228_Camera_5.jpg'
img = Image.open(dir)
color_img = Image.open(color_dir)
gray_img = Image.open(dir)
gray_numpy = np.array(gray_img)
color_numpy = np.array(color_img)
gray_numpy = encode_labels(gray_numpy)
nof = gray_numpy == 5
print(np.sum(nof))
process = transforms.Compose(
    [ImageAug(), DeformAug(), ScaleAug()]
)
result = {'TP': {i:0 for i in range(8)}, 'TA': {i:0 for i in range(8)}}
result = compute_iou(gray_numpy, gray_numpy, result)
for i in range(8):
    # +1 in the denominator avoids division by zero for classes absent from this label
    print('{} iou is : {}'.format(i, result['TP'][i]/(result['TA'][i]+1)))
Example #6
def main():

    # define model
    if cfg.MODEL == 'deeplabv3+':
        net = Deeplabv3plus(class_num=cfg.CLASS_NUM)
    else:
        net = UNetv1(class_num=cfg.CLASS_NUM)
    # load pretrained weights
    checkpoint = torch.load(os.path.join(cfg.LOG_DIR, cfg.FINAL_WEIGHTS))
    net.load_state_dict(checkpoint['state_dict'])
    # use cuda if available
    if torch.cuda.is_available():
        net = net.cuda(device=cfg.DEVICE_LIST[0])

    # set to eval
    net.eval()
    # create output dir
    current_date = str(pd.datetime.now()).split('.')[0]
    save_path = os.path.join(cfg.OUTPUT_DIR, current_date)
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    if cfg.INFER_MODE == 'eval' and cfg.SRC_IMG.endswith('.csv'):
        test_dataset = LaneSegTestDataset(cfg.SRC_IMG)
        eval_data_loader = DataLoader(test_dataset,
                                      batch_size=1,
                                      shuffle=False,
                                      drop_last=False)
        dataprocess = tqdm(eval_data_loader)
        # init confusion matrix with all zeros
        confusion_matrix = {
            "TP": {i: 0 for i in range(8)},
            "TN": {i: 0 for i in range(8)},
            "FP": {i: 0 for i in range(8)},
            "FN": {i: 0 for i in range(8)}
        }

        for data_item in dataprocess:
            # get batch data
            data_image, data_label, img_path = data_item['image'], \
                                               data_item['label'],\
                                               data_item['img_path']
            if torch.cuda.is_available():
                data_image, data_label = data_image.cuda(device=cfg.DEVICE_LIST[0]), \
                                        data_label.cuda(device=cfg.DEVICE_LIST[0])
            # forward to get output
            data_out = net(data_image)
            data_out = data_out.squeeze(0)  # (8,w1,h1)
            pred = torch.argmax(F.softmax(data_out, dim=0), dim=0)  # (w1,h1)
            # save predict label
            img_name = img_path.split('/')[-1]
            label_output(pred, save_path, img_name)
            # update confusion matrix in eval mode
            confusion_matrix = update_confusion_matrix(pred, data_label,
                                                       confusion_matrix)
        # compute metric in eval mode
        ious = compute_iou(confusion_matrix)
        m_iou = compute_mean(ious)
        precisions = compute_precision(confusion_matrix)
        m_precision = compute_mean(precisions)
        recalls = compute_recall(confusion_matrix)
        m_recall = compute_mean(recalls)
        # save eval information
        with open(os.path.join(save_path, 'eval_result.txt'), 'w') as f:
            f.write('inference mode: "eval" \n')
            f.write('source csv file: {} \n'.format(cfg.SRC_IMG))
            f.write("mean iou: {:.4f} \n".format(m_iou))
            for i in range(8):
                f.write("class '{}' iou : {:.4f} \n".format(i, ious[i]))

            f.write("mean precision: {:.4f} \n".format(m_precision))
            for i in range(8):
                f.write("class '{}' precision : {:.4f} \n".format(
                    i, precisions[i]))

            f.write("mean recall: {:.4f} \n".format(m_recall))
            for i in range(8):
                f.write("class '{}' recall : {:.4f} \n".format(i, recalls[i]))

    elif cfg.INFER_MODE == 'test':
        csv_name = 'test_mode_{}.csv'.format(current_date)
        get_img_list(csv_name)
        test_dataset = LaneSegTestDataset(csv_name)
        eval_data_loader = DataLoader(test_dataset,
                                      batch_size=1,
                                      shuffle=False,
                                      drop_last=False)
        dataprocess = tqdm(eval_data_loader)

        for data_item in dataprocess:
            # get batch data
            data_image, img_path = data_item['image'], data_item['img_path']
            if torch.cuda.is_available():
                data_image = data_image.cuda(device=cfg.DEVICE_LIST[0])
            # forward to get output
            data_out = net(data_image)
            data_out = data_out.squeeze(0)  # (8,w1,h1)
            pred = torch.argmax(F.softmax(data_out, dim=0), dim=0)  # (w1,h1)
            # save predict label
            img_name = img_path.split('/')[-1]
            label_output(pred, save_path, img_name)
        with open(os.path.join(save_path, 'test_result.txt'), 'w') as f:
            f.write('inference mode: "test" \n')
            f.write('source csv file: {} \n'.format(csv_name))
    else:
        raise ValueError(
            'Please check the inference config parameters:"INFER_MODE" and "SRC_IMG"'
        )
Example #7
def eval_epoch(net, epoch, data_loader, eval_log):
    # set to eval
    net.eval()
    total_loss = 0.0
    data_process = tqdm(data_loader)
    # init confusion matrix with all zeros
    confusion_matrix = {
        "TP": {i: 0 for i in range(8)},
        "TN": {i: 0 for i in range(8)},
        "FP": {i: 0 for i in range(8)},
        "FN": {i: 0 for i in range(8)}
    }

    for batch_item in data_process:
        # get batch data
        batch_image, batch_label = batch_item['image'], batch_item['label']
        if cfg.MULTI_GPU:
            batch_image, batch_label = batch_image.cuda(device=cfg.DEVICE_LIST[0]), \
                                       batch_label.cuda(device=cfg.DEVICE_LIST[0])
        else:
            batch_image, batch_label = batch_image.cuda(), \
                                       batch_label.cuda()
        # forward to get output
        batch_out = net(batch_image)
        # compute loss
        # batch_loss = CrossEntropyLoss(cfg.CLASS_NUM)(batch_out, batch_label)
        batch_loss = FocalLoss(cfg.CLASS_NUM)(batch_out, batch_label)
        total_loss += batch_loss.detach().item()

        # get prediction, shape and value type same as batch_label
        pred = torch.argmax(F.softmax(batch_out, dim=1), dim=1)
        # compute confusion matrix using batch data
        confusion_matrix = update_confusion_matrix(pred, batch_label,
                                                   confusion_matrix)
        # print batch result
        data_process.set_description_str("epoch:{}".format(epoch))
        data_process.set_postfix_str("batch_loss:{:.4f}".format(batch_loss))

    eval_loss = total_loss / len(data_loader)

    # compute metric
    epoch_ious = compute_iou(confusion_matrix)
    epoch_m_iou = compute_mean(epoch_ious)
    epoch_precisions = compute_precision(confusion_matrix)
    epoch_m_precision = compute_mean(epoch_precisions)
    epoch_recalls = compute_recall(confusion_matrix)
    epoch_m_recall = compute_mean(epoch_recalls)

    # print eval iou every epoch
    print('mean iou: {} \n'.format(epoch_m_iou))
    for i in range(8):
        print_string = "class '{}' iou : {:.4f} \n".format(i, epoch_ious[i])
        print(print_string)

    # make log string
    log_values = [epoch, eval_loss, epoch_m_iou] + \
                 epoch_ious + [epoch_m_precision] + \
                 epoch_precisions + [epoch_m_recall] + epoch_recalls
    log_values = [str(v) for v in log_values]
    log_string = ','.join(log_values)
    eval_log.write(log_string + '\n')
    eval_log.flush()
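
Examples #6 and #7 use a different set of helpers (update_confusion_matrix, compute_iou, compute_precision, compute_recall, compute_mean) that keep full per-class TP/TN/FP/FN counts; note that compute_iou here takes only the confusion matrix, unlike the three-argument version in Examples #1-#5. The bodies below are hypothetical sketches consistent with that calling code (the eps smoothing is an assumption), not the repository's actual implementation:

def update_confusion_matrix(pred, label, cm, num_classes=8):
    # Sketch: accumulate per-class TP/FP/FN/TN counts from integer label tensors.
    for cls in range(num_classes):
        pred_cls = (pred == cls)
        label_cls = (label == cls)
        cm["TP"][cls] += (pred_cls & label_cls).sum().item()
        cm["FP"][cls] += (pred_cls & ~label_cls).sum().item()
        cm["FN"][cls] += (~pred_cls & label_cls).sum().item()
        cm["TN"][cls] += (~pred_cls & ~label_cls).sum().item()
    return cm

def compute_iou(cm, num_classes=8, eps=1e-8):
    # IoU = TP / (TP + FP + FN), returned as a per-class list
    return [cm["TP"][i] / (cm["TP"][i] + cm["FP"][i] + cm["FN"][i] + eps)
            for i in range(num_classes)]

def compute_precision(cm, num_classes=8, eps=1e-8):
    # precision = TP / (TP + FP)
    return [cm["TP"][i] / (cm["TP"][i] + cm["FP"][i] + eps)
            for i in range(num_classes)]

def compute_recall(cm, num_classes=8, eps=1e-8):
    # recall = TP / (TP + FN)
    return [cm["TP"][i] / (cm["TP"][i] + cm["FN"][i] + eps)
            for i in range(num_classes)]

def compute_mean(values):
    # mean of a per-class metric list (e.g. mIoU from the IoU list)
    return sum(values) / len(values)

Returning plain lists keeps the log line in Example #7 valid, since the per-class metrics are concatenated with + before being joined into a CSV row.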