Example #1
def test(net, epoch, dataLoader, testF, config):
    net.eval()
    total_mask_loss = 0.0
    dataprocess = tqdm(dataLoader)
    result = {"TP": {i: 0 for i in range(8)}, "TA": {i: 0 for i in range(8)}}
    for batch_item in dataprocess:
        image, mask = batch_item['image'], batch_item['mask']
        if torch.cuda.is_available():
            image, mask = image.cuda(device=device_list[0]), mask.cuda(
                device=device_list[0])
        out = net(image)
        mask_loss = MySoftmaxCrossEntropyLoss(nbclasses=config.NUM_CLASSES)(
            out, mask)
        total_mask_loss += mask_loss.detach().item()
        pred = torch.argmax(F.softmax(out, dim=1), dim=1)
        result = compute_iou(pred, mask, result)
        dataprocess.set_description_str("epoch:{}".format(epoch))
        dataprocess.set_postfix_str("mask_loss:{:.4f}".format(mask_loss))
    testF.write("Epoch:{} \n".format(epoch))
    for i in range(8):
        result_string = "{}: {:.4f} \n".format(
            i, result["TP"][i] / result["TA"][i])
        print(result_string)
        testF.write(result_string)
    testF.write("Epoch:{}, mask loss is {:.4f} \n".format(
        epoch, total_mask_loss / len(dataLoader)))
    testF.flush()
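compute_iou is not shown in these snippets; from its use above it accumulates per-class intersection ("TP") and union ("TA") pixel counts into result, so that result["TP"][i] / result["TA"][i] is the IoU of class i. A minimal sketch of such a helper, with the exact counting behavior assumed rather than taken from the original project:

def compute_iou(pred, mask, result, num_classes=8):
    # Hypothetical helper: accumulate per-class intersection ("TP") and
    # union ("TA") pixel counts so that TP[i] / TA[i] yields the IoU of class i.
    pred = pred.reshape(-1)
    mask = mask.reshape(-1).long()
    for i in range(num_classes):
        pred_i = pred == i
        mask_i = mask == i
        result["TP"][i] += (pred_i & mask_i).sum().item()
        result["TA"][i] += (pred_i | mask_i).sum().item()
    return result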
Example #2
def test(net, epoch, dataLoader, testF, config):
    net.eval()
    total_mask_loss = 0.0
    dataprocess = tqdm(dataLoader)
    result = {"TP": {i: 0 for i in range(8)}, "TA": {i: 0 for i in range(8)}}
    for batch_item in dataprocess:
        image, mask = batch_item['image'], batch_item['mask']
        if torch.cuda.is_available():
            image, mask = image.cuda(device=device_list[0]), mask.cuda(device=device_list[0])
        out = net(image)
        mask_loss = MySoftmaxCrossEntropyLoss(nbclasses=config.NUM_CLASSES)(out, mask)
        # detach() cuts the loss out of the autograd graph; if the detached tensor's
        # data were modified afterwards, backward() would raise an error because the
        # value would no longer match the forward pass
        total_mask_loss += mask_loss.detach().item()
        # argmax over the channel dimension (dim=1) reduces (N, C, H, W) to (N, H, W)
        pred = torch.argmax(F.softmax(out, dim=1), dim=1)
        # accumulate the per-class IoU statistics
        result = compute_iou(pred, mask, result)
        dataprocess.set_description_str("epoch:{}".format(epoch))
        dataprocess.set_postfix_str("mask_loss:{:.4f}".format(mask_loss))
    testF.write("Epoch:{} \n".format(epoch))
    # compute the IoU of each class
    for i in range(8):
        # intersection over union for class i
        result_string = "{}: {:.4f} \n".format(i, result["TP"][i]/result["TA"][i])
        print(result_string)
        # write the result to the test log file
        testF.write(result_string)
    testF.write("Epoch:{}, mask loss is {:.4f} \n".format(epoch, total_mask_loss / len(dataLoader)))
    testF.flush()
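MySoftmaxCrossEntropyLoss(nbclasses=...) is applied to raw logits of shape (N, C, H, W) and an integer mask of shape (N, H, W). A minimal sketch of such a wrapper around nn.CrossEntropyLoss; the project's actual implementation may differ:

import torch.nn as nn

class MySoftmaxCrossEntropyLoss(nn.Module):
    # Hypothetical wrapper: flatten (N, C, H, W) logits and (N, H, W) labels
    # into per-pixel samples and apply standard cross-entropy.
    def __init__(self, nbclasses):
        super(MySoftmaxCrossEntropyLoss, self).__init__()
        self.nbclasses = nbclasses

    def forward(self, inputs, target):
        if inputs.dim() == 4:
            inputs = inputs.permute(0, 2, 3, 1).contiguous()  # (N, H, W, C)
            inputs = inputs.view(-1, self.nbclasses)          # (N*H*W, C)
        return nn.CrossEntropyLoss(reduction="mean")(inputs, target.reshape(-1).long())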
Example #3
def val_epoch(net, epoch, dataLoader, valF, args):
    logger.info("======start val epoch step=======")
    net.eval()
    total_mask_loss = 0.0
    dataprocess = tqdm(dataLoader)
    result = {"TP": {i: 0 for i in range(8)}, "TA": {i: 0 for i in range(8)}}
    evaluator = Evaluator(args.number_class)
    for batch_item in dataprocess:
        image, mask = batch_item['image'], batch_item['mask']
        out = net(image)
        mask_loss = MySoftmaxCrossEntropyLoss(nbclasses=args.number_class)(
            out, mask)
        total_mask_loss += mask_loss.detach().item()
        pred = torch.argmax(F.softmax(out, dim=1), dim=1)
        result = compute_iou(pred, mask, result)
        # feed the evaluator's confusion matrix (otherwise the metrics below stay
        # empty); assumes the common Evaluator.add_batch(gt, pred) interface on
        # numpy arrays
        evaluator.add_batch(mask.cpu().numpy(), pred.cpu().numpy())
        dataprocess.set_description_str("epoch:{}".format(epoch))
        dataprocess.set_postfix_str("mask_loss:{:.4f}".format(mask_loss))
    Acc = evaluator.Pixel_Accuracy()
    Acc_class = evaluator.Pixel_Accuracy_Class()
    mIoU = evaluator.Mean_Intersection_over_Union()
    FWIoU = evaluator.Frequency_Weighted_Intersection_over_Union()
    valF.write("Epoch:{}, val/mIoU is {:.4f} \n".format(epoch, mIoU))
    valF.write("Epoch:{}, val/Acc is {:.4f} \n".format(epoch, Acc))
    valF.write("Epoch:{}, val/Acc_class is {:.4f} \n".format(epoch, Acc_class))
    valF.write("Epoch:{}, val/FWIoU is {:.4f} \n".format(epoch, FWIoU))
    for i in range(8):
        result_string = "{}: {:.4f} \n".format(
            i, result["TP"][i] / result["TA"][i])
        logger.info("val class result {}".format(result_string))
        valF.write(result_string)
    valF.write("Epoch:{}, mask loss is {:.4f} \n".format(
        epoch, total_mask_loss / len(dataLoader)))
    logger.info("Acc:{}, Acc_class:{}, mIoU:{}, fwIoU: {}".format(
        Acc, Acc_class, mIoU, FWIoU))
    valF.flush()
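The Evaluator interface used here (add_batch plus the four metric methods) matches the confusion-matrix metric class found in several segmentation repositories; a condensed sketch under that assumption:

import numpy as np

class Evaluator(object):
    # Hypothetical confusion-matrix based metrics matching the interface used above.
    def __init__(self, num_class):
        self.num_class = num_class
        self.confusion_matrix = np.zeros((num_class, num_class))

    def add_batch(self, gt_image, pre_image):
        # entry [gt, pred] counts pixels of ground-truth class gt predicted as pred
        valid = (gt_image >= 0) & (gt_image < self.num_class)
        label = self.num_class * gt_image[valid].astype(int) + pre_image[valid]
        count = np.bincount(label, minlength=self.num_class ** 2)
        self.confusion_matrix += count.reshape(self.num_class, self.num_class)

    def Pixel_Accuracy(self):
        return np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum()

    def Pixel_Accuracy_Class(self):
        acc = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=1)
        return np.nanmean(acc)

    def Mean_Intersection_over_Union(self):
        union = (self.confusion_matrix.sum(axis=1) + self.confusion_matrix.sum(axis=0)
                 - np.diag(self.confusion_matrix))
        return np.nanmean(np.diag(self.confusion_matrix) / union)

    def Frequency_Weighted_Intersection_over_Union(self):
        freq = self.confusion_matrix.sum(axis=1) / self.confusion_matrix.sum()
        union = (self.confusion_matrix.sum(axis=1) + self.confusion_matrix.sum(axis=0)
                 - np.diag(self.confusion_matrix))
        iou = np.diag(self.confusion_matrix) / union
        return (freq[freq > 0] * iou[freq > 0]).sum()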
Example #4
def train_epoch(net, epoch, dataLoader, optimizer, trainF, config):
    net.train()
    total_mask_loss = 0.0
    dataprocess = tqdm(dataLoader)
    for batch_item in dataprocess:
        image, mask = batch_item['image'], batch_item['mask']
        if torch.cuda.is_available():
            image, mask = image.cuda(device=device_list[0]), mask.cuda(device=device_list[0]).long()
        # optimizer.zero_grad() clears the gradient of every parameter
        optimizer.zero_grad()
        # forward pass: predict the mask
        out = net(image)
        # compute the softmax cross-entropy loss
        mask_loss = MySoftmaxCrossEntropyLoss(nbclasses=config.NUM_CLASSES)(out, mask)
        total_mask_loss += mask_loss.detach().item()
        mask_loss.backward()
        optimizer.step()
        dataprocess.set_description_str("epoch:{}".format(epoch))
        dataprocess.set_postfix_str("mask_loss:{:.4f}".format(mask_loss.detach().item()))
    # log the average mask loss over this epoch
    trainF.write("Epoch:{}, mask loss is {:.4f} \n".format(epoch, total_mask_loss / len(dataLoader)))
    trainF.flush()
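A train_epoch/test pair like this is usually driven by a small outer loop. A hypothetical sketch of such a driver; build_model, build_loaders, and the config fields shown are assumptions, not names from the original code:

import torch
import torch.optim as optim

def main(config):
    # Hypothetical driver: alternate one training and one evaluation epoch,
    # logging to the plain-text files the functions above expect.
    net = build_model(config)                         # assumed model factory
    if torch.cuda.is_available():
        net = net.cuda(device=device_list[0])
    optimizer = optim.Adam(net.parameters(), lr=config.BASE_LR)
    train_loader, val_loader = build_loaders(config)  # assumed data factories
    with open("train.log", "w") as trainF, open("test.log", "w") as testF:
        for epoch in range(config.EPOCHS):
            train_epoch(net, epoch, train_loader, optimizer, trainF, config)
            test(net, epoch, val_loader, testF, config)
            torch.save(net.state_dict(), "epoch_{}.pth".format(epoch))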
Example #5
def test_epoch(net, epoch, dataloader, writer, logger, config):
    net.eval()
    total_mask_loss = 0.0
    dataprocess = tqdm(dataloader)
    confusion_matrix = np.zeros((config.NUM_CLASS, config.NUM_CLASS))
    logger.info("Val EPOCH {}: ".format(epoch))
    with torch.no_grad():
        for batch_item in dataprocess:
            image, mask = batch_item['image'], batch_item['mask']
            if torch.cuda.is_available():
                image, mask = image.cuda(), mask.cuda()
            out = net(image)
            mask_loss = MySoftmaxCrossEntropyLoss(nbclasses=config.NUM_CLASS)(
                out, mask)
            total_mask_loss += mask_loss.detach().item()
            confusion_matrix += get_confusion_matrix(mask, out, mask.size(),
                                                     config.NUM_CLASS)
            dataprocess.set_description_str('epoch{}:'.format(epoch))
            dataprocess.set_postfix_str('mask loss is {:.4f}'.format(
                mask_loss.item()))
        logger.info("\taverage loss is {:.4f}".format(total_mask_loss /
                                                      len(dataloader)))
        pos = confusion_matrix.sum(0)
        res = confusion_matrix.sum(1)
        tp = np.diag(confusion_matrix)
        IoU_array = (tp / np.maximum(1.0, pos + res - tp))
        for i in range(8):
            print('{} IoU is : {}'.format(i, IoU_array[i]))
            logger.info('\t{} IoU is : {}'.format(i, IoU_array[i]))
        miou = IoU_array[1:].mean()
        logger.info('Val miou is : {:.4f}'.format(miou))
        # log epoch-level scalars directly; using the writer as a context manager
        # here would close it after the first validation epoch
        writer.add_scalar('EPOCH Loss', total_mask_loss / len(dataloader), epoch)
        writer.add_scalar('EPOCH mIoU', miou, epoch)
        print('epoch{}: miou is {}'.format(epoch, miou))
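get_confusion_matrix is not included in this snippet; given how pos, res, and tp are derived from its output, it returns a NUM_CLASS x NUM_CLASS matrix whose entry [gt, pred] counts pixels of ground-truth class gt predicted as class pred. A minimal sketch under that assumption:

import numpy as np
import torch
import torch.nn.functional as F

def get_confusion_matrix(label, pred, size, num_class):
    # Hypothetical helper: entry [gt, p] counts pixels of ground-truth class gt
    # predicted as class p. `size` is the label shape; the logits are resized
    # to it if their spatial resolution differs.
    if pred.shape[-2:] != size[-2:]:
        pred = F.interpolate(pred, size=(size[-2], size[-1]),
                             mode="bilinear", align_corners=False)
    seg_pred = torch.argmax(pred, dim=1).reshape(-1).cpu().numpy()
    seg_gt = label.reshape(-1).cpu().numpy().astype(int)
    index = seg_gt * num_class + seg_pred
    count = np.bincount(index, minlength=num_class * num_class)
    return count.reshape(num_class, num_class).astype(float)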