Пример #1
0
def test(net, epoch, dataLoader, testF, config):
    """Run one evaluation pass and log per-class IoU plus average loss.

    Args:
        net: model to evaluate (switched into eval mode).
        epoch: current epoch index, used only for logging.
        dataLoader: iterable yielding dicts with 'image' and 'mask'.
        testF: open writable file handle for the test log.
        config: configuration object providing NUM_CLASSES.
    """
    net.eval()
    total_mask_loss = 0.0
    dataprocess = tqdm(dataLoader)
    # Per-class accumulators for IoU: "TP" = intersection, "TA" = union.
    result = {"TP": {i: 0 for i in range(8)}, "TA": {i: 0 for i in range(8)}}
    # no_grad: evaluation needs no autograd graph — saves memory and time.
    with torch.no_grad():
        for batch_item in dataprocess:
            image, mask = batch_item['image'], batch_item['mask']
            if torch.cuda.is_available():
                image = image.cuda(device=device_list[0])
                mask = mask.cuda(device=device_list[0])
            out = net(image)
            mask_loss = MySoftmaxCrossEntropyLoss(
                nbclasses=config.NUM_CLASSES)(out, mask)
            total_mask_loss += mask_loss.detach().item()
            # argmax over the channel dim collapses (N, C, H, W) -> (N, H, W).
            pred = torch.argmax(F.softmax(out, dim=1), dim=1)
            result = compute_iou(pred, mask, result)
            dataprocess.set_description_str("epoch:{}".format(epoch))
            dataprocess.set_postfix_str(
                "mask_loss:{:.4f}".format(mask_loss.item()))
    testF.write("Epoch:{} \n".format(epoch))
    for i in range(8):
        # Guard against ZeroDivisionError when a class never occurs in the set.
        union = result["TA"][i]
        iou = result["TP"][i] / union if union else 0.0
        result_string = "{}: {:.4f} \n".format(i, iou)
        print(result_string)
        testF.write(result_string)
    testF.write("Epoch:{}, mask loss is {:.4f} \n".format(
        epoch, total_mask_loss / len(dataLoader)))
    testF.flush()
Пример #2
0
def test(net, epoch, dataLoader, testF, config):
    """Evaluate `net` for one epoch, logging per-class IoU and mean loss.

    Args:
        net: model to evaluate (put into eval mode here).
        epoch: epoch number, for log messages only.
        dataLoader: iterable of batch dicts with 'image' and 'mask'.
        testF: writable log file handle.
        config: object exposing NUM_CLASSES.
    """
    net.eval()
    total_mask_loss = 0.0
    dataprocess = tqdm(dataLoader)
    # "TP" holds per-class intersections, "TA" per-class unions.
    result = {"TP": {i: 0 for i in range(8)}, "TA": {i: 0 for i in range(8)}}
    # Disable autograd during evaluation — no gradients are needed.
    with torch.no_grad():
        for batch_item in dataprocess:
            image, mask = batch_item['image'], batch_item['mask']
            if torch.cuda.is_available():
                image, mask = image.cuda(device=device_list[0]), mask.cuda(
                    device=device_list[0])
            out = net(image)
            mask_loss = MySoftmaxCrossEntropyLoss(
                nbclasses=config.NUM_CLASSES)(out, mask)
            total_mask_loss += mask_loss.detach().item()
            pred = torch.argmax(F.softmax(out, dim=1), dim=1)
            result = compute_iou(pred, mask, result)
            dataprocess.set_description_str("epoch:{}".format(epoch))
            dataprocess.set_postfix_str(
                "mask_loss:{:.4f}".format(mask_loss.item()))
    testF.write("Epoch:{} \n".format(epoch))
    for i in range(8):
        # Avoid dividing by zero for classes absent from the whole split.
        denom = result["TA"][i]
        result_string = "{}: {:.4f} \n".format(
            i, result["TP"][i] / denom if denom else 0.0)
        print(result_string)
        testF.write(result_string)
    testF.write("Epoch:{}, mask loss is {:.4f} \n".format(
        epoch, total_mask_loss / len(dataLoader)))
    testF.flush()
Пример #3
0
def val_epoch(net, epoch, dataLoader, valF, args):
    """Validate `net` for one epoch; write loss/IoU metrics to `valF`.

    Args:
        net: model to validate (set to eval mode).
        epoch: epoch number, used in log lines.
        dataLoader: iterable of batch dicts with 'image' and 'mask'.
        valF: writable validation-log file handle.
        args: namespace exposing number_class.
    """
    logger.info("======start val epoch step=======")
    net.eval()
    total_mask_loss = 0.0
    dataprocess = tqdm(dataLoader)
    # Per-class IoU accumulators: "TP" = intersection, "TA" = union.
    result = {"TP": {i: 0 for i in range(8)}, "TA": {i: 0 for i in range(8)}}
    evaluator = Evaluator(args.number_class)
    # FIXME(review): `evaluator` is never updated inside the loop (no
    # add_batch call is visible), so Acc/mIoU/FWIoU below are computed
    # from an empty confusion matrix. Confirm against Evaluator's API.
    with torch.no_grad():  # validation does not need the autograd graph
        for batch_item in dataprocess:
            image, mask = batch_item['image'], batch_item['mask']
            out = net(image)
            mask_loss = MySoftmaxCrossEntropyLoss(
                nbclasses=args.number_class)(out, mask)
            total_mask_loss += mask_loss.detach().item()
            pred = torch.argmax(F.softmax(out, dim=1), dim=1)
            result = compute_iou(pred, mask, result)
            dataprocess.set_description_str("epoch:{}".format(epoch))
            dataprocess.set_postfix_str(
                "mask_loss:{:.4f}".format(mask_loss.item()))
    Acc = evaluator.Pixel_Accuracy()
    Acc_class = evaluator.Pixel_Accuracy_Class()
    mIoU = evaluator.Mean_Intersection_over_Union()
    FWIoU = evaluator.Frequency_Weighted_Intersection_over_Union()
    valF.write("Epoch:{}, val/mIoU is {:.4f} \n".format(epoch, mIoU))
    valF.write("Epoch:{}, val/Acc is {:.4f} \n".format(epoch, Acc))
    valF.write("Epoch:{}, val/Acc_class is {:.4f} \n".format(epoch, Acc_class))
    valF.write("Epoch:{}, val/FWIoU is {:.4f} \n".format(epoch, FWIoU))
    for i in range(8):
        # Guard classes that never appear to avoid ZeroDivisionError.
        union = result["TA"][i]
        result_string = "{}: {:.4f} \n".format(
            i, result["TP"][i] / union if union else 0.0)
        logger.info("val class result {}".format(result_string))
        valF.write(result_string)
    valF.write("Epoch:{}, mask loss is {:.4f} \n".format(
        epoch, total_mask_loss / len(dataLoader)))
    logger.info("Acc:{}, Acc_class:{}, mIoU:{}, fwIoU: {}".format(
        Acc, Acc_class, mIoU, FWIoU))
    valF.flush()
def main_gen():
    """Training driver: build datasets/loaders, train + validate, save model.

    Side effects: trains `model` in place and writes the final checkpoint to
    <model_save_path>/finalNet.path.
    """
    # 1. Datasets — augmentation is applied to the training split only.
    train_dataset = MyData(cf.params['root_dir'],
                           cf.params['train_csv'],
                           transforms=transforms.Compose([
                               ImageAug(),
                               ScaleAug(),
                               CutOut(64, 0.5),
                               ToTensor()
                           ]))
    val_dataset = MyData(cf.params['root_dir'],
                         cf.params['val_csv'],
                         transforms=transforms.Compose([ToTensor()]))
    test_dataset = MyData(cf.params['root_dir'],
                          cf.params['test_csv'],
                          transforms=transforms.Compose([ToTensor()]))

    # 2. Data loaders. (A cuda-conditional `kwargs` dict was previously
    # built here, immediately overwritten, and never used — removed.)
    train_data_batch = DataLoader(train_dataset,
                                  batch_size=4,
                                  shuffle=True,
                                  drop_last=True,
                                  num_workers=1)
    val_data_batch = DataLoader(val_dataset, batch_size=1)
    test_data_batch = DataLoader(test_dataset, batch_size=1)

    # 3. Model.
    model = DeepLabv3Plus()

    # 4. Loss and optimizer.
    criterion = MySoftmaxCrossEntropyLoss(cf.params['num_class'])
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=cf.params['learning_rate'],
                                momentum=0.9)

    # Move model and loss to the GPU when one is available.
    if torch.cuda.is_available():
        model = model.to(device)
        criterion = criterion.to(device)

    for epoch in range(cf.params['epochs']):
        train(epoch, train_data_batch, model, criterion, optimizer)
        valid(epoch, val_data_batch, model, criterion, optimizer)
    # NOTE(review): 'finalNet.path' looks like a typo for '.pth', but the
    # name is kept unchanged to stay compatible with existing consumers.
    torch.save({'state_dict': model.state_dict()},
               os.path.join(cf.params['model_save_path'], 'finalNet.path'))
Пример #5
0
def train_epoch(net, epoch, dataLoader, optimizer, trainF, config):
    """Train `net` for a single epoch and append the mean loss to `trainF`.

    Args:
        net: model to train (switched into train mode).
        epoch: epoch number, used for progress-bar/log text.
        dataLoader: iterable of batch dicts with 'image' and 'mask'.
        optimizer: optimizer stepping net's parameters.
        trainF: writable training-log file handle.
        config: object exposing NUM_CLASSES.
    """
    net.train()
    running_loss = 0.0
    progress = tqdm(dataLoader)
    for sample in progress:
        image, mask = sample['image'], sample['mask']
        if torch.cuda.is_available():
            image = image.cuda(device=device_list[0])
            mask = mask.cuda(device=device_list[0])
        # Clear stale gradients before this batch's backward pass.
        optimizer.zero_grad()
        prediction = net(image)
        # Per-pixel cross-entropy over NUM_CLASSES classes.
        batch_loss = MySoftmaxCrossEntropyLoss(
            nbclasses=config.NUM_CLASSES)(prediction, mask)
        running_loss += batch_loss.item()
        batch_loss.backward()
        optimizer.step()
        progress.set_description_str("epoch:{}".format(epoch))
        progress.set_postfix_str("mask_loss:{:.4f}".format(batch_loss.item()))
    # One log line per epoch: the loss averaged over all batches.
    trainF.write("Epoch:{}, mask loss is {:.4f} \n".format(
        epoch, running_loss / len(dataLoader)))
    trainF.flush()
Пример #6
0
def train_epoch(net, epoch, dataLoader, optimizer, trainF, config):
    """Train for one epoch with GridMask augmentation and gradient
    accumulation, then log the epoch's average loss.

    Args:
        net: model to train (set to train mode).
        epoch: epoch number, for log/progress text.
        dataLoader: iterable of batch dicts with 'image' and 'mask'.
        optimizer: optimizer stepped every `accumulation_steps` batches.
        trainF: writable training-log file handle.
        config: object exposing NUM_CLASSES.
    """
    net.train()
    total_mask_loss = 0.0
    dataprocess = tqdm(dataLoader)
    accumulation_steps = 8
    grid = GridMask(10, 30, 360, 0.6, 1, 0.8)
    # Start from clean gradients so nothing leaks in from a previous epoch.
    optimizer.zero_grad()
    for i, batch_item in enumerate(dataprocess):
        grid.set_prob(i, 200)
        image, mask = batch_item['image'], batch_item['mask']
        if torch.cuda.is_available():
            image, mask = image.cuda(device=device_list[0]), mask.cuda(
                device=device_list[0])
        image = grid(image)
        out = net(image)
        mask_loss = MySoftmaxCrossEntropyLoss(nbclasses=config.NUM_CLASSES)(
            out, mask)
        # Track the unscaled loss so the reported average is the true mean.
        total_mask_loss += mask_loss.item()
        # Scale before backward so accumulated gradients average (not sum)
        # over the accumulation window.
        (mask_loss / accumulation_steps).backward()
        torch.nn.utils.clip_grad_norm_(net.parameters(), 0.25)
        if ((i + 1) % accumulation_steps) == 0:
            optimizer.step()  # apply the accumulated gradients
            optimizer.zero_grad()  # and reset for the next window
        dataprocess.set_description_str("epoch:{}".format(epoch))
        dataprocess.set_postfix_str("mask_loss:{:.4f}".format(
            mask_loss.item()))
    # Flush any partial accumulation window left at the end of the epoch.
    optimizer.step()
    optimizer.zero_grad()
    # Write once per epoch (previously written every batch by mistake).
    trainF.write("Epoch:{}, mask loss is {:.4f} \n".format(
        epoch, total_mask_loss / len(dataLoader)))
    trainF.flush()
Пример #7
0
def train_epoch(net, epoch, dataLoader, optimizer, trainF, config):
    """Run one training epoch and record the mean mask loss in `trainF`.

    net.train() enables training behavior: dropout randomly zeroes units
    and batch-norm updates its running statistics; net.eval() would use
    all units (rescaled) and the stored running averages instead.

    Args:
        net: model being trained.
        epoch: epoch number, used for progress/log text.
        dataLoader: iterable of batch dicts with 'image' and 'mask'.
        optimizer: optimizer for net's parameters.
        trainF: writable training-log file handle.
        config: object exposing NUM_CLASSES.
    """
    net.train()
    epoch_loss = 0.0
    progress = tqdm(dataLoader)
    for item in progress:
        image, mask = item['image'], item['mask']
        if torch.cuda.is_available():
            image = image.cuda(device=device_list[0])
            mask = mask.cuda(device=device_list[0])
        # Reset every parameter's gradient before this batch.
        optimizer.zero_grad()
        # Forward pass: predicted mask logits.
        prediction = net(image)
        # Cross-entropy loss over all classes for this batch.
        loss = MySoftmaxCrossEntropyLoss(
            nbclasses=config.NUM_CLASSES)(prediction, mask)
        epoch_loss += loss.item()
        # Backward pass, then parameter update.
        loss.backward()
        optimizer.step()
        progress.set_description_str("epoch:{}".format(epoch))
        progress.set_postfix_str("mask_loss:{:.4f}".format(loss.item()))
    # Append this epoch's average loss to the training log.
    trainF.write("Epoch:{}, mask loss is {:.4f} \n".format(
        epoch, epoch_loss / len(dataLoader)))
    trainF.flush()
Пример #8
0
def test_epoch(net, epoch, dataloader, writer, logger, config):
    """Evaluate `net` for one epoch; log per-class IoU and mIoU.

    Args:
        net: model to evaluate (set to eval mode).
        epoch: epoch number for logging and scalar tags.
        dataloader: iterable of batch dicts with 'image' and 'mask'.
        writer: TensorBoard-style writer with add_scalar(tag, value, step).
        logger: logger for human-readable output.
        config: object exposing NUM_CLASS.
    """
    net.eval()
    total_mask_loss = 0.0
    dataprocess = tqdm(dataloader)
    confusion_matrix = np.zeros((config.NUM_CLASS, config.NUM_CLASS))
    logger.info("Val EPOCH {}: ".format(epoch))
    with torch.no_grad():
        for batch_item in dataprocess:
            image, mask = batch_item['image'], batch_item['mask']
            if torch.cuda.is_available():
                image, mask = image.cuda(), mask.cuda()
            out = net(image)
            mask_loss = MySoftmaxCrossEntropyLoss(nbclasses=config.NUM_CLASS)(
                out, mask)
            total_mask_loss += mask_loss.detach().item()
            confusion_matrix += get_confusion_matrix(mask, out, mask.size(),
                                                     config.NUM_CLASS)
            dataprocess.set_description_str('epoch{}:'.format(epoch))
            dataprocess.set_postfix_str('mask loss is {:.4f}'.format(
                mask_loss.item()))
        logger.info("\taverage loss is {:.4f}".format(total_mask_loss /
                                                      len(dataloader)))
        # IoU per class = TP / (pred + true - TP), from the confusion matrix.
        pos = confusion_matrix.sum(0)
        res = confusion_matrix.sum(1)
        tp = np.diag(confusion_matrix)
        IoU_array = (tp / np.maximum(1.0, pos + res - tp))
        for i in range(8):
            print('{} IoU is : {}'.format(i, IoU_array[i]))
            logger.info('\t{} Iou is : {}'.format(i, IoU_array[i]))
        # Mean IoU over classes 1..7 (class 0 excluded).
        miou = IoU_array[1:].mean()
        logger.info('Val miou is : {:.4f}'.format(miou))
        # Call the writer directly: `with writer as w:` closes the writer on
        # block exit, so any later epoch would log to a closed writer.
        writer.add_scalar('EPOCH Loss', total_mask_loss / len(dataloader),
                          epoch)
        writer.add_scalar('EPOCH mIoU', miou, epoch)
        print('epoch{}: miou is {}'.format(epoch, miou))
Пример #9
0
def train_epoch(net, epoch, dataloader, optimizer, writer, logger, config):
    """Train `net` for one epoch; log average loss, per-class IoU and mIoU.

    Args:
        net: model to train (set to train mode).
        epoch: epoch number for logging and scalar tags.
        dataloader: iterable of batch dicts with 'image' and 'mask'.
        optimizer: optimizer for net's parameters.
        writer: TensorBoard-style writer with add_scalar(tag, value, step).
        logger: logger for human-readable output.
        config: object exposing NUM_CLASS.
    """
    net.train()
    confusion_matrix = np.zeros((config.NUM_CLASS, config.NUM_CLASS))
    total_mask_loss = 0.0
    dataprocess = tqdm(dataloader)
    # Use the injected logger, not the logging module, for consistency.
    logger.info("Train Epoch {}:".format(epoch))
    for batch_item in dataprocess:
        image, mask = batch_item['image'], batch_item['mask']
        if torch.cuda.is_available():
            image, mask = image.cuda(), mask.cuda()
        optimizer.zero_grad()
        out = net(image)
        mask_loss = MySoftmaxCrossEntropyLoss(nbclasses=config.NUM_CLASS)(out,
                                                                          mask)
        confusion_matrix += get_confusion_matrix(
            mask,
            out,
            mask.size(),
            config.NUM_CLASS,
        )
        total_mask_loss += mask_loss.item()
        mask_loss.backward()
        optimizer.step()
        dataprocess.set_description('epoch:{}'.format(epoch))
        dataprocess.set_postfix_str('mask loss:{:.4f}'.format(
            mask_loss.item()))
    logger.info("\taverage loss is : {:.3f}".format(total_mask_loss /
                                                    len(dataloader)))
    # Per-class IoU = TP / (pred + true - TP) from the confusion matrix.
    pos = confusion_matrix.sum(0)
    res = confusion_matrix.sum(1)
    tp = np.diag(confusion_matrix)
    IoU_array = (tp / np.maximum(1.0, pos + res - tp))
    for i in range(8):
        print('{} iou is : {:.4f}'.format(i, IoU_array[i]))
        logger.info("\t {} iou is : {:.4f}".format(i, IoU_array[i]))
    # Mean over classes 1..7 — class 0 is excluded.
    miou = IoU_array[1:].mean()
    print('EPOCH mIoU is : {}'.format(miou))
    logger.info("Train mIoU is : {:.4f}".format(miou))
    # Call the writer directly: `with writer as w:` would close the writer
    # when the block exits, breaking logging for all subsequent epochs.
    writer.add_scalar('EPOCH Loss', total_mask_loss / len(dataloader), epoch)
    writer.add_scalar('Train miou', miou, epoch)
Пример #10
0
def train_epoch(net, epoch, dataLoader, optimizer, trainF, args):
    """Train `net` for one epoch and write the mean loss to `trainF`.

    Args:
        net: model to train (set to train mode).
        epoch: epoch number, for log/progress text.
        dataLoader: iterable of batch dicts with 'image' and 'mask'.
        optimizer: optimizer for net's parameters.
        trainF: writable training-log file handle.
        args: namespace exposing number_class.
    """
    logger.info("======start training epoch step=======")
    net.train()
    loss_sum = 0.0
    progress = tqdm(dataLoader)
    for sample in progress:
        image, mask = sample['image'], sample['mask']
        optimizer.zero_grad()
        prediction = net(image)
        logger.info("train predict shape: {}".format(prediction.shape))
        loss = MySoftmaxCrossEntropyLoss(
            nbclasses=args.number_class)(prediction, mask)
        loss_sum += loss.item()
        loss.backward()
        # Apply the gradient update.
        optimizer.step()
        progress.set_description_str("epoch:{}".format(epoch))
        progress.set_postfix_str("mask_loss:{:.4f}".format(loss.item()))
    # Record this epoch's average loss in the training log.
    trainF.write("Epoch:{}, mask loss is {:.4f} \n".format(
        epoch, loss_sum / len(dataLoader)))
    trainF.flush()
Пример #11
0
def loss_func(predict, target, nbclasses, epoch):
    """Compute the training loss for one batch.

    Currently just softmax cross-entropy; additional loss terms can be
    summed in here. `epoch` is accepted for schedule-dependent losses
    but is unused at present.
    """
    criterion = MySoftmaxCrossEntropyLoss(nbclasses=nbclasses)
    return criterion(predict, target)