Code example #1
0
File: train.py  Project: zacario-li/Fast-SCNN_pytorch
def subVal(model, criterion, dataLoader, device):
    """Run one validation pass and print loss plus per-class IoU/accuracy.

    Args:
        model: segmentation network whose forward returns a sequence; the
            main prediction is expected at index 0.
        criterion: loss function applied to the main output.
        dataLoader: validation DataLoader yielding (image, label) batches.
        device: device the batches are moved to.
    """
    # eval mode disables dropout and uses BN running statistics
    model.eval()

    intersectionMeter = common.AverageMeter()
    unionMeter = common.AverageMeter()
    targetMeter = common.AverageMeter()
    lossMeter = common.AverageMeter()

    # fix: validation needs no autograd graph — no_grad saves memory and time
    with torch.no_grad():
        for x, y in dataLoader:
            x = x.to(device)
            y = y.to(device)

            out = model(x)
            mainLoss = criterion(out[0], y)

            # update val loss, weighted by batch size
            lossMeter.update(mainLoss.item(), x.shape[0])

            # compute IoU/accuracy; 255 is the ignore label
            result = out[0].max(1)[1]
            intersection, union, target = common.intersectionAndUnionGPU(result, y, numClasses, 255)
            intersection, union, target = intersection.cpu().numpy(), union.cpu().numpy(), target.cpu().numpy()
            # update meters
            intersectionMeter.update(intersection)
            unionMeter.update(union)
            targetMeter.update(target)

    # show the log; 1e-10 guards against division by zero for absent classes
    IoU = intersectionMeter.sum / (unionMeter.sum + 1e-10)
    accuracy = intersectionMeter.sum / (targetMeter.sum + 1e-10)
    print('val loss:', lossMeter.avg)
    for i in range(numClasses):
        print('Class_' + str(i) + ' IoU:', IoU[i], ' acc:', accuracy[i])
Code example #2
0
def subTrain(model, optimizer, criterion, dataLoader, currentepoch, maxIter):
    """Run one training epoch with polynomial learning-rate decay.

    Args:
        model: segmentation network returning (main_out, aux_out, ...).
        optimizer: optimizer whose first param group's lr is decayed.
        criterion: loss applied to both the main and auxiliary outputs.
        dataLoader: training DataLoader yielding (image, label) batches.
        currentepoch: zero-based index of this epoch (lr schedule and log).
        maxIter: total iteration count of the whole run (for poly decay).
    """
    # set to train mode to enable dropout and bn
    model.train()

    intersectionMeter = common.AverageMeter()
    unionMeter = common.AverageMeter()
    targetMeter = common.AverageMeter()
    lossMeter = common.AverageMeter()

    for i, (x, y) in enumerate(dataLoader):
        x = x.cuda(non_blocking=True)
        y = y.cuda(non_blocking=True)

        out = model(x)
        mainLoss = criterion(out[0], y)
        auxLoss = criterion(out[1], y)

        # whole loss: auxiliary head is down-weighted
        loss = 0.4 * auxLoss + mainLoss
        lossMeter.update(loss.item(), x.shape[0])

        # step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # adjust lr with poly decay over the global iteration count
        curIter = currentepoch * len(dataLoader) + i + 1
        newLr = poly_learning_rate(baseLr, curIter, maxIter)
        optimizer.param_groups[0]['lr'] = newLr

        # compute IoU/accuracy from the main prediction; 255 = ignore label
        result = out[0].max(1)[1]
        intersection, union, target = common.intersectionAndUnionGPU(
            result, y, numClasses, 255)
        intersection = intersection.cpu().numpy()
        union = union.cpu().numpy()
        target = target.cpu().numpy()
        # update meters
        intersectionMeter.update(intersection)
        unionMeter.update(union)
        targetMeter.update(target)

    # after every epoch, print the log; 1e-10 guards against div-by-zero
    IoU = intersectionMeter.sum / (unionMeter.sum + 1e-10)
    accuracy = intersectionMeter.sum / (targetMeter.sum + 1e-10)

    # fix: IoU/accuracy were computed but never reported — log their means too
    print(f'[{currentepoch}/{globalEpoch}] loss:', lossMeter.avg,
          ' mIoU:', IoU.mean(), ' mAcc:', accuracy.mean())
    '''
def sub_sn_train(model, optimizer, criterion, dataloader, currentepoch,
                 maxIter):
    """Run one training epoch for the single-output variant.

    The network's output at index 1 is upsampled x8 back to label
    resolution before the loss and IoU are computed.

    Args:
        model: network whose forward returns a sequence; output index 1
            is trained here.
        optimizer: optimizer whose first param group's lr is decayed.
        criterion: loss applied to the upsampled output.
        dataloader: training DataLoader yielding (image, label) batches.
        currentepoch: zero-based index of this epoch.
        maxIter: total iteration count of the whole run (for poly decay).
    """
    model.train()
    intersectionmeter = common.AverageMeter()
    unionmeter = common.AverageMeter()
    targetmeter = common.AverageMeter()
    lossmeter = common.AverageMeter()

    # fix: the upsampler is stateless and loop-invariant — build it once
    # instead of constructing a new nn.Upsample module on every batch
    upsample = nn.Upsample(scale_factor=8, mode='bilinear')

    for i, (x, y) in enumerate(dataloader):
        x = x.to(DEVICE, non_blocking=True)
        y = y.to(DEVICE, non_blocking=True)

        out = model(x)
        # restore label resolution (network output is 1/8 scale)
        out[1] = upsample(out[1])

        loss = criterion(out[1], y)
        lossmeter.update(loss.item(), x.shape[0])

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # poly lr decay over the global iteration count
        curiter = currentepoch * len(dataloader) + i + 1
        newlr = poly_learning_rate(BASELR, curiter, maxIter)
        optimizer.param_groups[0]['lr'] = newlr

        # iou; 255 is the ignore label
        result = out[1].max(1)[1]
        intersection, union, target = common.intersectionAndUnionGPU(
            result, torch.squeeze(y), NUM_CLASSES, 255)
        intersection = intersection.cpu().numpy()
        union = union.cpu().numpy()
        target = target.cpu().numpy()

        intersectionmeter.update(intersection)
        unionmeter.update(union)
        targetmeter.update(target)

    # 1e-10 guards against division by zero for absent classes
    IoU = intersectionmeter.sum / (unionmeter.sum + 1e-10)
    accuracy = intersectionmeter.sum / (targetmeter.sum + 1e-10)
    print(f'[{currentepoch}/{GLOBALEPOCH}] loss:{lossmeter.avg}')
def sub_sn_val(model, criterion, dataloader):
    """Run one validation pass for the single-output variant.

    Prints the average loss and per-class IoU/accuracy; also dumps the
    last batch's first prediction to 'segout.jpg' for visual debugging.

    Args:
        model: network whose forward returns a sequence; output index 1
            is evaluated here.
        criterion: loss applied to the upsampled output.
        dataloader: validation DataLoader yielding (image, label) batches.
    """
    model.eval()
    intersectionmeter = common.AverageMeter()
    unionmeter = common.AverageMeter()
    targetmeter = common.AverageMeter()
    lossmeter = common.AverageMeter()

    # fix: build the loop-invariant upsampler once, not per batch
    upsample = nn.Upsample(scale_factor=8, mode='bilinear')

    # fix: validation needs no autograd graph — no_grad saves memory/time
    with torch.no_grad():
        for x, y in dataloader:
            x = x.to(DEVICE, non_blocking=True)
            y = y.to(DEVICE, non_blocking=True)

            out = model(x)
            # restore label resolution (network output is 1/8 scale)
            out[1] = upsample(out[1])

            mainloss = criterion(out[1], y)
            lossmeter.update(mainloss.item(), x.shape[0])

            # iou; 255 is the ignore label
            result = out[1].max(1)[1]
            intersection, union, target = common.intersectionAndUnionGPU(
                result, torch.squeeze(y), NUM_CLASSES, 255)
            intersection = intersection.cpu().numpy()
            union = union.cpu().numpy()
            target = target.cpu().numpy()
            intersectionmeter.update(intersection)
            unionmeter.update(union)
            targetmeter.update(target)

            # save for debug: scale class ids to visible gray levels;
            # intentionally overwrites, keeping only the last batch
            rt = (result * 128).to(torch.uint8).cpu().numpy()
            cv2.imwrite('segout.jpg', rt[0])

    # IoU; 1e-10 guards against division by zero for absent classes
    IoU = intersectionmeter.sum / (unionmeter.sum + 1e-10)
    accuracy = intersectionmeter.sum / (targetmeter.sum + 1e-10)
    print(f'val loss:{lossmeter.avg}')
    for i in range(NUM_CLASSES):
        print(f'class_{i} IoU:{IoU[i]}, acc: {accuracy[i]}')