Example 1
    for i in range(dataset.data.NB):
        x, y = dataset.data.getImageAndLabel(i, torchformat=True)
        x, y = x.cuda(), y.cuda()

        # resize the input to a multiple of 64 for the network and prepare a
        # resize of the prediction back to the label resolution
        h, w = y.shape[0], y.shape[1]
        globalresize = torch.nn.AdaptiveAvgPool2d((h, w))
        power2resize = torch.nn.AdaptiveAvgPool2d(
            ((h // 64) * 64, (w // 64) * 64))
        x = power2resize(x)

        # tiled forward pass, then hard per-pixel decision (class 1 vs class 0)
        z = largeforward(net, x.unsqueeze(0))
        z = globalresize(z)
        z = (z[0, 1, :, :] > z[0, 0, :, :]).float()

        for size in ["0", "1", "2"]:
            cm[size] += noisyairs.confusion(y, z, size=int(size))
        cm["bordonly"] = cm["0"] - cm["2"]

        if False:  # set to True to dump debug images into build/
            nextI = len(os.listdir("build"))
            debug = noisyairs.torchTOpil(globalresize(x))
            debug = PIL.Image.fromarray(numpy.uint8(debug))
            debug.save("build/" + str(nextI) + "_x.png")
            debug = y.cpu().numpy() * 255
            debug = PIL.Image.fromarray(numpy.uint8(debug))
            debug.save("build/" + str(nextI) + "_y.png")
            debug = z.cpu().numpy() * 255
            debug = PIL.Image.fromarray(numpy.uint8(debug))
            debug.save("build/" + str(nextI) + "_z.png")

    for size in ["0", "1", "2", "bordonly"]:
Example 2
    # boundary regularizer: zz is the (Sobel) gradient of the predicted score
    # map; yy and border are computed earlier in the function (not shown here)
    zz, _ = sobel(torch.unsqueeze(z[:, 1, :, :] - z[:, 0, :, :], dim=1))

    gradientdiff = torch.sum(zz * yy, dim=1)
    gradientdiff = torch.mean((1 - gradientdiff) * border)
    gradientdiff = gradientdiff * size / nbborder

    # total loss: segmentation terms (cross entropy + dice) plus boundary term
    segloss = CE + dice
    regloss = gradientdiff
    loss = segloss + regloss

    with torch.no_grad():
        # accumulate running losses and a per-pixel confusion matrix
        printloss[0] += segloss.clone().detach()
        printloss[1] += regloss.clone().detach()
        z = (z[:, 1, :, :] > z[:, 0, :, :]).clone().detach().float()
        for j in range(batchsize):
            stats += noisyairs.confusion(y[j], z[j], size=1)

        if i < 10:
            print(i, "/", nbbatchs, printloss)
        if i < 1000 and i % 100 == 99:
            print(i, "/", nbbatchs, printloss / 100)
            printloss = torch.zeros(2).cuda()
        if i >= 1000 and i % 300 == 299:
            print(i, "/", nbbatchs, printloss / 300)
            printloss = torch.zeros(2).cuda()

        # checkpoint the model and report confusion-matrix performance, then
        # reset the running statistics
        if i % 1000 == 999:
            torch.save(net, "build/model.pth")
            perf = noisyairs.perf(stats)
            print(i, "perf", perf)
            stats = torch.zeros((2, 2)).cuda()
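
The sobel helper used above in Example 2 is also defined outside this snippet. One plausible implementation, assuming it returns unit-norm spatial gradients together with their magnitude (which would make torch.sum(zz * yy, dim=1) a cosine alignment between the predicted and label gradients on border pixels):

import torch
import torch.nn.functional as F

def sobel(t):
    # hypothetical sketch of the sobel() helper: for a (B, 1, H, W) map,
    # return a (B, 2, H, W) tensor of unit-norm spatial gradients and their norm
    kx = torch.tensor([[-1.0, 0.0, 1.0], [-2.0, 0.0, 2.0], [-1.0, 0.0, 1.0]])
    ky = kx.t()
    kernel = torch.stack([kx, ky]).unsqueeze(1).to(t.device)  # (2, 1, 3, 3)
    g = F.conv2d(t, kernel, padding=1)                        # (B, 2, H, W)
    norm = torch.sqrt(torch.sum(g * g, dim=1, keepdim=True) + 1e-8)
    return g / norm, norm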
Example 3
with torch.no_grad():
    for i in range(dataset.data.NB):
        x, y = dataset.data.getImageAndLabel(i, torchformat=True)
        x, y = x.cuda(), y.cuda()

        h, w = y.shape[0], y.shape[1]
        globalresize = torch.nn.AdaptiveAvgPool2d((h, w))
        power2resize = torch.nn.AdaptiveAvgPool2d(
            ((h // 64) * 64, (w // 64) * 64))
        x = power2resize(x)

        z = largeforward(net, x.unsqueeze(0))
        z = globalresize(z)
        z = (z[0, 1, :, :] > z[0, 0, :, :]).float()

        cm += noisyairs.confusion(y, z, size=size)

        if False:
            nextI = len(os.listdir("build"))
            debug = noisyairs.torchTOpil(globalresize(x))
            debug = PIL.Image.fromarray(numpy.uint8(debug))
            debug.save("build/" + str(nextI) + "_x.png")
            # label visualization: border pixels at 127, non-border building
            # pixels at 254, background at 0
            debug = y.float()
            debug = debug * 2 * (1 - noisyairs.isborder(y, size=size))
            debug = debug + noisyairs.isborder(y, size=size)
            debug *= 127
            debug = debug.cpu().numpy()
            debug = PIL.Image.fromarray(numpy.uint8(debug))
            debug.save("build/" + str(nextI) + "_y.png")
            debug = z.cpu().numpy() * 255
            debug = PIL.Image.fromarray(numpy.uint8(debug))