Example 1
tmp = smp.Unet(
    encoder_name="efficientnet-b7",
    encoder_weights="imagenet",
    in_channels=3,
    classes=2,
)
net = torch.nn.Sequential()
net.add_module("core", tmp)
net.add_module("softnms", dataloader.SoftNMS())
net.add_module("softnmsbis", dataloader.SoftNMS())
net = net.cuda()
net.train()

print("load data")
if whereIam == "wdtim719z":
    cia = dataloader.CIA("custom", custom=["isprs/train", "saclay/train"])
else:
    cia = dataloader.CIA("train")
batchsize = 32

print("train")
import collections
import random


class Gscore(torch.nn.Module):
    def __init__(self):
        super(Gscore, self).__init__()
        self.tot = torch.zeros(1).cuda()
        self.correct = torch.zeros(1).cuda()
        self.good = torch.zeros(1).cuda()
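The Gscore module is cut off at this point in the excerpt. Purely as an illustration of how such running counters might be updated and read out during training (an assumed completion, not the original implementation), a minimal version could look like this:

class GscoreSketch(torch.nn.Module):
    # hypothetical completion of a running pixel-statistics module
    def __init__(self):
        super(GscoreSketch, self).__init__()
        self.tot = torch.zeros(1).cuda()      # pixels seen so far
        self.correct = torch.zeros(1).cuda()  # correctly classified pixels
        self.good = torch.zeros(1).cuda()     # true positives of the positive class

    def forward(self, prediction, target):
        # prediction and target are integer label maps of identical shape
        self.tot += torch.numel(target)
        self.correct += (prediction == target).float().sum()
        self.good += ((prediction == 1) & (target == 1)).float().sum()
        return self.correct / self.tot  # running pixel accuracy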
Example 2
    sys.path.append("/d/achanhon/github/segmentation_models.pytorch")
import segmentation_models_pytorch

with torch.no_grad():
    if len(sys.argv) > 1:
        net = torch.load(sys.argv[1])
    else:
        net = torch.load("build/model.pth")
    net = net.to(device)
    net.eval()


print("load data")
import dataloader

cia = dataloader.CIA("test")

earlystopping = cia.getrandomtiles(128, 16)

print("test on testing crop")


def accu(cm):
    return 100.0 * (cm[0][0] + cm[1][1]) / np.sum(cm)


def f1(cm):
    return 50.0 * cm[0][0] / (cm[0][0] + cm[1][0] + cm[0][1]) + 50.0 * cm[1][1] / (
        cm[1][1] + cm[1][0] + cm[0][1]
    )
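The accu and f1 helpers above take a 2x2 confusion matrix over the two classes; the excerpt does not show how that matrix is filled. A small generic helper and a usage example (an illustration, not the original code):

import numpy as np

def confusion(pred, target):
    # pred, target: flat numpy arrays of 0/1 labels
    cm = np.zeros((2, 2), dtype=np.int64)
    for t in range(2):
        for p in range(2):
            cm[t][p] = np.sum((target == t) & (pred == p))
    return cm

# example usage with the metrics defined above
pred = np.array([0, 1, 1, 0, 1])
target = np.array([0, 1, 0, 0, 1])
cm = confusion(pred, target)
print(accu(cm), f1(cm))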
Example 3
else:
    print("no cuda")
    quit()
with torch.no_grad():
    if len(sys.argv) > 1:
        net = torch.load(sys.argv[1])
    else:
        net = torch.load("build/model.pth")
    net = net.cuda()
    net.eval()

print("load data")
import dataloader

if whereIam in ["super", "wdtim719z"]:
    cia = dataloader.CIA(flag="custom", custom=["isprs/test", "saclay/test"])
else:
    cia = dataloader.CIA("test")

print("test")
import numpy
import PIL
from PIL import Image


def accu(cm):
    return 100.0 * (cm[0][0] + cm[1][1]) / numpy.sum(cm)


def iou(cm):
    return 50.0 * cm[0][0] / (cm[0][0] + cm[1][0] + cm[0][1]) + 50.0 * cm[1][1] / (
        cm[1][1] + cm[1][0] + cm[0][1]
    )
Example 4
else:
    print("no cuda")
    quit()
with torch.no_grad():
    if len(sys.argv) > 1:
        net = torch.load(sys.argv[1])
    else:
        net = torch.load("build/model.pth")
    net = net.cuda()
    net.eval()

print("load data")
import dataloader

if whereIam == "wdtis719z" or True:  # "or True" forces the custom isprs/test branch
    miniworld = dataloader.CIA(flag="custom", custom=["isprs/test"])
else:
    miniworld = dataloader.CIA("test")

print("test")
import numpy
import PIL
from PIL import Image


def accu(cm):
    return 100.0 * (cm[0][0] + cm[1][1]) / numpy.sum(cm)


def iou(cm):
    return 50.0 * cm[0][0] / (cm[0][0] + cm[1][0] + cm[0][1]) + 50.0 * cm[1][1] / (
        cm[1][1] + cm[1][0] + cm[0][1]
    )
Example 5
net = torchvision.models.vgg16()
net = net.features  # keep only the convolutional backbone
net._modules["30"] = torch.nn.Identity()  # drop the final max-pooling layer (overall stride 16 instead of 32)
dummy = torch.zeros(1, 3, 16 * 5, 16 * 5)
dummy = net(dummy)
assert dummy.shape == (1, 512, 5, 5)  # sanity check: an 80x80 input gives a 5x5 feature map
net.add_module("31", torch.nn.Conv2d(512, 1024, kernel_size=1, padding=0, stride=1))
net.add_module("32", torch.nn.LeakyReLU())
net.add_module("33", torch.nn.Conv2d(1024, 2, kernel_size=1, padding=0, stride=1))  # 1x1 two-class head
net = net.cuda()
net.train()


print("load data")
cia = dataloader.CIA(flag="custom", custom=["isprs/train", "vedai/train", "dfc/train"])

print("train")
import collections
import random

optimizer = torch.optim.Adam(net.parameters(), lr=0.0001)
meanloss = collections.deque(maxlen=200)
nbepoch = 800
batchsize = 32
for epoch in range(nbepoch):
    print("epoch=", epoch, "/", nbepoch)

    XY = cia.getrandomtiles(192, batchsize)
    stats = torch.zeros(3).cuda()
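The body of the epoch loop is cut off here. A minimal sketch of how the iteration over XY might proceed is given below; the batch format, the loss, and the logit upsampling are assumptions, not the original code:

criterion = torch.nn.CrossEntropyLoss()  # assumed loss, not shown in the excerpt
for x, y in XY:
    x, y = x.cuda(), y.cuda().long()
    z = net(x)  # the VGG backbone above downsamples by 16, so upsample the logits
    z = torch.nn.functional.interpolate(z, size=y.shape[1:], mode="bilinear")
    loss = criterion(z, y)
    meanloss.append(loss.item())
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()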
Example 6
    sys.path.append("/d/achanhon/github/segmentation_models.pytorch")
import segmentation_models_pytorch

with torch.no_grad():
    if len(sys.argv) > 1:
        net = torch.load(sys.argv[1])
    else:
        net = torch.load("build/model.pth")
    net = net.to(device)
    net.eval()


print("eval in transfert setting")
import dataloader

cia = dataloader.CIA("test")


def accu(cm):
    return 100.0 * (cm[0][0] + cm[1][1]) / np.sum(cm)


def f1(cm):
    return 50.0 * cm[0][0] / (cm[0][0] + cm[1][0] + cm[0][1]) + 50.0 * cm[1][1] / (
        cm[1][1] + cm[1][0] + cm[0][1]
    )


cmforlogging = []
cm = {}
with torch.no_grad():
Example 7
import detectionhead
import dataloader

print("define model")
net = detectionhead.DetectionHead(
    smp.Unet(
        encoder_name="efficientnet-b7",
        encoder_weights="imagenet",
        in_channels=3,
        classes=2,
    )
)
net = net.cuda()
net.train()

print("load data")
cia = dataloader.CIA(flag="custom", custom=["isprs/train", "saclay/train"])

print("train")
import collections
import random

optimizer = torch.optim.Adam(net.parameters(), lr=0.0001)
meanloss = collections.deque(maxlen=200)
nbepoch = 800
batchsize = 32
for epoch in range(nbepoch):
    print("epoch=", epoch, "/", nbepoch)

    XY = cia.getrandomtiles(128, batchsize)
    stats = torch.zeros(3).cuda()
Example 8
        classes=2,
    )
else:
    net = smp.Unet(
        encoder_name="efficientnet-b7",
        encoder_weights="imagenet",
        in_channels=3,
        classes=2,
    )
net = net.cuda()
net.train()

print("load data")
import dataloader

cia = dataloader.CIA("train")

earlystopping = cia.getrandomtiles(128, 16)
weights = torch.Tensor([1, cia.balance / 2, 0.000001]).to(device)
criterion = torch.nn.CrossEntropyLoss(weight=weights)

criterionbis = smp.losses.dice.DiceLoss(mode="multiclass", ignore_index=[2])
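With the weighted cross entropy and the Dice loss both defined, one plausible way to combine them on a batch of logits is sketched below. The dummy tensors, the three-class shape (chosen to match the three-element weight vector), and the plain unweighted sum are assumptions, not the original training step; device is taken from the surrounding script.

dummylogits = torch.randn(2, 3, 64, 64, device=device, requires_grad=True)  # stand-in for net(x)
dummytarget = torch.randint(0, 3, (2, 64, 64), device=device)  # stand-in for the label tiles
loss = criterion(dummylogits, dummytarget) + criterionbis(dummylogits, dummytarget)
loss.backward()  # with real logits from net(x) this gradient would train the network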

print("train")


def accu(cm):
    return 100.0 * (cm[0][0] + cm[1][1]) / np.sum(cm)


def f1(cm):