Example 1
import torch

# NOTE: image_path2 is not defined in the original snippet; the test-set
# path below is assumed, mirroring the layout used in Example 2.
image_path2 = '/data/scene-segmentation/CamVid/test/*.png'
label_path2 = '/data/scene-segmentation/CamVid/testannot/*.png'

var2 = ImageFolderSegmentation(images_path=image_path2,
                               label_path=label_path2,
                               transform=transform,
                               label_transform=label_transform)

valloader = torch.utils.data.DataLoader(var2,
                                        batch_size=10,
                                        shuffle=False,
                                        num_workers=10,
                                        pin_memory=True)

n_classes = 12

model = SegNet(in_channels=3, n_classes=n_classes)
model.init_encoder()
# model = torch.nn.DataParallel(model,
#                              device_ids=range(torch.cuda.device_count()))
model.cuda()
epochs = [200]
lrs = [0.001]

metrics = evaluation(n_classes=n_classes,
                     lr=lrs[0],
                     modelstr="SegNet",
                     textfile="newlog.txt")

weights = NormalizedWeightComputationMedian(labels_path=label_path2,
                                            n_classes=n_classes)
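
# NormalizedWeightComputationMedian is a project-local helper. A minimal
# sketch of median frequency balancing (Eigen & Fergus, 2015), which the
# name suggests it implements, assuming the annotations are per-pixel
# class-index PNGs (the function name and details here are illustrative,
# not the project's actual code):
import glob

import numpy as np
from PIL import Image


def median_frequency_weights(labels_path, n_classes):
    """Per-class weight = median(freq) / freq(c), where freq(c) is the
    pixel count of class c over the total pixels of images containing c."""
    class_pixels = np.zeros(n_classes)
    present_pixels = np.zeros(n_classes)
    for path in glob.glob(labels_path):
        labels = np.asarray(Image.open(path))
        hist = np.bincount(labels.ravel(), minlength=n_classes)[:n_classes]
        class_pixels += hist
        present_pixels[hist > 0] += labels.size
    freq = class_pixels / np.maximum(present_pixels, 1)
    median = np.median(freq[freq > 0])
    # Classes that never appear in the annotations get weight 0.
    return np.where(freq > 0, median / np.maximum(freq, 1e-12), 0.0)

# The resulting array can then be handed to the loss, e.g.
# nn.CrossEntropyLoss(weight=torch.from_numpy(w).float()).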
Example 2
from torch.autograd import Variable
from tqdm import tqdm
import numpy as np
import torch

image_path = '/data/scene-segmentation/CamVid/test/*.png'

label_path = '/data/scene-segmentation/CamVid/testannot/*.png'

var = ImageFolderSegmentation(images_path=image_path,
                              label_path=label_path,
                              transform=transform,
                              label_transform=label_transform)

valloader = torch.utils.data.DataLoader(var, batch_size=1,
                                        shuffle=False, num_workers=10)

n_classes = 12
running_metrics = runningScore(n_classes=n_classes)
model = SegNet(n_classes=n_classes)
# state = convert_state_dict(torch.load('segnet_Camvid_best_model2.pkl')
#                            ['model_state'])
state = torch.load('segnet_Camvid_best_model2.pkl')['model_state']
model.load_state_dict(state)
model.eval()

model.cuda()

for i, (images, labels) in tqdm(enumerate(valloader)):
    # volatile is the legacy (pre-0.4) PyTorch flag for inference-only graphs.
    images = Variable(images.cuda(), volatile=True)
    labels = Variable(labels.cuda(), volatile=True)

    outputs = model(images)
    # Per-pixel argmax over the class dimension -> predicted label map.
    pred = outputs.data.max(1)[1].cpu().numpy()
    gt = labels.data.cpu().numpy()
    np.save("pred/pred" + str(i), pred)
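
# running_metrics is instantiated above but never fed. With the
# pytorch-semseg-style runningScore class, accumulation and readout
# usually look like the following (the update/get_scores names come from
# that project and are assumptions here). Inside the loop, after gt and
# pred are computed:
#     running_metrics.update(gt, pred)   # accumulates a confusion matrix

score, class_iou = running_metrics.get_scores()
print(score)      # overall/mean accuracy, mean IoU, ...
print(class_iou)  # per-class IoU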
Example 3
#     callbacks__train_loss__target_extractor=lambda x: x,
#     callbacks__valid_loss__target_extractor=lambda x: x
# )
#
# # x = torch.FloatTensor()
# # y = torch.LongTensor()
# # for i in range(len(X)):
# #     x = torch.cat((x, X[i]), 0)
# #     y = torch.cat((y, Y[i]), 0)
# # xfin = x.unsqueeze(0)
# # yfin = y.unsqueeze(0)
# net.fit(X=X, y=Y)

import skorch
import torch

n_classes = 12

network = SegNet(in_channels=3, n_classes=n_classes)
network.init_encoder()

network.cuda()

net = skorch.NeuralNet(
    module=network,
    criterion=torch.nn.CrossEntropyLoss,
    train_split=None,
    device='cuda',  # spelled use_cuda=True in very old skorch releases
    batch_size=10,
)

params = {'lr': [0.01, 0.02], 'max_epochs': [5, 10]}

# if only training (no grid search), fit directly as in the
# commented-out net.fit(X=X, y=Y) call above
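
# Because skorch.NeuralNet implements the scikit-learn estimator API, the
# params grid above can be searched with GridSearchCV. A minimal sketch,
# assuming X and Y are the array-like images and label maps from the
# commented-out net.fit call, with an illustrative pixel-accuracy scorer
# (the scorer is not part of the original code):
from sklearn.metrics import make_scorer
from sklearn.model_selection import GridSearchCV


def pixel_accuracy(y_true, y_pred):
    # y_pred holds the raw (N, C, H, W) scores returned by net.predict.
    return float((y_pred.argmax(axis=1) == y_true).mean())


gs = GridSearchCV(net, params, cv=3, refit=False,
                  scoring=make_scorer(pixel_accuracy))
gs.fit(X, Y)
print(gs.best_params_, gs.best_score_)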
Example 4

# # get some random training images
# dataiter = iter(trainloader)
# images, labels = next(dataiter)
# fig = plt.figure()
# # show images
# imshow2(torchvision.utils.make_grid(images),
#         torchvision.utils.make_grid(labels[:,0].type(torch.DoubleTensor)))
#
# plt.show()
# plt.close(fig)
import torch
import torch.nn as nn

n_classes = 12

running_metrics = runningScore(n_classes=n_classes)
model = SegNet(in_channels=3, n_classes=n_classes)
model = torch.nn.DataParallel(model,
                              device_ids=range(torch.cuda.device_count()))
model.cuda()
epochs = [100]
lrs = [0.001]
best_iou = -100.0


# weights = torch.ones(n_classes).cuda()
# weights[0] = 0

# reduce=True / size_average=True are the deprecated spelling of the
# default reduction='mean'.
criterion = nn.CrossEntropyLoss(reduction='mean').cuda()
for ep in epochs:
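# The sweep body is truncated in this example. A minimal sketch of what
# one iteration typically contains, assuming a trainloader like the one
# referenced in the commented-out visualization code and an SGD optimizer
# (both assumptions, not the original code):
    for lr in lrs:
        optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
        for epoch in range(ep):
            model.train()
            for images, labels in trainloader:
                images, labels = images.cuda(), labels.cuda()

                optimizer.zero_grad()
                loss = criterion(model(images), labels)
                loss.backward()
                optimizer.step()
            # A validation pass feeding running_metrics would go here,
            # checkpointing whenever mean IoU improves on best_iou.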
Example 5
from nn import SegNet  # project-local nn module; the name is rebound to torch.nn below
import torch
import torch.nn as nn
from torch import autograd, optim

# Execution
batch_size = 1
input_size = 3
num_classes = 8
learning_rate = 0.0001
nb = 500

input_ = autograd.Variable(torch.rand(1, 3, 256, 256))
# Random per-pixel class indices in [0, num_classes); the original
# torch.rand(...).long() yielded an all-zero target.
target = autograd.Variable((torch.rand(1, 256, 256) * num_classes).long())

model = SegNet(in_channels=input_size, n_classes=num_classes)
model.init_encoder()

# opt = optim.Adam(params=model.parameters(), lr=learning_rate)
#
#
# for epoch in range(1):
#     print('epoch: ' + str(epoch))
#     out = model(input_)
#
#     print(out)
#
#     # Loss definition - cross entropy
#     criterion = nn.CrossEntropyLoss()
#     loss = criterion(out, target)
#
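
# The commented-out loop above is cut off. A minimal sketch of the
# complete smoke test, assuming the modern (>= 0.4) PyTorch API where
# Variable is a no-op wrapper:
opt = optim.Adam(params=model.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()

for epoch in range(1):
    out = model(input_)            # (1, num_classes, 256, 256) class scores
    loss = criterion(out, target)  # per-pixel cross entropy
    print('epoch %d loss: %.4f' % (epoch, loss.item()))

    opt.zero_grad()
    loss.backward()
    opt.step()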