Example #1
0
# Example setup: CamVid dataset paths plus train/val DataLoaders.
CAMVID_PATH = Path('/bigguy/data', 'SegNet-Tutorial/CamVid')
RESULTS_PATH = Path('.results/')
WEIGHTS_PATH = Path('.weights/')
# Create output directories up front; no-op if they already exist.
for _out_dir in (RESULTS_PATH, WEIGHTS_PATH):
    _out_dir.mkdir(exist_ok=True)
batch_size = 2

# Per-channel normalization using dataset-wide statistics from the camvid module.
normalize = transforms.Normalize(mean=camvid.mean, std=camvid.std)

# Joint (image + label) augmentation; random crop is deliberately disabled
# for fine-tuning, leaving horizontal flip as the only augmentation.
train_joint_transformer = transforms.Compose(
    [joint_transforms.JointRandomHorizontalFlip()])

# Pixel-level pipeline shared by both splits: PIL -> tensor -> normalize.
_pixel_transform = transforms.Compose([transforms.ToTensor(), normalize])

train_dset = camvid.CamVid(CAMVID_PATH, 'train',
                           joint_transform=train_joint_transformer,
                           transform=_pixel_transform)
train_loader = torch.utils.data.DataLoader(
    train_dset, batch_size=batch_size, shuffle=True)

# Validation: no augmentation, fixed order.
val_dset = camvid.CamVid(CAMVID_PATH, 'val',
                         joint_transform=None,
                         transform=_pixel_transform)
val_loader = torch.utils.data.DataLoader(
    val_dset, batch_size=batch_size, shuffle=False)
Example #2
0
from pathlib import Path

import torch
import torchvision
import torchvision.transforms as transforms

from datasets import camvid
from datasets import joint_transforms
from models import tiramisu
import utils.imgs
import utils.training as train_utils

# Example: evaluate previously saved weights on the CamVid test split.
CAMVID_PATH = Path('/home/jingwenlai/data', 'CamVid/CamVid')
batch_size = 2

# Normalize with dataset-wide channel statistics.
normalize = transforms.Normalize(mean=camvid.mean, std=camvid.std)

# Test split: no joint augmentation, deterministic order.
test_dset = camvid.CamVid(
    CAMVID_PATH, 'test',
    joint_transform=None,
    transform=transforms.Compose([transforms.ToTensor(), normalize]))
test_loader = torch.utils.data.DataLoader(
    test_dset, batch_size=batch_size, shuffle=False)

print("Test: %d" % len(test_loader.dataset.imgs))

# FC-DenseNet67 with 12 output classes, moved to the GPU.
model = tiramisu.FCDenseNet67(n_classes=12).cuda()
model_weights = ".weights/latest.th"
# load_weights restores the checkpoint and reports the epoch it was saved at.
startEpoch = train_utils.load_weights(model, model_weights)
print("load_weights, return epoch: ", startEpoch)

# Visualize predictions for 10 test samples.
train_utils.view_sample_predictions(model, test_loader, n=10)
# Example: training setup whose batch size comes from an external config.
CAMVID_PATH = "./CamVid/CamVid/"
RESULTS_PATH = Path(".results/")
WEIGHTS_PATH = Path(".weights/")
# Ensure output directories exist (idempotent).
RESULTS_PATH.mkdir(exist_ok=True)
WEIGHTS_PATH.mkdir(exist_ok=True)
# NOTE(review): `hyper` is not defined in this snippet — presumably a
# hyper-parameter dict supplied elsewhere; confirm before running.
batch_size = hyper["batch_size"]

# Channel-wise normalization from dataset statistics.
normalize = transforms.Normalize(mean=camvid.mean, std=camvid.std)

# Horizontal flip only; the random crop is commented out for fine-tuning.
train_joint_transformer = transforms.Compose(
    [joint_transforms.JointRandomHorizontalFlip()])

train_dset = camvid.CamVid(CAMVID_PATH, "train",
                           joint_transform=train_joint_transformer,
                           transform=transforms.Compose(
                               [transforms.ToTensor(), normalize]))

train_loader = torch.utils.data.DataLoader(
    train_dset, batch_size=batch_size, shuffle=True)

# Validation split: no augmentation.
val_dset = camvid.CamVid(CAMVID_PATH, "val",
                         joint_transform=None,
                         transform=transforms.Compose(
                             [transforms.ToTensor(), normalize]))
val_loader = torch.utils.data.DataLoader(val_dset,
                                         batch_size=batch_size,