Example #1
transform_train = Compose(Resize((800, 288)), ToTensor(),
                          Normalize(mean=(0.3598, 0.3653, 0.3662), std=(0.2573, 0.2663, 0.2756)))
train_dataset = CULane(CULane_path, "train", transform_train)
train_loader = DataLoader(train_dataset, **exp_cfg['dataset'], shuffle=True, collate_fn=train_dataset.collate, num_workers=4)

# ------------ val data ------------
transform_val = Compose(Resize((800, 288)), ToTensor(),
                        Normalize(mean=(0.3598, 0.3653, 0.3662), std=(0.2573, 0.2663, 0.2756)))
val_dataset = CULane(CULane_path, "val", transform_val)
val_loader = DataLoader(val_dataset, batch_size=8, collate_fn=val_dataset.collate, num_workers=4)

# ------------ preparation ------------
net = SCNN(pretrained=False).to(device)
net = torch.nn.DataParallel(net)

optimizer = optim.SGD(net.parameters(), **exp_cfg['optim'])
lr_scheduler = PolyLR(optimizer, 0.9, exp_cfg['MAX_ITER'])
best_val_loss = 1e6


def train(epoch):
    print("Train Epoch: {}".format(epoch))
    net.train()
    train_loss = 0
    train_loss_seg = 0
    train_loss_exist = 0
    progressbar = tqdm(range(len(train_loader)))

    for batch_idx, sample in enumerate(train_loader):
        img = sample['img'].to(device)
        segLabel = sample['segLabel'].to(device)
        exist = sample['exist'].to(device)
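
The snippet ends mid-iteration. A minimal sketch of how the step typically continues, assuming the network's forward pass returns predictions together with the seg/exist losses; the exact return signature is an assumption, not confirmed by the excerpt:

        optimizer.zero_grad()
        # Assumed forward signature: predictions plus per-term losses.
        seg_pred, exist_pred, loss_seg, loss_exist, loss = net(img, segLabel, exist)
        # DataParallel returns one loss per GPU; reduce before backward.
        if isinstance(net, torch.nn.DataParallel):
            loss_seg, loss_exist, loss = loss_seg.sum(), loss_exist.sum(), loss.sum()
        loss.backward()
        optimizer.step()
        lr_scheduler.step()

        train_loss += loss.item()
        train_loss_seg += loss_seg.item()
        train_loss_exist += loss_exist.item()
        progressbar.set_description("batch loss: {:.3f}".format(loss.item()))
        progressbar.update(1)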
Example #2
train_loader = DataLoader(train_dataset, batch_size=exp_cfg['dataset']['batch_size'], shuffle=True, collate_fn=train_dataset.collate, num_workers=8)

# ------------ val data ------------
transform_val_img = Resize(resize_shape)
transform_val_x = Compose(ToTensor(), Normalize(mean=mean, std=std))
transform_val = Compose(transform_val_img, transform_val_x)
val_dataset = Dataset_Type(Dataset_Path[dataset_name], "val", transform_val)
val_loader = DataLoader(val_dataset, batch_size=8, collate_fn=val_dataset.collate, num_workers=4)

# ------------ preparation ------------
net = SCNN(resize_shape, pretrained=True)
net = net.to(device)
net = torch.nn.DataParallel(net)

optimizer = optim.SGD(net.parameters(), **exp_cfg['optim'])
lr_scheduler = PolyLR(optimizer, 0.9, **exp_cfg['lr_scheduler'])
best_val_loss = 1e6


def train(epoch):
    print("Train Epoch: {}".format(epoch))
    net.train()
    train_loss = 0
    train_loss_seg = 0
    train_loss_exist = 0
    progressbar = tqdm(range(len(train_loader)))

    for batch_idx, sample in enumerate(train_loader):
        img = sample['img'].to(device)
        segLabel = sample['segLabel'].to(device)
        exist = sample['exist'].to(device)
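
PolyLR is not a stock PyTorch scheduler. A minimal sketch consistent with the call sites above (`PolyLR(optimizer, gamma, max_iter)`, stepped once per iteration); note that other repos flip the argument order, e.g. `PolyLR(optimizer, 10000, power=0.9)` in the next example, so treat this signature as one of several conventions:

from torch.optim.lr_scheduler import _LRScheduler

class PolyLR(_LRScheduler):
    """Polynomial decay: lr = base_lr * (1 - step / max_iter) ** gamma."""
    def __init__(self, optimizer, gamma, max_iter, last_epoch=-1):
        self.gamma = gamma
        self.max_iter = max_iter
        super(PolyLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # Clamp at max_iter so the factor never goes negative.
        factor = (1 - min(self.last_epoch, self.max_iter) / self.max_iter) ** self.gamma
        return [base_lr * factor for base_lr in self.base_lrs]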
Example #3
model = deeplabv3plus_resnet50(num_classes=num_class,
                               output_stride=par.out_stride).cuda()

set_bn_momentum(model.backbone, momentum=0.01)

#=========================================================== Define Optimizer ================================================
import torch.optim as optim
train_params = [{
    'params': model.backbone.parameters(),
    'lr': par.lr * 0.1
}, {
    'params': model.classifier.parameters(),
    'lr': par.lr
}]
optimizer = optim.SGD(train_params, lr=par.lr, momentum=0.9, weight_decay=1e-4)
scheduler = PolyLR(optimizer, 10000, power=0.9)

# Define Criterion
# whether to use class balanced weights
weight = None
criterion = SegmentationLosses(weight=weight,
                               cuda=par.cuda).build_loss(mode=par.loss_type)

# Define Evaluator
evaluator = Evaluator(num_class)

#===================================================== Resuming checkpoint ====================================================
best_pred = 0.0
if par.resume is not None:
    if not os.path.isfile(par.resume):
        raise RuntimeError("=> no checkpoint found at '{}'".format(par.resume))
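The excerpt stops inside the resume branch. The usual continuation in this style of training script, assuming the checkpoint dict stores 'epoch', 'state_dict', 'optimizer' and 'best_pred' (the key names are an assumption):

    checkpoint = torch.load(par.resume, map_location='cpu')
    start_epoch = checkpoint['epoch']                    # assumed key
    model.load_state_dict(checkpoint['state_dict'])      # assumed key
    optimizer.load_state_dict(checkpoint['optimizer'])   # assumed key
    best_pred = checkpoint['best_pred']                  # assumed key
    print("=> loaded checkpoint '{}' (epoch {})".format(par.resume, start_epoch))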
Example #4
                         drivable_path=bdd100k_val_dl_path)
val_bdd100k_dataset_loader = DataLoader(dataset=val_bdd100k, **params)

#Declare model & optimizers
net = SCNN(resize_shape, pretrained=True)
net = net.to(device)
# Distributed / DataParallel wrapping is left disabled in this example:
#torch.distributed.init_process_group("gloo", rank=rank, world_size=world_size)
#torch.cuda.set_device()
#net = torch.nn.parallel.DistributedDataParallel(net)
#net = torch.nn.DataParallel(net)
#net.eval()
tensorboard = SummaryWriter(exp_dir + "tb/")

optimizer = optim.SGD(net.parameters(), **optim_set)
lr_scheduler = PolyLR(optimizer, 0.9, **lr_set)
best_val_loss = 1000


#@profile
def train(epoch):
    print("Train Epoch: {}".format(epoch))
    net.train()
    train_loss = 0
    train_loss_seg = 0
    ##train_loss_exist = 0
    epoch_accuracy = 0

    progressbar = tqdm(range(len(train_bdd100k_dataset_loader)))
    #Training loop
    for batch_idx, sample in enumerate(train_bdd100k_dataset_loader):
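
This example also cuts off at the loop header. A sketch of a plausible body, assuming a segmentation-only forward returning (prediction, loss), since the exist branch is disabled above, with per-iteration TensorBoard logging; the forward signature and scalar tag are assumptions:

        img = sample['img'].to(device)
        segLabel = sample['segLabel'].to(device)

        optimizer.zero_grad()
        seg_pred, loss = net(img, segLabel)  # assumed seg-only forward
        loss.backward()
        optimizer.step()
        lr_scheduler.step()

        train_loss += loss.item()
        train_loss_seg += loss.item()
        step = epoch * len(train_bdd100k_dataset_loader) + batch_idx
        tensorboard.add_scalar("train/loss", loss.item(), step)
        progressbar.set_description("batch loss: {:.3f}".format(loss.item()))
        progressbar.update(1)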