# Example #1
# 0
# Distributed validation: each Horovod worker sees a disjoint shard of
# val_dataset (num_replicas = world size, rank = this worker's id).
val_sampler = torch.utils.data.distributed.DistributedSampler(
    val_dataset, num_replicas=hvd.size(), rank=hvd.rank())
# Loader uses the dataset's own collate function; `kwargs` (defined
# elsewhere in the file) presumably carries num_workers/pin_memory —
# TODO confirm against the surrounding script.
val_loader = DataLoader(val_dataset,
                        batch_size=val_batch_size,
                        collate_fn=val_dataset.collate,
                        sampler=val_sampler,
                        **kwargs)

# Horovod: print logs on the first worker.
verbose = 1 if hvd.rank() == 0 else 0

# ------------ preparation ------------
# Build the SCNN lane-detection model; `resize_shape` comes from config
# defined outside this chunk.
net = SCNN(resize_shape, pretrained=True)
# Learning-rate multiplier; stays 1 on CPU or when NCCL is unavailable.
lr_scaler = 1
if torch.cuda.is_available():
    net.cuda()
    # Horovod: Scale learning rate as per number of devices
    if hvd.nccl_built():
        lr_scaler = hvd.local_size()

# NOTE(review): wrapping the model in DataParallel while also using
# Horovod is unusual — Horovod normally runs one process per GPU and
# DataParallel multiplexes GPUs within one process; confirm this mix is
# intended.
net = torch.nn.DataParallel(net)
# Optimizer hyperparameters from the experiment config dict.
lr = exp_cfg['optim']['lr']
momentum = exp_cfg['optim']['momentum']
weight_decay = exp_cfg['optim']['weight_decay']
nesterov = exp_cfg['optim']['nesterov']

# Horovod: scale learning rate by lr_scaler.
# NOTE(review): this call is truncated in the source — `nesterov=nesterov)`
# (and anything after it) is missing past this chunk.
optimizer = optim.SGD(net.parameters(),
                      lr=lr * lr_scaler,
                      momentum=momentum,
                      weight_decay=weight_decay,
# Example #2
# 0
import argparse
import pdb
import time

import cv2
import torch
from tqdm import tqdm

from model import SCNN
from utils.prob2lines import getLane
from utils.transforms import *

# Width (pixels) that input images are resized to before inference.
image_resize_width = 512

# SCNN demo model; weights are expected to be loaded later from
# --weight_path, hence pretrained=False here.
net = SCNN(input_size=(image_resize_width, 288), pretrained=False)
if torch.cuda.is_available():
    net = net.cuda()

# Per-channel normalization statistics for the CULane dataset.
mean=(0.3598, 0.3653, 0.3662) # CULane mean, std
std=(0.2573, 0.2663, 0.2756)
# Preprocessing pipeline: resize -> tensor -> normalize. Compose/Resize/
# ToTensor/Normalize come from utils.transforms (star import above) —
# presumably torchvision-like semantics; verify against that module.
transform = Compose(Resize((image_resize_width, 288)), ToTensor(),
                    Normalize(mean=mean, std=std))


def parse_args(argv=None):
    """Parse command-line options for the lane-detection demo.

    Args:
        argv: Optional list of argument strings. Defaults to None, in
            which case argparse reads ``sys.argv[1:]`` — identical to the
            previous behavior. Passing an explicit list makes the parser
            usable from tests without patching ``sys.argv``.

    Returns:
        argparse.Namespace with ``img_path`` (input image path),
        ``weight_path`` (model checkpoint path, ``None`` if omitted) and
        ``visualize`` (bool, show the result window).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--img_path", '-i', type=str, default="demo/demo.jpg", help="Path to demo img")
    parser.add_argument("--weight_path", '-w', type=str, help="Path to model weights")
    parser.add_argument("--visualize", '-v', action="store_true", default=False, help="Visualize the result")
    args = parser.parse_args(argv)
    return args