def preload_lanemarking(weights_path):
    """Build the SCNN model, its preprocessing transform, and the target device
    once, so they can be reused for repeated lane-marking inference."""
    net = SCNN(pretrained=False)
    mean = (0.3598, 0.3653, 0.3662)
    std = (0.2573, 0.2663, 0.2756)
    transform = Compose(Resize((800, 288)), ToTensor(), Normalize(mean=mean, std=std))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    save_dict = torch.load(weights_path, map_location='cpu')
    net.load_state_dict(save_dict['net'])
    net.eval()
    net.to(device)
    return {'net': net, 'transform': transform, 'device': device}
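# Usage sketch: a minimal way to exercise preload_lanemarking(). The checkpoint
# path below is hypothetical, and the exact values returned by the network's
# forward pass are an assumption about this repo's SCNN class, not confirmed here.
bundle = preload_lanemarking("experiments/exp0/exp0_best.pth")  # hypothetical path
# Resize((800, 288)) is taken to mean (width, height), so the network is fed
# NCHW tensors of shape (1, 3, 288, 800).
dummy = torch.zeros(1, 3, 288, 800, device=bundle['device'])
with torch.no_grad():
    outputs = bundle['net'](dummy)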
transform = Compose(Resize(resize_shape), ToTensor(), Normalize(mean=mean, std=std))
dataset_name = exp_cfg['dataset'].pop('dataset_name')
Dataset_Type = getattr(dataset, dataset_name)
test_dataset = Dataset_Type(Dataset_Path['Tusimple'], "test", transform)
test_loader = DataLoader(test_dataset, batch_size=32, collate_fn=test_dataset.collate, num_workers=4)

net = SCNN(input_size=resize_shape, pretrained=False)
save_name = os.path.join(exp_dir, exp_dir.split('/')[-1] + '_best.pth')
save_dict = torch.load(save_name, map_location='cpu')
print("\nloading", save_name, "...... From Epoch: ", save_dict['epoch'])
net.load_state_dict(save_dict['net'])
net = torch.nn.DataParallel(net.to(device))
net.eval()

# ------------ test ------------
out_path = os.path.join(exp_dir, "coord_output")
evaluation_path = os.path.join(exp_dir, "evaluate")
if not os.path.exists(out_path):
    os.mkdir(out_path)
if not os.path.exists(evaluation_path):
    os.mkdir(evaluation_path)
dump_to_json = []

progressbar = tqdm(range(len(test_loader)))
with torch.no_grad():
    for batch_idx, sample in enumerate(test_loader):
        img = sample['img'].to(device)
val_loader = DataLoader(val_dataset, batch_size=8, collate_fn=val_dataset.collate, num_workers=4)

# ------------ preparation ------------
if exp_cfg['model'] == "scnn":
    net = SCNN(resize_shape, pretrained=True)
elif exp_cfg['model'] == "enet_sad":
    net = ENet_SAD(resize_shape, sad=True, dataset=dataset_name)
else:
    raise Exception("Model not match. 'model' in 'cfg.json' should be 'scnn' or 'enet_sad'.")
net = net.to(device)
net = torch.nn.DataParallel(net)
optimizer = optim.SGD(net.parameters(), **exp_cfg['optim'])
lr_scheduler = PolyLR(optimizer, 0.9, **exp_cfg['lr_scheduler'])
best_val_loss = 1e6


def train(epoch):
    print("Train Epoch: {}".format(epoch))
    net.train()
    train_loss = 0
    train_loss_seg = 0
    train_loss_exist = 0
    progressbar = tqdm(range(len(train_loader)))
                    Normalize(mean=mean, std=std))
# val_dataset = CULane(CULane_path, "val", transform)
# val_loader = DataLoader(val_dataset, batch_size=8, collate_fn=val_dataset.collate, num_workers=4)
test_dataset = CULane(CULane_path, "test", transform)
test_loader = DataLoader(test_dataset, batch_size=8, collate_fn=test_dataset.collate, num_workers=4)

net = SCNN(pretrained=False)
save_name = os.path.join(exp_dir, exp_dir.split('/')[-1] + '_best.pth')
# Note: the line below overrides the experiment-derived path with a hard-coded
# absolute checkpoint path.
save_name = "/home/lion/hanyibo/SCNN/experiments/vgg_SCNN_DULR_w9/vgg_SCNN_DULR_w9.pth"
save_dict = torch.load(save_name, map_location='cpu')
print("loading", save_name, "......")
net.load_state_dict(save_dict['net'])
net.to(device)
net.eval()

# ------------ test ------------
out_path = os.path.join(exp_dir, "coord_output")
evaluation_path = os.path.join(exp_dir, "evaluate")
if not os.path.exists(out_path):
    os.mkdir(out_path)
if not os.path.exists(evaluation_path):
    os.mkdir(evaluation_path)

progressbar = tqdm(range(len(test_loader)))
with torch.no_grad():
    for batch_idx, sample in enumerate(test_loader):
        img = sample['img'].to(device)
        img_name = sample['img_name']
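        # Sketch (assumption): the forward pass this loop presumably performs
        # next. Whether net(img) returns segmentation and lane-existence
        # predictions as its first two outputs depends on this repo's SCNN
        # implementation and is not confirmed by this fragment.
        seg_pred, exist_pred = net(img)[:2]
        seg_pred = seg_pred.cpu().numpy()
        exist_pred = exist_pred.cpu().numpy()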
transform_val_img = Resize(resize_shape)
transform_val_x = Compose(ToTensor(), Normalize(mean=mean, std=std))
transform_val = Compose(transform_val_img, transform_val_x)
val_dataset = Dataset_Type(Dataset_Path[dataset_name], "val", transform_val)
val_loader = DataLoader(val_dataset, batch_size=8, collate_fn=val_dataset.collate, num_workers=4)

# ------------ preparation ------------
if exp_cfg['model'] == "scnn":
    net = SCNN(resize_shape, pretrained=True)
elif exp_cfg['model'] == "enet_sad":
    net = ENet_SAD(resize_shape, sad=True)
else:
    raise Exception("Model not match. 'model' in 'cfg.json' should be 'scnn' or 'enet_sad'.")
# net = net.to(device)
net = net.to("cpu")  # GPU placement is disabled here; the model is kept on CPU.
net = torch.nn.DataParallel(net)
optimizer = optim.SGD(net.parameters(), **exp_cfg['optim'])
lr_scheduler = PolyLR(optimizer, 0.9, **exp_cfg['lr_scheduler'])
best_val_loss = 1e6

"""
def batch_processor(arg):
    b_queue, data_loader = arg
    while True:
        if b_queue.empty():
            sample = next(data_loader)
            b_queue.put(sample)
    b_queue.join()
"""
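# Sketch: one way a "best" checkpoint could be written whenever validation
# improves, using the same keys ('net', 'epoch') that the test scripts above
# load. The function name is illustrative, and it assumes exp_dir is defined
# earlier in this script as in the test scripts; it is not taken verbatim from
# the original file.
def save_best_checkpoint(epoch, val_loss):
    global best_val_loss
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        save_name = os.path.join(exp_dir, exp_dir.split('/')[-1] + '_best.pth')
        torch.save({'net': net.module.state_dict(), 'epoch': epoch}, save_name)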