def __init__(self, args):
    """Set up a minimal training harness: one VOC sample, model, loss,
    Adam optimizer and a poly LR schedule."""
    self.args = args
    device = args.device

    # Single (image, target) pair pulled from Pascal VOC.
    self.img, self.target = VOCSegmentation().get()

    # Segmentation network without the auxiliary head.
    self.model = get_segmentation_model(model=args.model,
                                        dataset=args.dataset,
                                        backbone=args.backbone,
                                        aux=False,
                                        norm_layer=nn.BatchNorm2d).to(device)

    # Plain cross-entropy: aux disabled, aux weight 0.
    self.criterion = MixSoftmaxCrossEntropyLoss(False, 0.,
                                                ignore_label=-1).to(device)
    # for EncNet
    # self.criterion = EncNetLoss(nclass=21, ignore_label=-1).to(args.device)
    # for ICNet
    # self.criterion = ICNetLoss(nclass=21, ignore_index=-1).to(args.device)

    self.optimizer = torch.optim.Adam(self.model.parameters(),
                                      lr=args.lr,
                                      weight_decay=args.weight_decay)

    # Polynomial decay over the whole run; one iteration per epoch here.
    self.lr_scheduler = LRScheduler(mode='poly',
                                    base_lr=args.lr,
                                    nepochs=args.epochs,
                                    iters_per_epoch=1,
                                    power=0.9)
def __init__(self, args):
    """Build the validation pipeline: transform, loader, pretrained model, metric."""
    self.args = args
    self.device = torch.device(args.device)

    # image transform (ImageNet mean/std)
    input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
    ])

    # dataset and dataloader — one image per batch for evaluation
    val_dataset = get_segmentation_dataset(args.dataset,
                                           split='val',
                                           mode='testval',
                                           transform=input_transform)
    sampler = make_data_sampler(val_dataset, False, args.distributed)
    batch_sampler = make_batch_data_sampler(sampler, images_per_batch=1)
    self.val_loader = data.DataLoader(dataset=val_dataset,
                                      batch_sampler=batch_sampler,
                                      num_workers=args.workers,
                                      pin_memory=True)

    # create network: pretrained weights, no separately pretrained backbone
    self.model = get_segmentation_model(model=args.model,
                                        dataset=args.dataset,
                                        backbone=args.backbone,
                                        pretrained=True,
                                        pretrained_base=False)
    # NOTE(review): unwrapping `.module` assumes the loaded model is already
    # DDP/DataParallel-wrapped under distributed mode — confirm upstream.
    if args.distributed:
        self.model = self.model.module
    self.model.to(self.device)

    self.metric = SegmentationMetric(val_dataset.num_class)
def __init__(self, args):
    """Set up distributed-aware validation: data loader, sync-BN model, metric."""
    self.args = args
    self.device = torch.device(args.device)

    # ImageNet normalization on top of tensor conversion.
    input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
    ])

    # dataset and dataloader (one image per batch)
    val_dataset = get_segmentation_dataset(args.dataset,
                                           split='val',
                                           mode='testval',
                                           transform=input_transform)
    #val_dataset = get_segmentation_dataset(args.dataset, split='val_test', mode='testval', transform=input_transform)
    sampler = make_data_sampler(val_dataset, False, args.distributed)
    batch_sampler = make_batch_data_sampler(sampler, images_per_batch=1)
    self.val_loader = data.DataLoader(dataset=val_dataset,
                                      batch_sampler=batch_sampler,
                                      num_workers=args.workers,
                                      pin_memory=True)

    # create network — SyncBatchNorm only when running distributed
    BatchNorm2d = nn.SyncBatchNorm if args.distributed else nn.BatchNorm2d
    self.model = get_segmentation_model(model=args.model,
                                        dataset=args.dataset,
                                        backbone=args.backbone,
                                        aux=args.aux,
                                        pretrained=True,
                                        pretrained_base=False,
                                        local_rank=args.local_rank,
                                        norm_layer=BatchNorm2d).to(self.device)
    if args.distributed:
        self.model = nn.parallel.DistributedDataParallel(
            self.model,
            device_ids=[args.local_rank],
            output_device=args.local_rank)
    self.model.to(self.device)

    self.metric = SegmentationMetric(val_dataset.num_class)
def __init__(self, config):
    """Build the test-split evaluation pipeline from a nested config dict.

    Args:
        config: dict with 'run_config', 'optim_config', 'data_config' and
            'model_config' sub-dicts.
    """
    self.config = config
    self.run_config = config['run_config']
    self.optim_config = config['optim_config']
    self.data_config = config['data_config']
    self.model_config = config['model_config']
    self.device = torch.device(self.run_config["device"])

    # image transform (ImageNet mean/std)
    input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
    ])

    # dataset and dataloader
    val_dataset = get_segmentation_dataset(
        self.data_config['dataset_name'],
        root=self.data_config['dataset_root'],
        split='test',
        mode='test',
        transform=input_transform)
    val_sampler = make_data_sampler(val_dataset, False,
                                    self.run_config['distributed'])
    val_batch_sampler = make_batch_data_sampler(val_sampler,
                                                images_per_batch=10,
                                                drop_last=False)
    self.val_loader = data.DataLoader(dataset=val_dataset,
                                      batch_sampler=val_batch_sampler,
                                      num_workers=4,
                                      pin_memory=True)

    # create network — SyncBatchNorm only when running distributed
    BatchNorm2d = nn.SyncBatchNorm if self.run_config[
        'distributed'] else nn.BatchNorm2d
    # BUG FIX: the bare name `run_config` was undefined (NameError at
    # runtime) — only the attribute self.run_config exists. All config
    # lookups below now go through self.run_config.
    self.model = get_segmentation_model(
        model=self.model_config['model'],
        dataset=self.data_config['dataset_name'],
        backbone=self.model_config['backbone'],
        aux=self.optim_config['aux'],
        jpu=self.model_config['jpu'],
        norm_layer=BatchNorm2d,
        root=self.run_config['path']['eval_model_root'],
        pretrained=self.run_config['eval_model'],
        pretrained_base=False,
        local_rank=self.run_config['local_rank']).to(self.device)

    if self.run_config['distributed']:
        self.model = nn.parallel.DistributedDataParallel(
            self.model,
            device_ids=[self.run_config['local_rank']],
            output_device=self.run_config['local_rank'])
    elif len(self.run_config['gpu_ids']) > 1:
        # Plain DataParallel when multiple GPUs are listed but we are not
        # in distributed mode.
        assert torch.cuda.is_available()
        self.model = nn.DataParallel(self.model)
    self.model.to(self.device)

    self.metric = SegmentationMetric(val_dataset.num_class)
def __init__(self, args):
    """Build the full training pipeline: data, model, loss, checkpoint
    resume, SGD optimizer, poly LR schedule and metrics."""
    self.args = args

    # image transform (ImageNet mean/std)
    input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
    ])

    # dataset and dataloader
    data_kwargs = {'transform': input_transform,
                   'base_size': args.base_size,
                   'crop_size': args.crop_size}
    train_dataset = get_segmentation_dataset(args.dataset,
                                             split=args.train_split,
                                             mode='train', **data_kwargs)
    val_dataset = get_segmentation_dataset(args.dataset, split='val',
                                           mode='val', **data_kwargs)
    self.train_loader = data.DataLoader(dataset=train_dataset,
                                        batch_size=args.batch_size,
                                        drop_last=True,
                                        shuffle=True)
    self.val_loader = data.DataLoader(dataset=val_dataset,
                                      batch_size=1,
                                      drop_last=False,
                                      shuffle=False)

    # create network
    self.model = get_segmentation_model(model=args.model,
                                        dataset=args.dataset,
                                        backbone=args.backbone,
                                        aux=args.aux,
                                        norm_layer=nn.BatchNorm2d).to(args.device)

    # create criterion
    self.criterion = MixSoftmaxCrossEntropyLoss(args.aux, args.aux_weight,
                                                ignore_label=-1).to(args.device)

    # for multi-GPU
    # if torch.cuda.is_available():
    #     self.model = DataParallelModel(self.model).cuda()
    #     self.criterion = DataParallelCriterion(self.criterion).cuda()

    # resume checkpoint if needed
    if args.resume:
        if os.path.isfile(args.resume):
            name, ext = os.path.splitext(args.resume)
            # BUG FIX: `ext == '.pkl' or '.pth'` was always truthy (a
            # non-empty string literal is truthy), so the check never
            # fired; test membership instead.
            assert ext in ('.pkl', '.pth'), 'Sorry only .pth and .pkl files supported.'
            print('Resuming training, loading {}...'.format(args.resume))
            self.model.load_state_dict(
                torch.load(args.resume,
                           map_location=lambda storage, loc: storage))

    # optimizer
    self.optimizer = torch.optim.SGD(self.model.parameters(),
                                     lr=args.lr,
                                     momentum=args.momentum,
                                     weight_decay=args.weight_decay)

    # lr scheduling: polynomial decay over all training iterations
    self.lr_scheduler = LRScheduler(mode='poly',
                                    base_lr=args.lr,
                                    nepochs=args.epochs,
                                    iters_per_epoch=len(self.train_loader),
                                    power=0.9)

    # evaluation metrics
    self.metric = SegmentationMetric(train_dataset.num_class)
    self.best_pred = 0.0
def eval(args):
    """Validate on the val split, printing running pixAcc/mIoU per sample and
    optionally saving colorized prediction masks to ./test_result."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # output folder
    outdir = 'test_result'
    if not os.path.exists(outdir):
        os.makedirs(outdir)

    # image transform (ImageNet mean/std)
    input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
    ])

    # dataset and dataloader
    test_dataset = get_segmentation_dataset(args.dataset,
                                            split='val',
                                            mode='testval',
                                            transform=input_transform)
    test_loader = data.DataLoader(dataset=test_dataset,
                                  batch_size=1,
                                  shuffle=False)

    # create network
    model = get_segmentation_model(model=args.model,
                                   dataset=args.dataset,
                                   backbone=args.backbone,
                                   aux=args.aux,
                                   pretrained=True,
                                   pretrained_base=False).to(device)
    print('Finished loading model!')

    metric = SegmentationMetric(test_dataset.num_class)
    model.eval()
    for i, (image, label) in enumerate(test_loader):
        image = image.to(device)
        with torch.no_grad():
            outputs = model(image)
        # First output head is the main prediction.
        pred = torch.argmax(outputs[0], 1).cpu().data.numpy()
        metric.update(pred, label.numpy())
        pixAcc, mIoU = metric.get()
        print('Sample %d, validation pixAcc: %.3f%%, mIoU: %.3f%%' %
              (i + 1, pixAcc * 100, mIoU * 100))
        if args.save_result:
            mask = get_color_pallete(pred.squeeze(0), args.dataset)
            mask.save(os.path.join(outdir, 'seg_{}.png'.format(i)))
def __init__(self, args):
    """Build the evaluation pipeline: data loader, model, optional resume
    from checkpoint, optional multi-GPU wrap, and metric."""
    self.args = args
    self.device = torch.device(args.device)

    # image transform (ImageNet mean/std)
    input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
    ])

    # dataset and dataloader
    data_kwargs = {
        'transform': input_transform,
        'base_size': args.base_size,
        'crop_size': args.crop_size
    }
    val_dataset = get_segmentation_dataset(args.dataset,
                                           split='val',
                                           mode='testval',
                                           **data_kwargs)
    val_sampler = make_data_sampler(val_dataset, False, args.distributed)
    val_batch_sampler = make_batch_data_sampler(val_sampler,
                                                images_per_batch=1)
    self.val_loader = data.DataLoader(dataset=val_dataset,
                                      batch_sampler=val_batch_sampler,
                                      num_workers=args.workers,
                                      pin_memory=True)

    # create network — SyncBatchNorm only when running distributed
    BatchNorm2d = nn.SyncBatchNorm if args.distributed else nn.BatchNorm2d
    self.model = get_segmentation_model(model=args.model,
                                        dataset=args.dataset,
                                        backbone=args.backbone,
                                        aux=args.aux,
                                        norm_layer=BatchNorm2d).to(self.device)

    # resume checkpoint if needed
    if args.resume:
        if os.path.isfile(args.resume):
            name, ext = os.path.splitext(args.resume)
            # BUG FIX: `ext == '.pkl' or '.pth'` was always truthy (a
            # non-empty string literal is truthy); test membership instead.
            assert ext in ('.pkl', '.pth'), 'Sorry only .pth and .pkl files supported.'
            print('Resuming training, loading {}...'.format(args.resume))
            self.model.load_state_dict(
                torch.load(args.resume,
                           map_location=lambda storage, loc: storage))
    ###...
    # self.model.to(self.device)
    if args.mutilgpu:
        self.model = nn.DataParallel(self.model, device_ids=args.gpu_ids)
    ##....
    self.metric = SegmentationMetric(val_dataset.num_class)
def __init__(self, args):
    """Build the distributed training pipeline: visualizer, data loaders,
    model, checkpoint resume, criterion, SGD optimizer with per-module LR
    groups, warmup-poly LR schedule, DDP wrap, and metrics."""
    self.args = args
    self.device = torch.device(args.device)

    # Visualizer
    self.visualizer = TensorboardVisualizer(args, sys.argv)

    # image transform (ImageNet mean/std)
    input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
    ])

    # dataset and dataloader
    data_kwargs = {
        'transform': input_transform,
        'base_size': args.base_size,
        'crop_size': args.crop_size
    }
    train_dataset = get_segmentation_dataset(args.dataset,
                                             split='train',
                                             mode='train',
                                             **data_kwargs)
    val_dataset = get_segmentation_dataset(args.dataset,
                                           split='val',
                                           mode='val',
                                           **data_kwargs)
    # Iteration bookkeeping used by the batch sampler and LR schedule.
    args.iters_per_epoch = len(train_dataset) // (args.num_gpus * args.batch_size)
    args.max_iters = args.epochs * args.iters_per_epoch

    train_sampler = make_data_sampler(train_dataset,
                                      shuffle=True,
                                      distributed=args.distributed)
    train_batch_sampler = make_batch_data_sampler(train_sampler,
                                                  args.batch_size,
                                                  args.max_iters)
    val_sampler = make_data_sampler(val_dataset, False, args.distributed)
    val_batch_sampler = make_batch_data_sampler(val_sampler, args.batch_size)

    self.train_loader = data.DataLoader(dataset=train_dataset,
                                        batch_sampler=train_batch_sampler,
                                        num_workers=args.workers,
                                        pin_memory=True)
    self.val_loader = data.DataLoader(dataset=val_dataset,
                                      batch_sampler=val_batch_sampler,
                                      num_workers=args.workers,
                                      pin_memory=True)

    # create network — SyncBatchNorm only when running distributed
    BatchNorm2d = nn.SyncBatchNorm if args.distributed else nn.BatchNorm2d
    self.model = get_segmentation_model(model=args.model,
                                        dataset=args.dataset,
                                        backbone=args.backbone,
                                        aux=args.aux,
                                        norm_layer=BatchNorm2d).to(self.device)
    # jpu=args.jpu

    # resume checkpoint if needed
    if args.resume:
        if os.path.isfile(args.resume):
            name, ext = os.path.splitext(args.resume)
            # BUG FIX: `ext == '.pkl' or '.pth'` was always truthy (a
            # non-empty string literal is truthy); test membership instead.
            assert ext in ('.pkl', '.pth'), 'Sorry only .pth and .pkl files supported.'
            print('Resuming training, loading {}...'.format(args.resume))
            self.model.load_state_dict(
                torch.load(args.resume,
                           map_location=lambda storage, loc: storage))

    # create criterion
    self.criterion = get_segmentation_loss(args.model,
                                           use_ohem=args.use_ohem,
                                           aux=args.aux,
                                           aux_weight=args.aux_weight,
                                           ignore_index=-1).to(self.device)

    # optimizer, for model just includes pretrained, head and auxlayer
    params_list = list()
    if hasattr(self.model, 'pretrained'):
        params_list.append({
            'params': self.model.pretrained.parameters(),
            'lr': args.lr
        })
    if hasattr(self.model, 'exclusive'):
        # Head/aux modules train with a 10x learning rate.
        for module in self.model.exclusive:
            params_list.append({
                'params': getattr(self.model, module).parameters(),
                'lr': args.lr * 10
            })
    self.optimizer = torch.optim.SGD(params_list,
                                     lr=args.lr,
                                     momentum=args.momentum,
                                     weight_decay=args.weight_decay)

    # lr scheduling
    self.lr_scheduler = WarmupPolyLR(self.optimizer,
                                     max_iters=args.max_iters,
                                     power=0.9,
                                     warmup_factor=args.warmup_factor,
                                     warmup_iters=args.warmup_iters,
                                     warmup_method=args.warmup_method)

    if args.distributed:
        self.model = nn.parallel.DistributedDataParallel(
            self.model,
            device_ids=[args.local_rank],
            output_device=args.local_rank)

    # evaluation metrics
    self.metric = SegmentationMetric(train_dataset.num_class)
    self.best_pred = 0.0
def __init__(self, args, logger):
    """Build the distributed training pipeline with logging: TensorBoard
    writer (rank 0 only), metric logger, data loaders, model, checkpoint
    resume, criterion, SGD optimizer with per-module LR groups, LR
    scheduler, DDP wrap, and metrics."""
    self.args = args
    self.logger = logger
    # Only the rank-0 process writes TensorBoard events.
    if get_rank() == 0:
        TBWriter.init(
            os.path.join(args.project_dir, args.task_dir, "tbevents")
        )
    self.device = torch.device(args.device)
    self.meters = MetricLogger(delimiter="  ")

    # image transform (ImageNet mean/std)
    input_transform = transforms.Compose(
        [
            transforms.ToTensor(),
            transforms.Normalize(
                [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
            ),
        ]
    )

    # dataset and dataloader
    data_kwargs = {
        "transform": input_transform,
        "base_size": args.base_size,
        "crop_size": args.crop_size,
        "root": args.dataroot,
    }
    train_dataset = get_segmentation_dataset(
        args.dataset, split="train", mode="train", **data_kwargs
    )
    val_dataset = get_segmentation_dataset(
        args.dataset, split="val", mode="val", **data_kwargs
    )
    # Iteration bookkeeping used by the batch sampler and LR schedule.
    args.iters_per_epoch = len(train_dataset) // (
        args.num_gpus * args.batch_size
    )
    args.max_iters = args.epochs * args.iters_per_epoch

    train_sampler = make_data_sampler(
        train_dataset, shuffle=True, distributed=args.distributed
    )
    train_batch_sampler = make_batch_data_sampler(
        train_sampler, args.batch_size, args.max_iters
    )
    val_sampler = make_data_sampler(val_dataset, False, args.distributed)
    val_batch_sampler = make_batch_data_sampler(
        val_sampler, args.batch_size
    )
    self.train_loader = data.DataLoader(
        dataset=train_dataset,
        batch_sampler=train_batch_sampler,
        num_workers=args.workers,
        pin_memory=True,
    )
    self.val_loader = data.DataLoader(
        dataset=val_dataset,
        batch_sampler=val_batch_sampler,
        num_workers=args.workers,
        pin_memory=True,
    )

    # create network — SyncBatchNorm only when running distributed
    BatchNorm2d = nn.SyncBatchNorm if args.distributed else nn.BatchNorm2d
    self.model = get_segmentation_model(
        model=args.model,
        dataset=args.dataset,
        backbone=args.backbone,
        aux=args.aux,
        jpu=args.jpu,
        norm_layer=BatchNorm2d,
    ).to(self.device)

    # resume checkpoint if needed
    if args.resume:
        if os.path.isfile(args.resume):
            name, ext = os.path.splitext(args.resume)
            # BUG FIX: `ext == ".pkl" or ".pth"` was always truthy (a
            # non-empty string literal is truthy); test membership instead.
            assert ext in (
                ".pkl",
                ".pth",
            ), "Sorry only .pth and .pkl files supported."
            print("Resuming training, loading {}...".format(args.resume))
            self.model.load_state_dict(
                torch.load(
                    args.resume,
                    map_location=lambda storage, loc: storage,
                )
            )

    # create criterion
    self.criterion = get_segmentation_loss(
        args.model,
        use_ohem=args.use_ohem,
        aux=args.aux,
        aux_weight=args.aux_weight,
        ignore_index=-1,
    ).to(self.device)

    # optimizer, for model just includes pretrained, head and auxlayer
    params_list = list()
    if hasattr(self.model, "pretrained"):
        params_list.append(
            {"params": self.model.pretrained.parameters(), "lr": args.lr}
        )
    if hasattr(self.model, "exclusive"):
        # Head/aux modules train with a scaled learning rate.
        for module in self.model.exclusive:
            params_list.append(
                {
                    "params": getattr(self.model, module).parameters(),
                    "lr": args.lr * args.lr_scale,
                }
            )
    self.optimizer = torch.optim.SGD(
        params_list,
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay,
    )

    # lr scheduling
    self.lr_scheduler = get_lr_scheduler(self.optimizer, args)

    if args.distributed:
        self.model = nn.parallel.DistributedDataParallel(
            self.model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
        )

    # evaluation metrics
    self.metric = SegmentationMetric(train_dataset.num_class)
    self.best_pred = 0.0