Example #1
    def __init__(self, args):
        self.args = args
        self.vs = vs(args.nice)

        #Dataloader
        kwargs = {"num_workers": args.workers, 'pin_memory': True}
        if self.args.dataset == 'bdd':
            _, _, self.test_loader, self.nclass = make_data_loader(
                args, **kwargs)
        else:  # self.args.dataset == 'nice'
            self.test_loader, self.nclass = make_data_loader(args, **kwargs)
        # else:
        #     raise NotImplementedError

        ### Load models
        #backs = ["resnet", "resnet152"]
        backs = ["resnet", "ibn", "resnet152"]
        check = './ckpt'
        checks = ["herbrand.pth.tar", "ign85.12.pth.tar", "r152_85.20.pth.tar"]
        self.models = []
        self.M = len(backs)
        # define models
        for i in range(self.M):
            model = DeepLab(num_classes=self.nclass,
                            backbone=backs[i],
                            output_stride=16,
                            Norm=gn,
                            freeze_bn=False)
            self.models.append(model)
            self.models[i] = torch.nn.DataParallel(
                self.models[i], device_ids=self.args.gpu_ids)
            patch_replication_callback(self.models[i])
            self.models[i] = self.models[i].cuda()
        # load checkpoints
        for i in range(self.M):
            resume = os.path.join(check, checks[i])
            if not os.path.isfile(resume):
                raise RuntimeError(
                    "=> no checkpoint found at '{}'".format(resume))
            checkpoint = torch.load(resume)
            dicts = checkpoint['state_dict']
            model_dict = {}
            state_dict = self.models[i].module.state_dict()
            for k, v in dicts.items():
                if k in state_dict:
                    model_dict[k] = v
            state_dict.update(model_dict)
            self.models[i].module.load_state_dict(state_dict)
            print("{} loaded successfully".format(checks[i]))
Example #2
File: eval.py  Project: jamycheung/ISSAFE
    def __init__(self, args, logger):
        self.args = args
        self.logger = logger
        self.time_train = []
        self.args.evaluate = True
        self.args.merge = True
        kwargs = {'num_workers': args.workers, 'pin_memory': False}
        _, self.val_loader, _, self.num_class = make_data_loader(
            args, **kwargs)
        print('num_classes: ' + str(self.num_class))
        self.resize = args.crop_size if args.crop_size else [512, 1024]
        self.evaluator = Evaluator(self.num_class, self.logger)
        self.model = EDCNet(self.args.rgb_dim,
                            args.event_dim,
                            num_classes=self.num_class,
                            use_bn=True)
        if args.cuda:
            self.model = torch.nn.DataParallel(self.model,
                                               device_ids=self.args.gpu_ids)
            self.model = self.model.to(self.args.device)
            cudnn.benchmark = True
        print('Model loaded successfully!')
        assert os.path.exists(
            args.weight_path), "weight_path '{}' doesn't exist!".format(
                args.weight_path)
        self.new_state_dict = torch.load(args.weight_path,
                                         map_location='cuda:0')
        self.model = load_my_state_dict(self.model.module,
                                        self.new_state_dict['state_dict'])
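load_my_state_dict is not defined in this snippet; in code of this family it is usually a tolerant loader that copies tensors by name and skips anything that does not match. A sketch under that assumption (the real helper in the project may differ):

def load_my_state_dict(model, state_dict):
    # Copy tensors whose name and shape match the model; skip the rest.
    own_state = model.state_dict()
    for name, param in state_dict.items():
        if name in own_state and own_state[name].shape == param.shape:
            own_state[name].copy_(param)
    return model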
Example #3
File: eval.py  Project: syed-cbot/RFNet
    def __init__(self, args):
        self.args = args
        self.time_train = []

        # Define Dataloader
        kwargs = {'num_workers': args.workers, 'pin_memory': False}
        _, self.val_loader, _, self.num_class = make_data_loader(
            args, **kwargs)
        print('num_classes: ' + str(self.num_class))

        # Define evaluator
        self.evaluator = Evaluator(self.num_class)

        # Define network
        self.resnet = resnet18(pretrained=True, efficient=False, use_bn=True)
        self.model = RFNet(self.resnet,
                           num_classes=self.num_class,
                           use_bn=True)

        if args.cuda:
            self.model = torch.nn.DataParallel(self.model,
                                               device_ids=self.args.gpu_ids)
            self.model = self.model.cuda()
            cudnn.benchmark = True  # accelerate inference
        print('Model loaded successfully!')

        # Load weights
        assert os.path.exists(
            args.weight_path), "weight_path '{}' doesn't exist!".format(
                args.weight_path)
        self.new_state_dict = torch.load(
            os.path.join(args.weight_path, 'model_best.pth'))

        self.model = load_my_state_dict(self.model.module,
                                        self.new_state_dict['state_dict'])
Example #4
    def __init__(self, args):
        self.args = args

        # Define Dataloader
        kwargs = {'num_workers': args.workers, 'pin_memory': True}
        _, _, self.test_loader, self.nclass = make_data_loader(args, **kwargs)

        self.model = None
        # Define network
        if self.args.backbone == 'unet':
            self.model = UNet(in_channels=4, n_classes=self.nclass)
            print("using UNet")
        elif self.args.backbone == 'unetNested':
            self.model = UNetNested(in_channels=4, n_classes=self.nclass)
            print("using UNetNested")
        else:
            raise ValueError("unknown backbone: {}".format(self.args.backbone))

        # Using cuda
        if args.cuda:
            self.model = self.model.cuda()

        if not os.path.isfile(args.checkpoint_file):
            raise RuntimeError("=> no checkpoint found at '{}'".format(
                args.checkpoint_file))
        checkpoint = torch.load(args.checkpoint_file)

        self.model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}'".format(args.checkpoint_file))
Example #5
    def __init__(self, args):
        self.args = args

        # configure datasetpath
        self.baseroot = None
        if args.dataset == 'pascal':
            self.baseroot = '/path/to/your/VOCdevkit/VOC2012/'
        ''' Not supported yet.
        # If you want to train on these datasets, modify the paths
        # here and implement the corresponding dataset constructor
        # (see /dataloader/datasets/pascal for reference).

        elif args.dataset == 'cityscapes':
            self.baseroot = '/path/to/your/cityscapes/'
        elif args.dataset == 'sbd':
            self.baseroot = '/path/to/your/sbd/'
        elif args.dataset == 'coco':
            self.baseroot = '/path/to/your/coco/'
        '''

        # Define Dataloader
        kwargs = {'num_workers': args.workers, 'pin_memory': True}
        self.test_loader, self.nclass = make_data_loader(
            self.baseroot, args, **kwargs)

        #define net model
        self.model = DeepLab(num_classes=self.nclass,
                             backbone=args.backbone,
                             output_stride=args.out_stride,
                             sync_bn=False,
                             freeze_bn=False).cuda()

        # self.model.module.load_state_dict(torch.load('./model_best.pth.tar', map_location='cpu'))
        self.evaluator = Evaluator(self.nclass)

        if args.cuda:
            self.model = torch.nn.DataParallel(self.model,
                                               device_ids=self.args.gpu_ids)
            patch_replication_callback(self.model)
            self.model = self.model.cuda()

        self.best_pred = 0.0

        if not os.path.isfile(args.resume):
            raise RuntimeError("=> no checkpoint found at '{}'".format(
                args.resume))
        checkpoint = torch.load(args.resume)
        args.start_epoch = checkpoint['epoch']
        if args.cuda:
            self.model.module.load_state_dict(checkpoint['state_dict'])
        else:
            self.model.load_state_dict(checkpoint['state_dict'])

        self.best_pred = checkpoint['best_pred']
        print("=> loaded checkpoint '{}' (epoch {})".format(
            args.resume, checkpoint['epoch']))
Example #6
    def __init__(self):
        init_seeds(opt.seed)
        self.best_pred = 0.
        self.cutoff = -1  # backbone ends at the cutoff layer

        # Define Saver
        self.saver = Saver(opt, hyp, mode='val')

        # visualize
        if opt.visualize:
            self.summary = TensorboardSummary(self.saver.experiment_dir)
            self.writer = self.summary.create_summary()

        if 'pw' not in opt.arc:  # remove BCELoss positive weights
            hyp['cls_pw'] = 1.
            hyp['obj_pw'] = 1.

        self.img_size = opt.img_size

        # Define Dataloader
        self.val_dataset, self.val_loader = make_data_loader(opt,
                                                             hyp,
                                                             train=False)
        self.num_classes = self.val_dataset.num_classes
        self.vnb = len(self.val_loader)

        # Initialize model
        self.model = Darknet(opt.cfg, self.img_size, opt.arc).to(opt.device)
        self.model.nc = self.num_classes  # attach number of classes to model
        self.model.arc = opt.arc  # attach yolo architecture
        self.model.hyp = hyp  # attach hyperparameters to model

        # load weight
        if os.path.isfile(opt.pre):
            print("=> loading checkpoint '{}'".format(opt.pre))
            checkpoint = torch.load(opt.pre)
            self.epoch = checkpoint['epoch']
            self.best_pred = checkpoint['best_pred']
            self.model.load_state_dict(checkpoint['state_dict'])
            # self.optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                opt.pre, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(opt.pre))

        # Mixed precision https://github.com/NVIDIA/apex
        # (evaluation only, so there is no optimizer to pass in)
        if mixed_precision:
            self.model = amp.initialize(self.model,
                                        opt_level='O1',
                                        verbosity=0)

        # Initialize distributed training
        if len(opt.gpu_id) > 1:
            print("Using multiple gpu")
            self.model = torch.nn.DataParallel(self.model,
                                               device_ids=opt.gpu_id)
Example #7
    def __init__(self, args):
        self.args = args

        # Define Saver
        self.saver = Saver(args)
        self.saver.save_experiment_config()
        # Define Tensorboard Summary
        self.summary = TensorboardSummary(self.saver.experiment_dir)
        self.writer = self.summary.create_summary()

        # Define Dataloader
        kwargs = {'num_workers': args.workers, 'pin_memory': True}
        self.train_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(
            args, **kwargs)

        # Define weight
        self.temporal_weight = args.temporal_weight
        self.spatial_weight = args.spatial_weight

        # Define network
        temporal_model = Model(name='vgg16_bn', num_classes=101,
                               is_flow=True).get_model()
        spatial_model = Model(name='vgg16_bn', num_classes=101,
                              is_flow=False).get_model()

        # Define Optimizer
        #optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
        temporal_optimizer = torch.optim.Adam(temporal_model.parameters(),
                                              lr=args.temporal_lr)
        spatial_optimizer = torch.optim.Adam(spatial_model.parameters(),
                                             lr=args.spatial_lr)

        # Define Criterion
        self.temporal_criterion = nn.BCELoss().cuda()
        self.spatial_criterion = nn.BCELoss().cuda()

        self.temporal_model, self.temporal_optimizer = temporal_model, temporal_optimizer
        self.spatial_model, self.spatial_optimizer = spatial_model, spatial_optimizer

        # Define Evaluator
        self.top1_eval = Evaluator(self.nclass)

        # Using cuda
        if args.cuda:
            self.temporal_model = torch.nn.DataParallel(
                self.temporal_model, device_ids=self.args.gpu_ids)
            patch_replication_callback(self.temporal_model)
            self.temporal_model = self.temporal_model.cuda()

            self.spatial_model = torch.nn.DataParallel(
                self.spatial_model, device_ids=self.args.gpu_ids)
            patch_replication_callback(self.spatial_model)
            self.spatial_model = self.spatial_model.cuda()

        # Resuming checkpoint
        self.best_accuracy = 0.0
Example #8
def main():
    args = arguments()
    seed(args)

    model = DeepLab(backbone='mobilenet',
                    output_stride=16,
                    num_classes=21,
                    sync_bn=False)
    model.eval()

    from aimet_torch import batch_norm_fold
    from aimet_torch import utils
    args.input_shape = (1, 3, 513, 513)
    batch_norm_fold.fold_all_batch_norms(model, args.input_shape)
    utils.replace_modules_of_type1_with_type2(model, torch.nn.ReLU6,
                                              torch.nn.ReLU)

    if args.checkpoint_path:
        model.load_state_dict(torch.load(args.checkpoint_path))
    else:
        raise ValueError('checkpoint path {} must be specified'.format(
            args.checkpoint_path))

    data_loader_kwargs = {'worker_init_fn': work_init, 'num_workers': 0}
    train_loader, val_loader, test_loader, num_class = make_data_loader(
        args, **data_loader_kwargs)
    eval_func_quant = model_eval(args, val_loader)
    eval_func = model_eval(args, val_loader)

    from aimet_common.defs import QuantScheme
    from aimet_torch.quantsim import QuantizationSimModel
    if hasattr(args, 'quant_scheme'):
        if args.quant_scheme == 'range_learning_tf':
            quant_scheme = QuantScheme.training_range_learning_with_tf_init
        elif args.quant_scheme == 'range_learning_tfe':
            quant_scheme = QuantScheme.training_range_learning_with_tf_enhanced_init
        elif args.quant_scheme == 'tf':
            quant_scheme = QuantScheme.post_training_tf
        elif args.quant_scheme == 'tf_enhanced':
            quant_scheme = QuantScheme.post_training_tf_enhanced
        else:
            raise ValueError("Got unrecognized quant_scheme: " +
                             args.quant_scheme)
        kwargs = {
            'quant_scheme': quant_scheme,
            'default_param_bw': args.default_param_bw,
            'default_output_bw': args.default_output_bw,
            'config_file': args.config_file
        }
    else:
        raise ValueError('args must specify quant_scheme')
    print(kwargs)
    sim = QuantizationSimModel(model.cpu(),
                               input_shapes=args.input_shape,
                               **kwargs)
    sim.compute_encodings(eval_func_quant, (1024, True))
    post_quant_top1 = eval_func(sim.model.cuda(), (99999999, True))
    print("Post Quant mIoU :", post_quant_top1)
Example #9
    def __init__(self, args):
        self.args = args
        self.saver = Saver(args)
        self.saver.save_experiment_config()
        self.summary = TensorboardSummary(self.saver.experiment_dir)
        self.writer = self.summary.create_summary()
        self.logger = self.saver.create_logger()

        kwargs = {'num_workers': args.workers, 'pin_memory': False}
        self.train_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(args, **kwargs)
        self.model = EDCNet(args.rgb_dim, args.event_dim, num_classes=self.nclass, use_bn=True)
        train_params = [{'params': self.model.random_init_params(),
                         'lr': 10*args.lr, 'weight_decay': 10*args.weight_decay},
                        {'params': self.model.fine_tune_params(),
                         'lr': args.lr, 'weight_decay': args.weight_decay}]
        self.optimizer = torch.optim.Adam(train_params, lr=args.lr, weight_decay=args.weight_decay)
        if args.cuda:
            self.model = torch.nn.DataParallel(self.model, device_ids=self.args.gpu_ids)
            patch_replication_callback(self.model)
            self.model = self.model.to(self.args.device)
        if args.use_balanced_weights:
            root_dir = Path.db_root_dir(args.dataset)[0] if isinstance(Path.db_root_dir(args.dataset), list) else Path.db_root_dir(args.dataset)
            classes_weights_path = os.path.join(root_dir,
                                                args.dataset + '_classes_weights.npy')
            if os.path.isfile(classes_weights_path):
                weight = np.load(classes_weights_path)
            else:
                weight = calculate_weigths_labels(args.dataset, self.train_loader, self.nclass, classes_weights_path)
            weight = torch.from_numpy(weight.astype(np.float32))
        else:
            weight = None

        self.criterion = SegmentationLosses(weight=weight, cuda=args.cuda).build_loss(mode=args.loss_type)
        self.criterion_event = SegmentationLosses(weight=weight, cuda=args.cuda).build_loss(mode='event')
        self.scheduler = LR_Scheduler(args.lr_scheduler, args.lr, args.epochs, len(self.train_loader), warmup_epochs=5)

        self.evaluator = Evaluator(self.nclass, self.logger)
        self.saver.save_model_summary(self.model)
        self.best_pred = 0.0
        if args.resume is not None:
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location='cuda:0')
            args.start_epoch = checkpoint['epoch']
            if args.cuda:
                self.model.module.load_state_dict(checkpoint['state_dict'])
            else:
                self.model.load_state_dict(checkpoint['state_dict'])
            if not args.ft:
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.best_pred = checkpoint['best_pred']
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))

        if args.ft:
            args.start_epoch = 0
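calculate_weigths_labels (the misspelling is the actual identifier in this code family) computes per-class weights from label frequencies over the training set. A minimal sketch of one common scheme, inverse log frequency, assuming the loader yields {'label': ...} batches as above:

import numpy as np

def compute_class_weights(loader, num_classes):
    # Count pixels per class across the whole set, then weight each
    # class by inverse log frequency so rare classes count for more.
    counts = np.zeros(num_classes)
    for sample in loader:
        labels = sample['label'].numpy().astype(int)
        valid = (labels >= 0) & (labels < num_classes)
        counts += np.bincount(labels[valid].ravel(), minlength=num_classes)
    freq = counts / counts.sum()
    return 1.0 / np.log(1.02 + freq)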
Example #10
    def __init__(self, args):
        kwargs = {'num_workers': 4, 'pin_memory': True}
        self.source_loader, self.target_loader, self.test_loader, self.nclass = make_data_loader(
            args, **kwargs)
        self.tbar = tqdm(self.test_loader, desc='\r')
        self.trainer = adda_trainer(args, 2)
        self.evaluator = Evaluator(2)
        self.best_IoU = {'disc': 0.77, 'cup': 0.65}
        self.attempt = 3
        self.validation(args, self.trainer.target_model, self.tbar)
        self.trainer_dda(args)
Example #11
    def __init__(self, args):
        self.args = args

        # Define Saver
        self.saver = Saver(args)
        self.saver.save_experiment_config()
        # Define Tensorboard Summary
        self.summary = TensorboardSummary(self.saver.experiment_dir)
        self.writer = self.summary.create_summary()
        # PATH = args.path
        # Define Dataloader
        kwargs = {'num_workers': args.workers, 'pin_memory': True}
        self.train_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(args, **kwargs)

        # Define network
        model = SCNN(nclass=self.nclass, backbone=args.backbone,
                     output_stride=args.out_stride, cuda=args.cuda)

        # Define Optimizer
        optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum,
                                    weight_decay=args.weight_decay, nesterov=args.nesterov)

        # Define Criterion
        weight = None
        self.criterion = SegmentationLosses(weight=weight, cuda=args.cuda).build_loss(mode=args.loss_type)
        self.model, self.optimizer = model, optimizer
        
        # Define Evaluator
        self.evaluator = Evaluator(self.nclass)
        # Define lr scheduler
        self.scheduler = LR_Scheduler(args.lr_scheduler, args.lr,
                                      args.epochs, len(self.train_loader))

        # Using cuda
        if args.cuda:
            self.model = torch.nn.DataParallel(self.model, device_ids=self.args.gpu_ids)
            # patch_replication_callback(self.model)
            self.model = self.model.cuda()

        # Resuming checkpoint
        self.best_pred = 0.0
        if args.resume is not None:
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'" .format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            if args.cuda:
                self.model.module.load_state_dict(checkpoint['state_dict'])
            else:
                self.model.load_state_dict(checkpoint['state_dict'])
            if not args.ft:
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.best_pred = checkpoint['best_pred']
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
Example #12
def create_dataloader(opt, path, imgsz, stride, hyp=None, pad=0.0, prefix='',
                      void_classes=[], valid_classes=[], cache=False, rect=False, rank=-1):
    cfg = get_cfg_defaults()
    cfg.merge_from_file('../dataloaders/configs/sunrgbd.yaml')
    cfg.merge_from_list(['DATASET.ANNOTATION_TYPE', 'bbox',
                         'DATASET.NO_TRANSFORMS', True,
                         'TRAIN.BATCH_SIZE', 1])

    train_loader, val_loader, test_loader, num_class = make_data_loader(cfg)
    dataloader = train_loader
    return dataloader, dataloader.dataset
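get_cfg_defaults is the usual yacs entry point: a module that builds a CfgNode tree of defaults, which merge_from_file and merge_from_list then override. A minimal sketch covering only the keys touched above (the real project config surely defines many more):

from yacs.config import CfgNode as CN

def get_cfg_defaults():
    # Default tree; overridden per-run by YAML files and key/value lists.
    cfg = CN()
    cfg.DATASET = CN()
    cfg.DATASET.ANNOTATION_TYPE = 'segmentation'
    cfg.DATASET.NO_TRANSFORMS = False
    cfg.TRAIN = CN()
    cfg.TRAIN.BATCH_SIZE = 8
    return cfg.clone()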
Example #13
    def __init__(self, args):
        self.args = args
        self.path = Path()

        # Saver
        self.saver = Saver(args, self.path.event)  # Define Saver

        # Define Dataloader
        kwargs = {'num_workers': args.workers, 'pin_memory': True}
        self.train_loader, self.val_loader = make_data_loader(args, **kwargs)

        # Define network
        self.model = mymodels.resnet152(pretrained=True)
        # self.model = mymodels.resnet50(pretrained=True)
        # if args.pretrained:
        # 	self.model = mymodels.resnet101(pretrained=True)
        # else:
        # 	self.model = mymodels.resnet101()

        # Binary classification
        num_ftrs = self.model.fc.in_features
        self.model.fc = nn.Linear(num_ftrs, 2)  # len(class_names) = 2
        del self.model.maxpool

        # Resuming checkpoint
        if args.resume is not None:  # path to resuming file
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'".format(
                    args.resume))
            state_dict = torch.load(args.resume)
            print('=> loaded pre-trained model from {}'.format(args.resume))
            best_acc = state_dict['best_acc']  # accuracy
            print('=>model top 1 accuracy: %0.3f' % best_acc)
            self.model.load_state_dict(state_dict['state_dict'])

        # Using cuda
        if args.cuda:
            self.model = self.model.cuda()

        # define loss function (criterion) and optimizer
        self.criterion = nn.CrossEntropyLoss().cuda()  # cross-entropy loss
        self.optimizer = torch.optim.SGD(self.model.parameters(),
                                         args.lr,
                                         momentum=args.momentum,
                                         weight_decay=args.weight_decay)

        # Define lr scheduler
        self.scheduler = lr_scheduler.StepLR(self.optimizer,
                                             step_size=10,
                                             gamma=0.1)

        # Record
        self.best_acc = 0.0
Example #14
def test_model(cfg, report_file, confusion_file=None):
    torch.manual_seed(cfg.SYSTEM.SEED)
    train_loader, val_loader, test_loader, num_classes = make_data_loader(cfg)
    tester = Tester(cfg)
    output, mat, metrics = tester.run(val_loader, num_classes)

    with open(report_file, 'w') as f:
        f.write(output)

    if confusion_file is not None:
        sio.savemat(confusion_file, {'confusion': mat})

    return metrics
Example #15
    def __init__(self, args):
        warnings.filterwarnings('ignore')
        assert torch.cuda.is_available()
        torch.backends.cudnn.benchmark = True
        model_fname = 'data/deeplab_{0}_{1}_v3_{2}_epoch%d.pth'.format(args.backbone, args.dataset, args.exp)
        if args.dataset == 'pascal':
            raise NotImplementedError
        elif args.dataset == 'cityscapes':
            kwargs = {'num_workers': args.workers, 'pin_memory': True, 'drop_last': True}
            dataset_loader, num_classes = dataloaders.make_data_loader(args, **kwargs)
            args.num_classes = num_classes
        elif args.dataset == 'marsh':
            kwargs = {'num_workers': args.workers, 'pin_memory': True, 'drop_last': True}
            dataset_loader, val_loader, test_loader, num_classes = dataloaders.make_data_loader(args, **kwargs)
            args.num_classes = num_classes
        else:
            raise ValueError('Unknown dataset: {}'.format(args.dataset))

        if args.backbone == 'autodeeplab':
            model = Retrain_Autodeeplab(args)
            model.load_state_dict(torch.load(r"./run/marsh/deeplab-autodeeplab/model_best.pth.tar")['state_dict'], strict=False)
        else:
            raise ValueError('Unknown backbone: {}'.format(args.backbone))
Example #16
    def __init__(self, args):
        self.args = args

        # Define Saver
        self.saver = Saver(args)
        self.saver.save_experiment_config()
        # Define Tensorboard Summary
        self.summary = TensorboardSummary(self.saver.experiment_dir)
        self.writer = self.summary.create_summary()

        # Define Dataloader
        kwargs = {'num_workers': args.workers, 'pin_memory': True}
        self.train_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(
            args, **kwargs)

        # Define network
        self.model = DeepLab(num_classes=self.nclass,
                             backbone=args.backbone,
                             output_stride=args.out_stride,
                             sync_bn=args.sync_bn,
                             freeze_bn=args.freeze_bn)

        # Using cuda
        if args.cuda:
            self.model = torch.nn.DataParallel(self.model,
                                               device_ids=self.args.gpu_ids)
            patch_replication_callback(self.model)
            self.model = self.model.cuda()

        # Resuming checkpoint
        self.best_pred = 0.0
        if args.resume is not None:
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'".format(
                    args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            if args.cuda:
                self.model.module.load_state_dict(checkpoint['state_dict'])
            else:
                self.model.load_state_dict(checkpoint['state_dict'])
            if not args.ft:
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.best_pred = checkpoint['best_pred']
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))

        # Clear start epoch if fine-tuning
        if args.ft:
            args.start_epoch = 0
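Every resume branch in these examples reads the same four keys from the checkpoint: 'epoch', 'state_dict', 'optimizer', and 'best_pred'. The matching save side, as a sketch of what the Saver classes presumably wrap:

import torch

def save_checkpoint(model, optimizer, epoch, best_pred, path):
    # Unwrap DataParallel so parameter names match on single-GPU reloads.
    net = model.module if hasattr(model, 'module') else model
    torch.save({
        'epoch': epoch,
        'state_dict': net.state_dict(),
        'optimizer': optimizer.state_dict(),
        'best_pred': best_pred,
    }, path)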
Example #17
    def __init__(self, args):
        self.args = args
        
        # Define Dataloader
        kwargs = {'num_workers': args.workers, 'pin_memory': True}
        self.train_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(args, **kwargs)

        # # Define network
        model = DeepLab(num_classes=self.nclass,
                        backbone=args.backbone)
        # model = DeepLab(num_classes=self.nclass,
        #                 backbone=args.backbone,
        #                 output_stride=args.out_stride,
        #                 sync_bn=args.sync_bn,
        #                 freeze_bn=args.freeze_bn)
        self.model = model
        
        # Define Evaluator
        self.evaluator = Evaluator(self.nclass)

        # Using cuda
        if args.cuda:
            self.model = torch.nn.DataParallel(self.model, device_ids=self.args.gpu_ids)
            patch_replication_callback(self.model)
            self.model = self.model.cuda()

        # Resuming checkpoint
        self.best_pred = 0.0
        if args.resume is not None:
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'" .format(args.resume))
            if args.cuda:
                checkpoint = torch.load(args.resume)
            else:
                checkpoint = torch.load(args.resume, map_location='cpu')
            args.start_epoch = checkpoint['epoch']
            if args.cuda:
                self.model.module.load_state_dict(checkpoint['state_dict'])
            else:
                self.model.load_state_dict(checkpoint['state_dict'])

            # if not args.ft:
            #     self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.best_pred = checkpoint['best_pred']
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
Example #18
    def __init__(self, args):
        if not os.path.isfile(args.model):
            raise RuntimeError("no checkpoint found at '{}'".format(args.model))
        self.args = args
        self.color_map = get_pascal_labels()
        self.test_loader, self.ids, self.nclass = make_data_loader(args)

        #Define model
        model = DeepLab(num_classes=self.nclass,
                        backbone=args.backbone,
                        output_stride=args.out_stride,
                        sync_bn=False,
                        freeze_bn=False)
        
        self.model = model
        device = torch.device('cpu')
        checkpoint = torch.load(args.model, map_location=device)
        self.model.load_state_dict(checkpoint['state_dict'])
        self.evaluator = Evaluator(self.nclass)
Example #19
	def __init__(self, args):
		self.args = args
		path = Path()

		# image to patch
		n_patch = int(16*12) #192
		eval_per = int(n_patch / args.batch_size)

		# path
		self.event_dir = path.event
		self.model_dir = os.path.join(self.event_dir, 'run', 'checkpoint.pth.tar')

		test_list = args.test_list
		with open(test_list) as f:
			self.lines = f.readlines()

		# range
		self.ids  = [i.split(' ')[0] for i in self.lines]
		self.mins = [float(i.split(' ')[1]) for i in self.lines]
		self.maxs = [float(i.split(' ')[2]) for i in self.lines]

		# Define Dataloader
		kwargs = {'num_workers': args.workers, 'pin_memory': True}
		self.test_loader = make_data_loader(args, **kwargs)

		# Define network
		self.model = mymodels.resnet152()

		# Binary classification
		num_ftrs = self.model.fc.in_features
		self.model.fc = nn.Linear(num_ftrs, 2) # len(class_names) = 2
		del self.model.maxpool

		# Resuming checkpoint
		state_dict = torch.load(self.model_dir)
		print('=> loaded pre-trained model')
		best_acc = state_dict['best_acc']  # accuracy
		print('=> model accuracy: %0.3f' % best_acc)
		self.model.load_state_dict(state_dict['state_dict'])

		# Using cuda
		if args.cuda:
			self.model = self.model.cuda()
Example #20
def test(args):
    kwargs = {'num_workers': 1, 'pin_memory': True}
    train_loader, val_loader, test_loader, nclass = make_data_loader(args, **kwargs)
    model = DeepLab(num_classes=nclass,
                    backbone=args.backbone,
                    output_stride=args.out_stride,
                    sync_bn=False)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.load_state_dict(torch.load(args.pretrained, map_location=device)['state_dict'])
    model.eval()
    tbar = tqdm(test_loader) ## train test dev
    for i, sample in enumerate(tbar):
        image, target = sample['image'], sample['label']
        # original_image = image
        if args.use_mixup:
            image, targets_a, targets_b, lam = mixup_data(image, target,
                                                          args.mixup_alpha, use_cuda=False)
        # mixed_image = image
        # image = norm(image.permute(0,2,3,1)).permute(0,3,1,2)
        output = model(image)
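mixup_data above follows the standard mixup recipe: blend each image with a shuffled partner using a Beta-sampled coefficient and return both label sets plus the coefficient. A sketch matching the call signature, assuming the usual reference implementation:

import numpy as np
import torch

def mixup_data(x, y, alpha=1.0, use_cuda=False):
    # Convex-combine each sample with a randomly chosen partner.
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.0
    index = torch.randperm(x.size(0))
    if use_cuda:
        index = index.cuda()
    mixed_x = lam * x + (1 - lam) * x[index]
    return mixed_x, y, y[index], lam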
Example #21
    def __init__(self, cfg):
        self.cfg = cfg

        # Define Dataloader
        kwargs = {
            'num_workers': self.cfg.SYSTEM.NUM_WORKERS,
            'pin_memory': True
        }

        # Define Model and Load from File
        self.model = load_model(cfg)

        # Define Criterion
        # whether to use class balanced weights
        if self.cfg.TRAIN.USE_BALANCED_WEIGHTS:
            classes_weights_path = os.path.join(
                cfg.DATASET.ROOT, cfg.DATASET.NAME + '_classes_weights.npy')
            if os.path.isfile(classes_weights_path):
                weight = np.load(classes_weights_path)
            else:
                train_loader = make_data_loader(cfg, **kwargs)[0]
                weight = calculate_weights_labels(cfg.DATASET.ROOT,
                                                  cfg.DATASET.NAME,
                                                  train_loader,
                                                  cfg.DATASET.N_CLASSES)
            weight = torch.from_numpy(weight.astype(np.float32))
        else:
            weight = None
        self.criterion = SegmentationLosses(
            weight=weight, cuda=self.cfg.SYSTEM.CUDA).build_loss(
                mode=self.cfg.MODEL.LOSS_TYPE)

        # Using cuda
        if self.cfg.SYSTEM.CUDA:
            print("Using CUDA")
            self.model = torch.nn.DataParallel(
                self.model, device_ids=self.cfg.SYSTEM.GPU_IDS)
            patch_replication_callback(self.model)
            self.model = self.model.cuda()

        self.evaluator = SegmentationEvaluator(cfg.DATASET.N_CLASSES)
        self.img_evaluator = ImageSegmentationEvaluator(cfg.DATASET.N_CLASSES)
Example #22
def test(args):
    kwargs = {'num_workers': 1, 'pin_memory': True}
    _, val_loader, _, nclass = make_data_loader(args, **kwargs)

    checkpoint = torch.load(args.ckpt)
    if checkpoint is None:
        raise ValueError

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = DeepLab(num_classes=nclass,
                    backbone='resnet',
                    output_stride=16,
                    sync_bn=True,
                    freeze_bn=False)
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    model.to(device)
    torch.set_grad_enabled(False)

    tbar = tqdm(val_loader)
    num_img_tr = len(val_loader)
    for i, sample in enumerate(tbar):
        x1, x2, y1, y2 = [
            int(item) for item in sample['img_meta']['bbox_coord']
        ]  # bbox coord
        w, h = x2 - x1, y2 - y1
        img = sample['img_meta']['image'].squeeze().cpu().numpy()
        img_w, img_h = img.shape[:2]

        inputs = sample['image'].cuda()
        output = model(inputs).squeeze().cpu().numpy()
        pred = np.argmax(output, axis=0)
        result = decode_segmap(pred, dataset=args.dataset, plot=False)

        result = imresize(result, (w, h))
        result_padding = np.zeros(img.shape, dtype=np.uint8)
        result_padding[y1:y2, x1:x2] = result
        result = img // 2 + result_padding * 127
        result[result > 255] = 255
        plt.imsave(
            os.path.join('run', args.dataset, 'deeplab-resnet', 'output',
                         str(i)), result)
Example #23
def main(cfg):
    datasets = make_data_loader(cfg)
    for dataset in datasets[:3]:
        img_list = []
        if dataset is not None:
            for ii, sample in enumerate(tqdm(dataset)):
                for jj in range(len(sample["id"])):
                    if cfg.DATASET.NAME == 'cityscapes':
                        filepath = sample['id'][jj].replace(
                            'leftImg8bit',
                            'normalized_depth').replace('png', 'txt')
                        img_list.append(sample['id'][jj])
                    elif cfg.DATASET.NAME in ['sunrgbd', 'coco']:
                        id = dataset.dataset.coco_id_index[sample['id']
                                                           [jj].item()]
                        img_path, depth_path, img_id = dataset.dataset.get_path(
                            id)
                        assert img_id == sample['id'][jj].item()
                        filepath = 'normalized_depth'.join(
                            img_path.rsplit('image', 1))
                        filepath = os.path.splitext(filepath)[0] + '.jpg'
                        img_list.append(
                            dataset.dataset.coco.loadImgs(img_id)[0]
                            ['file_name'])

                    dir = os.path.dirname(filepath)
                    if not os.path.exists(dir):
                        os.makedirs(dir)
                    #if not os.path.exists(filepath):
                    depth = ((sample['depth'][jj].numpy() + 1) * 125)
                    depth[depth < 0] = 0
                    depth[depth > 255] = 255

                    depth_img = Image.fromarray(depth.astype(np.uint8))
                    with open(filepath, 'wb') as fp:
                        depth_img.save(fp)

            f = '{}/image_list_{}.txt'.format(cfg.DATASET.ROOT,
                                              dataset.dataset.split)
            with open(f, 'w') as fp:
                fp.write('\n'.join(img_list))
Example #24
def test(model_path):
    args = makeargs()
    kwargs = {'num_workers': args.workers, 'pin_memory': True}
    train_loader, val_loader, test_loader, nclass = make_data_loader(args, **kwargs)
    print('Loading model...')
    model = DeepLab(num_classes=8, backbone='drn', output_stride=args.output_stride,
                    sync_bn=args.sync_bn, freeze_bn=args.freeze_bn)
    model.eval()
    checkpoint = torch.load(model_path)
    model = model.cuda()
    model.load_state_dict(checkpoint['state_dict'])
    print('Done')
    criterion = SegmentationLosses(weight=None, cuda=args.cuda).build_loss(mode=args.loss_type)
    evaluator = Evaluator(nclass)
    evaluator.reset()

    print('Model inferring')
    test_dir = 'test_example1'
    test_loss = 0.0
    tbar = tqdm(test_loader, desc='\r')
    for i, sample in enumerate(tbar):
        image, target = sample['image'], sample['label']
        image, target = image.cuda(), target.cuda()

        with torch.no_grad():
            output = model(image)
        loss = criterion(output, target)
        test_loss += loss.item()
        tbar.set_description('Test loss: %.3f' % (test_loss / (i + 1)))
        pred = output.data.cpu().numpy()
        target = target.cpu().numpy()
        pred = np.argmax(pred, axis=1)
        evaluator.add_batch(target, pred)


    print(image.shape)
    Acc = evaluator.Pixel_Accuracy()
    mIoU = evaluator.Mean_Intersection_over_Union()
    print('testing:')
    print("Acc:{}, mIoU:{},".format(Acc, mIoU))
    print('Loss: %.3f' % test_loss)
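The Evaluator used throughout these examples accumulates a confusion matrix and derives its metrics from it. A minimal sketch of the two metrics queried here, with the usual layout (rows = ground truth, columns = prediction) assumed:

import numpy as np

class ConfusionEvaluator:
    def __init__(self, num_class):
        self.num_class = num_class
        self.matrix = np.zeros((num_class, num_class))

    def reset(self):
        self.matrix = np.zeros((self.num_class, self.num_class))

    def add_batch(self, gt, pred):
        # Accumulate counts over valid pixels only.
        valid = (gt >= 0) & (gt < self.num_class)
        idx = self.num_class * gt[valid].astype(int) + pred[valid].astype(int)
        self.matrix += np.bincount(idx, minlength=self.num_class ** 2).reshape(
            self.num_class, self.num_class)

    def Pixel_Accuracy(self):
        return np.diag(self.matrix).sum() / self.matrix.sum()

    def Mean_Intersection_over_Union(self):
        inter = np.diag(self.matrix)
        union = self.matrix.sum(axis=0) + self.matrix.sum(axis=1) - inter
        return np.nanmean(inter / union)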
Example #25
    def __init__(self):
        self.backbone = 'resnet34'
        self.lr = 1e-4
        self.batch = 64
        self.crop_size = 384
        logger = logging.getLogger('train')

        # print settings
        logger.info('\nTRAINING SETTINGS')
        logger.info('###########################')
        logger.info(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        logger.info('backbone:{}, lr:{}, batch:{}, crop size:{}'.format(self.backbone, self.lr, self.batch, self.crop_size))
        logger.info('###########################\n')

        # model define
        self.train_loader, self.test_loader = make_data_loader(batch=self.batch, crop_size=self.crop_size)
        model = smp.PAN(encoder_name=self.backbone, encoder_weights='imagenet', in_channels=3, classes=2)
        self.model = nn.DataParallel(model.cuda())
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)
        self.criterion = nn.CrossEntropyLoss()
        self.epoch = 0
Example #26
    def initialize_model(self, args):
        self.args = args

        # Define Saver
        self.saver = Saver(self.args)
        self.saver.save_experiment_config()
        # Define Tensorboard Summary
        self.summary = TensorboardSummary(self.saver.experiment_dir)
        self.writer = self.summary.create_summary()
        # Define Dataloader
        kwargs = {'num_workers': self.args.workers, 'pin_memory': True}
        self.train_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(
            self.args, **kwargs)
        # Define network
        self.model = DeepLab(num_classes=self.nclass,
                             backbone=self.args.backbone,
                             output_stride=self.args.out_stride,
                             sync_bn=self.args.sync_bn,
                             freeze_bn=self.args.freeze_bn)
        # Define Evaluator
        self.evaluator = Evaluator(self.nclass)

        # Using cuda
        if self.args.cuda:
            self.model = torch.nn.DataParallel(self.model,
                                               device_ids=self.args.gpu_ids)
            patch_replication_callback(self.model)
            self.model = self.model.cuda()

        if not os.path.isfile(self.args.resume):
            raise RuntimeError("=> no checkpoint found at '{}'".format(
                self.args.resume))
        checkpoint = torch.load(self.args.resume)
        self.args.start_epoch = checkpoint['epoch']
        if self.args.cuda:
            self.model.module.load_state_dict(checkpoint['state_dict'])
        else:
            self.model.load_state_dict(checkpoint['state_dict'])
        self.model.eval()
        self.evaluator.reset()
Example #27
def test():
    # ---- create model ---------- ---------- ---------- ---------- ----------#
    net = make_model(args.model, num_classes=12).cuda()
    net = torch.nn.DataParallel(net)  # Multi-GPU

    # ---- load pretrained model --------- ---------- ----------#
    if os.path.isfile(args.resume):
        print("=> loading checkpoint '{}'".format(args.resume))
        cp_states = torch.load(args.resume)
        net.load_state_dict(cp_states['state_dict'], strict=True)
    else:
        raise Exception("=> NO checkpoint found at '{}'".format(args.resume))

    # ---- Data loader
    train_loader, val_loader = make_data_loader(args)

    torch.cuda.empty_cache()

    # ---- Evaluation
    v_prec, v_recall, v_iou, v_acc, v_ssc_iou, v_mean_iou = validate_on_dataset_stsdf(net, val_loader)
    print('Validate with TSDF:, p {:.1f}, r {:.1f}, IoU {:.1f}'.format(v_prec*100.0, v_recall*100.0, v_iou*100.0))
    print('pixel-acc {:.4f}, mean IoU {:.1f}, SSC IoU:{}'.format(v_acc*100.0, v_mean_iou*100.0, v_ssc_iou*100.0))
Example #28
def main(cfg):
    datasets = make_data_loader(cfg)
    for dataset in datasets[:3]:
        img_list = []
        if dataset is not None:
            for ii, sample in enumerate(tqdm(dataset)):
                for jj in range(len(sample["id"])):
                    if cfg.DATASET.NAME == 'cityscapes':
                        filepath = sample['id'][jj].replace(
                            'leftImg8bit', 'bbox').replace('png', 'txt')
                        img_list.append(sample['id'][jj])
                    elif cfg.DATASET.NAME in ['sunrgbd', 'coco']:
                        id = dataset.dataset.coco_id_index[sample['id']
                                                           [jj].item()]
                        img_path, depth_path, img_id = dataset.dataset.get_path(
                            id)
                        assert img_id == sample['id'][jj].item()
                        filepath = 'bbox'.join(img_path.rsplit('image', 1))
                        filepath = os.path.splitext(filepath)[0] + '.txt'
                        img_list.append(
                            dataset.dataset.coco.loadImgs(img_id)[0]
                            ['file_name'])

                    dir = os.path.dirname(filepath)
                    if not os.path.exists(dir):
                        os.makedirs(dir)
                    #if not os.path.exists(filepath):
                    np.savetxt(
                        filepath,
                        sample['label'][jj],
                        delimiter=",",
                        fmt=['%d', '%10.8f', '%10.8f', '%10.8f', '%10.8f'])

            f = '{}/image_list_{}.txt'.format(cfg.DATASET.ROOT,
                                              dataset.dataset.split)
            with open(f, 'w') as fp:
                fp.write('\n'.join(img_list))
Example #29
    def __init__(self, args):
        self.args = args

        # Define network
        model = DeepLab(num_classes=args.num_classes,
                        backbone=args.backbone,
                        output_stride=args.out_stride,
                        sync_bn=args.sync_bn,
                        freeze_bn=args.freeze_bn)

        self.model = model
        kwargs = {'num_workers': args.workers, 'pin_memory': True}
        _, self.valid_loader = make_data_loader(args, **kwargs)
        self.pred_remap = args.pred_remap
        self.gt_remap = args.gt_remap

        # Define Evaluator
        self.evaluator = Evaluator(args.eval_num_classes)

        # Using cuda
        if args.cuda:
            self.model = torch.nn.DataParallel(self.model, device_ids=self.args.gpu_ids)
            patch_replication_callback(self.model)
            self.model = self.model.cuda()

        # Resuming checkpoint
        if args.resume is not None:
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'" .format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            if args.cuda:
                self.model.module.load_state_dict(checkpoint['state_dict'])
            else:
                self.model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
Example #30
    def __init__(self, hparams):
        super().__init__()
        self.hparams = hparams
        kwargs = {'num_workers': hparams.workers, 'pin_memory': True}
        self.train_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(
            hparams, **kwargs)
        self.num_img_tr = len(self.train_loader)
        self.pretrained_net = get_efficientunet_b4(out_channels=self.nclass,
                                                   concat_input=True,
                                                   pretrained=True)
        if hparams.use_balanced_weights:
            parameters_dir = "/work/scratch/lei/MyProject/t_chucai/models_and_parameters/parameters/classes_weights"
            classes_weights_path = os.path.join(
                parameters_dir, hparams.dataset + '_classes_weights.npy')
            if os.path.isfile(classes_weights_path):
                weight = np.load(classes_weights_path)
            else:
                weight = calculate_weigths_labels(hparams.dataset,
                                                  self.train_loader,
                                                  self.nclass)
            self.weight = torch.from_numpy(weight.astype(np.float32))
        else:
            self.weight = None
        self.evaluator = Evaluator(self.nclass)
Example #31
    def __init__(self, args):
        self.args = args

        # Define Saver
        self.saver = Saver(args)
        self.saver.save_experiment_config()
        # Define Tensorboard Summary
        self.summary = TensorboardSummary(self.saver.experiment_dir)
        self.writer = self.summary.create_summary()
        
        # Define Dataloader
        kwargs = {'num_workers': args.workers, 'pin_memory': True}
        self.train_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(args, **kwargs)

        # Define network
        model = DeepLab(num_classes=self.nclass,
                        backbone=args.backbone,
                        output_stride=args.out_stride,
                        sync_bn=args.sync_bn,
                        freeze_bn=args.freeze_bn)

        train_params = [{'params': model.get_1x_lr_params(), 'lr': args.lr},
                        {'params': model.get_10x_lr_params(), 'lr': args.lr * 10}]

        # Define Optimizer
        optimizer = torch.optim.SGD(train_params, momentum=args.momentum,
                                    weight_decay=args.weight_decay, nesterov=args.nesterov)

        # Define Criterion
        # whether to use class balanced weights
        if args.use_balanced_weights:
            classes_weights_path = os.path.join(Path.db_root_dir(args.dataset), args.dataset+'_classes_weights.npy')
            if os.path.isfile(classes_weights_path):
                weight = np.load(classes_weights_path)
            else:
                weight = calculate_weigths_labels(args.dataset, self.train_loader, self.nclass)
            weight = torch.from_numpy(weight.astype(np.float32))
        else:
            weight = None
        self.criterion = SegmentationLosses(weight=weight, cuda=args.cuda).build_loss(mode=args.loss_type)
        self.model, self.optimizer = model, optimizer
        
        # Define Evaluator
        self.evaluator = Evaluator(self.nclass)
        # Define lr scheduler
        self.scheduler = LR_Scheduler(args.lr_scheduler, args.lr,
                                      args.epochs, len(self.train_loader))

        # Using cuda
        if args.cuda:
            self.model = torch.nn.DataParallel(self.model, device_ids=self.args.gpu_ids)
            patch_replication_callback(self.model)
            self.model = self.model.cuda()

        # Resuming checkpoint
        self.best_pred = 0.0
        if args.resume is not None:
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'" .format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            if args.cuda:
                self.model.module.load_state_dict(checkpoint['state_dict'])
            else:
                self.model.load_state_dict(checkpoint['state_dict'])
            if not args.ft:
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.best_pred = checkpoint['best_pred']
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))

        # Clear start epoch if fine-tuning
        if args.ft:
            args.start_epoch = 0
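LR_Scheduler in this code family typically implements the 'poly' policy: scale the base LR by (1 - iter/total)^0.9 each iteration, with the second parameter group (the get_10x_lr_params() head seen above) getting ten times the rate. A hedged sketch of that behavior (warmup and the other modes omitted):

class PolyLR:
    def __init__(self, base_lr, num_epochs, iters_per_epoch, power=0.9):
        self.base_lr = base_lr
        self.iters_per_epoch = iters_per_epoch
        self.total_iters = num_epochs * iters_per_epoch
        self.power = power

    def __call__(self, optimizer, i, epoch):
        # Polynomial decay over the whole run, applied per iteration.
        cur = epoch * self.iters_per_epoch + i
        lr = self.base_lr * (1 - cur / self.total_iters) ** self.power
        optimizer.param_groups[0]['lr'] = lr
        for group in optimizer.param_groups[1:]:
            group['lr'] = lr * 10  # head group trains 10x faster
        return lr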