Example #1
    def __init__(self, args):
        self.args = args
        self.path = Path()

        # Define Saver
        self.saver = Saver(args, self.path.event)

        # Define Dataloader
        kwargs = {'num_workers': args.workers, 'pin_memory': True}
        self.train_loader, self.val_loader = make_data_loader(args, **kwargs)

        # Define network
        self.model = mymodels.resnet152(pretrained=True)
        # Alternative backbones:
        # self.model = mymodels.resnet50(pretrained=True)
        # if args.pretrained:
        #     self.model = mymodels.resnet101(pretrained=True)
        # else:
        #     self.model = mymodels.resnet101()

        # Binary classification head
        num_ftrs = self.model.fc.in_features
        self.model.fc = nn.Linear(num_ftrs, 2)  # len(class_names) = 2
        del self.model.maxpool  # drop max-pooling (assumes the custom forward skips it)

        # Resuming checkpoint
        if args.resume is not None:  # path to resuming file
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'".format(
                    args.resume))
            state_dict = torch.load(args.resume)
            print("=> loaded pre-trained model from '%s'" % args.resume)
            best_acc = state_dict['best_acc']  # best validation accuracy
            print('=> model top-1 accuracy: %0.3f' % best_acc)
            self.model.load_state_dict(state_dict['state_dict'])

        # Using cuda
        if args.cuda:
            self.model = self.model.cuda()

        # Define loss function (criterion) and optimizer
        self.criterion = nn.CrossEntropyLoss()  # cross-entropy loss
        if args.cuda:
            self.criterion = self.criterion.cuda()
        self.optimizer = torch.optim.SGD(self.model.parameters(),
                                         args.lr,
                                         momentum=args.momentum,
                                         weight_decay=args.weight_decay)

        # Define lr scheduler
        self.scheduler = lr_scheduler.StepLR(self.optimizer,
                                             step_size=10,
                                             gamma=0.1)

        # Record
        self.best_acc = 0.0
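
The constructor above only wires up data, model, loss, optimizer, and scheduler. A minimal sketch of the matching epoch loop follows; the `training` method name and loss bookkeeping are assumptions, only the criterion/optimizer/scheduler usage is taken from the code above.

    # Hedged sketch of the epoch loop this trainer implies; the method name
    # is an assumption, not part of the original.
    def training(self, epoch):
        self.model.train()
        for images, targets in self.train_loader:
            if self.args.cuda:
                images, targets = images.cuda(), targets.cuda()
            output = self.model(images)
            loss = self.criterion(output, targets)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
        self.scheduler.step()  # StepLR: multiply lr by 0.1 every 10 epochs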
Example #2
    def __init__(self, args):
        super().__init__()
        path = Path()
        self._base_dir = path.detect
        self.all_patch_list = []
        self.image_ids = args.image_ids
        for image_id in self.image_ids:
            image = cv2.imread(os.path.join(self._base_dir, image_id))
            image_pad = self.pad_image(image)
            patch_list = self.image_to_patch(image_pad)
            self.all_patch_list += patch_list
        assert len(self.all_patch_list) == len(self.image_ids) * 16 * 12
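
Examples #2 and #4 rely on `pad_image` and `image_to_patch` helpers that are not shown. A plausible sketch that satisfies the 16 * 12 assertion is given below; the patch size (128 px), the zero padding, and the 16-row by 12-column grid orientation are all assumptions.

    # Hypothetical helpers consistent with the assertion above; patch size
    # and grid orientation are assumptions, not from the source.
    PATCH = 128

    def pad_image(self, image):
        h, w = image.shape[:2]
        pad_h = max(0, 16 * self.PATCH - h)  # pad down to a 16-row grid
        pad_w = max(0, 12 * self.PATCH - w)  # pad right to a 12-column grid
        return cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                  cv2.BORDER_CONSTANT, value=0)

    def image_to_patch(self, image):
        patches = []
        for i in range(16):
            for j in range(12):
                patches.append(image[i * self.PATCH:(i + 1) * self.PATCH,
                                     j * self.PATCH:(j + 1) * self.PATCH])
        return patches  # 16 * 12 = 192 patches per image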
Example #3
    def __init__(self, cfg, args, **kwargs):
        self.cfg = cfg
        self.distributed = dist.is_initialized()
        # Dataloader
        dsval = CityscapesSegmentation(args,
                                       root=Path().db_root_dir(args.dataset),
                                       split='reval')
        sampler = None
        if self.distributed:
            sampler = torch.utils.data.distributed.DistributedSampler(dsval)
        self.dl = DataLoader(dsval,
                             batch_size=cfg.eval_batchsize,
                             sampler=sampler,
                             shuffle=False,
                             num_workers=cfg.eval_n_workers,
                             drop_last=False)
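
When `dist.is_initialized()` is true, the `DistributedSampler` shards the validation set across ranks. A hedged usage sketch follows; the process-group setup and the `Evaluator` class name are assumptions.

    # Assumed driver code: initialize the process group before constructing
    # the evaluator; each rank then iterates only its own shard.
    dist.init_process_group(backend='nccl')
    evaluator = Evaluator(cfg, args)  # hypothetical name for the class above
    for images, targets in evaluator.dl:
        # DistributedSampler pads the dataset so all shards are equal in size,
        # so per-rank metrics must be reduced (e.g. via dist.all_reduce).
        ...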
Example #4
    def __init__(self):
        super().__init__()
        with open('test_list.txt') as f:
            test_list = f.readlines()

        self.image_ids = [i.split(' ')[0] for i in test_list]

        path = Path()
        self._base_dir = path.harmo
        self.all_patch_list = []
        for image_id in self.image_ids:
            image = cv2.imread(os.path.join(self._base_dir, image_id))
            #print(os.path.join(self._base_dir, image_id))
            #assert os.path.exists(os.path.join(self._base_dir, image_id))
            image_pad = self.pad_image(image)
            patch_list = self.image_to_patch(image_pad)
            self.all_patch_list += patch_list
        assert len(self.all_patch_list) == len(self.image_ids) * 16 * 12
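
Neither patch dataset shows its accessors. A minimal sketch of `__len__`/`__getitem__` follows, assuming each item is one cv2 (BGR, uint8) patch turned into a CHW float tensor; the 0-1 scaling is an assumption.

    # Hypothetical accessors for the patch datasets above; tensor layout
    # and scaling are assumptions, not from the source.
    def __len__(self):
        return len(self.all_patch_list)

    def __getitem__(self, index):
        patch = self.all_patch_list[index]                # H x W x 3, BGR uint8
        patch = torch.from_numpy(patch).permute(2, 0, 1)  # to C x H x W
        return patch.float() / 255.0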
Example #5
    def __init__(self, args):
        self.args = args
        path = Path()

        # Image to patch
        n_patch = 16 * 12  # 192 patches per image
        eval_per = int(n_patch / args.batch_size)  # batches per image

        # Paths
        self.event_dir = path.event
        self.model_dir = os.path.join(self.event_dir, 'run', 'checkpoint.pth.tar')

        test_list = args.test_list
        with open(test_list) as f:
            self.lines = f.readlines()

        # Range
        self.ids  = [i.split(' ')[0] for i in self.lines]
        self.mins = [float(i.split(' ')[1]) for i in self.lines]
        self.maxs = [float(i.split(' ')[2]) for i in self.lines]

        # Define Dataloader
        kwargs = {'num_workers': args.workers, 'pin_memory': True}
        self.test_loader = make_data_loader(args, **kwargs)

        # Define network
        self.model = mymodels.resnet152()

        # Binary classification head
        num_ftrs = self.model.fc.in_features
        self.model.fc = nn.Linear(num_ftrs, 2)  # len(class_names) = 2
        del self.model.maxpool

        # Resuming checkpoint
        state_dict = torch.load(self.model_dir, map_location='cpu')  # safe on CPU-only machines
        print('=> loaded pre-trained model from %s' % self.model_dir)
        best_acc = state_dict['best_acc']  # best validation accuracy
        print('=> model accuracy: %0.3f' % best_acc)
        self.model.load_state_dict(state_dict['state_dict'])

        # Using cuda
        if args.cuda:
            self.model = self.model.cuda()
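
Examples #1 and #5 both load a checkpoint dict with 'state_dict' and 'best_acc' keys; the matching writer would look roughly like the sketch below. Only those two keys are implied by the loaders, the 'epoch' key is an assumption.

    # Hedged sketch of the checkpoint writer implied by the loaders above.
    torch.save({
        'state_dict': self.model.state_dict(),
        'best_acc': self.best_acc,
        'epoch': epoch,  # hypothetical key
    }, os.path.join(self.event_dir, 'run', 'checkpoint.pth.tar'))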
Example #6
    def __init__(self, split='train'):
        super().__init__()

        assert split in ['train', 'val']
        self.split = split

        path = Path()
        self._base_dir = path.patch

        self.split_fn = os.path.join(self._base_dir, self.split + '.txt')

        self.im_ids = []
        self.im_lbs = []  # label:0/1
        with open(self.split_fn, 'r') as f:
            lines = f.read().splitlines()

        for line in lines:
            im_id, im_lb = line.split(' ')
            self.im_ids.append(im_id)
            self.im_lbs.append(int(im_lb))  # label: 0 or 1
        assert len(self.im_ids) == len(self.im_lbs)
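
A minimal sketch of the accessors this split dataset would pair with; cv2 loading and the directory layout under `path.patch` are assumptions.

    # Hypothetical accessors; image files living directly under path.patch
    # and BGR uint8 loading are assumptions.
    def __len__(self):
        return len(self.im_ids)

    def __getitem__(self, index):
        image = cv2.imread(os.path.join(self._base_dir, self.im_ids[index]))
        image = torch.from_numpy(image).permute(2, 0, 1).float() / 255.0
        return image, self.im_lbs[index]  # (C x H x W tensor, 0/1 label)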