Example #1
 def __init__(self, root, ann_root, scale_size=None):
     super(DRDetectionDS_predict_xml, self).__init__()
     self.root = root
     self.ann_root = ann_root
     self.scale_size = scale_size
     self.transform = transforms.Compose([
         PILColorJitter(),
         transforms.ToTensor(),
         # Lighting(alphastd=0.01, eigval=eigen_values, eigvec=eigen_values),
         transforms.Normalize(mean=[0.485, 0.456, 0.406],
                              std=[0.229, 0.224, 0.225])
     ])
     self.classid = ['optic_dis', 'macular']
     ann_list = glob(os.path.join(ann_root, '*.xml'))
     img_list = glob(os.path.join(root, '*.png'))
     # strip only the file extension; os.path.splitext is robust to dots elsewhere in the path
     img_list = [os.path.splitext(i)[0] for i in img_list]
     ann_list = [os.path.splitext(i)[0] for i in ann_list]
     self.data_list = []
     self.ann_info_list = []
     self.bboxs_list = []
     self.bboxs_c_list = []
     # keep only annotations whose image counterpart exists; stems are compared as
     # full paths, which implicitly assumes root and ann_root point to the same directory
     for ann in ann_list:
         if ann in img_list:
             self.data_list.append(ann)
     # pre-parse every matched XML annotation up front
     for index in self.data_list:
         anns, bbox, bbox_c = self.__read_xml(index)
         self.ann_info_list.append(anns)
         self.bboxs_list.append(bbox)
         self.bboxs_c_list.append(bbox_c)
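
Example #1 pre-parses every annotation through self.__read_xml, which is not shown in the snippet. Below is a minimal sketch of what such a reader could look like, assuming Pascal VOC-style XML (one <object> per target with a <name> and a <bndbox>); the helper name and the exact return convention are assumptions, not the original implementation:

import xml.etree.ElementTree as ET

def read_xml_sketch(index, classid=('optic_dis', 'macular')):
    # `index` is an annotation path without its extension, as built in __init__ above
    anns, bboxs, bboxs_c = [], [], []
    for obj in ET.parse(index + '.xml').getroot().iter('object'):
        name = obj.find('name').text
        box = obj.find('bndbox')
        coords = [int(float(box.find(k).text)) for k in ('xmin', 'ymin', 'xmax', 'ymax')]
        anns.append({'name': name, 'bbox': coords})   # full per-object record
        bboxs.append(coords)                          # raw box coordinates
        bboxs_c.append(classid.index(name))           # class index into self.classid
    return anns, bboxs, bboxs_c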
Example #2
 def __init__(self, root, config, crop_size, scale_size, baseline=False):
     super(MultiTaskClsDataSet, self).__init__()
     self.root = root
     self.config = config
     self.crop_size = crop_size
     self.scale_size = scale_size
     self.baseline = baseline
     df = pd.read_csv(config, index_col=0)  # the first column holds the row index
     self.images_list = []
     for index, row in df.iterrows():
         self.images_list.append(row)
     # info.json provides per-channel mean/std plus the PCA eigenvalues/eigenvectors
     # consumed by the Lighting transform below
     with open('info.json', 'r') as fp:
         info = json.load(fp)
     mean_values = torch.from_numpy(np.array(info['mean'], dtype=np.float32) / 255)
     std_values = torch.from_numpy(np.array(info['std'], dtype=np.float32) / 255)
     eigen_values = torch.from_numpy(np.array(info['eigval'], dtype=np.float32))
     eigen_vectors = torch.from_numpy(np.array(info['eigvec'], dtype=np.float32))
     if baseline:
         self.transform = transforms.Compose([
             transforms.RandomCrop(crop_size),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=mean_values, std=std_values),
         ])
     else:
         self.transform = transforms.Compose([
             transforms.RandomCrop(crop_size),
             transforms.RandomHorizontalFlip(),
             PILColorJitter(),
             transforms.ToTensor(),
             Lighting(alphastd=0.01, eigval=eigen_values, eigvec=eigen_vectors),
             transforms.Normalize(mean=mean_values, std=std_values),
         ])
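
Both branches rely on the custom PILColorJitter transform, which none of these examples define. A minimal sketch under the assumption that it randomly perturbs brightness, contrast and saturation of a PIL image (the class name and the +/-0.4 jitter range are made up here); recent torchvision also ships transforms.ColorJitter, which covers the same ground:

import random
from PIL import ImageEnhance

class PILColorJitterSketch(object):
    # hypothetical stand-in for PILColorJitter; the jitter range is an assumption
    def __init__(self, var=0.4):
        self.var = var

    def __call__(self, img):
        for enhancer in (ImageEnhance.Brightness, ImageEnhance.Contrast, ImageEnhance.Color):
            factor = 1.0 + random.uniform(-self.var, self.var)
            img = enhancer(img).enhance(factor)
        return img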
Example #3
	def __init__(self, crop_size, scale_size, baseline):
		super(kaggleClsTrain1, self).__init__()
		self.image = ['data/kaggle1/train_images/train/' + line.strip() + '_' + str(scale_size) + '.png' for line in open('data/kaggle1/train_images/train/train_images.txt', 'r')]
		self.label = torch.from_numpy(np.array(np.loadtxt('data/kaggle1/train_images/train/train_labels.txt'), np.int64))
		with open('data/kaggle/info.json', 'r') as fp:
			info = json.load(fp)
		mean_values = torch.from_numpy(np.array(info['mean'], dtype=np.float32) / 255)
		std_values = torch.from_numpy(np.array(info['std'], dtype=np.float32) / 255)
		eigen_values = torch.from_numpy(np.array(info['eigval'], dtype=np.float32))
		eigen_vectors = torch.from_numpy(np.array(info['eigvec'], dtype=np.float32))
		if baseline:
			self.transform = transforms.Compose([
				transforms.RandomCrop(crop_size),
				transforms.RandomHorizontalFlip(),
				transforms.ToTensor(),
				transforms.Normalize(mean=mean_values, std=std_values),
			])
		else:
			self.transform = transforms.Compose([
				transforms.RandomResizedCrop(crop_size),
				transforms.RandomHorizontalFlip(),
				PILColorJitter(),
				transforms.ToTensor(),
				#ColorJitter(),
				Lighting(alphastd=0.1, eigval=eigen_values, eigvec=eigen_vectors),
				#Affine(rotation_range=180, translation_range=None, shear_range=None, zoom_range=None),
				transforms.Normalize(mean=mean_values, std=std_values),
			])
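
The non-baseline pipelines also apply a Lighting transform driven by the eigenvalues and eigenvectors from info.json. This looks like the usual AlexNet-style PCA lighting noise; a minimal sketch under that assumption (the body follows the common fb.resnet.torch recipe, not the source implementation), operating on the CxHxW tensor produced by ToTensor():

import torch

class LightingSketch(object):
    # hypothetical stand-in for Lighting: add PCA-aligned colour noise to a 3xHxW tensor
    def __init__(self, alphastd, eigval, eigvec):
        self.alphastd = alphastd
        self.eigval = eigval    # shape (3,)
        self.eigvec = eigvec    # shape (3, 3)

    def __call__(self, img):
        if self.alphastd == 0:
            return img
        alpha = img.new_empty(3).normal_(0, self.alphastd)     # random weight per principal component
        rgb = (self.eigvec * alpha * self.eigval).sum(dim=1)   # per-channel colour offset
        return img + rgb.view(3, 1, 1)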
Example #4
 def __init__(self, root, scale_size=None):
     self.root = root
     self.scale_size = scale_size
     self.transform = transforms.Compose([
         PILColorJitter(),
         transforms.ToTensor(),
         # Lighting(alphastd=0.01, eigval=eigen_values, eigvec=eigen_values),
         transforms.Normalize(mean=[0.485, 0.456, 0.406],
                              std=[0.229, 0.224, 0.225])
     ])
     self.img_list = glob(os.path.join(root, '*.png'))
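
Example #4 only shows the constructor; the per-item logic would presumably mirror the single-image helper in Example #5 below, reusing the document's own scale_image helper. A plausible __len__/__getitem__ pair under that assumption (not part of the source):

from PIL import Image

class _PredictItemsSketch(object):
    # hypothetical methods that would sit on the prediction dataset classes above
    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, idx):
        path = self.img_list[idx]
        img = Image.open(path)
        img, l, u, ratio = scale_image(img, self.scale_size)  # offsets/ratio allow mapping boxes back later
        return self.transform(img), path, [l, u, ratio]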
Example #5
def DRDetection_predict_single_image(imagepath, scale_size=None):
    transform = transforms.Compose([
        PILColorJitter(),
        transforms.ToTensor(),
        # Lighting(alphastd=0.01, eigval=eigen_values, eigvec=eigen_values),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    img = Image.open(imagepath)
    img, l, u, ratio = scale_image(img, scale_size)
    img = transform(img)
    return img, imagepath, [l, u, ratio]
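
A usage sketch for the single-image helper above; the file name, scale size and model are placeholders for whatever detection network consumes the tensor, and the returned offsets/ratio let predictions be mapped back to the original image coordinates:

import torch

img, path, (l, u, ratio) = DRDetection_predict_single_image('fundus.png', scale_size=512)  # placeholder path/size
with torch.no_grad():
    output = model(img.unsqueeze(0))   # `model` is a placeholder; unsqueeze adds the batch dimension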
Example #6
 def __init__(self, root, scale_size=None):
     super(DRDetectionDS_predict, self).__init__()
     self.root = root
     self.scale_size = scale_size
     self.transform = transforms.Compose([
         PILColorJitter(),
         transforms.ToTensor(),
         # Lighting(alphastd=0.01, eigval=eigen_values, eigvec=eigen_values),
         transforms.Normalize(mean=[0.485, 0.456, 0.406],
                              std=[0.229, 0.224, 0.225])
     ])
     self.img_list = glob(os.path.join(root, '*.png'))
     self.data_list = []
     self.ann_info_list = []
     self.bboxs_list = []
     self.bboxs_c_list = []
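
Like Example #4, this prediction dataset only exposes its constructor here. Assuming a __getitem__ along the lines of the sketch after Example #4, it could be driven with a standard DataLoader; the directory and scale size below are placeholders:

from torch.utils.data import DataLoader

ds = DRDetectionDS_predict('path/to/images', scale_size=512)
# batch_size=1 sidesteps collation of the per-image (left, upper, ratio) metadata
loader = DataLoader(ds, batch_size=1, shuffle=False, num_workers=2)
for img, path, meta in loader:
    pass  # run the detector on img here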