import os
import random

import numpy as np
import torch
import albumentations as A

# data_augmenter, convert_to_one_hot and rescale_intensity are project-specific
# image utilities; their import path is not shown in this excerpt.


def __getitem__(self, index):
    img_x, img_y = self.imgs[index]

    # Augmentation parameters
    shift = 10
    rotate = 10
    scale = 0.1
    intensity = 0.2
    flip = True

    # Add a batch dimension and move to NHWC for the augmenter
    img_x = np.expand_dims(img_x, axis=0)
    img_x = img_x.transpose([0, 2, 3, 1])
    img_y = np.expand_dims(img_y, axis=0)

    if self.augment and np.random.uniform() > self.aug_rate:
        img_x, img_y = data_augmenter(img_x, img_y,
                                      shift=shift, rotate=rotate,
                                      scale=scale, intensity=intensity,
                                      flip=flip)

    # labels_onehot = convert_to_one_hot(img_y).astype(np.float32)
    # labels_onehot = labels_onehot.transpose([1, 0, 2, 3])

    # Masked image: keep only pixels inside the segmentation (label > 0)
    M = img_x.copy()
    M[img_y == 0] = 0
    # cv2.imwrite('./test.png', M.squeeze()*255)
    M = M.transpose([0, 3, 1, 2])
    M = torch.from_numpy(M).float()

    # Back to NCHW for PyTorch
    img_x = img_x.transpose([0, 3, 1, 2])
    img_x = torch.from_numpy(img_x).float()
    img_y = torch.from_numpy(img_y).float()

    return img_x.squeeze(), img_y, M.squeeze()
def __getitem__(self, index):
    img_x, img_y = self.imgs[index]
    shift = 10
    rotate = 15
    scale = 0.2
    intensity = 0.1
    flip = True

    img_x = np.expand_dims(img_x, axis=0)
    img_x = img_x.transpose([0, 2, 3, 1])
    img_y = np.expand_dims(img_y, axis=0)

    if self.augment and np.random.uniform() > 0.5:
        img_x, img_y = data_augmenter(img_x, img_y,
                                      shift=shift, rotate=rotate,
                                      scale=scale, intensity=intensity,
                                      flip=flip)

    labels_onehot = convert_to_one_hot(img_y).astype(np.float32)
    labels_onehot = labels_onehot.transpose([1, 0, 2, 3])

    img_x = img_x.transpose([0, 3, 1, 2])
    img_x = torch.from_numpy(img_x).float()
    img_y = torch.from_numpy(labels_onehot)

    return img_x.squeeze(), img_y.squeeze()
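# convert_to_one_hot comes from the project's image utilities and is not defined
# in this file. Judging by the [1, 0, 2, 3] transpose applied to its output above,
# it is assumed to map integer label maps of shape (N, H, W) to a one-hot array of
# shape (C, N, H, W). A minimal sketch under that assumption (hypothetical name
# and class count, not the project's implementation):
def _convert_to_one_hot_sketch(labels, num_classes=4):
    """Return a (num_classes, N, H, W) one-hot encoding of (N, H, W) label maps."""
    onehot = np.zeros((num_classes,) + labels.shape, dtype=np.float32)
    for c in range(num_classes):
        onehot[c] = (labels == c)
    return onehot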
def __getitem__(self, index):
    img_x, img_y = self.imgs[index]
    shift = 10
    rotate = 15
    scale = 0.2
    intensity = 0.1
    flip = True

    if self.augment:
        # Elastic deformation applied jointly to image and mask (albumentations
        # expects HWC images)
        aug = A.Compose([
            A.ElasticTransform(alpha=200, sigma=200 * 0.05, alpha_affine=200 * 0.03, p=0.4),
            # A.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=50, val_shift_limit=50),
            # A.RandomBrightnessContrast(),
            # A.RandomGamma(),
            # A.CLAHE(),
            # A.RGBShift(p=0.2),
            # A.Blur(blur_limit=3),
            # A.GaussNoise(p=0.2),
            # A.Flip(),
            # A.RandomRotate90(),
        ])
        img_x = img_x.transpose([1, 2, 0])
        augmented = aug(image=img_x, mask=img_y)
        img_x = augmented['image']
        img_y = augmented['mask']
        img_x = img_x.transpose([2, 0, 1])

    # Add a batch dimension and move to NHWC for the augmenter
    img_x = np.expand_dims(img_x, axis=0)
    img_x = img_x.transpose([0, 2, 3, 1])
    img_y = np.expand_dims(img_y, axis=0)

    if self.augment:
        img_x, img_y = data_augmenter(img_x, img_y,
                                      shift=shift, rotate=rotate,
                                      scale=scale, intensity=intensity,
                                      flip=flip)

    # img_x = np.expand_dims(img_x, axis=0)
    # img_x = img_x.transpose([0, 2, 3, 1])
    # img_y = np.expand_dims(img_y, axis=0)

    labels_onehot = convert_to_one_hot(img_y).astype(np.float32)
    labels_onehot = labels_onehot.transpose([1, 0, 2, 3])

    # Masked image: keep only pixels inside the segmentation (label > 0)
    M = img_x.copy()
    M[img_y == 0] = 0
    # cv2.imwrite('./test.png', M.squeeze()*255)
    M = M.transpose([0, 3, 1, 2])
    M = torch.from_numpy(M).float()

    img_x = img_x.transpose([0, 3, 1, 2])
    img_x = torch.from_numpy(img_x).float()
    img_y = torch.from_numpy(labels_onehot)

    return img_x.squeeze(), img_y.squeeze(), M.squeeze()
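# Usage sketch (assumption; the enclosing Dataset classes are not shown in this
# excerpt): each __getitem__ above is meant to be consumed through a
# torch.utils.data.DataLoader. "TrainSet" and its constructor arguments below are
# hypothetical placeholders for whichever class the third variant belongs to.
#
#   from torch.utils.data import DataLoader
#
#   train_set = TrainSet(data_dir='./data/train', augment=True)      # hypothetical
#   train_loader = DataLoader(train_set, batch_size=8, shuffle=True, num_workers=4)
#   for img, label_onehot, masked in train_loader:
#       # img:          float32 image tensor after squeeze()
#       # label_onehot: one-hot ground truth, channel-first
#       # masked:       image with background (label == 0) zeroed out
#       ...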
def get_epoch_batch(data_list, batch_size, iteration, idx, image_size=192,
                    data_augmentation=False, shift=0.0, rotate=0.0, scale=0.0,
                    intensity=0.0, flip=False, norm=True, aug_rate=0.5):
    eds, ed_gts, ess, es_gts = [], [], [], []
    for i in range(iteration * batch_size, (iteration + 1) * batch_size):
        es_name, es_gt_name, ed_name, ed_gt_name = data_list[idx[i]]

        # Randomly swap which frame is treated as ES and which as ED
        if np.random.uniform() > 0.5:
            es_name, ed_name = ed_name, es_name
            es_gt_name, ed_gt_name = ed_gt_name, es_gt_name

        if os.path.exists(es_name) and os.path.exists(es_gt_name):
            # print('  Select {0} {1}'.format(image_name, label_name))

            # Read image and label
            # print(es_name)
            es = np.load(es_name)
            es_gt = np.load(es_gt_name)
            ed = np.load(ed_name)
            ed_gt = np.load(ed_gt_name)

            # Handle exceptions
            if es.shape != es_gt.shape:
                print('Error: mismatched size, image.shape = {0}, label.shape = {1}'.format(es.shape, es_gt.shape))
                print('Skip {0}, {1}'.format(es_name, es_gt_name))
                continue

            if es.max() < 1e-6:
                print('Error: blank image, image.max = {0}'.format(es.max()))
                print('Skip {0} {1}'.format(es_name, es_gt_name))
                continue

            # Append the image slices to the batch
            # Use list for appending, which is much faster than numpy array
            Z = es.shape[0]
            if Z > 8:
                # Sample a contiguous stack of 8 slices
                r = Z - 8
                start = random.randint(0, r)
                for z in range(start, start + 8):
                    temp1 = es[z, :, :]
                    temp2 = ed[z, :, :]
                    if norm:
                        temp1 = rescale_intensity(temp1, (0.5, 99.5))
                        temp2 = rescale_intensity(temp2, (0.5, 99.5))
                    ess += [temp1]
                    eds += [temp2]
                    es_gts += [es_gt[z, :, :]]
                    ed_gts += [ed_gt[z, :, :]]
            else:
                for z in range(Z):
                    temp1 = es[z, :, :]
                    temp2 = ed[z, :, :]
                    if norm:
                        temp1 = rescale_intensity(temp1, (0.5, 99.5))
                        temp2 = rescale_intensity(temp2, (0.5, 99.5))
                    ess += [temp1]
                    eds += [temp2]
                    es_gts += [es_gt[z, :, :]]
                    ed_gts += [ed_gt[z, :, :]]

    # Convert to a numpy array
    ESs = np.array(ess, dtype=np.float32)
    ES_gts = np.array(es_gts, dtype=np.float32)
    EDs = np.array(eds, dtype=np.float32)
    ED_gts = np.array(ed_gts, dtype=np.float32)

    # Add the channel dimension
    # tensorflow by default assumes NHWC format (batch_size, 128, 128, 1)
    ESs = np.expand_dims(ESs, axis=3)
    EDs = np.expand_dims(EDs, axis=3)

    # Perform data augmentation
    if data_augmentation and np.random.uniform() > aug_rate:
        ESs, ES_gts = data_augmenter(ESs, ES_gts, shift=shift, rotate=rotate,
                                     scale=scale, intensity=intensity, flip=flip)
        EDs, ED_gts = data_augmenter(EDs, ED_gts, shift=shift, rotate=rotate,
                                     scale=scale, intensity=intensity, flip=flip)

    ESs_onehot = convert_to_one_hot(ES_gts).astype(np.float32)
    ESs_onehot = ESs_onehot.transpose([1, 0, 2, 3])
    ESs_onehot = torch.from_numpy(ESs_onehot)

    EDs_onehot = convert_to_one_hot(ED_gts).astype(np.float32)
    EDs_onehot = EDs_onehot.transpose([1, 0, 2, 3])
    EDs_onehot = torch.from_numpy(EDs_onehot)

    # Masked images: keep only pixels inside the segmentation (label > 0)
    ES_M = ESs.copy()
    ES_M[ES_gts == 0] = 0
    ED_M = EDs.copy()
    ED_M[ED_gts == 0] = 0

    ESs = torch.from_numpy(ESs.transpose((0, 3, 1, 2)))
    ES_M = torch.from_numpy(ES_M.transpose((0, 3, 1, 2)))
    EDs = torch.from_numpy(EDs.transpose((0, 3, 1, 2)))
    ED_M = torch.from_numpy(ED_M.transpose((0, 3, 1, 2)))

    return {'ED': EDs, 'ED_gt': EDs_onehot, 'ED_M': ED_M,
            'ES': ESs, 'ES_gt': ESs_onehot, 'ES_M': ES_M}
def get_epoch_batch(data_list, batch_size, iteration, idx, image_size=192,
                    data_augmentation=False, shift=0.0, rotate=0.0, scale=0.0,
                    intensity=0.0, flip=False, norm=True):
    images, labels = [], []
    for i in range(iteration * batch_size, (iteration + 1) * batch_size):
        image_name, label_name = data_list[idx[i]]

        if os.path.exists(image_name) and os.path.exists(label_name):
            # print('  Select {0} {1}'.format(image_name, label_name))

            # Read image and label
            image = np.load(image_name)
            label = np.load(label_name)

            # Handle exceptions
            if image.shape != label.shape:
                print('Error: mismatched size, image.shape = {0}, label.shape = {1}'.format(image.shape, label.shape))
                print('Skip {0}, {1}'.format(image_name, label_name))
                continue

            if image.max() < 1e-6:
                print('Error: blank image, image.max = {0}'.format(image.max()))
                print('Skip {0} {1}'.format(image_name, label_name))
                continue

            # Append the image slices to the batch
            # Use list for appending, which is much faster than numpy array
            Z = image.shape[0]
            if Z > 8:
                r = Z - 8
                start = random.randint(0, r)
                for z in range(start, start + 8):
                    temp = image[z, :, :]
                    if norm:
                        temp = rescale_intensity(temp, (0.5, 99.5))
                    images += [temp]
                    labels += [label[z, :, :]]
            else:
                for z in range(Z):
                    temp = image[z, :, :]
                    if norm:
                        temp = rescale_intensity(temp, (0.5, 99.5))
                    images += [temp]
                    labels += [label[z, :, :]]

    # Convert to a numpy array
    images = np.array(images, dtype=np.float32)
    labels = np.array(labels, dtype=np.float32)

    # Add the channel dimension
    # tensorflow by default assumes NHWC format (batch_size, 128, 128, 1)
    images = np.expand_dims(images, axis=3)

    # Perform data augmentation
    if data_augmentation:
        images, labels = data_augmenter(images, labels, shift=shift, rotate=rotate,
                                        scale=scale, intensity=intensity, flip=flip)

    labels_onehot = convert_to_one_hot(labels).astype(np.float32)
    labels_onehot = labels_onehot.transpose([1, 0, 2, 3])
    labels_onehot = torch.from_numpy(labels_onehot)

    # Masked image: keep only pixels inside the segmentation (label > 0)
    M = images.copy()
    M[labels == 0] = 0
    # images2 = np.concatenate([images, M], axis=3)
    # images2 = torch.from_numpy(images.transpose((0, 3, 1, 2)))

    images = torch.from_numpy(images.transpose((0, 3, 1, 2)))
    labels = np.expand_dims(labels, axis=1)
    labels = torch.from_numpy(labels)
    M = torch.from_numpy(M.transpose((0, 3, 1, 2)))

    return {'A': images, 'B': labels_onehot, 'M': M}
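# Usage sketch (assumption; the training loop is not part of this module): both
# get_epoch_batch variants expect the caller to shuffle an index array once per
# epoch and to pass the current iteration, e.g.
#
#   idx = np.random.permutation(len(data_list))
#   for iteration in range(len(data_list) // batch_size):
#       batch = get_epoch_batch(data_list, batch_size, iteration, idx,
#                               data_augmentation=True, shift=10, rotate=10,
#                               scale=0.1, intensity=0.1, flip=False)
#       images, labels_onehot, masked = batch['A'], batch['B'], batch['M']
#       # forward pass / loss computation on these tensors goes here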