def __load(self, index):
    # Reload a new volume once enough patches have been sampled from the current one.
    if self.patches_from_current_image > self.patches_from_single_image:
        self.patches_from_current_image = 0
        self.current_image_index = index

        p, f = self.series[index]
        self.image, self.label, affine = loader_helper.read_multimodal(p, f, self.annotation_path, True)

        # Per-modality z-score normalization over non-zero (brain) voxels only.
        # Background voxels are zero, so summing the whole volume divided by the
        # masked voxel count gives the mean over the mask.
        mask = self.image > 0
        num_voxels = np.sum(mask, axis=(1, 2, 3))
        mean = np.sum(self.image / num_voxels[:, None, None, None], axis=(1, 2, 3))
        mean2 = np.sum(np.square(self.image) / num_voxels[:, None, None, None], axis=(1, 2, 3))
        std = np.sqrt(mean2 - mean * mean)

        self.image = (self.image - mean.reshape((self.image.shape[0], 1, 1, 1))) / \
                     std.reshape((self.image.shape[0], 1, 1, 1))

    self.patches_from_current_image += 1
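# The normalization in __load relies on the background being exactly zero, so the
# unmasked sums equal the masked sums. A minimal sketch of the same per-modality
# statistic computed explicitly over the mask (the helper name `normalize_nonzero`
# is illustrative, not part of loader_helper):
import numpy as np

def normalize_nonzero(volume):
    """Z-score each modality of a (C, D, H, W) volume over its non-zero voxels."""
    out = volume.astype(np.float32)
    for c in range(out.shape[0]):
        voxels = out[c][out[c] > 0]
        out[c] = (out[c] - voxels.mean()) / (voxels.std() + 1e-8)
    return out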
def __getitem__(self, index):
    image, label, affine = loader_helper.read_multimodal(data_path=self.path,
                                                         series=self.series[index],
                                                         read_annotation=True)

    # Pad each spatial dimension up to the nearest size divisible by 16
    # so the volume fits the network's downsampling path.
    old_shape = image.shape
    new_shape = tuple([loader_helper.closest_to_k(i, 16) for i in old_shape[1:]])

    new_image = np.full(shape=(old_shape[0],) + new_shape, fill_value=0., dtype=np.float32)
    new_label = np.zeros(shape=new_shape, dtype=np.float32)

    new_image[:, :old_shape[1], :old_shape[2], :old_shape[3]] = image
    new_label[:old_shape[1], :old_shape[2], :old_shape[3]] = label

    # Per-modality z-score normalization over non-zero (brain) voxels;
    # the zero padding does not contribute to the sums.
    mask = new_image > 0
    num_voxels = np.sum(mask, axis=(1, 2, 3))
    mean = np.sum(new_image / num_voxels[:, None, None, None], axis=(1, 2, 3))
    mean2 = np.sum(np.square(new_image) / num_voxels[:, None, None, None], axis=(1, 2, 3))
    std = np.sqrt(mean2 - mean * mean)

    new_image = (new_image - mean.reshape((new_image.shape[0], 1, 1, 1))) / \
                std.reshape((new_image.shape[0], 1, 1, 1))

    # One-hot encode the label map (classes 0..3), then build the three nested
    # BraTS regions: whole tumour (WT), tumour core (TC), enhancing tumour (ET).
    new_label_out = (np.eye(4)[new_label.astype(np.int32)]).transpose((3, 0, 1, 2))

    wt = np.sum(new_label_out[1:], axis=0, keepdims=True)
    tc = np.sum(new_label_out[[1, 3]], axis=0, keepdims=True)
    et = new_label_out[3, None]

    new_label_out = np.concatenate([wt, tc, et], axis=0)
    labels_torch = torch.from_numpy(new_label_out.copy()).float()

    return [torch.from_numpy(new_image).float(), ], \
           [labels_torch, ]
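# A minimal sanity check of the region encoding built in __getitem__ (after the
# 4 -> 3 remap: 1 = necrotic/non-enhancing core, 2 = edema, 3 = enhancing tumour),
# so WT = {1, 2, 3}, TC = {1, 3}, ET = {3}. The toy array is illustrative only.
import numpy as np

toy = np.array([0, 1, 2, 3])            # one voxel of each class
onehot = np.eye(4)[toy].T               # (class, voxel) one-hot matrix
wt = onehot[1:].sum(axis=0)             # whole tumour     -> [0, 1, 1, 1]
tc = onehot[[1, 3]].sum(axis=0)         # tumour core      -> [0, 1, 0, 1]
et = onehot[3]                          # enhancing tumour -> [0, 0, 0, 1]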
def __cache(self):
    # Cache label bounding boxes (with a margin) so patch centres can be
    # sampled near the tumour while staying inside the volume.
    print('Cache files')

    for p, f in tqdm.tqdm(self.series):
        image, label, affine = loader_helper.read_multimodal(p, f, self.annotation_path, True)

        bbox = loader_helper.bbox3(label > 0)

        borders = np.array(label.shape)
        borders_low = np.array(self.patch_size) / 2.0 + 1
        borders_high = borders - np.array(self.patch_size) / 2.0 - 1

        # Expand the box by 50 voxels on each side, clamped so that a full
        # patch around any sampled centre still fits inside the volume.
        bbox[0] = np.maximum(bbox[0] - 50, borders_low)
        bbox[1] = np.minimum(bbox[1] + 50, borders_high)

        self.labels_location.append(bbox)
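# `loader_helper.bbox3` is assumed here to return a (2, 3) array holding the
# [min, max] voxel indices of the non-zero region, which matches how bbox[0]
# and bbox[1] are used above and in the prediction script. A minimal sketch:
import numpy as np

def bbox3_sketch(mask):
    """Bounding box of a 3D boolean mask as [[z0, y0, x0], [z1, y1, x1]]."""
    coords = np.argwhere(mask)
    return np.stack([coords.min(axis=0), coords.max(axis=0)])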
path_input = opt.data_path
path_output = opt.predictions_path

series = [f for f in os.listdir(path_input) if os.path.isdir(os.path.join(path_input, f))]
series.sort()

dice = metrics.Dice(input_index=0)
dicewt = metrics.DiceWT(input_index=0)

sum = 0
for f in series:
    image, label, affine = loader_helper.read_multimodal(path_input, f, True)
    predict = loader_helper.read_nii(os.path.join(path_output, f + '.nii.gz')).astype(np.uint8)

    # BraTS stores the enhancing-tumour class as label 4; remap it to 3 to
    # match the network's contiguous class indices.
    predict[predict == 4] = 3

    result = np.zeros(shape=(4,))
    # Per-class Dice over the three tumour labels.
    for i in range(1, 4):
        p = (predict == i).astype(np.float32)
        g = (label == i).astype(np.float32)

        numerator = (p * g).sum()
        denominator = (p + g).sum()

        r = 2 * numerator / denominator
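# Aside: the per-class Dice above divides by (p + g).sum(), which is zero whenever
# a class is missing from both the prediction and the reference. A guarded variant
# (scoring an absent class as 1.0 is an assumption mirroring common BraTS practice,
# not something taken from this script):
def dice_score(p, g):
    numerator = 2.0 * (p * g).sum()
    denominator = (p + g).sum()
    return 1.0 if denominator == 0 else numerator / denominator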
                  models_root=opt.models_path,
                  rewrite=False,
                  connect_tb=False)
trainer.load_best()
trainer.state.cuda = True

series = [f for f in os.listdir(path) if os.path.isdir(os.path.join(path, f))]
series.sort()

print(series)

for f in series:
    image, label, affine = loader_helper.read_multimodal(data_path=path, series=f, read_annotation=False)

    # Crop to the brain bounding box to cut down on background voxels.
    bbox = get_bbox(image)
    image_crop = image[:,
                       bbox[0, 0]:bbox[1, 0],
                       bbox[0, 1]:bbox[1, 1],
                       bbox[0, 2]:bbox[1, 2]]

    #=========================================

    # Pad the crop symmetrically so every spatial dimension is divisible by 16.
    old_shape_crop = image_crop.shape[1:]
    new_shape_crop = tuple([loader_helper.closest_to_k(i, 16) for i in old_shape_crop])

    diff = np.array(new_shape_crop) - np.array(old_shape_crop)
    pad_left = diff // 2
    pad_right = diff - pad_left
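    # The excerpt stops after computing the per-axis padding; a minimal sketch of
    # how such a symmetric pad is typically applied (the np.pad call is an
    # illustration, not necessarily the project's exact code):
    pad_width = [(0, 0)] + [(int(l), int(r)) for l, r in zip(pad_left, pad_right)]
    image_padded = np.pad(image_crop, pad_width, mode='constant', constant_values=0)
    assert image_padded.shape[1:] == tuple(new_shape_crop)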