def test_preprocess(metaimage_path):
    """Old-style (lum_trans + resample) and new-style (PreprocessCT) pipelines
    must produce byte-identical crops and coordinates."""
    nodules = [{"z": 556, "y": 100, "x": 0}]

    # Old-style pipeline: raw SimpleITK load, luminance transform, resample.
    itk_image = sitk.ReadImage(metaimage_path)
    volume = sitk.GetArrayFromImage(itk_image)
    voxel_spacing = np.array(itk_image.GetSpacing())[::-1]
    scan_origin = np.array(itk_image.GetOrigin())[::-1]
    volume = lum_trans(volume)
    volume = resample(volume, voxel_spacing, np.array([1, 1, 1]), order=1)[0]

    cropper = SimpleCrop()
    for candidate in nodules:
        location = np.array([np.float32(candidate[axis]) for axis in ["z", "y", "x"]])
        location = np.ceil((location - scan_origin) / 1.)
        cropped_image, coords = cropper(volume[np.newaxis], location)

    # New-style pipeline: chained PreprocessCT passes, then patch cropping.
    ct_array, meta = load_ct.load_ct(metaimage_path)
    normalize = preprocess_ct.PreprocessCT(clip_lower=-1200., clip_upper=600.,
                                           min_max_normalize=True, scale=255,
                                           dtype='uint8')
    ct_array, meta = normalize(ct_array, meta)
    rescale = preprocess_ct.PreprocessCT(spacing=1., order=1)
    ct_array, meta = rescale(ct_array, meta)
    cropped_image_new, coords_new = crop_patches.patches_from_ct(
        ct_array, meta, 96, nodules, stride=4, pad_value=160)[0]

    assert np.abs(cropped_image_new - cropped_image).sum() == 0
    assert np.abs(coords_new - coords).sum() == 0
def _ct_preprocess(self, ct_path):
    """Load the scan at ``ct_path`` and run the standard HU preprocessing.

    Returns the preprocessed voxel array and its metadata.
    """
    raw = load_ct.load_ct(ct_path)
    pipeline = preprocess_ct.PreprocessCT(to_hu=True,
                                          clip_lower=self.clip_lower,
                                          clip_upper=self.clip_upper,
                                          spacing=[.9, .7, .7],
                                          min_max_normalize=False)
    return pipeline(*raw)
def test_preprocess_dicom_min_max_scale(dicom_path):
    """Min-max normalisation must keep every voxel value within [0, 1]."""
    pipeline = preprocess_ct.PreprocessCT(clip_lower=-1000, clip_upper=400,
                                          min_max_normalize=True)
    voxels, _ = pipeline(*load_ct.load_ct(dicom_path))

    assert isinstance(voxels, np.ndarray)
    assert voxels.max() <= 1
    assert voxels.min() >= 0
def test_preprocess_dicom_pure(dicom_path):
    """A parameter-free PreprocessCT must return an ndarray unchanged in type."""
    pipeline = preprocess_ct.PreprocessCT()

    voxels, meta = load_ct.load_dicom(dicom_path)
    assert isinstance(voxels, np.ndarray)

    voxels, _ = pipeline(*load_ct.load_dicom(dicom_path))
    assert isinstance(voxels, np.ndarray)
def test_segmentation_over_LIDC(full_dicom_path):
    """Smoke-test lung segmentation on a full DICOM/LUNA scan.

    Segmentation and separation of the lungs are provided by
    ``improved_lung_segmentation``.
    """
    to_hu = preprocess_ct.PreprocessCT(to_hu=True)
    scan, _ = to_hu(*load_ct.load_ct(full_dicom_path))
    lung, lung_left, lung_right, trachea = improved_lung_segmentation(scan)
def test_lum_trans(metaimage_path):
    """``lum_trans`` must match the equivalent PreprocessCT configuration."""
    ct_array, meta = load_ct.load_ct(metaimage_path)
    expected = lum_trans(ct_array)

    pipeline = preprocess_ct.PreprocessCT(clip_lower=-1200., clip_upper=600.,
                                          min_max_normalize=True, scale=255,
                                          dtype='uint8')
    actual, _ = pipeline(ct_array, meta)

    assert np.abs(expected - actual).sum() == 0
def test_resample(metaimage_path):
    """``resample`` must match PreprocessCT driven by the scan's own spacing."""
    ct_array, meta = load_ct.load_ct(metaimage_path)
    expected, _ = resample(ct_array,
                           np.array(load_ct.MetaData(meta).spacing),
                           np.array([1, 1, 1]),
                           order=1)

    # spacing=True tells PreprocessCT to take the spacing from the metadata.
    pipeline = preprocess_ct.PreprocessCT(spacing=True, order=1)
    actual, _ = pipeline(ct_array, meta)

    assert np.abs(expected - actual).sum() == 0
def predict(ct_path, nodule_list, model_path="src/algorithms/classify/assets/gtr123_model.ckpt"):
    """Score each nodule centroid for malignancy concern with CaseNet.

    Args:
        ct_path (str): path to a MetaImage or DICOM data.
        nodule_list: List of nodules
        model_path: Path to the torch model (Default value = "src/algorithms/classify/assets/gtr123_model.ckpt")

    Returns:
        List of nodules, and probabilities
    """
    if not nodule_list:
        return []

    model = CaseNet()
    model.load_state_dict(torch.load(model_path))
    model.eval()
    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()

    # Transform the scan to the 0-255 uint8 range and resample to 1x1x1 mm
    # before extracting one patch per nodule.
    pipeline = preprocess_ct.PreprocessCT(clip_lower=-1200., clip_upper=600.,
                                          spacing=1., order=1,
                                          min_max_normalize=True, scale=255,
                                          dtype='uint8')
    ct_array, meta = pipeline(*load_ct.load_ct(ct_path))
    patches = crop_patches.patches_from_ct(ct_array, meta, config['crop_size'],
                                           nodule_list,
                                           stride=config['stride'],
                                           pad_value=config['filling_value'])

    scored = []
    for nodule, (patch, patch_coords) in zip(nodule_list, patches):
        patch_var = Variable(torch.from_numpy(patch[np.newaxis, np.newaxis]).float())
        patch_var.volatile = True  # inference only; no autograd graph needed
        coords_var = Variable(torch.from_numpy(patch_coords[np.newaxis]).float())
        coords_var.volatile = True

        _, pred, _ = model(patch_var, coords_var)
        scored.append({
            "x": nodule["x"],
            "y": nodule["y"],
            "z": nodule["z"],
            "p_concerning": float(pred.data.cpu().numpy()),
        })
    return scored
def test_preprocess_dicom_min_max_scale(dicom_path):
    """Params-style min-max normalisation keeps voxel values within [0, 1]."""
    params = preprocess_ct.Params(clip_lower=-1000, clip_upper=400,
                                  min_max_normalize=True)
    pipeline = preprocess_ct.PreprocessCT(params)

    voxels, raw_meta = load_ct.load_ct(dicom_path)
    voxels = pipeline(voxels, load_ct.MetaData(raw_meta))

    assert isinstance(voxels, np.ndarray)
    assert voxels.max() <= 1
    assert voxels.min() >= 0
def test_preprocess_dicom_clips(dicom_path):
    """Params-style clipping must cap the processed voxel range."""
    params = preprocess_ct.Params(clip_lower=-1, clip_upper=40)
    pipeline = preprocess_ct.PreprocessCT(params)

    voxels, raw_meta = load_ct.load_ct(dicom_path)
    voxels = pipeline(voxels, load_ct.MetaData(raw_meta))

    assert isinstance(voxels, np.ndarray)
    assert voxels.max() <= 40
    assert voxels.min() >= -1
def test_preprocess_dicom_pure(dicom_path):
    """Default Params leave the voxel array an ndarray before and after."""
    params = preprocess_ct.Params()
    pipeline = preprocess_ct.PreprocessCT(params)

    voxels, _ = load_ct.load_dicom(dicom_path)
    assert isinstance(voxels, np.ndarray)

    voxels, raw_meta = load_ct.load_dicom(dicom_path)
    voxels = pipeline(voxels, load_ct.MetaData(raw_meta))
    assert isinstance(voxels, np.ndarray)
def test_preprocess(metaimage_path):
    """A single combined PreprocessCT pass must reproduce the manual
    lum_trans + resample + uint8 cast pipeline crop-for-crop."""
    nodules = [{"z": 556, "y": 100, "x": 0}]

    # Manual pipeline: load, luminance transform, resample to 1x1x1 mm, cast.
    itk_image = sitk.ReadImage(metaimage_path)
    volume = sitk.GetArrayFromImage(itk_image)
    voxel_spacing = np.array(itk_image.GetSpacing())[::-1]
    scan_origin = np.array(itk_image.GetOrigin())[::-1]
    volume = lum_trans(volume)
    volume = resample(volume, voxel_spacing, np.array([1, 1, 1]), order=1)[0]
    voxel_spacing = np.array([1, 1, 1])
    volume = volume.astype('uint8')

    cropper = SimpleCrop()
    for candidate in nodules:
        location = np.array(
            [np.float32(candidate[axis]) for axis in ["z", "y", "x"]])
        # N-dimensional array coordinates for the point in real world
        # should be computed in the way below:
        location = (location - scan_origin) / voxel_spacing
        cropped_image, coords = cropper(volume, location)

    # Combined pipeline: one PreprocessCT that normalises AND resamples.
    pipeline = preprocess_ct.PreprocessCT(clip_lower=-1200., clip_upper=600.,
                                          min_max_normalize=True, scale=255,
                                          spacing=True, order=1, dtype='uint8')
    ct_array, meta = load_ct.load_ct(metaimage_path)
    ct_array, meta = pipeline(ct_array, meta)
    cropped_image_new, coords_new = crop_patches.patches_from_ct(
        ct_array, meta, 96, nodules, stride=4, pad_value=160)[0]

    assert np.abs(cropped_image_new - cropped_image).sum() == 0
    assert np.abs(coords_new - coords).sum() == 0
def predict(ct_path, model_path=None):
    """Detect nodule candidates in a CT scan with the DSB2017 detector.

    Args:
        ct_path (str): path to a MetaImage or DICOM scan, loadable by
            ``load_ct.load_ct``.
        model_path: Path to the file containing the model state
            (Default value = "src/algorithms/identify/assets/dsb2017_detector.ckpt")

    Returns:
        List of nodule locations and probabilities, as dicts with
        "x", "y", "z" (image-space integers) and "p_nodule" (float in [0, 1]).
    """
    if not model_path:
        IDENTIFY_DIR = path.join(Config.ALGOS_DIR, 'identify')
        model_path = path.join(IDENTIFY_DIR, 'assets', 'dsb2017_detector.ckpt')

    ct_array, meta = load_ct.load_ct(ct_path)
    # Bugfix: wrap metadata in load_ct.MetaData (not MetaImage), consistent
    # with every other call site in this module.
    meta = load_ct.MetaData(meta)
    spacing = np.array(meta.spacing)
    masked_image, mask = filter_lungs(ct_array)

    net = Net()
    net.load_state_dict(torch.load(model_path)["state_dict"])
    if torch.cuda.is_available():
        net = torch.nn.DataParallel(net).cuda()

    # NOTE: until the next PyTorch release we must keep chunks small on CPU,
    # as bigger ones can segfault.
    split_comber = SplitComb(side_len=144, margin=32, max_stride=16, stride=4,
                             pad_value=170)

    # Transform image to the 0-255 range and resample to 1x1x1 mm.
    preprocess = preprocess_ct.PreprocessCT(clip_lower=-1200., clip_upper=600.,
                                            spacing=1., order=1,
                                            min_max_normalize=True, scale=255,
                                            dtype='uint8')
    ct_array, meta = preprocess(ct_array, meta)
    ct_array = ct_array[np.newaxis, ...]

    imgT, coords, nzhw = split_data(ct_array, split_comber=split_comber)
    results = []
    # Run the detector over each image chunk.
    for img, coord in zip(imgT, coords):
        var = Variable(img[np.newaxis])
        var.volatile = True  # inference only; no autograd graph needed
        coord = Variable(coord[np.newaxis])
        coord.volatile = True
        resvar = net(var, coord)
        results.append(resvar.data.cpu().numpy())

    results = np.concatenate(results, 0)
    results = split_comber.combine(results, nzhw=nzhw)

    # First index of each proposal is the probability; then z, y, x and radius.
    pbb = GetPBB()
    proposals, _ = pbb(results, ismask=True)
    proposals = nms(proposals)

    # Sigmoid converts the raw score into a probability.
    proposals[:, 0] = expit(proposals[:, 0])

    # Rescale back to image-space coordinates.
    proposals[:, 1:4] /= spacing[np.newaxis]

    return [{
        "x": int(p[3]),
        "y": int(p[2]),
        "z": int(p[1]),
        "p_nodule": float(p[0])
    } for p in proposals]
def test_preprocess_dicom_clips(dicom_path):
    """Clip bounds given as kwargs must cap the processed voxel range."""
    pipeline = preprocess_ct.PreprocessCT(clip_lower=-1, clip_upper=40)
    voxels, _ = pipeline(*load_ct.load_ct(dicom_path))

    assert isinstance(voxels, np.ndarray)
    assert voxels.max() <= 40
    assert voxels.min() >= -1