import os
import operator
from functools import reduce
from glob import glob

import numpy as np
import SimpleITK as sitk
import torch

import utils


# Run inference over pre-split sub-volumes, reassemble each full volume, and
# write the prediction as an .mhd image. `target_split` is expected to be the
# module-level (z, y, x) tuple describing how each volume was split.
def inference(args, loader, model, transforms):
    src = args.inference
    dst = args.save
    model.eval()
    nvols = reduce(operator.mul, target_split, 1)
    # assume single GPU / batch size 1
    for data in loader:
        data, series, origin, spacing = data[0]
        shape = data.size()
        # move the sub-volume batch to the GPU
        if args.cuda:
            data = data.pin_memory()
            data = data.cuda()
        with torch.no_grad():
            output = model(data)
        _, output = output.max(1)
        output = output.view(shape)
        output = output.cpu()
        # merge sub-volumes and save
        results = output.chunk(nvols)
        results = map(lambda var: torch.squeeze(var.data).numpy().astype(np.int16),
                      results)
        volume = utils.merge_image([*results], target_split)
        print("save {}".format(series))
        utils.save_updated_image(volume, os.path.join(dst, series + ".mhd"),
                                 origin, spacing)
# Resample LUNA16 CT volumes to isotropic spacing, clip voxel intensities to the
# [-1000, 400] HU range, and accumulate per-volume statistics for normalization.
def normalize_lung_CT(**kwargs):
    mean_values = []
    var_values = []
    MIN_BOUND = -1000
    MAX_BOUND = 400
    Z_MAX, Y_MAX, X_MAX = kwargs['Z_MAX'], kwargs['Y_MAX'], kwargs['X_MAX']
    vox_spacing = kwargs['vox_spacing']
    utils.init_dims3D(Z_MAX, Y_MAX, X_MAX, vox_spacing)
    luna_subset_path = kwargs['src']
    luna_save_path = kwargs['dst']
    file_list = glob(os.path.join(luna_subset_path, "*.mhd"))
    img_spacing = (vox_spacing, vox_spacing, vox_spacing)
    for img_file in file_list:
        itk_img = sitk.ReadImage(img_file)
        (x_space, y_space, z_space) = itk_img.GetSpacing()
        spacing_old = (z_space, y_space, x_space)
        # indexes are z, y, x (notice the ordering)
        img_array = sitk.GetArrayFromImage(itk_img)
        img, mu, var = utils.resample_volume(img_array, spacing_old, img_spacing,
                                             bounds=(MIN_BOUND, MAX_BOUND))
        utils.save_updated_image(img,
                                 os.path.join(luna_save_path, os.path.basename(img_file)),
                                 itk_img.GetOrigin(), img_spacing)
        mean_values.append(mu)
        var_values.append(var)
    dataset_mean = np.mean(mean_values)
    dataset_stddev = np.sqrt(np.mean(var_values))
    return (dataset_mean, dataset_stddev)
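# Illustrative usage sketch (not part of the original module): resample one
# LUNA16 subset to isotropic spacing and record the dataset statistics that are
# typically reused to normalize network inputs. Every path and constant below is
# an assumption for demonstration, not a value taken from this repository.
def _example_normalize_ct():
    mean, stddev = normalize_lung_CT(
        Z_MAX=160, Y_MAX=128, X_MAX=160,    # assumed target volume dimensions
        vox_spacing=2.5,                    # assumed isotropic voxel spacing in mm
        src='/data/luna16/subset0',         # hypothetical directory of raw .mhd scans
        dst='/data/luna16/normalized_ct')   # hypothetical output directory
    print('dataset mean={:.4f} stddev={:.4f}'.format(mean, stddev))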
# Run single-volume inference: read each source image's origin/spacing so the
# prediction can be written back with matching metadata.
def inference(params, args, loader, model):
    src = params['ModelParams']['dirInfer']
    dst = params['ModelParams']['dirResult']
    model.eval()
    # assume single GPU / batch size 1
    for batch_idx, data in enumerate(loader):
        data, id = data
        id = id[0]
        itk_img = sitk.ReadImage(os.path.join(src, id))
        origin = np.array(list(reversed(itk_img.GetOrigin())))
        spacing = np.array(list(reversed(itk_img.GetSpacing())))
        _, _, z, y, x = data.shape  # batch, channel, depth, height, width
        # move the input volume to the GPU
        if args.cuda:
            data = data.pin_memory()
            data = data.cuda()
        with torch.no_grad():
            output = model(data)
        _, output = output.max(1)
        # reshape the flattened class predictions back into a 3-D volume
        output = output.view((x, y, z))
        output = output.cpu()
        print("save {}".format(id))
        utils.save_updated_image(output,
                                 os.path.join(dst, id + "_predicted.mhd"),
                                 origin, spacing)
def normalize_lung_mask(**kwargs):
    Z_MAX, Y_MAX, X_MAX = kwargs['Z_MAX'], kwargs['Y_MAX'], kwargs['X_MAX']
    vox_spacing = kwargs['vox_spacing']
    utils.init_dims3D(Z_MAX, Y_MAX, X_MAX, vox_spacing)
    luna_seg_lungs_path = kwargs['src']
    luna_seg_lungs_save_path = kwargs['dst']
    file_list = glob(os.path.join(luna_seg_lungs_path, "*.mhd"))
    img_spacing = (vox_spacing, vox_spacing, vox_spacing)
    for img_file in file_list:
        itk_img = sitk.ReadImage(img_file)
        (x_space, y_space, z_space) = itk_img.GetSpacing()
        spacing_old = (z_space, y_space, x_space)
        # indexes are z, y, x (notice the ordering)
        img_array = sitk.GetArrayFromImage(itk_img)
        img, _, _ = utils.resample_volume(img_array, spacing_old, img_spacing)
        img[img < 1] = 0
        utils.save_updated_image(img,
                                 os.path.join(luna_seg_lungs_save_path, os.path.basename(img_file)),
                                 itk_img.GetOrigin(), img_spacing)
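# Companion sketch (illustrative): apply the same resampling to the seg-lungs
# masks so images and masks end up on an identical grid. The constants mirror
# the hypothetical values used in _example_normalize_ct above.
def _example_normalize_masks():
    normalize_lung_mask(
        Z_MAX=160, Y_MAX=128, X_MAX=160,        # must match the CT normalization
        vox_spacing=2.5,
        src='/data/luna16/seg-lungs-LUNA16',    # hypothetical directory of lung masks
        dst='/data/luna16/normalized_masks')    # hypothetical output directory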