def execution(self, context):
    csd_model = load(self.csd_model.fullPath())
    csd_coeff = aims.read(self.fibre_odf_sh_coeff.fullPath())
    header = csd_coeff.header()
    sh_coeff = np.asarray(csd_coeff)

    mask_vol = aims.read(self.mask.fullPath())
    mask = vol_to_array(mask_vol)
    mask = array_to_mask(mask)

    try:
        S0_vol = aims.read(self.S0_signal.fullPath())
        # convert to a numpy array so it broadcasts correctly in predict()
        S0 = vol_to_array(S0_vol)
    except Exception:
        context.write("No b0 volume provided, assuming the non-diffusion-weighted signal value is 1.0 in all voxels")
        S0 = 1

    csd_fit = SphHarmFit(csd_model, sh_coeff, mask)
    prediction = csd_fit.predict(gtab=None, S0=S0)
    prediction_volume = array_to_vol(prediction, header)
    aims.write(prediction_volume, self.predicted_signal.fullPath())

    # Handling referentials
    transformManager = getTransformationManager()
    transformManager.copyReferential(self.fibre_odf_sh_coeff, self.predicted_signal)
def execution(self, context):
    data_vol = aims.read(self.dwi_data.fullPath())
    header = data_vol.header()
    data = vol_to_array(data_vol)

    # Estimate the noise standard deviation with PIESNO
    sigma = piesno(data, self.coil_number, alpha=self.alpha, l=self.trials,
                   itermax=ITERMAX, eps=EPS, return_mask=False)

    # Expand the estimate into a full sigma map for the downstream denoising processes
    sigma_arr = sigma * np.ones(data.shape[:-1], dtype=np.float32)
    sigma_vol = array_to_vol(sigma_arr, header=header)
    aims.write(sigma_vol, self.sigma.fullPath())
def execution(self, context): context.write("Loading input files") data_vol = aims.read(self.diffusion_data.fullPath()) hdr = data_vol.header() data = vol_to_array(data_vol) del data_vol if self.mask is not None: mask_vol = aims.read(self.mask.fullPath()) mask = vol_to_array(mask_vol) del mask_vol mask = array_to_mask(mask) else: mask = self.mask tensor = load(self.tensor_model.fullPath()) context.write("Input files loaded successfully") context.write( "Fitting Diffusion Tensor model on data...it migh take some time") tenfit = tensor.fit(data, mask=mask) context.write("Diffusion Tensor Model fitted successfully") tensor_coefficients = tenfit.model_params vol_tensor = array_to_vol(tensor_coefficients, header=hdr) context.write('Writing coefficient volume on disk') aims.write(vol_tensor, self.tensor_coefficients.fullPath()) #saving other metadata self.tensor_coefficients.setMinf('model_uuid', self.tensor_model.uuid()) self.tensor_coefficients.setMinf('data_uuid', self.diffusion_data.uuid()) try: assert self.mask is not None self.tensor_coefficients.setMinf('mask_uuid', self.mask.uuid()) except Exception: self.tensor_coefficients.setMinf('mask_uuid', 'None') transformManager = getTransformationManager() transformManager.copyReferential(self.diffusion_data, self.tensor_coefficients) context.write("Processed Finished") pass
def execution(self, context):
    data_vol = aims.read(self.dwi_data.fullPath())
    header = data_vol.header()
    data = vol_to_array(data_vol)

    sigma_vol = aims.read(self.sigma.fullPath())
    sigma = vol_to_array(sigma_vol)

    if self.brain_mask is not None:
        brain_mask_vol = aims.read(self.brain_mask.fullPath())
        brain_mask = vol_to_array(brain_mask_vol)
    else:
        brain_mask = None

    denoised_data = nlmeans(data, sigma, mask=brain_mask,
                            patch_radius=self.patch_radius,
                            block_radius=self.block_radius,
                            rician=self.rician_noise)
    denoised_data_vol = array_to_vol(denoised_data, header=header)
    aims.write(denoised_data_vol, self.denoised_dwi_data.fullPath())
def execution(self, context):
    data_vol = aims.read(self.dwi_data.fullPath())
    header = data_vol.header()
    data = vol_to_array(data_vol)

    sigma_vol = aims.read(self.sigma.fullPath())
    sigma = vol_to_array(sigma_vol)

    if self.brain_mask is not None:
        brain_mask_vol = aims.read(self.brain_mask.fullPath())
        brain_mask = vol_to_array(brain_mask_vol)
    else:
        brain_mask = None

    denoised_data = localpca(data, sigma, mask=brain_mask,
                             pca_method=self.method,
                             patch_radius=self.patch_radius,
                             tau_factor=self.tau_factor)
    denoised_data_vol = array_to_vol(denoised_data, header=header)
    aims.write(denoised_data_vol, self.denoised_dwi_data.fullPath())
def execution(self, context):
    # Read objects from the lightest to the largest in memory
    model = load(self.csd_model.fullPath())
    sphere = read_sphere(self.sphere.fullPath())

    mask_vol = aims.read(self.mask.fullPath())
    mask = vol_to_array(mask_vol)
    mask = array_to_mask(mask)

    sh_coeff_vol = aims.read(self.fibre_odf_sh_coeff.fullPath())
    hdr = sh_coeff_vol.header()
    sh_coeff = np.asarray(sh_coeff_vol)
    context.write("Data were successfully loaded.")

    spharmfit = SphHarmFit(model, sh_coeff, mask)
    # Do not use the classical dipy call spharmfit.odf(sphere): it computes
    # the ODF for the whole volume by default and takes far too much memory.
    # extract_odf restricts the computation to the mask.
    odf = extract_odf(spharmfit, mask, sphere)

    odf_vol = array_to_vol(odf, header=hdr)
    aims.write(odf_vol, self.fibre_odf.fullPath())
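# Illustrative alternative to extract_odf (a sketch, not Diffuse's helper):
# project the SH coefficients onto the sphere only for voxels inside the
# mask, which keeps the intermediate arrays small. The SH order (8 here) is
# an assumption and must match the order used to fit the coefficients.
import numpy as np
from dipy.reconst.shm import sh_to_sf

def masked_odf(sh_coeff, mask, sphere, sh_order=8):
    odf = np.zeros(mask.shape + (len(sphere.vertices),), dtype=np.float32)
    odf[mask] = sh_to_sf(sh_coeff[mask], sphere, sh_order)
    return odf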
def execution(self, context):
    tensor_coeff = aims.read(self.tensor_coefficients.fullPath())
    tensor_params = np.asarray(tensor_coeff)
    tensor_model = load(self.tensor_model.fullPath())
    gtab = tensor_model.gtab

    # Loading base (b=0) signal
    S0 = aims.read(self.S0_signal.fullPath())
    S0 = vol_to_array(S0)

    tenfit = TensorFit(tensor_model, tensor_params)
    pred_sign = tenfit.predict(gtab=gtab, S0=S0)

    hdr = tensor_coeff.header()
    pred_vol = array_to_vol(pred_sign, header=hdr)
    aims.write(pred_vol, self.predicted_signal.fullPath())

    # Handling metadata: copy the referential from the input coefficients
    # onto the predicted signal
    transformManager = getTransformationManager()
    transformManager.copyReferential(self.tensor_coefficients, self.predicted_signal)
    context.write("Process finished successfully")
def execution(self, context):
    # If a tensor has already been fitted, do not compute a new one.
    if self.tensor_coefficients is not None and self.tensor_model is not None:
        context.write("A fitted tensor already exists! Let's use it!")
        tensor_coeff_vol = aims.read(self.tensor_coefficients.fullPath())
        tensor_coeff = np.asarray(tensor_coeff_vol)
        hdr = tensor_coeff_vol.header()
        tensor_model = load(self.tensor_model.fullPath())
        tenfit = TensorFit(tensor_model, tensor_coeff)

        if self.mask is not None:
            mask_vol = aims.read(self.mask.fullPath())
            mask = vol_to_array(mask_vol)
            mask = array_to_mask(mask)
        else:
            context.write(
                'No mask provided! Estimating the fibre response from the whole volume or brain is not really accurate. A default mask based on Fractional Anisotropy is computed.'
            )
            fa = tenfit.fa
            # Clip to avoid NaN in case of a bad fit
            fa = np.clip(fa, 0, 1)
            # A high FA value is associated with single-fibre-direction voxels
            mask = fa > self.fa_threshold
            mask = mask.astype(bool)

        # Code adapted from dipy's response_from_mask function
        indices = np.where(mask > 0)
        sub_tenfit = tenfit[indices]
        lambdas = sub_tenfit.evals[:, :2]
        gtab = sub_tenfit.model.gtab
        vol = aims.read(self.diffusion_data.fullPath())
        data = np.asarray(vol)
        S0s = data[indices][:, np.nonzero(gtab.b0s_mask)[0]]
        response, ratio = _get_response(S0s, lambdas)
    else:
        context.write('No tensor fitted yet! Computing a new one')
        gtab = load(self.gradient_table.fullPath())
        if is_multi_shell(gtab):
            context.warning(
                "The DWI scheme for this data is multi-shell: b-values",
                shells(gtab),
                ". The CSD implementation used in Diffuse currently only handles single-shell DWI schemes. By default the highest shell b-value",
                max_shell(gtab), "is selected")
            context.warning(
                "Even though only the outer shell is used for the deconvolution, the following estimation method uses the full DWI scheme for response estimation. It might be inaccurate if the deconvolved shell b-value is too high (e.g. b=5000)"
            )
        vol = aims.read(self.diffusion_data.fullPath())
        data = np.asarray(vol)
        if self.mask is not None:
            mask_vol = aims.read(self.mask.fullPath())
            mask = vol_to_array(mask_vol)
            mask = array_to_mask(mask)
            response, ratio = response_from_mask(gtab, data, mask)
        else:
            context.warning(
                "No mask provided! Computing a high-FA mask: voxels with FA higher than "
                + str(self.fa_threshold)
                + " are considered single-direction voxels")
            # Default tensor model --> not stored for now
            tensor = TensorModel(gtab)
            # Whole-volume fit
            tenfit = tensor.fit(data)
            fa = tenfit.fa
            # Clip to avoid NaN in case of a bad fit
            fa = np.clip(fa, 0, 1)
            # A high FA value is associated with single-fibre-direction voxels
            mask = fa > self.fa_threshold
            mask = mask.astype(bool)
            indices = np.where(mask)
            # Code adapted from dipy's response_from_mask function
            sub_tenfit = tenfit[indices]
            lambdas = sub_tenfit.evals[:, :2]
            gtab = sub_tenfit.model.gtab
            vol = aims.read(self.diffusion_data.fullPath())
            data = np.asarray(vol)
            S0s = data[indices][:, np.nonzero(gtab.b0s_mask)[0]]
            response, ratio = _get_response(S0s, lambdas)

    # Store the estimated response
    dump(response, self.response.fullPath())
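# Illustrative follow-up sketch (not part of the process above): the dumped
# response, an (eigenvalues, mean b0 signal) pair, is the object dipy's
# ConstrainedSphericalDeconvModel expects when building the CSD model that the
# other processes in this pipeline load. gtab is the gradient table used
# above; the SH order is left at dipy's default.
from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel

def build_csd_model(gtab, response):
    return ConstrainedSphericalDeconvModel(gtab, response)

# csd_model = build_csd_model(gtab, response)
# dump(csd_model, csd_model_path)  # hypothetical path, using Diffuse's dump() helper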