Example #1
    def __init__(self,
                 data,
                 bvecs,
                 bvals,
                 affine=None,
                 mask=None,
                 scaling_factor=SCALE_FACTOR,
                 sub_sample=None,
                 params_file=None,
                 verbose=True):
        """
        A base-class for models based on DWI data.

        Parameters
        ----------

        scaling_factor: int, defaults to 1000.
           The factor by which to scale the provided b-values, so that the
           units in the Stejskal-Tanner (S/T) equation come out right.
        
        """
        # DWI should already have everything we need:
        DWI.__init__(self,
                     data,
                     bvecs,
                     bvals,
                     affine=affine,
                     mask=mask,
                     scaling_factor=scaling_factor,
                     sub_sample=sub_sample,
                     verbose=verbose)

        # Sometimes you might want to not store the params in a file:
        if params_file == 'temp':
            self.params_file = 'temp'
        else:
            # Introspect to figure out what name the current class has:
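            # (for example, "<class 'some.module.SomeModel'>" becomes
            # "SomeModel"; the module path here is only illustrative)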
            this_class = str(self.__class__).split("'")[-2].split('.')[-1]
            self.params_file = params_file_resolver(self,
                                                    this_class,
                                                    params_file=params_file)
Example #2
    def __init__(self,
                 data,
                 bvecs,
                 bvals,
                 affine=None,
                 mask=None,
                 scaling_factor=SCALE_FACTOR,
                 sub_sample=None,
                 params_file=None,
                 verbose=True):
        """
        A base-class for models based on DWI data.

        Parameters
        ----------

        scaling_factor: int, defaults to 1000.
           The factor by which to scale the provided b-values, so that the
           units in the Stejskal-Tanner (S/T) equation come out right.
        
        """
        # DWI should already have everything we need: 
        DWI.__init__(self,
                     data,
                     bvecs,
                     bvals,
                     affine=affine,
                     mask=mask,
                     scaling_factor=scaling_factor,
                     sub_sample=sub_sample,
                     verbose=verbose)

        # Sometimes you might want to not store the params in a file: 
        if params_file == 'temp':
            self.params_file = 'temp'
        else:
            # Introspect to figure out what name the current class has:
            this_class = str(self.__class__).split("'")[-2].split('.')[-1]
            self.params_file = params_file_resolver(self,
                                                    this_class,
                                                    params_file=params_file)
Example #3
    def __init__(self,
                 data,
                 bvecs,
                 bvals,
                 solver=None,
                 solver_params=None,
                 params_file=None,
                 axial_diffusivity=AD,
                 radial_diffusivity=RD,
                 affine=None,
                 mask=None,
                 scaling_factor=SCALE_FACTOR,
                 sub_sample=None,
                 over_sample=None,
                 mode='relative_signal',
                 verbose=True,
                 force_recompute=False):
        """
        Initialize SparseDeconvolutionModel class instance.
        """
        # Initialize the super-class:
        CanonicalTensorModel.__init__(self,
                                      data,
                                      bvecs,
                                      bvals,
                                      params_file=params_file,
                                      axial_diffusivity=axial_diffusivity,
                                      radial_diffusivity=radial_diffusivity,
                                      affine=affine,
                                      mask=mask,
                                      scaling_factor=scaling_factor,
                                      sub_sample=sub_sample,
                                      over_sample=over_sample,
                                      mode=mode,
                                      verbose=verbose)
        
        # Name the params file, if needed: 
        this_class = str(self.__class__).split("'")[-2].split('.')[-1]
        self.params_file = params_file_resolver(self,
                                                this_class,
                                                params_file=params_file)

        # Deal with the solver stuff: 
        # For now, the default is ElasticNet:
        if solver is None:
            this_solver = sklearn_solvers['ElasticNet']
        # Assume it's a key into the dict: 
        elif isinstance(solver, str):
            this_solver = sklearn_solvers[solver]
        # Assume it's a class: 
        else:
            this_solver = solver
        
        # This will be passed as kwarg to the solver initialization:
        if solver_params is None:
            # This seems to be good for our data:
            alpha = 0.0005
            l1_ratio = 0.6
            self.solver_params = dict(alpha=alpha,
                                      l1_ratio=l1_ratio,
                                      fit_intercept=True,
                                      positive=True)
        else:
            self.solver_params = solver_params

        # We reuse the same class instance in all voxels: 
        self.solver = this_solver(**self.solver_params)

        # This is only here for now, but should be implemented in the
        # base-class (all the way up?) and generalized in a wrapper to model
        # params, I believe. 
        self.force_recompute = force_recompute
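A minimal usage sketch of the solver plumbing above (illustrative only: data, bvecs and bvals are assumed to be already-loaded arrays, and the particular solver and parameter values are not prescribed by the class):

    # Pass a solver class directly, rather than a key into sklearn_solvers,
    # together with the keyword arguments used to initialize it:
    from sklearn.linear_model import Lasso

    model = SparseDeconvolutionModel(data, bvecs, bvals,
                                     solver=Lasso,
                                     solver_params=dict(alpha=0.0005,
                                                        positive=True))

    # Or rely on the defaults (ElasticNet with alpha=0.0005, l1_ratio=0.6):
    model = SparseDeconvolutionModel(data, bvecs, bvals)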
Example #4
    def model_params(self):
        """
        Fitting the weights for the TissueFractionModel is done as a second
        stage, after the SparseDeconvolutionModel has been fit.
        
        The logic is as follows:

        The isotropic weight calculated in the previous stage subsumes two
        different components: one is the free water isotropic component and the
        other is a hindered tissue water component.

        .. math::

            w_{iso} = w_2 D_g + w_3 D_{csf}

        Where $w_{iso}$ is the weight of the isotropic component estimated in
        the initial fit and $w_{2,3}$ are the weights of tissue water and
        free water, respectively. $D_g \approx 1$ and $D_{csf} \approx 3$ are
        the diffusivities of gray matter and CSF (free water), respectively.

        In addition, we know that the tissue water, together with the weights
        on fibers should account for the tissue fraction measurement:

        .. math::
        
            TF = w_1 \lambda_1 + w_2 \lambda_2

        Where $w_1$ is the weight of the canonical tensor found in
        CanonicalTensorModel and $w_2$ is the weight of the tissue isotropic
        component. $\lambda_{1,2}$ are additional relative weights of the two
        components within the tissue (canonical tensor and tissue
        water). Implicitly, $\lambda_3 = 0$, reflecting the fact that the free
        water is not part of the tissue fraction at all. To find $\lambda_i$, we
        perform a grid search over plausible values and choose the ones that
        best account for the diffusion and TF signals.

        To find $w_2$ and $w_3$, we follow these steps:

        0. We find $w_1 = w_{tensor}$ using the CanonicalTensorModel.

        1. We fix the values of $\lambda_1$ and $\lambda_2$ and solve for $w_2$:

            $w_2 = \frac{TF - \lambda_1 w_1}{\lambda_2}$

        2. From the first equation above, we can then solve for $w_3$:

            $w_3 = 1 - w_{iso} - w_2$

        3. We go back to the expanded model and predict the diffusion and the
        TF data for these values of $\lambda_{1,2}$.

        """

        # Start by getting the params for the underlying
        # SparseDeconvolutionModel:
        temp_p_file = self.params_file
        self.params_file = params_file_resolver(self,
                                                'SparseDeconvolutionModel')
        
        tensor_params = super(TissueFractionModel, self).model_params
        w2 = self.non_fiber_iso

        # Restore order: 
        self.params_file = temp_p_file

        # The tensor weight is the sum of the fiber parameters in each voxel:
        w_ten = np.sum(tensor_params[self.mask], -1)
        # And the isotropic weight is taken from the isotropic regressor:
        w_iso = self.iso_regressor[0]

        w2 = (self._flat_tf - self.alpha1 * w_ten) / self.alpha2
        w3 = (1 - w_ten - w2)

        w2_out = ozu.nans(self.shape[:3])
        w3_out = ozu.nans(self.shape[:3])

        w2_out[self.mask] = w2
        w3_out[self.mask] = w3

        # Return tensor_idx, w1, w2, w3 
        return tensor_params[..., 0], tensor_params[..., 1], w2_out, w3_out
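The grid search over $\lambda_{1,2}$ described in the docstring is not part of this method; a minimal sketch of what it could look like follows (tf_prediction_error is a hypothetical helper standing in for whatever error measure compares the predicted diffusion/TF signal to the data):

    import numpy as np

    lambdas = np.arange(0.1, 1.0, 0.1)          # plausible relative weights
    best_err, best_l1, best_l2 = np.inf, None, None
    for l1 in lambdas:
        for l2 in lambdas:
            err = tf_prediction_error(l1, l2)   # hypothetical error function
            if err < best_err:
                best_err, best_l1, best_l2 = err, l1, l2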
Example #5
    def model_params(self):
        """
        Fitting the weights for the TissueFractionModel is done as a second
        stage, after the SparseDeconvolutionModel has been fit.
        
        The logic is as follows:

        The isotropic weight calculated in the previous stage subsumes two
        different components: one is the free water isotropic component and the
        other is a hindered tissue water component.

        .. math::

            w_{iso} = w_2 D_g + w_3 D_{csf}

        Where $w_{iso}$ is the weight of the isotropic component estimated in
        the initial fit and $w_{2,3}$ are the weights of tissue water and
        free water, respectively. $D_g \approx 1$ and $D_{csf} \approx 3$ are
        the diffusivities of gray matter and CSF (free water), respectively.

        In addition, we know that the tissue water, together with the weights
        on fibers should account for the tissue fraction measurement:

        .. math::
        
            TF = w_1 \lambda_1 + w_2 \lambda_2

        Where $w_1$ is the weight of the canonical tensor found in
        CanonicalTensorModel and $w_2$ is the weight of the tissue isotropic
        component. $\lambda_{1,2}$ are additional relative weights of the two
        components within the tissue (canonical tensor and tissue
        water). Implicitly, $\lambda_3 = 0$, reflecting the fact that the free
        water is not part of the tissue fraction at all. To find $\lambda_i$, we
        perform a grid search over plausible values and choose the ones that
        best account for the diffusion and TF signals.

        To find $w_2$ and $w_3$, we follow these steps:

        0. We find $w_1 = w_{tensor}$ using the CanonicalTensorModel.

        1. We fix the values of $\lambda_1$ and $\lambda_2$ and solve for $w_2$:

            $w_2 = \frac{TF - \lambda_1 w_1}{\lambda_2}$

        2. From the first equation above, we can then solve for $w_3$:

            $w_3 = 1 - w_{iso} - w_2$

        3. We go back to the expanded model and predict the diffusion and the
        TF data for these values of $\lambda_{1,2}$.

        """

        # Start by getting the params for the underlying
        # SparseDeconvolutionModel:
        temp_p_file = self.params_file
        self.params_file = params_file_resolver(self,
                                                'SparseDeconvolutionModel')

        tensor_params = super(TissueFractionModel, self).model_params
        w2 = self.non_fiber_iso

        # Restore order:
        self.params_file = temp_p_file

        # The tensor weight is the sum of fiber parameters in each voxel:
        w_ten = np.sum(tensor_params[self.mask], -1)
        # And the isotropic weight is taken from the isotropic regressor:
        w_iso = self.iso_regressor[0]

        w2 = (self._flat_tf - self.alpha1 * w_ten) / self.alpha2
        w3 = (1 - w_ten - w2)

        w2_out = ozu.nans(self.shape[:3])
        w3_out = ozu.nans(self.shape[:3])

        w2_out[self.mask] = w2
        w3_out[self.mask] = w3

        # Return tensor_idx, w1, w2, w3
        return tensor_params[..., 0], tensor_params[..., 1], w2_out, w3_out
Example #6
    def __init__(self,
                 data,
                 bvecs,
                 bvals,
                 params_file=None,
                 axial_diffusivity=AD,
                 radial_diffusivity=RD,
                 affine=None,
                 mask=None,
                 scaling_factor=SCALE_FACTOR,
                 sub_sample=None,
                 mode='relative_signal',
                 iso_diffusivity=3.0,
                 model_form='flexible',
                 verbose=True):
        r"""
        Initialize a CanonicalTensorModelOpt class instance.

        Same inputs except we do not accept over-sampling, since it has no
        meaning here.

        Parameters
        ----------

        model_form: A string that chooses between different forms of the
        model. By default, this is set to 'flexible'.

        'flexible': In this case, we fit the parameters of the following model: 

        .. math::

           \frac{S}{ S_0}= \beta_0 e^{-bD}+\beta_1 e^{-b \theta R_i Q \theta^t}

        In this model, the diffusivity of the sphere is assumed to be the
        diffusivity of water and Q is taken from the axial_diffusivity and
        radial_diffusivity inputs. We fit $\beta_0$, $\beta_1$ and $R_i$ (the
        rotation matrix, which is defined by two parameters: azimuth and
        elevation).
        
        'constrained': We will optimize for the following model:

        .. math::
        
           \frac{S}{ S_0}= (1-\beta) e^{-bD}+\beta e^{-b \theta R_i Q \theta^t}

        That is, a model in which the sum of the weights on isotropic and
        anisotropic components is always 1.

        'ball_and_stick': The form of the model in this case is:

        .. math::

            \frac{S}{ S_0}= (1-\beta)e^{-b d}+\beta e^{-b \theta R_idQ\theta^t}

        Note that in this case, d is a fit parameter and

        .. math::

             Q = \begin{pmatrix} 1 & 0 & 0 \\
                                 0 & 0 & 0 \\
                                 0 & 0 & 0 \\
                 \end{pmatrix}
        
        is a tensor with $FA=1$. That is, without any radial component.

        """
        CanonicalTensorModel.__init__(self,
                                      data,
                                      bvecs,
                                      bvals,
                                      params_file=params_file,
                                      axial_diffusivity=axial_diffusivity,
                                      radial_diffusivity=radial_diffusivity,
                                      affine=affine,
                                      mask=mask,
                                      scaling_factor=scaling_factor,
                                      sub_sample=sub_sample,
                                      over_sample=None,  # Always None
                                      mode=mode,
                                      iso_diffusivity=iso_diffusivity,
                                      verbose=verbose)


        self.model_form = model_form
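        # Predicted isotropic signal, e^(-b * iso_diffusivity), evaluated at
        # the first b-value in b_idx: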
        self.iso_pred_sig = np.exp(-self.bvals[self.b_idx][0] * iso_diffusivity)

        # Over-ride the setting of the params file name in the super-class, so
        # that we can add the model form into the file name (and run on all
        # model-forms for the same data...):

        # Introspect to figure out what name the current class has:
        this_class = str(self.__class__).split("'")[-2].split('.')[-1]

        # Go on and set it: 
        self.params_file = params_file_resolver(self,
                                                this_class + model_form,
                                                params_file=params_file)


        # Choose the prediction function based on the model form:
        if self.model_form == 'constrained':
            self.pred_func = self._pred_sig_constrained
        elif self.model_form == 'flexible':
            self.pred_func = self._pred_sig_flexible
        elif self.model_form == 'ball_and_stick':
            self.pred_func = self._pred_sig_ball_and_stick

        if self.mode == 'relative_signal':
            self.fit_signal = self._flat_relative_signal
        elif self.mode == 'signal_attenuation':
            self.fit_signal = 1 - self._flat_relative_signal
        elif self.mode == 'normalize':
            e_s = "Mode normalize doesn't make sense in CanonicalTensorModelOpt"
            raise ValueError(e_s)
        else:
            e_s = "Mode %s not recognized"
            raise ValueError(e_s)            
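A minimal usage sketch (assuming data, bvecs and bvals are already in memory): because the model form is folded into the params-file name above, fits under different forms can coexist on disk for the same data:

    flexible = CanonicalTensorModelOpt(data, bvecs, bvals,
                                       model_form='flexible')
    ball_stick = CanonicalTensorModelOpt(data, bvecs, bvals,
                                         model_form='ball_and_stick',
                                         mode='signal_attenuation')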
Example #7
0
    def __init__(self,
                 data,
                 bvecs,
                 bvals,
                 params_file=None,
                 axial_diffusivity=AD,
                 radial_diffusivity=RD,
                 affine=None,
                 mask=None,
                 scaling_factor=SCALE_FACTOR,
                 sub_sample=None,
                 mode='relative_signal',
                 iso_diffusivity=3.0,
                 model_form='flexible',
                 verbose=True):
        r"""
        Initialize a CanonicalTensorModelOpt class instance.

        Same inputs except we do not accept over-sampling, since it has no
        meaning here.

        Parameters
        ----------

        model_form: A string that chooses between different forms of the
        model. By default, this is set to 'flexible'.

        'flexible': In this case, we fit the parameters of the following model: 

        .. math::

           \frac{S}{ S_0}= \beta_0 e^{-bD}+\beta_1 e^{-b \theta R_i Q \theta^t}

        In this model, the diffusivity of the sphere is assumed to be the
        diffusivity of water and Q is taken from the axial_diffusivity and
        radial_diffusivity inputs. We fit $\beta_0$, $\beta_1$ and $R_i$ (the
        rotation matrix, which is defined by two parameters: azimuth and
        elevation).
        
        'constrained': We will optimize for the following model:

        .. math::
        
           \frac{S}{ S_0}= (1-\beta) e^{-bD}+\beta e^{-b \theta R_i Q \theta^t}

        That is, a model in which the sum of the weights on isotropic and
        anisotropic components is always 1.

        'ball_and_stick': The form of the model in this case is:

        .. math::

            \frac{S}{ S_0}= (1-\beta)e^{-b d}+\beta e^{-b \theta R_idQ\theta^t}

        Note that in this case, d is a fit parameter and

        .. math::

             Q = \begin{pmatrix} 1 & 0 & 0 \\
                                 0 & 0 & 0 \\
                                 0 & 0 & 0 \\
                 \end{pmatrix}
        
        is a tensor with $FA=1$. That is, without any radial component.

        """
        CanonicalTensorModel.__init__(
            self,
            data,
            bvecs,
            bvals,
            params_file=params_file,
            axial_diffusivity=axial_diffusivity,
            radial_diffusivity=radial_diffusivity,
            affine=affine,
            mask=mask,
            scaling_factor=scaling_factor,
            sub_sample=sub_sample,
            over_sample=None,  # Always None
            mode=mode,
            iso_diffusivity=iso_diffusivity,
            verbose=verbose)

        self.model_form = model_form
        self.iso_pred_sig = np.exp(-self.bvals[self.b_idx][0] *
                                   iso_diffusivity)

        # Over-ride the setting of the params file name in the super-class, so
        # that we can add the model form into the file name (and run on all
        # model-forms for the same data...):

        # Introspect to figure out what name the current class has:
        this_class = str(self.__class__).split("'")[-2].split('.')[-1]

        # Go on and set it:
        self.params_file = params_file_resolver(self,
                                                this_class + model_form,
                                                params_file=params_file)

        # Choose the prediction function based on the model form:
        if self.model_form == 'constrained':
            self.pred_func = self._pred_sig_constrained
        elif self.model_form == 'flexible':
            self.pred_func = self._pred_sig_flexible
        elif self.model_form == 'ball_and_stick':
            self.pred_func = self._pred_sig_ball_and_stick

        if self.mode == 'relative_signal':
            self.fit_signal = self._flat_relative_signal
        elif self.mode == 'signal_attenuation':
            self.fit_signal = 1 - self._flat_relative_signal
        elif self.mode == 'normalize':
            e_s = "Mode normalize doesn't make sense in CanonicalTensorModelOpt"
            raise ValueError(e_s)
        else:
            e_s = "Mode %s not recognized"
            raise ValueError(e_s)
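For concreteness, a sketch of the 'ball_and_stick' prediction for a single measurement direction, following the equation in the docstring. This is an illustration, not the class's actual _pred_sig_ball_and_stick implementation: theta is a unit b-vector, rot_q stands in for the rotated rank-1 tensor dQ, and b, beta and d are scalars:

    import numpy as np

    def ball_and_stick_signal(beta, d, b, theta, rot_q):
        # Isotropic ("ball") term, weighted by (1 - beta):
        ball = (1 - beta) * np.exp(-b * d)
        # Anisotropic ("stick") term; the exponent is -b * theta' * rot_q * theta:
        stick = beta * np.exp(-b * np.dot(theta, np.dot(rot_q, theta)))
        return ball + stick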