Example #1
    def __init__(self,
                 data,
                 bvecs,
                 bvals,
                 params_file=None,
                 axial_diffusivity=AD,
                 radial_diffusivity=RD,
                 affine=None,
                 mask=None,
                 scaling_factor=SCALE_FACTOR,
                 sub_sample=None,
                 over_sample=None,
                 verbose=True,
                 mode='relative_signal',
                 n_canonicals=2):
        """
        Initialize a MultiCanonicalTensorModel class instance.
        """
        # Initialize the super-class:
        CanonicalTensorModel.__init__(self,
                                      data,
                                      bvecs,
                                      bvals,
                                      params_file=params_file,
                                      axial_diffusivity=axial_diffusivity,
                                      radial_diffusivity=radial_diffusivity,
                                      affine=affine,
                                      mask=mask,
                                      scaling_factor=scaling_factor,
                                      sub_sample=sub_sample,
                                      over_sample=over_sample,
                                      mode=mode,
                                      verbose=verbose)

        self.n_canonicals = n_canonicals
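
The constructor above follows the delegation pattern used throughout these examples: the sub-class forwards every shared argument to CanonicalTensorModel.__init__ and only stores what is new to it (here, n_canonicals). A minimal, self-contained sketch of that pattern (the class and file names below are illustrative stand-ins, not the package's own):

class BaseModel(object):
    def __init__(self, data, bvecs, bvals, mode='relative_signal', verbose=True):
        self.data, self.bvecs, self.bvals = data, bvecs, bvals
        self.mode, self.verbose = mode, verbose

class MultiModel(BaseModel):
    def __init__(self, data, bvecs, bvals, n_canonicals=2, **kwargs):
        # Forward the shared arguments to the base class:
        BaseModel.__init__(self, data, bvecs, bvals, **kwargs)
        # Keep only what is specific to the sub-class:
        self.n_canonicals = n_canonicals

m = MultiModel('dwi.nii.gz', 'dwi.bvecs', 'dwi.bvals', n_canonicals=3)
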
Example #4
    def __init__(self,
                 data,
                 bvecs,
                 bvals,
                 calibration_roi,
                 params_file=None,
                 affine=None,
                 mask=None,
                 scaling_factor=SCALE_FACTOR,
                 sub_sample=None,
                 over_sample=None,
                 verbose=True):
        """
        Initialize a CalibratedCanonicalTensorModel instance.

        Parameters
        ----------

        calibration_roi: full path to a nifti file containing zeros everywhere,
        except ones where the calibration ROI is defined. Should already be
        registered and transformed to the DWI data resolution/alignment.

        """
        # Initialize the super-class. We set AD and RD to None to prevent
        # things from going forward before calibration has occurred. This will
        # probably cause an error to be thrown if calibration doesn't
        # happen. We might want to catch that error and explain it to the
        # user...
        CanonicalTensorModel.__init__(self,
                                      data,
                                      bvecs,
                                      bvals,
                                      params_file=params_file,
                                      axial_diffusivity=None,
                                      radial_diffusivity=None,
                                      affine=affine,
                                      mask=mask,
                                      scaling_factor=scaling_factor,
                                      sub_sample=sub_sample,
                                      over_sample=over_sample,
                                      verbose=verbose)

        # This is used to initialize the optimization in each voxel.
        # The orientation parameters are chosen to be close to horizontal.

        # Start params: theta, phi, beta, lambda1, lambda2
        self.start_params = np.pi / 2, 0, 0.5, 1.5, 0
        self.calibration_roi = calibration_roi
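
The comment above describes the starting orientation (theta = pi/2, phi = 0) as close to horizontal. Under the usual physics convention, where theta is the polar angle measured from the z axis and phi the azimuth, that choice points along the x axis, i.e. it lies in the horizontal plane. A small self-contained check of that convention (the helper below is an illustration, not the package's own coordinate routine):

import numpy as np

def sphere2cart(theta, phi):
    # theta: polar angle from +z; phi: azimuth in the x-y plane.
    return np.array([np.sin(theta) * np.cos(phi),
                     np.sin(theta) * np.sin(phi),
                     np.cos(theta)])

print(sphere2cart(np.pi / 2, 0))  # approximately [1, 0, 0]: in-plane ("horizontal")
print(sphere2cart(0, 0))          # [0, 0, 1]: along the z axis
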
Example #5
def test_predict():
    """
    Test the CanonicalTensorModel predict method
    """
    # 1000 'voxels' with constant data in each one in all directions (+b0):
    data = (np.random.rand(10 * 10 * 10).reshape(10 * 10 * 10, 1) + np.zeros(
        (10 * 10 * 10, 160))).reshape(10, 10, 10, 160)

    CTM = CanonicalTensorModel(data,
                               data_path + 'dwi.bvecs',
                               data_path + 'dwi.bvals',
                               params_file=tempfile.NamedTemporaryFile().name)

    bvecs = CTM.bvecs[:, CTM.b_idx]
    new_bvecs = bvecs[:, :4]
    prediction = CTM.predict(new_bvecs)
    npt.assert_array_equal(prediction, CTM.fit[..., :4])
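
The test data above is built by broadcasting a single random value per 'voxel' against a (1000, 160) array of zeros, so every measurement direction within a voxel carries the same value. A short, self-contained illustration of that construction:

import numpy as np

# One random value per voxel, shaped for broadcasting:
vox = np.random.rand(10 * 10 * 10).reshape(10 * 10 * 10, 1)
data = (vox + np.zeros((10 * 10 * 10, 160))).reshape(10, 10, 10, 160)

# All 160 'directions' in a given voxel hold the same value:
assert np.all(data[3, 4, 5] == data[3, 4, 5, 0])
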
Example #7
    def __init__(self,
                 tissue_fraction,
                 data,
                 bvecs,
                 bvals,
                 alpha1,
                 alpha2,
                 water_D=3,
                 gray_D=1,
                 params_file=None,
                 axial_diffusivity=AD,
                 radial_diffusivity=RD,
                 affine=None,
                 mask=None,
                 scaling_factor=SCALE_FACTOR,
                 sub_sample=None,
                 over_sample=None,
                 verbose=True):
        
        # Initialize the super-class:
        CanonicalTensorModel.__init__(self,
                                      data,
                                      bvecs,
                                      bvals,
                                      params_file=params_file,
                                      axial_diffusivity=axial_diffusivity,
                                      radial_diffusivity=radial_diffusivity,
                                      affine=affine,
                                      mask=mask,
                                      scaling_factor=scaling_factor,
                                      sub_sample=sub_sample,
                                      over_sample=over_sample,
                                      verbose=verbose)

        self.tissue_fraction = ni.load(tissue_fraction).get_data()

        # Convert the diffusivity constants to signal attenuation:
        self.gray_D = np.exp(-self.bvals[self.b_idx][0] * gray_D)
        self.water_D = np.exp(-self.bvals[self.b_idx][0] * water_D)

        # We're going to grid-search over these:
        self.alpha1 = alpha1
        self.alpha2 = alpha2
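
The conversion of gray_D and water_D above uses the standard mono-exponential signal model, exp(-b * D). A self-contained sketch with the default values, assuming (as the scaling_factor suggests) that the b-values have been rescaled so that b is roughly 1:

import numpy as np

b = 1.0        # rescaled b-value (assumption; e.g. 1000 s/mm^2 divided by the scaling factor)
water_D = 3.0  # default free-water diffusivity in these units
gray_D = 1.0   # default gray-matter diffusivity in these units

water_attenuation = np.exp(-b * water_D)  # ~0.05
gray_attenuation = np.exp(-b * gray_D)    # ~0.37
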
Example #8
    def __init__(self,
                 data,
                 bvecs,
                 bvals,
                 solver=None,
                 solver_params=None,
                 params_file=None,
                 axial_diffusivity=AD,
                 radial_diffusivity=RD,
                 affine=None,
                 mask=None,
                 scaling_factor=SCALE_FACTOR,
                 sub_sample=None,
                 over_sample=None,
                 mode='relative_signal',
                 verbose=True,
                 force_recompute=False):
        """
        Initialize SparseDeconvolutionModel class instance.
        """
        # Initialize the super-class:
        CanonicalTensorModel.__init__(self,
                                      data,
                                      bvecs,
                                      bvals,
                                      params_file=params_file,
                                      axial_diffusivity=axial_diffusivity,
                                      radial_diffusivity=radial_diffusivity,
                                      affine=affine,
                                      mask=mask,
                                      scaling_factor=scaling_factor,
                                      sub_sample=sub_sample,
                                      over_sample=over_sample,
                                      mode=mode,
                                      verbose=verbose)
        
        # Name the params file, if needed: 
        this_class = str(self.__class__).split("'")[-2].split('.')[-1]
        self.params_file = params_file_resolver(self,
                                                this_class,
                                                params_file=params_file)

        # Deal with the solver stuff: 
        # For now, the default is ElasticNet:
        if solver is None:
            this_solver = sklearn_solvers['ElasticNet']
        # Assume it's a key into the dict: 
        elif isinstance(solver, str):
            this_solver = sklearn_solvers[solver]
        # Assume it's a class: 
        else:
            this_solver = solver
        
        # This will be passed as kwarg to the solver initialization:
        if solver_params is None:
            # This seems to be good for our data:
            alpha = 0.0005
            l1_ratio = 0.6
            self.solver_params = dict(alpha=alpha,
                                      l1_ratio=l1_ratio,
                                      fit_intercept=True,
                                      positive=True)
        else:
            self.solver_params = solver_params

        # We reuse the same class instance in all voxels: 
        self.solver = this_solver(**self.solver_params)

        # This is only here for now, but should be implemented in the
        # base-class (all the way up?) and generalized in a wrapper to model
        # params, I believe. 
        self.force_recompute = force_recompute
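
The default solver above is scikit-learn's ElasticNet, constructed once with alpha=0.0005, l1_ratio=0.6 and positive=True, and then reused in every voxel. A self-contained sketch of building and using such a solver on toy data (the design matrix and signal below are made up for illustration; they are not the model's actual regressors):

import numpy as np
from sklearn.linear_model import ElasticNet

# The defaults chosen above, passed as keyword arguments to the solver:
solver_params = dict(alpha=0.0005, l1_ratio=0.6, fit_intercept=True, positive=True)
solver = ElasticNet(**solver_params)

# Toy regression problem: 150 measurements, 362 candidate regressors,
# with only the first regressor actually contributing to the signal:
X = np.random.rand(150, 362)
w_true = np.zeros(362)
w_true[0] = 1.0
y = np.dot(X, w_true) + 0.01 * np.random.randn(150)

coef = solver.fit(X, y).coef_  # sparse, non-negative weights
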
Example #9
def test_CanonicalTensorModel():
    """

    Test the simple canonical + sphere model.

    """
    # 1000 'voxels' with constant data in each one in all directions (+b0):
    data = (np.random.rand(10 * 10 * 10).reshape(10 * 10 * 10, 1) + np.zeros(
        (10 * 10 * 10, 160))).reshape(10, 10, 10, 160)

    CTM = CanonicalTensorModel(data,
                               data_path + 'dwi.bvecs',
                               data_path + 'dwi.bvals',
                               params_file=tempfile.NamedTemporaryFile().name)

    # XXX Smoke testing only
    npt.assert_equal(CTM.fit.shape, CTM.signal.shape)

    mask_array = np.zeros(ni.load(data_path + 'small_dwi.nii.gz').shape[:3])
    # Only a small 2 x 2 x 2 block of voxels:
    mask_array[1:3, 1:3, 1:3] = 1
    # Fit this on some real dwi data
    for mode in ['signal_attenuation', 'relative_signal', 'normalize', 'log']:
        for params_file in [None, tempfile.NamedTemporaryFile().name, 'temp']:
            CTM = CanonicalTensorModel(data_path + 'small_dwi.nii.gz',
                                       data_path + 'dwi.bvecs',
                                       data_path + 'dwi.bvals',
                                       mask=mask_array,
                                       params_file=params_file,
                                       mode=mode)

            # XXX Smoke testing only:
            npt.assert_equal(CTM.fit.shape, CTM.signal.shape)
            npt.assert_equal(CTM.principal_diffusion_direction.shape,
                             CTM.signal.shape[:3] + (3, ))
            npt.assert_equal(CTM.fractional_anisotropy.shape,
                             CTM.signal.shape[:3])
        # Test over-sampling with the dipy (362) and camino (246) point sets:
        for over_sample in [362, 246]:
            CTM = CanonicalTensorModel(
                data_path + 'small_dwi.nii.gz',
                data_path + 'dwi.bvecs',
                data_path + 'dwi.bvals',
                mask=mask_array,
                params_file=tempfile.NamedTemporaryFile().name,
                over_sample=over_sample,
                mode=mode)

            # XXX Smoke testing only:
            npt.assert_equal(CTM.fit.shape, CTM.signal.shape)

    # This shouldn't be possible, because we don't have a sphere with 151
    # samples handy:
    npt.assert_raises(
        ValueError, CanonicalTensorModel, data_path + 'small_dwi.nii.gz',
        data_path + 'dwi.bvecs', data_path + 'dwi.bvals',
        **dict(mask=mask_array,
               params_file=tempfile.NamedTemporaryFile().name,
               over_sample=151))

    # If you provide an unrecognized mode, you get an error:
    npt.assert_raises(
        ValueError, CanonicalTensorModel, data_path + 'small_dwi.nii.gz',
        data_path + 'dwi.bvecs', data_path + 'dwi.bvals',
        **dict(mask=mask_array,
               mode='crazy_mode',
               params_file=tempfile.NamedTemporaryFile().name))
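
The last two checks rely on the numpy.testing idiom npt.assert_raises(exception, callable, *args, **kwargs), which calls the callable with the given arguments and verifies that it raises the expected exception. A minimal stand-alone illustration (make_model is a hypothetical stand-in for the real constructor):

import numpy.testing as npt

def make_model(over_sample=None):
    # Stand-in: only the sample counts used in the test above are accepted.
    if over_sample not in (None, 246, 362):
        raise ValueError("No sphere with %s samples is available" % over_sample)
    return over_sample

# Passes: requesting 151 samples triggers the expected ValueError.
npt.assert_raises(ValueError, make_model, **dict(over_sample=151))
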