def estimate_snr_wm_mask(input_data, wm_mask):
    """Estimate the per-voxel SNR (mean divided by std of the unweighted volumes) within the WM mask."""
    unweighted_ind = input_data.protocol.get_unweighted_indices()
    unweighted_volumes = input_data.signal4d[..., unweighted_ind]

    snr = np.mean(unweighted_volumes, axis=-1) / np.std(unweighted_volumes,
                                                        axis=-1)
    return mdt.create_roi(snr, wm_mask)
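For reference, the same per-voxel SNR computation can be illustrated without MDT on a small synthetic 4D array; the masking step that mdt.create_roi performs above is shown here as plain boolean indexing, and all names and shapes below are made up for the sketch.

import numpy as np

# Synthetic example: four unweighted (b0) volumes of shape 5x5x5.
rng = np.random.default_rng(0)
b0_volumes = 100 + 10 * rng.standard_normal((5, 5, 5, 4))

# Per-voxel SNR: mean over the b0 volumes divided by their std.
snr = np.mean(b0_volumes, axis=-1) / np.std(b0_volumes, axis=-1)

# Restricting to a mask (the role mdt.create_roi plays above) reduces to
# boolean indexing, yielding a flat array of per-voxel SNR values.
wm_mask = np.zeros((5, 5, 5), dtype=bool)
wm_mask[1:4, 1:4, 1:4] = True
snr_in_mask = snr[wm_mask]
print(snr_in_mask.shape)  # (27,)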
Example #2
    def test_lls_multishell_b6k_max(self):
        known_values = {
            'CHARMED_r1': {
                'LogLikelihood': {
                    'mean': -8038.29248046875,
                    'std': 4703.6005859375
                }
            },
            'CHARMED_r2': {
                'LogLikelihood': {
                    'mean': -7001.50927734375,
                    'std': 4062.600341796875
                }
            },
            'CHARMED_r3': {
                'LogLikelihood': {
                    'mean': -6514.17529296875,
                    'std': 3659.7138671875
                }
            }
        }

        for model_name in ['CHARMED_r1', 'CHARMED_r2', 'CHARMED_r3']:
            pjoin = mdt.make_path_joiner(
                os.path.join(self._tmp_dir, self._tmp_dir_subdir,
                             'multishell_b6k_max'))

            user_volumes = mdt.load_volume_maps(
                pjoin('output', 'multishell_b6k_max_example_slices_24_38_mask',
                      model_name))

            msg_prefix = 'multishell_b6k_max - {}'.format(model_name)
            roi = mdt.create_roi(
                user_volumes['LogLikelihood'],
                pjoin('multishell_b6k_max_example_slices_24_38_mask'))

            for map_name, test_values in known_values[model_name].items():
                np.testing.assert_allclose(test_values['mean'],
                                           np.mean(roi),
                                           rtol=1e-4,
                                           err_msg='{} - {} - mean'.format(
                                               msg_prefix, map_name))
                np.testing.assert_allclose(test_values['std'],
                                           np.std(roi),
                                           rtol=1e-4,
                                           err_msg='{} - {} - std'.format(
                                               msg_prefix, map_name))
Example #3
    def test_lls_multishell_b6k_max(self):
        known_values = {
            'CHARMED_r1': {
                'LogLikelihood': {
                    'mean': -447.95446,
                    'std': 36.09088
                }
            },
            'CHARMED_r2': {
                'LogLikelihood': {
                    'mean': -436.13861,
                    'std': 25.13305
                }
            },
            'CHARMED_r3': {
                'LogLikelihood': {
                    'mean': -432.15878,
                    'std': 21.87481
                }
            }
        }

        for model_name in ['CHARMED_r1', 'CHARMED_r2', 'CHARMED_r3']:
            pjoin = mdt.make_path_joiner(
                os.path.join(self._tmp_dir, self._tmp_dir_subdir,
                             'multishell_b6k_max'))

            user_volumes = mdt.load_volume_maps(
                pjoin('output', 'multishell_b6k_max_example_slices_24_38_mask',
                      model_name))

            msg_prefix = 'multishell_b6k_max - {}'.format(model_name)
            roi = mdt.create_roi(
                user_volumes['LogLikelihood'],
                pjoin('multishell_b6k_max_example_slices_24_38_mask'))

            for map_name, test_values in known_values[model_name].items():
                np.testing.assert_allclose(test_values['mean'],
                                           np.mean(roi),
                                           rtol=1e-4,
                                           err_msg='{} - {} - mean'.format(
                                               msg_prefix, map_name))
                np.testing.assert_allclose(test_values['std'],
                                           np.std(roi),
                                           rtol=1e-4,
                                           err_msg='{} - {} - std'.format(
                                               msg_prefix, map_name))
Example #4
    def estimate(self, problem_data, **kwargs):
        """Calculate the standard deviation of the error using all unweighted volumes.

        This calculates per voxel (in the brain mask) the std over all unweighted volumes
        and takes the mean of those estimates as the standard deviation of the noise.

        The method is taken from Camino (http://camino.cs.ucl.ac.uk/index.php?n=Man.Estimatesnr).
        """
        unweighted_indices = problem_data.protocol.get_unweighted_indices()
        unweighted_volumes = problem_data.dwi_volume[..., unweighted_indices]

        if len(unweighted_indices) < 2:
            raise NoiseStdEstimationNotPossible(
                'Not enough unweighted volumes for this estimator.')

        voxel_list = create_roi(unweighted_volumes, problem_data.mask)
        return np.mean(np.std(voxel_list, axis=1))
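A minimal, MDT-free sketch of the same estimator, using only numpy with made-up shapes and a synthetic noise level: take the per-voxel std over the unweighted volumes inside the mask and average those estimates into a single noise std.

import numpy as np

rng = np.random.default_rng(0)
sigma_true = 5.0

# Synthetic data: six unweighted volumes with Gaussian noise of std sigma_true.
b0_volumes = 100 + sigma_true * rng.standard_normal((10, 10, 10, 6))
mask = np.ones((10, 10, 10), dtype=bool)

# Per-voxel std over the unweighted volumes, restricted to the mask,
# then averaged into a single noise std estimate.
voxel_values = b0_volumes[mask]  # shape (n_voxels, 6)
noise_std = np.mean(np.std(voxel_values, axis=1))
print(noise_std)  # close to sigma_true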
Example #5
    def test_lls_b1k_b2k(self):
        known_values = {
            'BallStick_r1': {
                'LogLikelihood': {
                    'mean': -327297.53125,
                    'std': 359791.5625
                }
            },
            'Tensor': {
                'LogLikelihood': {
                    'mean': -5891.28515625,
                    'std': 7691.93408203125
                }
            },
            'NODDI': {
                'LogLikelihood': {
                    'mean': -19352.4609375,
                    'std': 14489.8623046875
                }
            }
        }

        for model_name in ['BallStick_r1', 'Tensor', 'NODDI']:
            pjoin = mdt.make_path_joiner(
                os.path.join(self._tmp_dir, self._tmp_dir_subdir, 'b1k_b2k'))

            user_volumes = mdt.load_volume_maps(
                pjoin('output', 'b1k_b2k_example_slices_24_38_mask',
                      model_name))

            msg_prefix = 'b1k_b2k - {}'.format(model_name)
            roi = mdt.create_roi(user_volumes['LogLikelihood'],
                                 pjoin('b1k_b2k_example_slices_24_38_mask'))

            for map_name, test_values in known_values[model_name].items():
                np.testing.assert_allclose(test_values['mean'],
                                           np.mean(roi),
                                           rtol=1e-4,
                                           err_msg='{} - {} - mean'.format(
                                               msg_prefix, map_name))
                np.testing.assert_allclose(test_values['std'],
                                           np.std(roi),
                                           rtol=1e-4,
                                           err_msg='{} - {} - std'.format(
                                               msg_prefix, map_name))
Example #6
    def test_lls_b1k_b2k(self):
        known_values = {
            'BallStick_r1': {
                'LogLikelihood': {
                    'mean': -1215.52355,
                    'std': 924.27117
                }
            },
            'Tensor': {
                'LogLikelihood': {
                    'mean': -182.73164,
                    'std': 20.02311
                }
            },
            'NODDI': {
                'LogLikelihood': {
                    'mean': -451.16198,
                    'std': 37.77514
                }
            }
        }

        for model_name in ['BallStick_r1', 'Tensor', 'NODDI']:
            pjoin = mdt.make_path_joiner(
                os.path.join(self._tmp_dir, self._tmp_dir_subdir, 'b1k_b2k'))

            user_volumes = mdt.load_volume_maps(
                pjoin('output', 'b1k_b2k_example_slices_24_38_mask',
                      model_name))

            msg_prefix = 'b1k_b2k - {}'.format(model_name)
            roi = mdt.create_roi(user_volumes['LogLikelihood'],
                                 pjoin('b1k_b2k_example_slices_24_38_mask'))

            for map_name, test_values in known_values[model_name].items():
                np.testing.assert_allclose(test_values['mean'],
                                           np.mean(roi),
                                           rtol=1e-4,
                                           err_msg='{} - {} - mean'.format(
                                               msg_prefix, map_name))
                np.testing.assert_allclose(test_values['std'],
                                           np.std(roi),
                                           rtol=1e-4,
                                           err_msg='{} - {} - std'.format(
                                               msg_prefix, map_name))
Example #7
    def estimate(self, problem_data, **kwargs):
        """Calculate the standard deviation of the error using the first two unweighted volumes/

        This subtracts the values of the first two unweighted volumes from each other, calculates the std over
        the results and divides that by sqrt(2).

        The method is taken from Camino (http://camino.cs.ucl.ac.uk/index.php?n=Man.Estimatesnr).

        Returns:
            float: single value representing the sigma for the given volume
        """
        unweighted_indices = problem_data.protocol.get_unweighted_indices()
        unweighted_volumes = problem_data.dwi_volume[..., unweighted_indices]

        if len(unweighted_indices) < 2:
            raise NoiseStdEstimationNotPossible('Not enough unweighted volumes for this estimator.')

        diff = unweighted_volumes[..., 0] - unweighted_volumes[..., 1]
        voxel_values = create_roi(diff, problem_data.mask)
        return np.std(voxel_values) / np.sqrt(2)
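The division by sqrt(2) follows from Var(X - Y) = 2 * sigma^2 when the two volumes carry independent noise of equal variance, so the std of the difference overestimates sigma by a factor sqrt(2). A small numpy sketch with synthetic data (all names and values below are made up) illustrates this.

import numpy as np

rng = np.random.default_rng(0)
sigma_true = 5.0
signal = 100 + rng.standard_normal((10, 10, 10))  # noise-free signal

# Two unweighted acquisitions of the same signal with independent noise.
b0_a = signal + sigma_true * rng.standard_normal(signal.shape)
b0_b = signal + sigma_true * rng.standard_normal(signal.shape)
mask = np.ones(signal.shape, dtype=bool)

# The signal cancels in the difference; what remains has std sqrt(2) * sigma.
diff = (b0_a - b0_b)[mask]
noise_std = np.std(diff) / np.sqrt(2)
print(noise_std)  # close to sigma_true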
def single_subject_results(subject_info):
    output_folder = subject_info.data_folder[:-1] + '_output'
    subject_id = subject_info.subject_id

    input_data = subject_info.get_input_data()
    wm_mask = output_folder + '/' + subject_id + '/wm_mask'
    snrs = estimate_snr_wm_mask(input_data, wm_mask)
    results_pjoin = mdt.make_path_joiner(output_folder + '/' + subject_id)

    results = {}

    for model in model_names:
        results_per_method = {}
        for method in ['sample', 'optimization']:
            results_per_snr = {}

            if method == 'sample':
                std_data = get_mcmc_std(results_pjoin, model)
            else:
                std_data = get_mle_std(results_pjoin, model)

            std_data = mdt.create_roi(std_data, wm_mask)

            for snr in noise_snrs:
                bin_width = 2.5
                std_values_in_snr = std_data[np.where(
                    (snr - bin_width <= snrs) & (snrs <= snr + bin_width))[0]]

                cutoff = 1
                std_values_in_snr = std_values_in_snr[
                    np.isfinite(std_values_in_snr)
                    & (std_values_in_snr < cutoff) & (std_values_in_snr > 0)]

                results_per_snr.update({snr: np.mean(std_values_in_snr)})

            results_per_method[method] = results_per_snr
        results[model] = results_per_method
    return results
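The per-SNR aggregation inside the loop above can be illustrated in isolation: for each nominal SNR, select the std values of voxels whose estimated SNR falls within plus or minus bin_width of it, drop non-finite and out-of-range values, and average what remains. The sketch below uses synthetic 1D arrays and made-up values in place of the ROI data.

import numpy as np

rng = np.random.default_rng(0)
snrs = rng.uniform(5, 40, size=1000)       # per-voxel SNR estimates
std_data = rng.uniform(0, 0.2, size=1000)  # per-voxel std estimates

noise_snrs = [10, 20, 30]
bin_width = 2.5
cutoff = 1

results_per_snr = {}
for snr in noise_snrs:
    # Voxels whose SNR falls in the bin around the nominal value.
    in_bin = std_data[(snr - bin_width <= snrs) & (snrs <= snr + bin_width)]
    # Keep only finite, positive values below the cutoff, then average.
    in_bin = in_bin[np.isfinite(in_bin) & (in_bin > 0) & (in_bin < cutoff)]
    results_per_snr[snr] = np.mean(in_bin)
print(results_per_snr)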
# Fragment from a script comparing MLE and MCMC standard deviation maps.
# The maps mle, mle_std, mcmc, mcmc_std and the wm_mask are assumed to have
# been loaded earlier in that script.

# Commented-out mask refinements kept from the original:
# wm_mask *= (mcmc >= 0.1)[..., 0]
# wm_mask *= (mcmc <= 0.9)[..., 0]
# wm_mask = mle >= 0.1
# wm_mask *= (np.abs(mle_std - mcmc_std) < 0.01)[..., 0]

# Commented-out visual inspection of the maps:
# items = {'mle': mle, 'mle_std': mle_std, 'mcmc': mcmc, 'mcmc_std': mcmc_std,
#          'p_diff': np.abs(mle - mcmc), 'std_diff': np.abs(mle_std - mcmc_std),
#          'std_threshold_map': np.abs(mle_std - mcmc_std) > 0.01}
# mdt.view_maps(items)
# exit(0)

items = mdt.create_roi({'mle': mle, 'mle_std': mle_std,
                        'mcmc': mcmc, 'mcmc_std': mcmc_std}, wm_mask)

# Commented-out bootstrap comparison of the two std estimates:
# mle_stats = []
# mcmc_stats = []
# for i in range(30):
#     ind_mle = np.random.rand(len(items['mle_std'])) < 0.01
#     ind_mcmc = np.random.rand(len(items['mle_std'])) < 0.01
#     mle_stats.append(np.mean(items['mle_std'][ind_mle]))
#     mcmc_stats.append(np.mean(items['mcmc_std'][ind_mcmc]))
# print(ttest_ind(mle_stats, mcmc_stats))

diff = items['mle_std'] - items['mcmc_std']