Example #1
 def test_freesurfer_pipeline(self):
     study = self.create_study(
         T1T2Study, 'freesurfer', inputs=[
             DatasetMatch('t1', nifti_gz_format, 'mprage'),
             DatasetMatch('t2', nifti_gz_format, 'flair')])
     study.freesurfer_pipeline().run(work_dir=self.work_dir)
     self.assertDatasetCreated('fs_recon_all.fs.zip')
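A note on the pattern shared by all of these snippets: each DatasetMatch appears to bind a named study input to a dataset in the repository by matching the dataset's name and format. The sketch below is interpretive, not taken from the library's documentation; the optional keywords (is_regex, order, dicom_tags) only appear in the later examples.

# Sketch of the DatasetMatch usage seen in these examples (interpretive,
# not from the library docs).
match = DatasetMatch(
    't1',              # name of the study input this dataset should fill
    nifti_gz_format,   # file format the matched dataset is expected to have
    'mprage',          # dataset/scan name to match (a regex when is_regex=True)
    is_regex=False)    # optional: treat the pattern as a regular expression

The resulting matches are then passed as the inputs argument of create_study, as in Example #1 above.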
Example #2
 def test_preprocess(self):
     study = self.create_study(DiffusionStudy, 'preprocess', [
         DatasetMatch('primary', mrtrix_format, 'r_l_dwi_b700_30'),
         DatasetMatch('dwi_reference', mrtrix_format, 'l_r_dwi_b0_6')
     ])
     preproc = study.data('preproc')[0]
     self.assertTrue(os.path.exists(preproc.path))
Example #3
 def test_t2_registration_pipeline(self):
     study = self.create_study(
         T1T2Study, 't2_registration', inputs=[
             DatasetMatch('t1', nifti_gz_format, 'mprage'),
             DatasetMatch('t2', nifti_gz_format, 'flair')])
     study.t2_registration_pipeline().run(work_dir=self.work_dir)
     self.assertDatasetCreated('t2_coreg.nii.gz', study.name)
Example #4
 def test_registration_to_matrix(self):
     study = self.create_study(
         CoregisteredToMatrixStudy, 'registration_to_matrix', {
             DatasetMatch('to_register', nifti_gz_format, 'flair'),
             DatasetMatch('reference', nifti_gz_format, 'mprage'),
             DatasetMatch('matrix', text_matrix_format, 'matrix')})
     study.linear_registration_pipeline().run(work_dir=self.work_dir)
     self.assertDatasetCreated('registered.nii.gz', study.name)
Example #5
 def test_extract_b0(self):
     study = self.create_study(DiffusionStudy, 'extract_b0', [
         DatasetMatch('preproc', nifti_gz_format, 'preproc'),
         DatasetMatch('grad_dirs', fsl_bvecs_format, 'gradient_dirs'),
         DatasetMatch('bvalues', fsl_bvals_format, 'bvalues')
     ])
     study.extract_b0_pipeline().run(work_dir=self.work_dir)
     self.assertDatasetCreated('primary.nii.gz', study.name)
Example #6
 def test_bias_correct(self):
     study = self.create_study(DiffusionStudy, 'bias_correct', [
         DatasetMatch('preproc', nifti_gz_format, 'preproc'),
         DatasetMatch('grad_dirs', fsl_bvecs_format, 'gradient_dirs'),
         DatasetMatch('bvalues', fsl_bvals_format, 'bvalues')
     ])
     study.bias_correct_pipeline(mask_tool='mrtrix').run(
         work_dir=self.work_dir)
     self.assertDatasetCreated('bias_correct.nii.gz', study.name)
Example #7
 def test_tensor(self):
     study = self.create_study(DiffusionStudy, 'tensor', [
         DatasetMatch('bias_correct', nifti_gz_format, 'bias_correct'),
         DatasetMatch('brain_mask', nifti_gz_format, 'brain_mask'),
         DatasetMatch('grad_dirs', fsl_bvecs_format, 'gradient_dirs'),
         DatasetMatch('bvalues', fsl_bvals_format, 'bvalues')
     ])
     study.tensor_pipeline().run(work_dir=self.work_dir)
     self.assertDatasetCreated('tensor.nii.gz', study.name)
Example #8
 def test_response(self):
     study = self.create_study(DiffusionStudy, 'response', [
         DatasetMatch('bias_correct', nifti_gz_format, 'bias_correct'),
         DatasetMatch('brain_mask', nifti_gz_format, 'brain_mask'),
         DatasetMatch('grad_dirs', fsl_bvecs_format, 'gradient_dirs'),
         DatasetMatch('bvalues', fsl_bvals_format, 'bvalues')
     ])
     study.response_pipeline().run(work_dir=self.work_dir)
     self.assertDatasetCreated('response.txt', study.name)
Example #9
 def test_ute(self):
     study = self.create_study(
         UTEStudy, 'pipeline', {
             DatasetMatch('ute_echo1', dicom_format, 'ute_echo1'),
             DatasetMatch('ute_echo2', dicom_format, 'ute_echo2'),
             DatasetMatch('umap_ute', dicom_format, 'umap_ute')
         })
     study.conversion_to_dicom_pipeline().run(work_dir=self.work_dir)
     self.assertDatasetCreated('sute_cont_dicoms', study.name)
     self.assertDatasetCreated('sute_fix_dicoms', study.name)
Example #10
 def test_registration(self):
     study = self.create_study(
         CoregisteredStudy, 'registration',
         inputs=[
             DatasetMatch('to_register', nifti_gz_format, 'flair'),
             DatasetMatch('reference', nifti_gz_format, 'mprage')])
     pipeline = study.linear_registration_pipeline()
     pipeline.run(work_dir=self.work_dir)
     self.assertDatasetCreated('registered.nii.gz', study.name)
     self.assertDatasetCreated('matrix.mat', study.name)
Example #11
 def test_coreg_and_brain_mask(self):
     study = self.create_study(TestCoregStudy,
                               'coreg_and_mask_study',
                               inputs=[
                                   DatasetMatch('ref_primary',
                                                nifti_gz_format, 'mprage'),
                                   DatasetMatch('tocoreg_primary',
                                                nifti_gz_format, 'flair')
                               ])
     coreg_brain = study.data('tocoreg_coreg_brain')[0]
     self.assertDatasetsEqual(coreg_brain, self.reference('coreg_brain'))
Example #12
 def test_brain_extraction_pipelines(self):
     study = self.create_study(
         T1T2Study, 'brain_mask', inputs=[
             DatasetMatch('t1', nifti_gz_format, 'mprage'),
             DatasetMatch('t2', nifti_gz_format, 'flair'),
             DatasetMatch('manual_wmh_mask', nifti_gz_format,
                          'manual_wmh_mask')])
     study.t1_brain_extraction_pipeline().run(work_dir=self.work_dir)
     study.manual_wmh_mask_registration_pipeline().run(
         work_dir=self.work_dir)
     for fname in ('t1_brain.nii.gz', 't2_brain.nii.gz',
                   'brain_mask.nii.gz', 'manual_wmh_mask_coreg.nii.gz'):
         self.assertDatasetCreated(fname, study.name)
Example #13
 def test_concatenate(self):
     study = self.create_study(NODDIStudy,
                               'concatenate',
                               inputs=[
                                   DatasetMatch('low_b_dw_scan',
                                                mrtrix_format,
                                                'r_l_dwi_b700_30'),
                                   DatasetMatch('high_b_dw_scan',
                                                mrtrix_format,
                                                'r_l_dwi_b2000_60')
                               ])
     study.concatenate_pipeline().run(work_dir=self.work_dir)
     self.assertDatasetCreated('dwi_scan.mif', study.name)
Example #14
 def test_suvr(self):
     study = self.create_study(StaticPETStudy,
                               'suvr',
                               inputs=[
                                   DatasetMatch('registered_volume',
                                                nifti_gz_format,
                                                'suvr_registered_volume'),
                                   DatasetMatch('base_mask',
                                                nifti_gz_format,
                                                'cerebellum_mask')
                               ])
     study.suvr_pipeline().run(work_dir=self.work_dir, plugin='Linear')
     self.assertDatasetCreated('SUVR_image.nii.gz', study.name)
Example #15
 def test_reg(self):
     study = self.create_study(
         DynamicPETStudy,
         'reg',
         inputs=[DatasetMatch('pet_volumes', nifti_gz_format, 'pet_image')])
     study.ICA_pipeline().run(work_dir=self.work_dir, plugin='Linear')
     self.assertDatasetCreated('decomposed_file.nii.gz', study.name)
Example #16
class TestDicomTagMatch(BaseTestCase):

    IMAGE_TYPE_TAG = ('0008', '0008')
    GRE_PATTERN = 'gre_field_mapping_3mm.*'
    PHASE_IMAGE_TYPE = ['ORIGINAL', 'PRIMARY', 'P', 'ND']
    MAG_IMAGE_TYPE = ['ORIGINAL', 'PRIMARY', 'M', 'ND', 'NORM']
    DICOM_MATCH = [
        DatasetMatch('gre_phase',
                     dicom_format,
                     GRE_PATTERN,
                     dicom_tags={IMAGE_TYPE_TAG: PHASE_IMAGE_TYPE},
                     is_regex=True),
        DatasetMatch('gre_mag',
                     dicom_format,
                     GRE_PATTERN,
                     dicom_tags={IMAGE_TYPE_TAG: MAG_IMAGE_TYPE},
                     is_regex=True)
    ]

    def test_dicom_match(self):
        study = self.create_study(TestMatchStudy,
                                  'test_dicom',
                                  inputs=self.DICOM_MATCH)
        phase = study.data('gre_phase')[0]
        mag = study.data('gre_mag')[0]
        self.assertEqual(phase.name, 'gre_field_mapping_3mm_phase')
        self.assertEqual(mag.name, 'gre_field_mapping_3mm_mag')

    def test_order_match(self):
        study = self.create_study(TestMatchStudy,
                                  'test_dicom',
                                  inputs=[
                                      DatasetMatch('gre_phase',
                                                   dicom_format,
                                                   pattern=self.GRE_PATTERN,
                                                   order=1,
                                                   is_regex=True),
                                      DatasetMatch('gre_mag',
                                                   dicom_format,
                                                   pattern=self.GRE_PATTERN,
                                                   order=0,
                                                   is_regex=True)
                                  ])
        phase = study.data('gre_phase')[0]
        mag = study.data('gre_mag')[0]
        self.assertEqual(phase.name, 'gre_field_mapping_3mm_phase')
        self.assertEqual(mag.name, 'gre_field_mapping_3mm_mag')
Example #17
 def test_pipeline_prerequisites(self):
     study = self.create_study(ConversionStudy, 'conversion', [
         DatasetMatch('mrtrix', text_format, 'mrtrix'),
         DatasetMatch('nifti_gz', text_format, 'nifti_gz'),
         DatasetMatch('dicom', dicom_format, 't1_mprage_sag_p2_iso_1_ADNI'),
         DatasetMatch('directory', directory_format,
                      't1_mprage_sag_p2_iso_1_ADNI'),
         DatasetMatch('zip', zip_format, 'zip')
     ])
     study.data('nifti_gz_from_dicom')
     study.data('mrtrix_from_nifti_gz')
     study.data('nifti_from_mrtrix')
     study.data('directory_from_zip')
     study.data('zip_from_directory')
     self.assertDatasetCreated('nifti_gz_from_dicom.nii.gz', study.name)
     self.assertDatasetCreated('mrtrix_from_nifti_gz.mif', study.name)
     self.assertDatasetCreated('nifti_from_mrtrix.nii', study.name)
     self.assertDatasetCreated('directory_from_zip', study.name)
     self.assertDatasetCreated('zip_from_directory.zip', study.name)
Example #18
 def test_order_match(self):
     study = self.create_study(TestMatchStudy,
                               'test_dicom',
                               inputs=[
                                   DatasetMatch('gre_phase',
                                                dicom_format,
                                                pattern=self.GRE_PATTERN,
                                                order=1,
                                                is_regex=True),
                                   DatasetMatch('gre_mag',
                                                dicom_format,
                                                pattern=self.GRE_PATTERN,
                                                order=0,
                                                is_regex=True)
                               ])
     phase = study.data('gre_phase')[0]
     mag = study.data('gre_mag')[0]
     self.assertEqual(phase.name, 'gre_field_mapping_3mm_phase')
     self.assertEqual(mag.name, 'gre_field_mapping_3mm_mag')
Example #19
 def test_dcm2niix(self):
     study = self.create_study(DummyStudy,
                               'concatenate',
                               inputs=[
                                   DatasetMatch('input_dataset',
                                                dicom_format,
                                                't2_tse_tra_p2_448')
                               ])
     study.data('output_dataset')[0]
     self.assertDatasetCreated('output_dataset.nii.gz', study.name)
Example #20
 def test_qsm_de_pipeline(self):
     study = self.create_study(
         T2StarStudy,
         'qsm',
         inputs=[DatasetMatch('coils', zip_format, 'swi_coils')])
     study.qsm_pipeline().run(work_dir=self.work_dir)
     for fname in ('qsm.nii.gz', 'tissue_phase.nii.gz',
                   'tissue_mask.nii.gz', 'qsm_mask.nii.gz'):
         self.assertDatasetCreated(dataset_name=fname,
                                   study_name=study.name)
Example #21
 def test_average_response(self):
     study = self.create_study(
         DiffusionStudy, 'response',
         {DatasetMatch('response', text_format, 'response')})
     study.average_response_pipeline().run(work_dir=self.work_dir)
     for subject_id in self.subject_ids:
         for visit_id in self.visit_ids(subject_id):
             self.assertDatasetCreated('avg_response.txt',
                                       study.name,
                                       subject=subject_id,
                                       visit=visit_id)
Example #22
 def test_intensity_normalization(self):
     study = self.create_study(DiffusionStudy, 'intens_norm', [
         DatasetMatch('bias_correct', nifti_gz_format, 'biascorrect'),
         DatasetMatch('brain_mask', nifti_gz_format, 'brainmask'),
         DatasetMatch('grad_dirs', fsl_bvecs_format, 'gradientdirs'),
         DatasetMatch('bvalues', fsl_bvals_format, 'bvalues')
     ])
     study.intensity_normalisation_pipeline().run(work_dir=self.work_dir)
     for subject_id in self.subject_ids:
         for visit_id in self.visit_ids(subject_id):
             self.assertDatasetCreated('norm_intensity.mif',
                                       study.name,
                                       subject=subject_id,
                                       visit=visit_id)
     self.assertDatasetCreated('norm_intens_fa_template.mif',
                               study.name,
                               frequency='per_project')
     self.assertDatasetCreated('norm_intens_wm_mask.mif',
                               study.name,
                               frequency='per_project')
Example #23
WORK_PATH = os.path.join('/scratch', 'dq13', 'aspree', 'qsm')
CACHE_PROJECT_PATH = os.path.join(WORK_PATH, 'project.pkl')
try:
    os.makedirs(WORK_PATH)
except OSError as e:
    if e.errno != errno.EEXIST:
        raise
session_ids_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                '..', 'resources',
                                'old_swi_coils_remaining.txt')
print(session_ids_path)
with open(session_ids_path) as f:
    ids = f.read().split()

PROJECT_ID = 'MRH017'
datasets = {DatasetMatch('coils', zip_format, 'swi_coils')}
visit_ids = visit_ids['MR01']

repository = XnatRepository(cache_dir='/scratch/dq13/xnat_cache3')

if args.cache_project:
    project = repository.project(PROJECT_ID,
                                 subject_ids=ids,
                                 visit_ids=visit_ids)
    with open(CACHE_PROJECT_PATH, 'w') as f:
        pkl.dump(project, f)
else:
    with open(CACHE_PROJECT_PATH) as f:
        project = pkl.load(f)

repository.cache(PROJECT_ID,
Example #24
repository_path = os.path.abspath(
    os.path.join(os.environ['HOME'], 'Data', 'MBI', 'noddi'))
BASE_WORK_PATH = os.path.abspath(
    os.path.join(os.environ['HOME'], 'Data', 'MBI', 'work'))

NODDI_PROJECT = 'pilot'
NODDI_SUBJECT = 'SUBJECT1'
NODDI_SESSION = 'SESSION1'
WORK_PATH = os.path.abspath(os.path.join(BASE_WORK_PATH, 'noddi'))
SESSION_DIR = os.path.join(repository_path, NODDI_PROJECT, NODDI_SUBJECT,
                           NODDI_SESSION)
DATASET_NAME = 'noddi'

shutil.rmtree(WORK_PATH, ignore_errors=True)
os.makedirs(WORK_PATH)
study = NODDIStudy(name=DATASET_NAME,
                   project_id=NODDI_PROJECT,
                   repository=LocalRepository(repository_path),
                   input_scans=[
                       DatasetMatch('low_b_dw_scan', mrtrix_format,
                                    'r_l_noddi_b700_30_directions'),
                       DatasetMatch('high_b_dw_scan', mrtrix_format,
                                    'r_l_noddi_b2000_60_directions'),
                       DatasetMatch('forward_rpe', mrtrix_format,
                                    'r_l_noddi_b0_6'),
                       DatasetMatch('reverse_rpe', mrtrix_format,
                                    'l_r_noddi_b0_6')
                   ])
study.noddi_fitting_pipeline().run(work_dir=WORK_PATH)
Example #25
def create_fmri_study_class(name,
                            t1,
                            epis,
                            epi_number,
                            echo_spacing,
                            fm_mag=None,
                            fm_phase=None,
                            run_regression=False):

    inputs = []
    dct = {}
    data_specs = []
    parameter_specs = []
    output_files = []
    distortion_correction = False

    if fm_mag and fm_phase:
        logger.info('Both magnitude and phase field map images provided. EPI '
                    'distortion correction will be performed.')
        distortion_correction = True
    elif fm_mag or fm_phase:
        logger.info(
            'In order to perform EPI distortion correction both magnitude '
            'and phase field map images must be provided.')
    else:
        logger.info(
            'No field map image provided. Distortion correction will not be '
            'performed.')

    study_specs = [SubStudySpec('t1', T1Study)]
    ref_spec = {'t1_brain': 'coreg_ref_brain'}
    inputs.append(
        DatasetMatch('t1_primary', dicom_format, t1, is_regex=True, order=0))
    epi_refspec = ref_spec.copy()
    epi_refspec.update({
        't1_wm_seg': 'coreg_ref_wmseg',
        't1_preproc': 'coreg_ref_preproc',
        'train_data': 'train_data'
    })
    study_specs.append(SubStudySpec('epi_0', FunctionalMRIStudy, epi_refspec))
    if epi_number > 1:
        epi_refspec.update({
            't1_wm_seg': 'coreg_ref_wmseg',
            't1_preproc': 'coreg_ref_preproc',
            'train_data': 'train_data',
            'epi_0_coreg_to_atlas_warp': 'coreg_to_atlas_warp',
            'epi_0_coreg_to_atlas_mat': 'coreg_to_atlas_mat'
        })
        study_specs.extend(
            SubStudySpec('epi_{}'.format(i), FunctionalMRIStudy, epi_refspec)
            for i in range(1, epi_number))

    for i in range(epi_number):
        inputs.append(
            DatasetMatch('epi_{}_primary'.format(i),
                         dicom_format,
                         epis,
                         order=i,
                         is_regex=True))
        parameter_specs.append(
            ParameterSpec('epi_{}_fugue_echo_spacing'.format(i), echo_spacing))

    if distortion_correction:
        inputs.extend(
            DatasetMatch('epi_{}_field_map_mag'.format(i),
                         dicom_format,
                         fm_mag,
                         dicom_tags={IMAGE_TYPE_TAG: MAG_IMAGE_TYPE},
                         is_regex=True,
                         order=0) for i in range(epi_number))
        inputs.extend(
            DatasetMatch('epi_{}_field_map_phase'.format(i),
                         dicom_format,
                         fm_phase,
                         dicom_tags={IMAGE_TYPE_TAG: PHASE_IMAGE_TYPE},
                         is_regex=True,
                         order=0) for i in range(epi_number))
    if run_regression:
        output_files.extend('epi_{}_smoothed_ts'.format(i)
                            for i in range(epi_number))
    else:
        output_files.extend('epi_{}_fix_dir'.format(i)
                            for i in range(epi_number))

    dct['add_sub_study_specs'] = study_specs
    dct['add_data_specs'] = data_specs
    dct['add_parameter_specs'] = parameter_specs
    dct['__metaclass__'] = MultiStudyMetaClass
    return (MultiStudyMetaClass(name, (FunctionalMRIMixin, ),
                                dct), inputs, output_files)
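The factory returns the generated MultiStudy class together with its DatasetMatch inputs and the names of the expected output datasets. A hypothetical invocation is sketched below; the class name, scan-name regexes and echo-spacing value are placeholders, not real acquisitions from the example above.

# Hypothetical call to create_fmri_study_class; all patterns are placeholders.
FmriStudyClass, fmri_inputs, fmri_outputs = create_fmri_study_class(
    'ExampleFmriStudy',
    t1='t1_mprage.*',                # regex matching the T1 scan
    epis='ep2d_bold.*',              # regex matching the EPI runs
    epi_number=2,                    # generates sub-studies epi_0 and epi_1
    echo_spacing=0.00054,            # illustrative value for each
                                     # epi_{i}_fugue_echo_spacing ParameterSpec
    fm_mag='gre_field_mapping.*',    # supplying both field-map images enables
    fm_phase='gre_field_mapping.*',  # the distortion-correction inputs
    run_regression=False)            # outputs are the epi_{i}_fix_dir datasets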