def test_preprocess(self):
    """Run DWI preprocessing and check the preprocessed image exists on disk."""
    study = self.create_study(
        DwiStudy, 'preprocess', [
            InputFilesets('magnitude', 'r_l_dwi_b700_30',
                          mrtrix_image_format),
            InputFilesets('dwi_reference', 'l_r_dwi_b0_6',
                          mrtrix_image_format)])
    preproc = next(iter(study.data('mag_preproc')))
    self.assertTrue(os.path.exists(preproc.path))
def test_freesurfer_pipeline(self):
    """Run the FreeSurfer recon-all pipeline on T1/T2 inputs."""
    inputs = [InputFilesets('t1', 'mprage', nifti_gz_format),
              InputFilesets('t2', 'flair', nifti_gz_format)]
    study = self.create_study(T1T2Study, 'freesurfer', inputs=inputs)
    study.freesurfer_pipeline().run(work_dir=self.work_dir)
    self.assertFilesetCreated('fs_recon_all.fs.zip')
def test_full_multi_study(self):
    """Derive d/e/f from FullMultiStudy and check parameter values
    propagate from the multi-study level into each sub-study."""
    study = self.create_study(
        FullMultiStudy, 'full',
        [InputFilesets(name, 'ones', text_format)
         for name in ('a', 'b', 'c')],
        parameters=[Parameter('required_op', 'mul')])
    d, e, f = study.data(('d', 'e', 'f'),
                         subject_id='SUBJECT', visit_id='VISIT')
    self.assertContentsEqual(d, 2.0)
    self.assertContentsEqual(e, 3.0)
    self.assertContentsEqual(f, 6.0)
    # Test parameter values in MultiStudy
    for pname, expected in (('p1', 100), ('p2', '200'), ('p3', 300.0),
                            ('q1', 150), ('q2', '250'),
                            ('required_op', 'mul')):
        self.assertEqual(study._get_parameter(pname).value, expected)
    # Test parameter values mapped into each SubStudy
    ss1 = study.substudy('ss1')
    for pname, expected in (('o1', 100), ('o2', '200'), ('o3', 300.0)):
        self.assertEqual(ss1._get_parameter(pname).value, expected)
    ss2 = study.substudy('ss2')
    for pname, expected in (('o1', 150), ('o2', '250'), ('o3', 300.0),
                            ('product_op', 'mul')):
        self.assertEqual(ss2._get_parameter(pname).value, expected)
def test_partial_multi_study(self):
    """Derive outputs from PartialMultiStudy and check that only the
    explicitly mapped parameters are renamed at the multi-study level."""
    study = self.create_study(
        PartialMultiStudy, 'partial',
        [InputFilesets(name, 'ones', text_format)
         for name in ('a', 'b', 'c')],
        parameters=[Parameter('ss2_product_op', 'mul')])
    ss1_z = study.data('ss1_z', subject_id='SUBJECT', visit_id='VISIT')
    ss2_z = next(iter(study.data('ss2_z')))
    self.assertContentsEqual(ss1_z, 2.0)
    self.assertContentsEqual(study.data('ss2_y'), 3.0)
    self.assertContentsEqual(ss2_z, 6.0)
    # Test parameter values in MultiStudy
    for pname, expected in (
            ('p1', 1000), ('ss1_o2', '2'), ('ss1_o3', 3.0),
            ('ss2_o2', '20'), ('ss2_o3', 30.0),
            ('ss2_product_op', 'mul')):
        self.assertEqual(study._get_parameter(pname).value, expected)
    # Test parameter values mapped into each SubStudy
    ss1 = study.substudy('ss1')
    for pname, expected in (('o1', 1000), ('o2', '2'), ('o3', 3.0)):
        self.assertEqual(ss1._get_parameter(pname).value, expected)
    ss2 = study.substudy('ss2')
    for pname, expected in (('o1', 1000), ('o2', '20'), ('o3', 30.0),
                            ('product_op', 'mul')):
        self.assertEqual(ss2._get_parameter(pname).value, expected)
def test_t2_registration_pipeline(self):
    """Register the T2 (FLAIR) image to the T1 (MPRAGE) image."""
    inputs = [InputFilesets('t1', 'mprage', nifti_gz_format),
              InputFilesets('t2', 'flair', nifti_gz_format)]
    study = self.create_study(T1T2Study, 't2_registration', inputs=inputs)
    study.t2_registration_pipeline().run(work_dir=self.work_dir)
    self.assertFilesetCreated('t2_coreg.nii.gz', study.name)
def test_extract_b0(self):
    """Extract the b0 volume from a preprocessed DWI series."""
    study = self.create_study(
        DwiStudy, 'extract_b0',
        [InputFilesets('mag_preproc', 'mag_preproc', nifti_gz_format),
         InputFilesets('grad_dirs', 'gradient_dirs', fsl_bvecs_format),
         InputFilesets('bvalues', 'bvalues', fsl_bvals_format)])
    study.extract_b0_pipeline().run(work_dir=self.work_dir)
    self.assertFilesetCreated('primary.nii.gz', study.name)
def test_bias_correct(self):
    """Bias-field correct the preprocessed DWI magnitude image using the
    mrtrix mask tool."""
    study = self.create_study(
        DwiStudy, 'bias_correct',
        [InputFilesets('mag_preproc', 'mag_preproc', nifti_gz_format),
         InputFilesets('grad_dirs', 'gradient_dirs', fsl_bvecs_format),
         InputFilesets('bvalues', 'bvalues', fsl_bvals_format)])
    pipeline = study.bias_correct_pipeline(mask_tool='mrtrix')
    pipeline.run(work_dir=self.work_dir)
    self.assertFilesetCreated('bias_correct.nii.gz', study.name)
def test_registration_to_matrix(self):
    """Apply a precomputed transform matrix during linear registration."""
    study = self.create_study(
        CoregisteredToMatrixStudy, 'registration_to_matrix', {
            InputFilesets('to_register', 'flair', nifti_gz_format),
            InputFilesets('reference', 'mprage', nifti_gz_format),
            InputFilesets('matrix', 'matrix', text_matrix_format)})
    pipeline = study.linear_registration_pipeline()
    pipeline.run(work_dir=self.work_dir)
    self.assertFilesetCreated('registered.nii.gz', study.name)
def test_response(self):
    """Estimate the fibre-response function from bias-corrected DWI."""
    study = self.create_study(
        DwiStudy, 'response',
        [InputFilesets('bias_correct', 'bias_correct', nifti_gz_format),
         InputFilesets('brain_mask', 'brain_mask', nifti_gz_format),
         InputFilesets('grad_dirs', 'gradient_dirs', fsl_bvecs_format),
         InputFilesets('bvalues', 'bvalues', fsl_bvals_format)])
    study.response_pipeline().run(work_dir=self.work_dir)
    self.assertFilesetCreated('response.txt', study.name)
def test_tensor(self):
    """Fit the diffusion tensor from bias-corrected DWI."""
    study = self.create_study(
        DwiStudy, 'tensor',
        [InputFilesets('bias_correct', 'bias_correct', nifti_gz_format),
         InputFilesets('brain_mask', 'brain_mask', nifti_gz_format),
         InputFilesets('grad_dirs', 'gradient_dirs', fsl_bvecs_format),
         InputFilesets('bvalues', 'bvalues', fsl_bvals_format)])
    study.tensor_pipeline().run(work_dir=self.work_dir)
    self.assertFilesetCreated('tensor.nii.gz', study.name)
def test_format_conversions(self):
    """Check implicit format conversions on both pipeline inputs and
    outputs are materialised."""
    study = self.create_study(
        ConversionStudy, 'conversion',
        [InputFilesets('text', 'text', text_format),
         InputFilesets('directory', 'directory', directory_format),
         InputFilesets('zip', 'zip', zip_format)])
    for spec_name in ('text_from_text',
                      'directory_from_zip_on_input',
                      'zip_from_directory_on_input',
                      'directory_from_zip_on_output',
                      'zip_from_directory_on_output'):
        self.assertCreated(next(iter(study.data(spec_name))))
def test_suvr(self):
    """Compute an SUVR image from a registered PET volume and a
    cerebellum base mask."""
    study = self.create_study(
        StaticPetStudy, 'suvr', inputs=[
            InputFilesets('registered_volume', 'suvr_registered_volume',
                          nifti_gz_format),
            InputFilesets('base_mask', 'cerebellum_mask',
                          nifti_gz_format)])
    study.suvr_pipeline().run(work_dir=self.work_dir, plugin='Linear')
    self.assertFilesetCreated('SUVR_image.nii.gz', study.name)
def test_brain_extraction_pipelines(self):
    """Run T1 brain extraction and manual WMH-mask registration, then
    check all expected outputs were created.

    Fixes the 'manual_wmh_mask' input, whose pattern and format
    arguments were swapped relative to the
    InputFilesets(name, pattern, format) convention used by every
    other positional call in this file.
    """
    study = self.create_study(
        T1T2Study, 'brain_mask', inputs=[
            InputFilesets('t1', 'mprage', nifti_gz_format),
            InputFilesets('t2', 'flair', nifti_gz_format),
            # pattern and format were previously swapped here
            InputFilesets('manual_wmh_mask', 'manual_wmh_mask',
                          nifti_gz_format)])
    study.t1_brain_extraction_pipeline().run(work_dir=self.work_dir)
    study.manual_wmh_mask_registration_pipeline().run(
        work_dir=self.work_dir)
    for fname in ('t1_brain.nii.gz', 't2_brain.nii.gz',
                  'brain_mask.nii.gz', 'manual_wmh_mask_coreg.nii.gz'):
        self.assertFilesetCreated(fname, study.name)
def test_registration(self):
    """Linearly register FLAIR to MPRAGE; check both the registered
    image and the transform matrix are produced."""
    study = self.create_study(
        CoregisteredStudy, 'registration', inputs=[
            InputFilesets('to_register', 'flair', nifti_gz_format),
            InputFilesets('reference', 'mprage', nifti_gz_format)])
    study.linear_registration_pipeline().run(work_dir=self.work_dir)
    for fname in ('registered.nii.gz', 'matrix.mat'):
        self.assertFilesetCreated(fname, study.name)
def test_concatenate(self):
    """Concatenate low-b and high-b DWI scans into a single series."""
    study = self.create_study(
        NODDIStudy, 'concatenate', inputs=[
            InputFilesets('low_b_dw_scan', 'r_l_dwi_b700_30',
                          mrtrix_image_format),
            InputFilesets('high_b_dw_scan', 'r_l_dwi_b2000_60',
                          mrtrix_image_format)])
    study.concatenate_pipeline().run(work_dir=self.work_dir)
    self.assertFilesetCreated('dwi_scan.mif', study.name)
def test_repository_roundtrip(self): study = DummyStudy(self.STUDY_NAME, self.repository, processor=SingleProc('a_dir'), inputs=[ InputFilesets('source1', 'source1', text_format), InputFilesets('source2', 'source2', text_format), InputFilesets('source3', 'source3', text_format), InputFilesets('source4', 'source4', text_format) ]) # TODO: Should test out other file formats as well. source_files = ('source1', 'source2', 'source3', 'source4') sink_files = ('sink1', 'sink3', 'sink4') inputnode = pe.Node(IdentityInterface(['subject_id', 'visit_id']), 'inputnode') inputnode.inputs.subject_id = self.SUBJECT inputnode.inputs.visit_id = self.VISIT source = pe.Node(RepositorySource( study.bound_spec(f).collection for f in source_files), name='source') dummy_pipeline = study.dummy_pipeline() dummy_pipeline.cap() sink = pe.Node(RepositorySink((study.bound_spec(f).collection for f in sink_files), dummy_pipeline), name='sink') sink.inputs.name = 'repository_sink' sink.inputs.desc = ( "A test session created by repository roundtrip unittest") # Create workflow connecting them together workflow = pe.Workflow('source_sink_unit_test', base_dir=self.work_dir) workflow.add_nodes((source, sink)) workflow.connect(inputnode, 'subject_id', source, 'subject_id') workflow.connect(inputnode, 'visit_id', source, 'visit_id') workflow.connect(inputnode, 'subject_id', sink, 'subject_id') workflow.connect(inputnode, 'visit_id', sink, 'visit_id') for source_name in source_files: if not source_name.endswith('2'): sink_name = source_name.replace('source', 'sink') workflow.connect(source, source_name + PATH_SUFFIX, sink, sink_name + PATH_SUFFIX) workflow.run() # Check local directory was created properly outputs = [ f for f in sorted( os.listdir(self.get_session_dir(from_study=self.STUDY_NAME))) if f not in (BasicRepo.FIELDS_FNAME, BasicRepo.PROV_DIR) ] self.assertEqual(outputs, ['sink1.txt', 'sink3.txt', 'sink4.txt'])
class TestDicomTagMatch(BaseTestCase):
    """Tests matching of input filesets by DICOM header tags and by
    scan order."""

    # DICOM (group, element) tag holding the image-type descriptor
    IMAGE_TYPE_TAG = ('0008', '0008')
    # Regex matching both the phase and magnitude GRE field-map scans
    GRE_PATTERN = 'gre_field_mapping_3mm.*'
    PHASE_IMAGE_TYPE = ['ORIGINAL', 'PRIMARY', 'P', 'ND']
    MAG_IMAGE_TYPE = ['ORIGINAL', 'PRIMARY', 'M', 'ND', 'NORM']
    # Matchers that disambiguate phase vs magnitude via the image-type tag
    DICOM_MATCH = [
        InputFilesets('gre_phase', GRE_PATTERN, dicom_format,
                      dicom_tags={IMAGE_TYPE_TAG: PHASE_IMAGE_TYPE},
                      is_regex=True),
        InputFilesets('gre_mag', GRE_PATTERN, dicom_format,
                      dicom_tags={IMAGE_TYPE_TAG: MAG_IMAGE_TYPE},
                      is_regex=True)]
    INPUTS_FROM_REF_DIR = True
    REF_FORMATS = [dicom_format]

    def test_dicom_match(self):
        """Match phase/magnitude scans by their DICOM image-type tag."""
        study = self.create_study(
            TestMatchStudy, 'test_dicom', inputs=self.DICOM_MATCH)
        phase = list(study.data('gre_phase'))[0]
        mag = list(study.data('gre_mag'))[0]
        self.assertEqual(phase.name, 'gre_field_mapping_3mm_phase')
        self.assertEqual(mag.name, 'gre_field_mapping_3mm_mag')

    def test_order_match(self):
        """Match phase/magnitude scans by their order in the session."""
        study = self.create_study(
            TestMatchStudy, 'test_dicom', inputs=[
                InputFilesets('gre_phase', pattern=self.GRE_PATTERN,
                              valid_formats=dicom_format, order=1,
                              is_regex=True),
                InputFilesets('gre_mag', pattern=self.GRE_PATTERN,
                              valid_formats=dicom_format, order=0,
                              is_regex=True)])
        phase = list(study.data('gre_phase'))[0]
        mag = list(study.data('gre_mag'))[0]
        self.assertEqual(phase.name, 'gre_field_mapping_3mm_phase')
        self.assertEqual(mag.name, 'gre_field_mapping_3mm_mag')
def test_id_match(self):
    """Match scans in an XNAT repository by their numeric scan IDs."""
    repository = XnatRepo(
        project_id=self.project, server=SERVER,
        cache_dir=tempfile.mkdtemp())
    study = test_data.TestMatchStudy(
        name='test_dicom',
        repository=repository,
        processor=SingleProc(self.work_dir),
        inputs=[
            InputFilesets('gre_phase', valid_formats=dicom_format, id=7),
            InputFilesets('gre_mag', valid_formats=dicom_format, id=6)])
    phase = next(iter(study.data('gre_phase')))
    mag = next(iter(study.data('gre_mag')))
    self.assertEqual(phase.name, 'gre_field_mapping_3mm_phase')
    self.assertEqual(mag.name, 'gre_field_mapping_3mm_mag')
def test_ute(self):
    """Convert UTE umap pipeline outputs back to DICOM format."""
    study = self.create_study(
        UteStudy, 'pipeline', {
            InputFilesets('ute_echo1', 'ute_echo1', dicom_format),
            InputFilesets('ute_echo2', 'ute_echo2', dicom_format),
            InputFilesets('umap_ute', 'umap_ute', dicom_format)})
    study.conversion_to_dicom_pipeline().run(work_dir=self.work_dir)
    for fname in ('sute_cont_dicoms', 'sute_fix_dicoms'):
        self.assertFilesetCreated(fname, study.name)
def test_reg(self):
    """Run ICA decomposition on a dynamic PET image series."""
    study = self.create_study(
        DynamicPetStudy, 'reg', inputs=[
            InputFilesets('pet_volumes', 'pet_image', nifti_gz_format)])
    study.ICA_pipeline().run(work_dir=self.work_dir, plugin='Linear')
    self.assertFilesetCreated('decomposed_file.nii.gz', study.name)
def test_fields_roundtrip(self):
    """Sink int/float/str fields to an XNAT repository and source them
    back, checking the values survive the roundtrip unchanged."""
    repository = XnatRepo(
        server=SERVER, cache_dir=self.cache_dir,
        project_id=self.project)
    study = DummyStudy(
        self.STUDY_NAME, repository, processor=SingleProc('a_dir'),
        inputs=[InputFilesets('source1', 'source1', text_format)])
    fields = ['field{}'.format(i) for i in range(1, 4)]
    dummy_pipeline = study.dummy_pipeline()
    dummy_pipeline.cap()
    sink = pe.Node(
        RepositorySink(
            (study.bound_spec(f).collection for f in fields),
            dummy_pipeline),
        name='fields_sink')
    # One field per supported primitive type (int, float, str)
    sink.inputs.field1_field = field1 = 1
    sink.inputs.field2_field = field2 = 2.0
    sink.inputs.field3_field = field3 = str('3')
    sink.inputs.subject_id = self.SUBJECT
    sink.inputs.visit_id = self.VISIT
    sink.inputs.desc = "Test sink of fields"
    sink.inputs.name = 'test_sink'
    sink.run()
    source = pe.Node(
        RepositorySource(
            study.bound_spec(f).collection for f in fields),
        name='fields_source')
    source.inputs.visit_id = self.VISIT
    source.inputs.subject_id = self.SUBJECT
    source.inputs.desc = "Test source of fields"
    source.inputs.name = 'test_source'
    results = source.run()
    # Values read back must equal the values written
    self.assertEqual(results.outputs.field1_field, field1)
    self.assertEqual(results.outputs.field2_field, field2)
    self.assertEqual(results.outputs.field3_field, field3)
def test_pipeline_prerequisites(self):
    """Derive outputs whose pipelines depend on chained format
    conversions, checking each prerequisite conversion is created.

    Fixes the 'nifti_gz', 'dicom' and 'directory' inputs, whose pattern
    and format arguments were swapped relative to the
    InputFilesets(name, pattern, format) convention used by the other
    two inputs in the same list (and throughout this file).
    """
    study = self.create_study(
        ConversionStudy, 'conversion', [
            InputFilesets('mrtrix', 'mrtrix', text_format),
            # pattern/format order corrected on the next three inputs
            InputFilesets('nifti_gz', 'nifti_gz', text_format),
            InputFilesets('dicom', 't1_mprage_sag_p2_iso_1_ADNI',
                          dicom_format),
            InputFilesets('directory', 't1_mprage_sag_p2_iso_1_ADNI',
                          directory_format),
            InputFilesets('zip', 'zip', zip_format)])
    for spec_name in ('nifti_gz_from_dicom', 'mrtrix_from_nifti_gz',
                      'nifti_from_mrtrix', 'directory_from_zip',
                      'zip_from_directory'):
        self.assertFilesetCreated(next(iter(study.data(spec_name))))
def test_scan_label_quality(self):
    """Check that acceptable_quality selects the first scan (by order)
    whose quality label is acceptable."""
    tmp_dir = tempfile.mkdtemp()
    repository = XnatRepo(
        project_id=self.project, server=SERVER, cache_dir=tmp_dir)
    tree = repository.tree(
        subject_ids=[self.SUBJECT], visit_ids=[self.VISIT])
    cases = (
        (None, '1unusable'),
        ((None, 'questionable', 'usable'), '2unlabelled'),
        (('questionable', 'usable'), '3questionable'),
        ('usable', '4usable'))
    for accepted, expected in cases:
        inpt = InputFilesets('dummy', order=0, valid_formats=text_format,
                             acceptable_quality=accepted)
        matched = inpt.match(tree).item(subject_id=self.SUBJECT,
                                        visit_id=self.VISIT)
        self.assertEqual(matched.name, expected)
def test_derivable(self):
    """Check the 'derivable' property of specs across combinations of
    provided inputs, switches and branch parameters."""
    # Vanilla study: only specs reachable from the single provided
    # input (and the default branch) are derivable
    vanilla = self.create_study(TestDerivableStudy, 'study',
                                inputs={'required': 'required'})
    self.assertTrue(vanilla.spec('derivable').derivable)
    self.assertTrue(vanilla.spec('another_derivable').derivable)
    self.assertFalse(vanilla.spec('missing_input').derivable)
    self.assertFalse(vanilla.spec('requires_switch').derivable)
    self.assertFalse(vanilla.spec('requires_switch2').derivable)
    self.assertTrue(vanilla.spec('requires_foo').derivable)
    self.assertFalse(vanilla.spec('requires_bar').derivable)
    # Enabling 'switch' makes the switch-gated specs derivable
    with_switch = self.create_study(
        TestDerivableStudy,
        'study_with_switch',
        inputs=[InputFilesets('required', 'required', text_format)],
        parameters={'switch': True})
    self.assertTrue(with_switch.spec('requires_switch').derivable)
    self.assertTrue(with_switch.spec('requires_switch2').derivable)
    # branch == 'bar' flips which of foo/bar is derivable
    bar_branch = self.create_study(
        TestDerivableStudy,
        'study_bar_branch',
        inputs=[InputFilesets('required', 'required', text_format)],
        parameters={'branch': 'bar'})
    self.assertFalse(bar_branch.spec('requires_foo').derivable)
    self.assertTrue(bar_branch.spec('requires_bar').derivable)
    # Supplying the optional input makes 'missing_input' derivable
    with_optional = self.create_study(
        TestDerivableStudy,
        'study_with_inputs',
        inputs=[InputFilesets('required', 'required', text_format),
                InputFilesets('optional', 'required', text_format)])
    self.assertTrue(with_optional.spec('missing_input').derivable)
    # An unhandled branch value raises a design error on access
    unhandled = self.create_study(
        TestDerivableStudy,
        'study_unhandled',
        inputs=[InputFilesets('required', 'required', text_format)],
        parameters={'branch': 'wee'})
    self.assertRaises(ArcanaDesignError,
                      getattr,
                      unhandled.spec('requires_foo'),
                      'derivable')
def test_module_load_in_map(self):
    """Run pipelines under an environment-modules environment and check
    no modules remain loaded afterwards."""
    study = self.create_study(
        RequirementsStudy, 'requirements',
        [InputFilesets('ones', 'ones', text_format)],
        environment=ModulesEnv())
    for spec_name, expected in (('threes', 3), ('fours', 4)):
        item = next(iter(study.data(spec_name)))
        self.assertEqual(item.value, expected)
    self.assertEqual(ModulesEnv.loaded(), {})
def test_order_match(self):
    """Match phase/magnitude scans purely by acquisition order."""
    matchers = [
        InputFilesets('gre_phase', pattern=self.GRE_PATTERN,
                      valid_formats=dicom_format, order=1,
                      is_regex=True),
        InputFilesets('gre_mag', pattern=self.GRE_PATTERN,
                      valid_formats=dicom_format, order=0,
                      is_regex=True)]
    study = self.create_study(TestMatchStudy, 'test_dicom',
                              inputs=matchers)
    phase = next(iter(study.data('gre_phase')))
    mag = next(iter(study.data('gre_mag')))
    self.assertEqual(phase.name, 'gre_field_mapping_3mm_phase')
    self.assertEqual(mag.name, 'gre_field_mapping_3mm_mag')
def test_multi_multi_study(self):
    """Check parameter propagation through two levels of nesting
    (MultiMultiStudy -> Full/PartialMultiStudy -> SubStudy)."""
    study = self.create_study(
        MultiMultiStudy, 'multi_multi', [
            InputFilesets('ss1_x', 'ones', text_format),
            InputFilesets('ss1_y', 'ones', text_format),
            InputFilesets('full_a', 'ones', text_format),
            InputFilesets('full_b', 'ones', text_format),
            InputFilesets('full_c', 'ones', text_format),
            InputFilesets('partial_a', 'ones', text_format),
            InputFilesets('partial_b', 'ones', text_format),
            InputFilesets('partial_c', 'ones', text_format)],
        parameters=[
            Parameter('full_required_op', 'mul'),
            Parameter('partial_ss2_product_op', 'mul')])
    self.assertContentsEqual(study.data('g'), 11.0)
    # Test parameter values in MultiStudy ('full' sub-study prefix)
    self.assertEqual(study._get_parameter('full_p1').value, 100)
    self.assertEqual(study._get_parameter('full_p2').value, '200')
    self.assertEqual(study._get_parameter('full_p3').value, 300.0)
    self.assertEqual(study._get_parameter('full_q1').value, 150)
    self.assertEqual(study._get_parameter('full_q2').value, '250')
    self.assertEqual(study._get_parameter('full_required_op').value,
                     'mul')
    # Test parameter values in SubStudy (nested under 'full')
    ss1 = study.substudy('full').substudy('ss1')
    self.assertEqual(ss1._get_parameter('o1').value, 100)
    self.assertEqual(ss1._get_parameter('o2').value, '200')
    self.assertEqual(ss1._get_parameter('o3').value, 300.0)
    ss2 = study.substudy('full').substudy('ss2')
    self.assertEqual(ss2._get_parameter('o1').value, 150)
    self.assertEqual(ss2._get_parameter('o2').value, '250')
    self.assertEqual(ss2._get_parameter('o3').value, 300.0)
    self.assertEqual(ss2._get_parameter('product_op').value, 'mul')
    # Test parameter values in MultiStudy ('partial' sub-study prefix)
    self.assertEqual(study._get_parameter('partial_p1').value, 1000)
    self.assertEqual(study._get_parameter('partial_ss1_o2').value, '2')
    self.assertEqual(study._get_parameter('partial_ss1_o3').value, 3.0)
    self.assertEqual(study._get_parameter('partial_ss2_o2').value, '20')
    self.assertEqual(study._get_parameter('partial_ss2_o3').value, 30.0)
    self.assertEqual(
        study._get_parameter('partial_ss2_product_op').value, 'mul')
    # Test parameter values in SubStudy (nested under 'partial')
    ss1 = study.substudy('partial').substudy('ss1')
    self.assertEqual(ss1._get_parameter('o1').value, 1000)
    self.assertEqual(ss1._get_parameter('o2').value, '2')
    self.assertEqual(ss1._get_parameter('o3').value, 3.0)
    ss2 = study.substudy('partial').substudy('ss2')
    self.assertEqual(ss2._get_parameter('o1').value, 1000)
    self.assertEqual(ss2._get_parameter('o2').value, '20')
    self.assertEqual(ss2._get_parameter('o3').value, 30.0)
    self.assertEqual(ss2._get_parameter('product_op').value, 'mul')
def test_intensity_normalization(self):
    """Run DWI intensity normalisation and check the per-session and
    per-study (template/mask) outputs."""
    # NOTE(review): the patterns below ('biascorrect', 'brainmask',
    # 'gradientdirs') have no underscores, unlike the 'bias_correct' /
    # 'brain_mask' / 'gradient_dirs' patterns used by the sibling DWI
    # tests -- confirm the reference data for this test really uses
    # these names.
    study = self.create_study(DwiStudy, 'intens_norm', [
        InputFilesets('bias_correct', 'biascorrect', nifti_gz_format),
        InputFilesets('brain_mask', 'brainmask', nifti_gz_format),
        InputFilesets('grad_dirs', 'gradientdirs', fsl_bvecs_format),
        InputFilesets('bvalues', 'bvalues', fsl_bvals_format)])
    study.intensity_normalisation_pipeline().run(work_dir=self.work_dir)
    # One normalised image per subject/visit session
    for subject_id in self.subject_ids:
        for visit_id in self.visit_ids(subject_id):
            self.assertFilesetCreated('norm_intensity.mif', study.name,
                                      subject=subject_id, visit=visit_id)
    # Study-wide (per_study frequency) outputs
    self.assertFilesetCreated('norm_intens_fa_template.mif', study.name,
                              frequency='per_study')
    self.assertFilesetCreated('norm_intens_wm_mask.mif', study.name,
                              frequency='per_study')
def test_average_response(self):
    """Average the per-session response functions and check the output
    exists for every subject/visit session."""
    study = self.create_study(
        DwiStudy, 'response',
        {InputFilesets('response', 'response', text_format)})
    study.average_response_pipeline().run(work_dir=self.work_dir)
    for subject_id in self.subject_ids:
        for visit_id in self.visit_ids(subject_id):
            self.assertFilesetCreated('avg_response.txt', study.name,
                                      subject=subject_id,
                                      visit=visit_id)
def test_dcm2niix(self):
    """Convert a DICOM input to gzipped NIfTI via dcm2niix.

    Fixes the input specification, whose pattern and format arguments
    were swapped relative to the InputFilesets(name, pattern, format)
    convention used throughout these tests.
    """
    study = self.create_study(
        DummyStudy, 'concatenate', environment=environment, inputs=[
            # pattern and format were previously swapped here
            InputFilesets('input_fileset', 't2_tse_tra_p2_448',
                          dicom_format)])
    # Trigger derivation of the converted output
    list(study.data('output_fileset'))[0]
    self.assertFilesetCreated('output_fileset.nii.gz', study.name)