Example #1
 def from_path(cls, path, frequency='per_session', format=None,  # @ReservedAssignment @IgnorePep8
               **kwargs):
     if not os.path.exists(path):
         raise ArcanaUsageError(
             "Attempting to read Fileset from path '{}' but it "
             "does not exist".format(path))
     if os.path.isdir(path):
         within_exts = frozenset(
             split_extension(f)[1] for f in os.listdir(path)
             if not f.startswith('.'))
         if format is None:
             # Try to guess format
             try:
                 format = FileFormat.by_within_dir_exts(within_exts)  # @ReservedAssignment @IgnorePep8
             except ArcanaFileFormatNotRegisteredError:
                 # Fall back to general directory format
                 format = directory_format  # @ReservedAssignment
         name = os.path.basename(path)
     else:
         filename = os.path.basename(path)
         name, ext = split_extension(filename)
         if format is None:
             try:
                 format = FileFormat.by_ext(ext)  # @ReservedAssignment @IgnorePep8
             except ArcanaFileFormatNotRegisteredError as e:
                 raise ArcanaFileFormatNotRegisteredError(
                     str(e) + ", which is required to identify the "
                     "format of the fileset at '{}'".format(path))
     return cls(name, format, frequency=frequency,
                path=path, **kwargs)
Example #2
File: bids.py  Project: amrka/banana
 def __init__(
         self,
         path,
         type,
         subject_id,
         visit_id,
         repository,  # @ReservedAssignment @IgnorePep8
         modality=None,
         task=None,
         checksums=None):
     Fileset.__init__(self,
                      name=op.basename(path),
                      format=FileFormat.by_ext(split_extension(path)[1]),
                      frequency='per_session',
                      path=path,
                      subject_id=subject_id,
                      visit_id=visit_id,
                      repository=repository,
                      checksums=checksums)
     BaseBidsFileset.__init__(self, type, modality, task)
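
For illustration only, the constructor above might be invoked as follows; the enclosing class name (BidsFileset) is not shown in the excerpt, and every argument value below is hypothetical:

fileset = BidsFileset(
    path='/bids/sub-01/ses-01/anat/sub-01_ses-01_T1w.nii.gz',
    type='T1w',
    subject_id='01',
    visit_id='01',
    repository=repository,  # a previously constructed repository object
    modality='anat')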
Example #3
analyze_format.set_converter(nifti_format, MrtrixConverter)
analyze_format.set_converter(nifti_gz_format, MrtrixConverter)
analyze_format.set_converter(mrtrix_image_format, MrtrixConverter)

mrtrix_image_format.set_converter(dicom_format, MrtrixConverter)
mrtrix_image_format.set_converter(nifti_format, MrtrixConverter)
mrtrix_image_format.set_converter(nifti_gz_format, MrtrixConverter)
mrtrix_image_format.set_converter(analyze_format, MrtrixConverter)

STD_IMAGE_FORMATS = [
    dicom_format, nifti_format, nifti_gz_format, nifti_gz_x_format,
    analyze_format, mrtrix_image_format
]

multi_nifti_gz_format = FileFormat(name='multi_nifti_gz',
                                   extension=None,
                                   directory=True,
                                   within_dir_exts=['.nii.gz'])
multi_nifti_gz_format.set_converter(zip_format, UnzipConverter)
multi_nifti_gz_format.set_converter(targz_format, UnTarGzConverter)

# Tractography formats
mrtrix_track_format = FileFormat(name='mrtrix_track', extension='.tck')

# Tabular formats
rfile_format = FileFormat(name='rdata', extension='.RData')
tsv_format = FileFormat(name='tab_separated', extension='.tsv')
# matlab_format = FileFormat(name='matlab', extension='.mat')
csv_format = FileFormat(name='comma_separated', extension='.csv')
text_matrix_format = FileFormat(name='text_matrix', extension='.mat')

# Diffusion gradient-table data formats
Example #4
        tree = self.repository.tree()
        for subj_id, visits in self.PROJECT_STRUCTURE.items():
            for visit_id in visits:
                session = tree.subject(subj_id).session(visit_id)
                fileset = session.fileset('thousand', study=self.STUDY_NAME)
                self.assertContentsEqual(fileset, targets[subj_id][visit_id],
                                         "{}:{}".format(subj_id, visit_id))
                if subj_id == 'subject1' and visit_id == 'visit3':
                    self.assertNotIn(
                        'ten', [d.name for d in session.filesets],
                        "'ten' should not be generated for "
                        "subject1:visit3 as hundred and thousand are "
                        "already present")


test1_format = FileFormat('test1', extension='.t1')
test2_format = FileFormat('test2',
                          extension='.t2',
                          converters={'test1': IdentityConverter})
test3_format = FileFormat('test3', extension='.t3')

FileFormat.register(test1_format)
FileFormat.register(test2_format)
FileFormat.register(test3_format)
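
# Illustrative addition (not in the original test module): registering
# the formats above is what enables extension-based lookup, as used in
# Example #1; the exact behaviour of by_ext() is assumed here:
#
#     assert FileFormat.by_ext('.t1') == test1_format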


class TestInputValidationStudy(with_metaclass(StudyMetaClass, Study)):

    add_data_specs = [
        AcquiredFilesetSpec('a', (test1_format, test2_format)),
        AcquiredFilesetSpec('b', test3_format),
Example #5
nifti_gz_format.set_converter(nifti_gz_x_format, IdentityConverter)

analyze_format.set_converter(dicom_format, MrtrixConverter)
analyze_format.set_converter(nifti_format, MrtrixConverter)
analyze_format.set_converter(nifti_gz_format, MrtrixConverter)
analyze_format.set_converter(mrtrix_image_format, MrtrixConverter)

mrtrix_image_format.set_converter(dicom_format, MrtrixConverter)
mrtrix_image_format.set_converter(nifti_format, MrtrixConverter)
mrtrix_image_format.set_converter(nifti_gz_format, MrtrixConverter)
mrtrix_image_format.set_converter(analyze_format, MrtrixConverter)

STD_IMAGE_FORMATS = [dicom_format, nifti_format, nifti_gz_format,
                     nifti_gz_x_format, analyze_format, mrtrix_image_format]

multi_nifti_gz_format = FileFormat(name='multi_nifti_gz', extension=None,
                                   directory=True, within_dir_exts=['.nii.gz'])
multi_nifti_gz_format.set_converter(zip_format, UnzipConverter)
multi_nifti_gz_format.set_converter(targz_format, UnTarGzConverter)

# Tractography formats
mrtrix_track_format = FileFormat(name='mrtrix_track', extension='.tck')

# Tabular formats
rfile_format = FileFormat(name='rdata', extension='.RData')
tsv_format = FileFormat(name='tab_separated', extension='.tsv')
# matlab_format = FileFormat(name='matlab', extension='.mat')
csv_format = FileFormat(name='comma_separated', extension='.csv')
text_matrix_format = FileFormat(name='text_matrix', extension='.mat')

# Diffusion gradient-table data formats
fsl_bvecs_format = FileFormat(name='fsl_bvecs', extension='.bvec')
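
As a rough cross-reference with Example #1, and assuming multi_nifti_gz_format is registered like the other formats, a directory containing only gzipped NIfTI files would be matched through its within-directory extensions:

# Hypothetical lookup mirroring FileFormat.by_within_dir_exts() from
# Example #1; the frozenset is what that code computes for a directory
# holding only '.nii.gz' files
fmt = FileFormat.by_within_dir_exts(frozenset(['.nii.gz']))
assert fmt == multi_nifti_gz_format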
Example #6
import os
import os.path as op
from arcana.data.file_format import text_format
from arcana.analysis import Analysis, AnalysisMetaClass
from arcana.data import (
    Fileset, InputFilesetSpec, FilesetSpec, Field)
from arcana.utils.testing import BaseMultiSubjectTestCase
from arcana.repository import Tree
from future.utils import with_metaclass
from arcana.utils.testing import BaseTestCase
from arcana.data.file_format import FileFormat


# A dummy format that contains a header
with_header_format = FileFormat(name='with_header', extension='.whf',
                                aux_files={'header': '.hdr'})


class DummyAnalysis(with_metaclass(AnalysisMetaClass, Analysis)):

    add_data_specs = [
        InputFilesetSpec('source1', text_format),
        InputFilesetSpec('source2', text_format),
        InputFilesetSpec('source3', text_format),
        InputFilesetSpec('source4', text_format,
                         optional=True),
        FilesetSpec('sink1', text_format, 'dummy_pipeline'),
        FilesetSpec('sink3', text_format, 'dummy_pipeline'),
        FilesetSpec('sink4', text_format, 'dummy_pipeline'),
        FilesetSpec('subject_sink', text_format, 'dummy_pipeline',
                    frequency='per_subject'),
Example #7
# Import TestExistingPrereqs study to test it on XNAT
sys.path.insert(0, op.join(op.dirname(__file__), '..', 'study'))
import test_study  # @UnresolvedImport @IgnorePep8

sys.path.pop(0)

# Import test_directory to run TestProjectInfo on XNAT using TestOnXnat mixin
sys.path.insert(0, op.join(op.dirname(__file__)))
import test_directory  # @UnresolvedImport @IgnorePep8

sys.path.pop(0)

logger = logging.getLogger('arcana')

dicom_format = FileFormat(name='dicom',
                          extension=None,
                          directory=True,
                          within_dir_exts=['.dcm'])

try:
    SERVER = os.environ['ARCANA_TEST_XNAT']
except KeyError:
    SERVER = None

SKIP_ARGS = (SERVER is None, "Skipping as ARCANA_TEST_XNAT env var not set")
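
# Illustrative note (not part of the original excerpt): the tuple above
# is shaped to be unpacked into unittest's skipIf decorator, e.g.
#
#     @unittest.skipIf(*SKIP_ARGS)
#     class SomeXnatTest(BaseTestCase):  # hypothetical test class
#         ...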


class DummyStudy(with_metaclass(StudyMetaClass, Study)):

    add_data_specs = [
        AcquiredFilesetSpec('source1', text_format),
        AcquiredFilesetSpec('source2', text_format, optional=True),
Example #8

def dicom_header_loader(path):
    dcm_files = [f for f in os.listdir(path) if f.endswith('.dcm')]
    return pydicom.dcmread(op.join(path, dcm_files[0]))
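
# Illustrative usage (not part of the original module): given a
# directory of '.dcm' files, the loader returns a pydicom Dataset for
# the first file found, e.g.
#
#     hdr = dicom_header_loader('/data/subject1/dicom_series')
#     print(hdr.SeriesDescription)  # any tag present in the header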


# =====================================================================
# All Data Formats
# =====================================================================

# NeuroImaging data formats
dicom_format = FileFormat(name='dicom',
                          extension=None,
                          directory=True,
                          within_dir_exts=['.dcm'],
                          alternate_names=['secondary'],
                          array_loader=dicom_array_loader,
                          header_loader=dicom_header_loader)
nifti_format = FileFormat(name='nifti',
                          extension='.nii',
                          converters={
                              'dicom': Dcm2niixConverter,
                              'analyze': MrtrixConverter,
                              'nifti_gz': MrtrixConverter,
                              'mrtrix': MrtrixConverter
                          },
                          array_loader=nifti_array_loader,
                          header_loader=nifti_header_loader)
nifti_gz_format = FileFormat(name='nifti_gz',
                             extension='.nii.gz',
Example #9
                'visit2': 1110.0,
                'visit3': 1000.0
            }
        }
        tree = self.dataset.tree
        for subj_id, visits in self.PROJECT_STRUCTURE.items():
            for visit_id in visits:
                session = tree.subject(subj_id).session(visit_id)
                fileset = session.fileset('thousand',
                                          from_analysis=self.STUDY_NAME)
                fileset.format = text_format
                self.assertContentsEqual(fileset, targets[subj_id][visit_id],
                                         "{}:{}".format(subj_id, visit_id))


test1_format = FileFormat('test1', extension='.t1')
test2_format = FileFormat('test2', extension='.t2')
test3_format = FileFormat('test3', extension='.t3')

test2_format.set_converter(test1_format, IdentityConverter)


class TestInputValidationAnalysis(with_metaclass(AnalysisMetaClass, Analysis)):

    add_data_specs = [
        InputFilesetSpec('a', (test1_format, test2_format)),
        InputFilesetSpec('b', test3_format),
        FilesetSpec('c', test2_format, 'identity_pipeline'),
        FilesetSpec('d', test3_format, 'identity_pipeline')
    ]