예제 #1
1
파일: test_reading.py 프로젝트: mhe/pynrrd
    def test_read_raw_header(self):
        """Headers given as an iterable of lines parse into a typed dict."""
        # Field lines ("key: value") are converted to their known types.
        field_lines = ('NRRD0005', 'type: float', 'dimension: 3', 'min: 0', 'max: 35.4')
        self.assertEqual(
            {u'type': 'float', u'dimension': 3, u'min': 0, u'max': 35.4},
            nrrd.read_header(field_lines))

        # Key/value lines ("key:=value") keep everything after ':=' verbatim,
        # including any further colons.
        kv_lines = ('NRRD0005', 'my extra info:=my : colon-separated : values')
        np.testing.assert_equal(
            {u'my extra info': u'my : colon-separated : values'},
            nrrd.read_header(kv_lines))
예제 #2
0
    def test_invalid_custom_field(self):
        """A custom field map with an unknown type name raises NRRDError."""
        bogus_map = {'int': 'fake'}
        with self.assertRaisesRegex(nrrd.NRRDError,
                                    'Invalid field type given: fake'):
            nrrd.read_header(ASCII_1D_CUSTOM_FIELDS_FILE_PATH, bogus_map)
예제 #3
0
 def test_invalid_magic_line2(self):
     """NRRD versions above 5 in the magic line are rejected."""
     message = ('Unsupported NRRD file version \\(version: 2000\\). This library '
                'only supports v5 and below.')
     lines = ('NRRD2000', 'my extra info:=my : colon-separated : values')
     with self.assertRaisesRegex(nrrd.NRRDError, message):
         nrrd.read_header(lines)
예제 #4
0
    def test_read_raw_header(self):
        """Field lines and key/value lines both parse; key/value pairs land
        under the 'keyvaluepairs' entry."""
        parsed = nrrd.read_header(('NRRD0005', 'type: float', 'dimension: 3'))
        self.assertEqual(
            {u'type': 'float', u'dimension': 3, u'keyvaluepairs': {}}, parsed)

        parsed = nrrd.read_header(
            ('NRRD0005', 'my extra info:=my : colon-separated : values'))
        np.testing.assert_equal(
            {u'keyvaluepairs': {u'my extra info': u'my : colon-separated : values'}},
            parsed)
예제 #5
0
    def test_read_raw_header(self):
        """Raw header tuples parse into the expected dict structure."""
        parsed = nrrd.read_header(("NRRD0005", "type: float", "dimension: 3"))
        self.assertEqual(
            {u'type': 'float', u'dimension': 3, u'keyvaluepairs': {}}, parsed)

        parsed = nrrd.read_header(
            ("NRRD0005", "my extra info:=my : colon-separated : values"))
        self.assertEqual(
            {u'keyvaluepairs': {u'my extra info': u'my : colon-separated : values'}},
            parsed)
예제 #6
0
    def __init__(self,
                 root: str,
                 transform=None,
                 size: Tuple[Union[int, None], Union[int, None],
                             Union[int, None]] = (512, 512, None),
                 spacing: Union[Tuple[float, float, float], None] = None,
                 ext: str = '.nrrd'):
        '''Index all NRRD scans under `root`, discarding incompatible ones.

        size: any scan not compatible with the specified size will be discarded.
        Insert None for a dim that can be any size.
        spacing: if given, scans whose diagonal voxel spacing differs from it
        by more than 1e-3 (absolute) are also discarded.
        '''
        root_path = Path(root)
        self.transform = transform

        # Parse each header exactly once: it holds both the voxel counts
        # ('sizes') and the spacing matrix ('space directions'), so there is
        # no need to re-read the files when spacing filtering is enabled.
        scans = np.array(list(map(str, root_path.glob(f'**/*{ext}'))))
        headers = [nrrd.read_header(str(scan_path)) for scan_path in scans]
        scan_sizes = np.array([header['sizes'] for header in headers])

        if spacing is not None:
            # Diagonal of 'space directions' is the per-axis voxel spacing.
            spacings = np.array([
                header['space directions'][np.diag_indices(3)]
                for header in headers
            ])
            faulty_spacing = np.where(
                ~np.isclose(spacings, spacing, atol=1e-3).all(axis=1))[0]
        else:
            faulty_spacing = []

        if len(faulty_spacing):
            print(
                f"Found {len(faulty_spacing)} scans where their spacing doesn't match the input spacing {spacing}. Ignoring scans {scans[faulty_spacing]}"
            )

        # Compare only the axes for which a concrete size was requested:
        # None entries in `size` match any extent.
        faulty_sizes = np.unique(
            np.where(~(scan_sizes == size).T[np.where(size)[0]])[1])
        if len(faulty_sizes):
            print(
                f"Found {len(faulty_sizes)} scans where their size doesn't match the input size {size}. Ignoring scans {scans[faulty_sizes]}"
            )

        faulty_idx = np.unique(np.append(faulty_sizes, faulty_spacing))

        self.scans = np.delete(scans, faulty_idx)
        self.scan_sizes = np.delete(scan_sizes, faulty_idx, axis=0)
예제 #7
0
 def test_read_detached_header_only(self):
     """A detached .nhdr header parses and records its data file name."""
     expected_header = self.expected_header
     expected_header[u'data file'] = os.path.basename(RAW_DATA_FILE_PATH)
     with open(RAW_NHDR_FILE_PATH, 'rb') as f:
         header = nrrd.read_header(f)
     self.assertEqual(self.expected_header, header)
예제 #8
0
def convert(data_idx):
    """Resample the NIfTI volume named `data_idx` to the voxel grid recorded
    in the matching NRRD header, then save the result as a NRRD file.

    :param data_idx: basename (no extension) shared by the NIfTI file and the
        per-scan NRRD directory.
    """
    # NOTE(review): get_data() is deprecated in nibabel (get_fdata() is the
    # replacement, but it always returns floats) — confirm dtype expectations
    # before migrating.
    data = nib.load(os.path.join(
        args.nifti_dir,
        data_idx + '.nii.gz'
    )).get_data()
    # Only the header is needed here: its 'sizes' define the resampling target.
    header = nrrd.read_header(os.path.join(
        args.nrrd_dir,
        data_idx,
        'img.nrrd'
    ))
    # Per-axis zoom factor mapping the NIfTI shape onto the NRRD voxel counts.
    zoom = tuple(
        (s1 / s2) for s1, s2
        in zip(header['sizes'], data.shape)
    )
    # order=0 (nearest neighbour) avoids interpolating label values.
    data = ndimage.zoom(
        data,
        zoom,
        order=0,
        mode='nearest'
    )
    assert data.shape == tuple(header['sizes'])
    nrrd.write(
        os.path.join(
            args.output_dir,
            data_idx + '.nrrd'
        ),
        data,
        header=header
    )
예제 #9
0
파일: SliceGenerator.py 프로젝트: mpi2/iev
    def __init__(self, recon):
        """The constructor takes a recon path as an argument, and calls `unu head` on the NRRD file to write the
        header as separate file. The header is then parsed to extract the datatype, dimensions and encoding.
        This size of the header file (.hdr) determines an offset, which allows the remaining raw portion of the file to
        be memory mapped using numpy.

        :param recon: a path to a NRRD file.
        :raises ReconFormatError: if the file cannot be parsed as NRRD or `unu data` fails.
        :raises IOError: if `recon` does not exist on disk.
        """
        super(NrrdSliceGenerator, self).__init__(recon)
        self.ext = 'nrrd'

        if os.path.exists(recon):

            try:

                # Parse the header using nrrd.py
                with open(recon, 'rb') as f:
                    self.header = nrrd.read_header(f)
                    self.dims = self.header['sizes']
                    self.datatype = self.header['type']

                # Pipe the raw data to a separate file using "unu data"
                # (the temp file stays open so numpy can memory-map it below)
                raw_data = tempfile.TemporaryFile(mode='wb+')
                sp.check_call(['unu', 'data', recon], stdout=raw_data)

            except Exception:
                # NOTE(review): the original exception is discarded here;
                # chaining with `raise ... from err` would aid debugging.
                raise ReconFormatError("Error opening file as NRRD")

        else:
            raise IOError("Failed to locate '{}'".format(recon))

        # order='F' interprets the mapped buffer as column-major.
        # NOTE(review): assumes self.header['type'] is a numpy-compatible
        # dtype string — confirm for non-primitive NRRD types.
        self.raw = np.memmap(raw_data, dtype=self.datatype, mode='r', shape=tuple(self.dims), order='F')
예제 #10
0
    def test_custom_fields_with_field_map(self):
        """With a field map, custom fields are parsed into typed values."""
        # Every custom field here is declared with its own name as its type.
        field_types = ['int', 'double', 'string', 'int list', 'double list',
                       'string list', 'int vector', 'double vector',
                       'int matrix', 'double matrix']
        custom_field_map = {name: name for name in field_types}

        expected_header = {
            u'dimension': 1,
            u'encoding': 'ASCII',
            u'kinds': ['domain'],
            u'sizes': [27],
            u'spacings': [1.0458000000000001],
            u'type': 'unsigned char',
            u'int': 24,
            u'double': 25.5566,
            u'string': 'This is a long string of information that is important.',
            u'int list': np.array([1, 2, 3, 4, 5, 100]),
            u'double list': np.array([0.2, 0.502, 0.8]),
            u'string list': ['words', 'are', 'split', 'by', 'space', 'in', 'list'],
            u'int vector': np.array([100, 200, -300]),
            u'double vector': np.array([100.5, 200.3, -300.99]),
            u'int matrix': np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
            u'double matrix': np.array([[1.2, 0.3, 0.0], [0.0, 1.5, 0.0], [0.0, -0.55, 1.6]]),
        }

        header = nrrd.read_header(ASCII_1D_CUSTOM_FIELDS_FILE_PATH, custom_field_map)
        np.testing.assert_equal(header, expected_header)
예제 #11
0
파일: test_reading.py 프로젝트: mhe/pynrrd
    def test_read_header_only_with_filename(self):
        """read_header accepts a filename, not just a file object."""
        # np.testing.assert_equal handles Numpy-array values inside the
        # header that a plain equality check would not compare correctly.
        np.testing.assert_equal(self.expected_header,
                                nrrd.read_header(RAW_NRRD_FILE_PATH))
예제 #12
0
    def test_read_dup_field_error_and_warn(self):
        """Duplicate fields raise by default; with ALLOW_DUPLICATE_FIELD set,
        they only warn and the parsed header is still produced."""
        expected_header = {u'type': 'float', u'dimension': 3}
        header_txt_tuple = ('NRRD0005', 'type: float', 'dimension: 3', 'type: float')

        with self.assertRaisesRegex(nrrd.NRRDError, "Duplicate header field: 'type'"):
            header = nrrd.read_header(header_txt_tuple)

        import warnings
        with warnings.catch_warnings(record=True) as w:
            nrrd.reader.ALLOW_DUPLICATE_FIELD = True
            try:
                header = nrrd.read_header(header_txt_tuple)

                self.assertTrue("Duplicate header field: 'type'" in str(w[0].message))
                self.assertEqual(expected_header, header)
            finally:
                # The original reset a misspelled attribute
                # (_NRRD_ALLOW_DUPLICATE_FIELD), leaving duplicates allowed
                # for every later test; restore the real flag under finally.
                nrrd.reader.ALLOW_DUPLICATE_FIELD = False
예제 #13
0
    def test_read_header_only_with_filename(self):
        """Reading by filename yields the expected header."""
        parsed = nrrd.read_header(RAW_NRRD_FILE_PATH)
        # Use np.testing.assert_equal: header values may be Numpy arrays,
        # which it compares element-wise.
        np.testing.assert_equal(self.expected_header, parsed)
예제 #14
0
    def read_nrrd(
            self,
            member: ZipMember) -> Tuple[np.ndarray, collections.OrderedDict]:
        """
        Access, load and transform NRRD file member of the MRB file into
        a numpy ndarray and corresponding header metadata.
        Accessed member is specified via string or ZipInfo instance.

        Parameters
        ----------

        member : str or zipfile.ZipInfo
            Internal path or ZipInfo instance pointing to the member.

        Returns
        -------

        (data, header) : tuple
            2-Tuple of the raw data and the NRRD file header.
            The raw data is a numpy.ndarray.
        """
        # Wrap the member's bytes in a seekable buffer so pynrrd can parse
        # the header first and then continue with the trailing data section.
        buffer = io.BytesIO(self.read(member))
        header = nrrd.read_header(buffer)
        data = nrrd.read_data(header, fh=buffer)
        return (data, header)
예제 #15
0
파일: test_reading.py 프로젝트: mhe/pynrrd
    def test_custom_fields_with_field_map(self):
        """A field map turns custom fields into properly typed values."""
        # Each custom field's declared type matches its own field name.
        names = ('int', 'double', 'string', 'int list', 'double list',
                 'string list', 'int vector', 'double vector',
                 'int matrix', 'double matrix')
        field_map = dict(zip(names, names))

        expected = {
            u'type': 'unsigned char',
            u'dimension': 1,
            u'encoding': 'ASCII',
            u'kinds': ['domain'],
            u'sizes': [27],
            u'spacings': [1.0458000000000001],
            u'int': 24,
            u'double': 25.5566,
            u'string': 'This is a long string of information that is important.',
            u'int list': np.array([1, 2, 3, 4, 5, 100]),
            u'double list': np.array([0.2, 0.502, 0.8]),
            u'string list': ['words', 'are', 'split', 'by', 'space', 'in', 'list'],
            u'int vector': np.array([100, 200, -300]),
            u'double vector': np.array([100.5, 200.3, -300.99]),
            u'int matrix': np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
            u'double matrix': np.array([[1.2, 0.3, 0.0], [0.0, 1.5, 0.0], [0.0, -0.55, 1.6]]),
        }

        parsed = nrrd.read_header(ASCII_1D_CUSTOM_FIELDS_FILE_PATH, field_map)
        np.testing.assert_equal(parsed, expected)
예제 #16
0
 def test_read_detached_header_only(self):
     """Detached header parsing records the data file's basename."""
     self.expected_header[u'data file'] = os.path.basename(RAW_DATA_FILE_PATH)
     with open(RAW_NHDR_FILE_PATH, 'rb') as f:
         header = nrrd.read_header(f)
     np.testing.assert_equal(self.expected_header, header)
예제 #17
0
파일: test_reading.py 프로젝트: mhe/pynrrd
    def test_read_dup_field_error_and_warn(self):
        """Duplicate header fields raise NRRDError unless the module-level
        ALLOW_DUPLICATE_FIELD flag is set, in which case they only warn."""
        expected_header = {u'type': 'float', u'dimension': 3}
        header_txt_tuple = ('NRRD0005', 'type: float', 'dimension: 3', 'type: float')

        with self.assertRaisesRegex(nrrd.NRRDError, "Duplicate header field: 'type'"):
            header = nrrd.read_header(header_txt_tuple)

        import warnings
        with warnings.catch_warnings(record=True) as w:
            nrrd.reader.ALLOW_DUPLICATE_FIELD = True
            try:
                header = nrrd.read_header(header_txt_tuple)

                self.assertTrue("Duplicate header field: 'type'" in str(w[0].message))
                self.assertEqual(expected_header, header)
            finally:
                # Restore the global flag even if an assertion above fails,
                # so later tests still see duplicate fields rejected.
                nrrd.reader.ALLOW_DUPLICATE_FIELD = False
예제 #18
0
def read_headers(*fn_patterns):
    """Print the NRRD header of every file matching the given glob patterns.

    With no arguments, every *.nrrd file in the current directory is read.
    """
    patterns = list(fn_patterns)
    if not patterns:
        patterns = ['*.nrrd']
        print('Reading headers of all nrrd files in this directory.\n')
    for pattern in patterns:
        for filename in glob.glob(pattern):
            print(filename)
            print(nrrd.read_header(filename), '\n')
예제 #19
0
파일: test_reading.py 프로젝트: mhe/pynrrd
    def test_missing_required_field(self):
        """read_data raises when a required header field has been removed."""
        with open(RAW_NRRD_FILE_PATH, 'rb') as fh:
            header = nrrd.read_header(fh)
            np.testing.assert_equal(self.expected_header, header)

            # Removing 'type' makes the header unusable for reading data.
            del header['type']
            expected_msg = 'Header is missing required field: "type".'
            with self.assertRaisesRegex(nrrd.NRRDError, expected_msg):
                nrrd.read_data(header, fh, RAW_NRRD_FILE_PATH)
예제 #20
0
파일: test_reading.py 프로젝트: mhe/pynrrd
    def test_invalid_encoding(self):
        """read_data rejects headers with an unknown encoding."""
        with open(RAW_NRRD_FILE_PATH, 'rb') as fh:
            header = nrrd.read_header(fh)
            np.testing.assert_equal(self.expected_header, header)

            # Corrupt the encoding before attempting to read the data.
            header['encoding'] = 'fake'
            with self.assertRaisesRegex(nrrd.NRRDError,
                                        'Unsupported encoding: "fake"'):
                nrrd.read_data(header, fh, RAW_NRRD_FILE_PATH)
예제 #21
0
파일: test_reading.py 프로젝트: mhe/pynrrd
    def test_invalid_endian(self):
        """read_data rejects headers whose endian value is unrecognized."""
        with open(RAW_NRRD_FILE_PATH, 'rb') as fh:
            header = nrrd.read_header(fh)
            np.testing.assert_equal(self.expected_header, header)

            header['endian'] = 'fake'  # neither 'little' nor 'big'
            expected_msg = 'Invalid endian value in header: "fake"'
            with self.assertRaisesRegex(nrrd.NRRDError, expected_msg):
                nrrd.read_data(header, fh, RAW_NRRD_FILE_PATH)
예제 #22
0
 def _get_shape(self, nrrd_path):
     """Return the 3-axis voxel shape of the scan at `nrrd_path`, rescaled
     to `self.spacing` when resampling is enabled."""
     header = nrrd.read_header(nrrd_path)
     if not self.resample:
         return tuple(int(header['sizes'][i]) for i in range(3))
     # Physical extent per axis divided by the target spacing gives the
     # resampled voxel count (diagonal entries of 'space directions').
     return tuple(
         int(round(header['sizes'][i] * header['space directions'][i, i]
                   / self.spacing))
         for i in range(3))
예제 #23
0
    def get_label_shape(self, data_idx):
        """Return the (256, 256, z) label-volume shape for `data_idx`.

        All structure masks under `<data_dir>/<data_idx>/structures/*.nrrd`
        must share one voxel size and z-spacing; the z extent is rescaled by
        the z-spacing taken from the NRRD headers.
        """
        file_match = os.path.join(self.data_dir, data_idx, 'structures',
                                  '*.nrrd')
        shape = None
        for f in glob(file_match):
            # Read each header once (the original parsed the same file in
            # both branches); the first file fixes the reference shape and
            # spacing, the rest are checked against it.
            nrrd_info = nrrd.read_header(f)
            if shape is None:
                shape = tuple(nrrd_info['sizes'].tolist())
                z_scale = nrrd_info['space directions'][2, 2]
            else:
                assert shape == tuple(nrrd_info['sizes'].tolist())
                assert z_scale == nrrd_info['space directions'][2, 2]

        # NOTE(review): if no files match, `shape` stays None and the next
        # line raises TypeError — presumably callers guarantee a non-empty
        # structures directory; confirm.
        shape = (256, 256, int(round(shape[2] * z_scale)))
        return shape
예제 #24
0
    def test_big_endian(self):
        """Forcing big-endian in the header byte-swaps the decoded data."""
        with open(RAW_NRRD_FILE_PATH, 'rb') as fh:
            header = nrrd.read_header(fh)
            np.testing.assert_equal(self.expected_header, header)

            header['endian'] = 'big'  # flip the declared byte order
            data = nrrd.read_data(header, fh, RAW_NRRD_FILE_PATH)
            np.testing.assert_equal(data, self.expected_data.byteswap())
예제 #25
0
파일: test_reading.py 프로젝트: mhe/pynrrd
    def test_big_endian(self):
        """Reading with endian forced to 'big' yields byteswapped data."""
        with open(RAW_NRRD_FILE_PATH, 'rb') as fh:
            parsed = nrrd.read_header(fh)
            np.testing.assert_equal(self.expected_header, parsed)

            # Declare the opposite endianness and verify the swap.
            parsed['endian'] = 'big'
            result = nrrd.read_data(parsed, fh, RAW_NRRD_FILE_PATH)
            np.testing.assert_equal(result, self.expected_data.byteswap())
예제 #26
0
    def test_invalid_encoding(self):
        """An unsupported 'encoding' value makes read_data raise."""
        with open(RAW_NRRD_FILE_PATH, 'rb') as fh:
            parsed = nrrd.read_header(fh)
            np.testing.assert_equal(self.expected_header, parsed)

            parsed['encoding'] = 'fake'
            expected_msg = 'Unsupported encoding: "fake"'
            with self.assertRaisesRegex(nrrd.NRRDError, expected_msg):
                nrrd.read_data(parsed, fh, RAW_NRRD_FILE_PATH)
예제 #27
0
    def test_invalid_endian(self):
        """An unrecognized 'endian' value makes read_data raise."""
        with open(RAW_NRRD_FILE_PATH, 'rb') as fh:
            parsed = nrrd.read_header(fh)
            np.testing.assert_equal(self.expected_header, parsed)

            parsed['endian'] = 'fake'
            with self.assertRaisesRegex(
                    nrrd.NRRDError, 'Invalid endian value in header: "fake"'):
                nrrd.read_data(parsed, fh, RAW_NRRD_FILE_PATH)
예제 #28
0
    def get_image_shape(self, data_idx):
        """Return the raw voxel shape of `<data_dir>/<data_idx>/img.nrrd`."""
        img_path = os.path.join(self.data_dir, data_idx, 'img.nrrd')
        info = nrrd.read_header(img_path)

        # FIXME: should eventually report the shape after rescale and crop,
        # e.g. (256, 256, round(z * space directions[2, 2])).
        return tuple(info['sizes'].tolist())
예제 #29
0
파일: test_reading.py 프로젝트: mhe/pynrrd
    def test_missing_endianness(self):
        """Two-byte data cannot be read without an 'endian' field."""
        with open(RAW_NRRD_FILE_PATH, 'rb') as fh:
            header = nrrd.read_header(fh)
            np.testing.assert_equal(self.expected_header, header)

            # The data is short (itemsize = 2), so endianness is required.
            del header['endian']
            expected_msg = 'Header is missing required field: "endian".'
            with self.assertRaisesRegex(nrrd.NRRDError, expected_msg):
                nrrd.read_data(header, fh, RAW_NRRD_FILE_PATH)
예제 #30
0
파일: test_reading.py 프로젝트: mhe/pynrrd
    def test_invalid_lineskip(self):
        """A negative 'line skip' value is rejected by read_data."""
        with open(RAW_NRRD_FILE_PATH, 'rb') as fh:
            header = nrrd.read_header(fh)
            np.testing.assert_equal(self.expected_header, header)

            header['line skip'] = -1
            expected_msg = ('Invalid lineskip, allowed values are greater than or equal to'
                            ' 0')
            with self.assertRaisesRegex(nrrd.NRRDError, expected_msg):
                nrrd.read_data(header, fh, RAW_NRRD_FILE_PATH)
예제 #31
0
파일: test_reading.py 프로젝트: mhe/pynrrd
    def test_detached_header_no_filename(self):
        """Relative detached data paths require a filename for read_data."""
        self.expected_header[u'data file'] = os.path.basename(RAW_DATA_FILE_PATH)

        with open(RAW_NHDR_FILE_PATH, 'rb') as fh:
            header = nrrd.read_header(fh)
            np.testing.assert_equal(self.expected_header, header)

            expected_msg = ('Filename parameter must be specified when a relative data file'
                            ' path is given')
            with self.assertRaisesRegex(nrrd.NRRDError, expected_msg):
                nrrd.read_data(header, fh)
예제 #32
0
파일: test_reading.py 프로젝트: mhe/pynrrd
    def test_wrong_sizes(self):
        """A dimension/sizes mismatch in the header is detected."""
        with open(RAW_NRRD_FILE_PATH, 'rb') as fh:
            header = nrrd.read_header(fh)
            np.testing.assert_equal(self.expected_header, header)

            header['dimension'] = 2  # sizes still has 3 entries
            expected_msg = ('Number of elements in sizes does not match dimension. '
                            'Dimension: 2, len\\(sizes\\): 3')
            with self.assertRaisesRegex(nrrd.NRRDError, expected_msg):
                nrrd.read_data(header, fh, RAW_NRRD_FILE_PATH)
예제 #33
0
    def test_missing_required_field(self):
        """Deleting a mandatory field invalidates the header for read_data."""
        with open(RAW_NRRD_FILE_PATH, 'rb') as fh:
            parsed = nrrd.read_header(fh)
            np.testing.assert_equal(self.expected_header, parsed)

            del parsed['type']
            with self.assertRaisesRegex(
                    nrrd.NRRDError,
                    'Header is missing required field: "type".'):
                nrrd.read_data(parsed, fh, RAW_NRRD_FILE_PATH)
예제 #34
0
    def test_wrong_sizes(self):
        """read_data raises when 'dimension' disagrees with len(sizes)."""
        with open(RAW_NRRD_FILE_PATH, 'rb') as fh:
            parsed = nrrd.read_header(fh)
            np.testing.assert_equal(self.expected_header, parsed)

            parsed['dimension'] = 2  # header still lists three sizes
            with self.assertRaisesRegex(
                    nrrd.NRRDError,
                    'Number of elements in sizes does not match dimension. '
                    'Dimension: 2, len\\(sizes\\): 3'):
                nrrd.read_data(parsed, fh, RAW_NRRD_FILE_PATH)
예제 #35
0
def nrrd_header_read(fname):
    """Parse and return the header of the NRRD file at `fname`.

    Returns None (implicitly) when the file does not exist.
    """
    if not check_file_exists(fname, "nrrd_header_read"):
        return

    import nrrd

    with open(fname, "rb") as fid:
        return nrrd.read_header(fid)
예제 #36
0
def nrrdHeaderRead(fname):
  """Read and return the header of the NRRD file at `fname`.

  Returns None (after printing a warning) when the file does not exist.
  """
  if not os.path.exists(fname):
    # The original used a Python 2 print statement, which is a SyntaxError
    # on Python 3; the function form below works on both versions.
    print("imageStackLoader.nrrdHeaderRead can not find %s" % fname)
    return

  import nrrd
  with open(fname, 'rb') as fid:
    header = nrrd.read_header(fid)

  return header
예제 #37
0
    def test_invalid_lineskip(self):
        """Negative 'line skip' values are invalid for read_data."""
        with open(RAW_NRRD_FILE_PATH, 'rb') as fh:
            parsed = nrrd.read_header(fh)
            np.testing.assert_equal(self.expected_header, parsed)

            parsed['line skip'] = -1
            with self.assertRaisesRegex(
                    nrrd.NRRDError,
                    'Invalid lineskip, allowed values are greater than or equal to'
                    ' 0'):
                nrrd.read_data(parsed, fh, RAW_NRRD_FILE_PATH)
예제 #38
0
    def test_missing_endianness(self):
        """Short (2-byte) data needs an explicit 'endian' field to decode."""
        with open(RAW_NRRD_FILE_PATH, 'rb') as fh:
            parsed = nrrd.read_header(fh)
            np.testing.assert_equal(self.expected_header, parsed)

            del parsed['endian']
            with self.assertRaisesRegex(
                    nrrd.NRRDError,
                    'Header is missing required field: "endian".'):
                nrrd.read_data(parsed, fh, RAW_NRRD_FILE_PATH)
예제 #39
0
    def test_detached_header_no_filename(self):
        """read_data needs a filename when the data file path is relative."""
        self.expected_header[u'data file'] = os.path.basename(
            RAW_DATA_FILE_PATH)

        with open(RAW_NHDR_FILE_PATH, 'rb') as fh:
            parsed = nrrd.read_header(fh)
            np.testing.assert_equal(self.expected_header, parsed)

            # read_data is called without the filename argument on purpose.
            with self.assertRaisesRegex(
                    nrrd.NRRDError,
                    'Filename parameter must be specified when a relative data file'
                    ' path is given'):
                nrrd.read_data(parsed, fh)
예제 #40
0
    def test_read_quoted_string_header_no_quotes(self):
        """Space-separated list fields parse correctly without quoting."""
        lines = [
            'NRRD0004', '# Complete NRRD file format specification at:',
            '# http://teem.sourceforge.net/nrrd/format.html', 'type: double',
            'dimension: 3', 'space dimension: 3', 'sizes: 32 40 16',
            'encoding: raw', 'units: mm cm in', 'space units: mm cm in',
            'labels: X Y f(log(X,10),Y)',
            'space origin: (-0.79487200000000002,-1,-0.38461499999999998)'
        ]
        header = nrrd.read_header(lines)

        # Each whitespace-separated token becomes one list element.
        self.assertEqual(['mm', 'cm', 'in'], header['units'])
        self.assertEqual(['mm', 'cm', 'in'], header['space units'])
        self.assertEqual(['X', 'Y', 'f(log(X,10),Y)'], header['labels'])
예제 #41
0
def _read_mitk_field(archive, fieldpath):
    """Load one label-mask NRRD member of an MITK zip archive.

    mitk stores a zip archive with one nrrd file per label mask; the member
    bytes are copied into a temp file and passed to the internal nrrd io
    routines, which need a seekable file handle.
    """
    raw = archive.read(fieldpath)
    with tempfile.TemporaryFile() as tf:
        tf.write(raw)
        tf.seek(0)
        header = nrrd.read_header(tf)
        # mitk stores column-major 3D image....
        return nrrd.read_data(header, tf, None)
예제 #42
0
    def test_custom_fields_without_field_map(self):
        """Without a field map, custom fields stay as unparsed strings."""
        expected_header = {
            u'dimension': 1,
            u'encoding': 'ASCII',
            u'kinds': ['domain'],
            u'sizes': [27],
            u'spacings': [1.0458000000000001],
            u'type': 'unsigned char',
            # Custom fields are kept verbatim as strings:
            u'int': '24',
            u'double': '25.5566',
            u'string': 'This is a long string of information that is important.',
            u'int list': '1 2 3 4 5 100',
            u'double list': '0.2 0.502 0.8',
            u'string list': 'words are split by space in list',
            u'int vector': '(100, 200, -300)',
            u'double vector': '(100.5,200.3,-300.99)',
            u'int matrix': '(1,0,0) (0,1,0) (0,0,1)',
            u'double matrix': '(1.2,0.3,0) (0,1.5,0) (0,-0.55,1.6)',
        }

        self.assertEqual(nrrd.read_header(ASCII_1D_CUSTOM_FIELDS_FILE_PATH),
                         expected_header)
예제 #43
0
파일: test_reading.py 프로젝트: mhe/pynrrd
    def test_custom_fields_without_field_map(self):
        """Custom fields are returned verbatim when no field map is given."""
        expected = {
            u'type': 'unsigned char',
            u'dimension': 1,
            u'encoding': 'ASCII',
            u'kinds': ['domain'],
            u'sizes': [27],
            u'spacings': [1.0458000000000001],
            # No field map, so every custom value is a raw string:
            u'int': '24',
            u'double': '25.5566',
            u'string': 'This is a long string of information that is important.',
            u'int list': '1 2 3 4 5 100',
            u'double list': '0.2 0.502 0.8',
            u'string list': 'words are split by space in list',
            u'int vector': '(100, 200, -300)',
            u'double vector': '(100.5,200.3,-300.99)',
            u'int matrix': '(1,0,0) (0,1,0) (0,0,1)',
            u'double matrix': '(1.2,0.3,0) (0,1.5,0) (0,-0.55,1.6)',
        }

        parsed = nrrd.read_header(ASCII_1D_CUSTOM_FIELDS_FILE_PATH)
        self.assertEqual(parsed, expected)
예제 #44
0
    def save_prediction(self, data_idx, prediction, output_dir):
        """Write each ROI of an integer label volume as its own NRRD file
        under `<output_dir>/<data_idx>/structures/`, reusing the header of
        the original image so the geometry metadata is preserved.
        """
        assert isinstance(prediction, np.ndarray), type(prediction)
        os.makedirs(os.path.join(output_dir, data_idx, 'structures'),
                    exist_ok=True)
        header = nrrd.read_header(
            os.path.join(self.data_dir, data_idx, 'img.nrrd'))

        # resampling back to the original scan resolution
        if self.resample:
            scale = tuple(header['sizes'][i] / prediction.shape[i]
                          for i in range(3))
            # order=0 (nearest neighbour) keeps label values intact.
            prediction = ndimage.zoom(prediction,
                                      scale,
                                      order=0,
                                      mode='nearest')
            for a, b in zip(prediction.shape, header['sizes']):
                assert a == b, (prediction.shape, header['sizes'])

        for roi, idx in self.roi_map.items():
            # np.int was deprecated in NumPy 1.20 and removed in 1.24; the
            # builtin int is the documented drop-in replacement.
            nrrd.write(os.path.join(output_dir, data_idx, 'structures',
                                    roi + '.nrrd'),
                       (prediction == idx).astype(int),
                       header=header)
예제 #45
0
def prepare(pid):
    """Copy the CT scan for patient `pid` into the raw data directory and
    write one 50%-consensus nodule mask NRRD per annotation cluster.
    """
    os.makedirs(os.path.join(cf.raw_data_dir, pid), exist_ok=True)
    # Get scan from pylidc
    scan = pl.query(pl.Scan).filter(pl.Scan.patient_id == pid).first()
    print("processing:", scan.patient_id)
    vol_shape = scan.to_volume().shape
    # Write scan nrrd
    scan_path = glob.glob(os.path.join(lidc_path, pid, "*", "*", f"{pid}_CT.nrrd"))[0]
    copyfile(scan_path, os.path.join(cf.raw_data_dir, pid, f"{pid}_CT.nrrd"))
    # The header is the same for every nodule of this scan, so parse it once
    # instead of re-reading the NRRD file on every loop iteration.
    header = nrrd.read_header(scan_path)
    # Cluster the annotations for the scan, and grab one.
    nodules = scan.cluster_annotations()
    for nodule_ix, nodule_anns in enumerate(nodules):
        # Build 50% consensus mask
        cmask, cbbox, _ = consensus(nodule_anns, clevel=0.5)
        cmask_full = np.zeros(vol_shape)
        cmask_full[cbbox] = cmask
        # Write consensus to nrrd (swap x/y back to scan orientation)
        cmask_full = np.swapaxes(cmask_full, 0, 1)
        nodule_id = f"{pid}_nod_{nodule_ix}"
        nrrd.write(os.path.join(cf.raw_data_dir, pid, f"{nodule_id}.nrrd"), cmask_full, header=header)
예제 #46
0
파일: test_reading.py 프로젝트: mhe/pynrrd
 def test_invalid_magic_line3(self):
     """A malformed version suffix in the magic line is rejected."""
     lines = ('NRRDnono', 'my extra info:=my : colon-separated : values')
     with self.assertRaisesRegex(nrrd.NRRDError,
                                 'Invalid NRRD magic line: NRRDnono'):
         nrrd.read_header(lines)
예제 #47
0
파일: test_reading.py 프로젝트: mhe/pynrrd
    def test_invalid_custom_field(self):
        """Unknown type names in the custom field map raise NRRDError."""
        with self.assertRaisesRegex(nrrd.NRRDError, 'Invalid field type given: fake'):
            nrrd.read_header(ASCII_1D_CUSTOM_FIELDS_FILE_PATH, {'int': 'fake'})
예제 #48
0
    offsetTuple = (preoffset[0] + offset, preoffset[1], preoffset[2])
    method(handle, seg, offsetTuple, throttle, compress)


def yieldtoDvid(method, handle, header, filehandle, dtype, compress=True):
    """Post each NRRD data chunk from *filehandle* to DVID via *method*.

    Parameters
    ----------
    method : callable
        DVID post callable, invoked as
        ``method(handle, data, (z, row, col), False, compress)``.
    handle : object
        Data-instance handle passed through to ``nrrd.iterate_data`` and
        ``method``.
    header : dict
        Parsed NRRD header describing the data layout.
    filehandle : file object
        Open NRRD file to stream chunks from.
    dtype : numpy dtype
        Target dtype for each chunk before posting.
    compress : bool, optional
        Forwarded to ``method`` (default True).
    """
    # Bug fixes vs. original: the `filehandle` parameter was ignored in favor
    # of a module-level global, and a leftover `pdb.set_trace()` halted every
    # call for interactive debugging.
    for col, row, z, data in nrrd.iterate_data(header, filehandle, handle):
        # DVID expects C-contiguous data of the requested dtype.
        data = np.ascontiguousarray(data, dtype=dtype)
        res = method(handle, data, (z, row, col), False, compress)
        print(res)


with open(args.file, "rb") as inputnrrd:
    header = nrrd.read_header(inputnrrd)
    headerJson = json.dumps(header)

    service = DVIDNodeService(addr, uid)
    kvname = 'headers'
    if service.create_keyvalue(kvname):
        service.put(kvname, args.file, headerJson)
    else:
        service.put(kvname, args.file, headerJson)
        # we should check if the key is there and warn the user to avoid overwriting when not desired

    data = np.ascontiguousarray(nrrd.read_data(header, inputnrrd, args.file))

    reshaper = []

    for dim in data.shape:
예제 #49
0
파일: test_reading.py 프로젝트: mhe/pynrrd
 def test_invalid_magic_line(self):
     """A header whose first line is not an NRRD magic line must be rejected."""
     raw_lines = ('invalid magic line', 'my extra info:=my : colon-separated : values')
     expected_message = 'Invalid NRRD magic line. Is this an NRRD file?'
     with self.assertRaisesRegex(nrrd.NRRDError, expected_message):
         nrrd.read_header(raw_lines)
예제 #50
0
 def test_read_header_only(self):
     """Reading the header from an open file handle yields the expected header."""
     # No `header = None` pre-initialization is needed: a name bound inside a
     # `with` block remains in scope after the block exits.
     with open(RAW_NRRD_FILE_PATH, 'rb') as f:
         header = nrrd.read_header(f)
     self.assertEqual(self.expected_header, header)
예제 #51
0
파일: test_reading.py 프로젝트: mhe/pynrrd
 def test_invalid_magic_line2(self):
     """NRRD file versions above 5 are unsupported and must raise."""
     raw_lines = ('NRRD2000', 'my extra info:=my : colon-separated : values')
     expected_message = ('Unsupported NRRD file version \\(version: 2000\\). This library '
                         'only supports v5 and below.')
     with self.assertRaisesRegex(nrrd.NRRDError, expected_message):
         nrrd.read_header(raw_lines)
  def execute(self):

    header=nrrd.read_header(open(self._ct_file_name))
    
    max_z=header['sizes'][2]
    spacing=[header['space directions'][kk][kk] for kk in xrange(3)]
    
    if len(crop) < 2:
      crop_flag = False
    else:
      if crop[0]<0:
        crop[0]=0
      elif crop[1]>max_z:
        crop_flag = False
 
      crop_flag = True

    ct_file_name = self._ct_file_name
    pl_file_name = self._pl_file_name

    #Data preprocessing (cropping and/or resampling) before region based analysis
    if self._justparticles == False:
      
      #Crop volume if flag is set
      ct_file_name_crop = os.path.join(self._tmp_dir,self._case_id + "_crop.nhdr")
      pl_file_name_crop = os.path.join(self._tmp_dir,self._case_id + "_croppartialLungLabelMap.nhdr")
      if crop_flag == True:
        if self._rate == 1:
          ratestr = "="
        else:
          ratestr = 'x%f' % rate 
        tmpCommand = "unu crop -min 0 0 %(z1)d -max M M %(z2)d -i %(in)s | unu resample -k %(kernel)s -s = = %(ratestr)s -o %(out)s"

        tmpCommandCT = tmpCommand % {'z1':crop[0],'z2':crop[1],'in':ct_file_name,'out':ct_file_name_crop,'ratestr':ratestr,'kernel':"cubic:1,0"}
        tmpCommandPL = tmpCommand % {'z1':crop[0],'z2':crop[1],'in':pl_file_name,'out':pl_file_name_crop,'ratestr':ratestr,'kernel':"cheap"}
        print tmpCommandCT
        print tmpCommandPL
        
        subprocess.call(tmpCommandCT,shell=True)
        subprocess.call(tmpCommandPL,shell=True)
        ct_file_name = ct_file_name_crop
        pl_file_name = pl_file_name_crop

      #Do resampling to isotropic voxel size to compute accurate scale measurements
      ct_file_name_resample = os.path.join(self._tmp_dir,self._case_id + "_resample.nhdr")
      pl_file_name_resample = os.path.join(self._tmp_dir,self._case_id + "_resamplepartialLungLabelMap.nhdr")

      if self._voxel_size>0:
        tmpCommand = "unu resample -k %(kernel)s -s x%(f1)f x%(f2)f x%(f3)f -i %(in)s -o %(out)s"
        tmpCommandCT = tmpCommand % {'in':ct_file_name,'out':ct_file_name_resample,'kernel':"tent",'f1':spacing[0]/self._voxel_size,'f2':spacing[1]/self._voxel_size,'f3':spacing[2]/self._voxel_size}
        tmpCommandPL = tmpCommand % {'in':pl_file_name,'out':pl_file_name_resample,'kernel':"cheap",'f1':spacing[0]/self._voxel_size,'f2':spacing[1]/self._voxel_size,'f3':spacing[2]/self._voxel_size}
        print tmpCommandCT
        print tmpCommandPL
        
        subprocess.call(tmpCommandCT,shell=True)
        subprocess.call(tmpCommandPL,shell=True)
        ct_file_name = ct_file_name_resample
        pl_file_name = pl_file_name_resample
        

    for ii in self._regions:
        rtag = ii.lower()
        tmpDir = os.path.join(self._tmp_dir,rtag)
        if os.path.exists(tmpDir) == False:
            os.mkdir(tmpDir)

        # Define FileNames that will be used
        pl_file_nameRegion= os.path.join(tmpDir,self._case_id + "_" + rtag + "_partialLungLabelMap.nhdr")
        ct_file_nameRegion= os.path.join(tmpDir,self._case_id + "_" + rtag + ".nhdr")
        featureMapFileNameRegion = os.path.join(tmpDir,self._case_id + "_" + rtag + "_featureMap.nhdr")
        maskFileNameRegion = os.path.join(tmpDir,self._case_id + "_" + rtag + "_mask.nhdr")
        particlesFileNameRegion = os.path.join(self._output_prefix+ "_" + rtag + "AirwayParticles.vtk")

        if self._justparticles == False:

            #Create SubVolume Region
            tmpCommand ="CropLung -r %(region)s -m 0 -v -1200 -i %(ct-in)s --plf %(lm-in)s -o %(ct-out)s --opl %(lm-out)s"
            tmpCommand = tmpCommand % {'region':ii,'ct-in':ct_file_name,'lm-in':pl_file_name,'ct-out':ct_file_nameRegion,'lm-out':pl_file_nameRegion}
            tmpCommand = os.path.join(path['CIP_PATH'],tmpCommand)
            print tmpCommand
            subprocess.call( tmpCommand, shell=True )

            #Extract Lung Region + Distance map to peel lung
            tmpCommand ="ExtractChestLabelMap -r %(region)s -i %(lm-in)s -o %(lm-out)s"
            tmpCommand = tmpCommand % {'region':ii,'lm-in':pl_file_nameRegion,'lm-out':pl_file_nameRegion}
            tmpCommand = os.path.join(path['CIP_PATH'],tmpCommand)
            print tmpCommand
            subprocess.call( tmpCommand, shell=True )

            tmpCommand ="unu 2op gt %(lm-in)s 0.5 -o %(lm-out)s"
            tmpCommand = tmpCommand % {'lm-in':pl_file_nameRegion,'lm-out':pl_file_nameRegion}
            print tmpCommand
            subprocess.call( tmpCommand, shell=True )

            #tmpCommand ="ComputeDistanceMap -l %(lm-in)s -d %(distance-map)s -s 2"
            #tmpCommand = tmpCommand % {'lm-in':self._pl_file_nameRegion,'distance-map':self._pl_file_nameRegion}
            #tmpCommand = os.path.join(path['CIP_PATH'],tmpCommand)
            #print tmpCommand
            #subprocess.call( tmpCommand, shell=True )

            tmpCommand ="pxdistancetransform -in %(lm-in)s -out %(distance-map)s"
            tmpCommand = tmpCommand % {'lm-in':pl_file_nameRegion,'distance-map':pl_file_nameRegion}
            tmpCommand = os.path.join(path['ITKTOOLS_PATH'],tmpCommand)
            print tmpCommand
            subprocess.call( tmpCommand, shell=True )

            tmpCommand ="unu 2op lt %(distance-map)s %(distance)f -t short -o %(lm-out)s"
            tmpCommand = tmpCommand % {'distance-map':pl_file_nameRegion,'distance':self._distance_from_wall,'lm-out':pl_file_nameRegion}
            print tmpCommand
            subprocess.call( tmpCommand, shell=True )

            # Compute Frangi
            if self._init_method == 'Frangi':
                tmpCommand = "ComputeFeatureStrength -i %(in)s -m Frangi -f ValleyLine --std %(minscale)f,4,%(maxscale)f --ssm 1 --alpha 0.5 --beta 0.5 --C 50 -o %(out)s"
                tmpCommand = tmpCommand % {'in':ct_file_nameRegion,'out':featureMapFileNameRegion,'minscale':self._min_scale,'maxscale':self._max_scale}
                tmpCommand  = os.path.join(path['CIP_PATH'],tmpCommand)
                print tmpCommand
                subprocess.call( tmpCommand, shell=True )

                #Hist equalization, threshold Feature strength and masking
                tmpCommand = "unu 2op x %(feat)s %(mask)s -t float | unu heq -b 10000 -a 0.5 -s 2 | unu 2op gt - %(airwayness_th)f  | unu convert -t short -o %(out)s"
                tmpCommand = tmpCommand % {'feat':featureMapFileNameRegion,'mask':pl_file_nameRegion,'airwayness_th':self._airwayness_th,'out':maskFileNameRegion}
                print tmpCommand
                subprocess.call( tmpCommand , shell=True)
            elif self._init_method == 'StrainEnergy':
                tmpCommand = "ComputeFeatureStrength -i %(in)s -m StrainEnergy -f RidgeLine --std %(minscale)f,4,%(maxscale)f --ssm 1 --alpha 0.2 --beta 0.1 --kappa 0.5 --nu 0.1 -o %(out)s"
                tmpCommand = tmpCommand % {'in':ct_file_nameRegion,'out':featureMapFileNameRegion,'minscale':self._min_scale,'maxscale':self._max_scale}
                tmpCommand  = os.path.join(path['CIP_PATH'],tmpCommand)
                print tmpCommand
                subprocess.call( tmpCommand, shell=True )
                    
                #Hist equalization, threshold Feature strength and masking
                tmpCommand = "unu 2op x %(feat)s %(mask)s -t float | unu heq -b 10000 -a 0.5 -s 2 | unu 2op gt - %(airwayness_th)f  | unu convert -t short -o %(out)s"
                tmpCommand = tmpCommand % {'feat':featureMapFileNameRegion,'mask':pl_file_nameRegion,'airwayness_th':self._airwayness_th,'out':maskFileNameRegion}
                print tmpCommand
                subprocess.call( tmpCommand , shell=True)
            elif self._init_method == 'Threshold':
                tmpCommand = "unu 2op lt %(in)s %(intensity_th)f | unu 2op x - %(mask)s -o %(out)s"
                tmpCommand = tmpCommand % {'in':ct_file_nameRegion,'mask':pl_file_nameRegion,'intensity_th':self._intensity_th,'out':maskFileNameRegion}
                print tmpCommand
                subprocess.call( tmpCommand , shell=True)
            
            #Binary Thinning
            tmpCommand = "GenerateBinaryThinning3D -i %(in)s -o %(out)s"
            tmpCommand = tmpCommand % {'in':maskFileNameRegion,'out':maskFileNameRegion}
            tmpCommand = os.path.join(path['CIP_PATH'],tmpCommand)
            print tmpCommand
            subprocess.call( tmpCommand, shell=True)

        # Vessel Particles For the Region
        if self._multires==False:
            particlesGenerator = AirwayParticles(ct_file_nameRegion,particlesFileNameRegion,tmpDir,maskFileNameRegion,live_thresh=self._lth,seed_thresh=self._sth,min_intensity=-1100,max_intensity=-500)
            particlesGenerator._clean_tmp_dir=self._clean_cache
            particlesGenerator._interations_phase3 = 70
            particlesGenerator._irad_phase3 = 0.9
            particlesGenerator._srad_phase3 = 4
            particlesGenerator._verbose = 1
            particlesGenerator.execute()
        else:
            particlesGenerator = MultiResAirwayParticles(ct_file_nameRegion,particlesFileNameRegion,tmpDir,maskFileNameRegion,live_thresh=-600,seed_thresh=-600)
            particlesGenerator._clean_tmp_dir=self._clean_cache
            particlesGenerator.execute()