Code Example #1
File: trk.py Project: vanandrew/nibabel
    def __str__(self):
        """ Gets a formatted string of the header of a TRK file.

        Returns
        -------
        info : string
            Header information relevant to the TRK format.
        """
        vars = self.header.copy()
        for attr in dir(Field):
            if attr[0] in string.ascii_uppercase:
                hdr_field = getattr(Field, attr)
                if hdr_field in vars:
                    vars[attr] = vars[hdr_field]

        nb_scalars = self.header[Field.NB_SCALARS_PER_POINT]
        scalar_names = [
            asstr(s) for s in vars['scalar_name'][:nb_scalars] if len(s) > 0
        ]
        vars['scalar_names'] = '\n  '.join(scalar_names)
        nb_properties = self.header[Field.NB_PROPERTIES_PER_STREAMLINE]
        property_names = [
            asstr(s) for s in vars['property_name'][:nb_properties]
            if len(s) > 0
        ]
        vars['property_names'] = "\n  ".join(property_names)
        # Make all byte strings into strings
        # Fixes recursion error on Python 3.3
        vars = dict((k, asstr(v) if hasattr(v, 'decode') else v)
                    for k, v in vars.items())
        return """\
MAGIC NUMBER: {MAGIC_NUMBER}
v.{version}
dim: {DIMENSIONS}
voxel_sizes: {VOXEL_SIZES}
origin: {ORIGIN}
nb_scalars: {NB_SCALARS_PER_POINT}
scalar_names:\n  {scalar_names}
nb_properties: {NB_PROPERTIES_PER_STREAMLINE}
property_names:\n  {property_names}
vox_to_world:\n{VOXEL_TO_RASMM}
voxel_order: {VOXEL_ORDER}
image_orientation_patient: {image_orientation_patient}
pad1: {pad1}
pad2: {pad2}
invert_x: {invert_x}
invert_y: {invert_y}
invert_z: {invert_z}
swap_xy: {swap_xy}
swap_yz: {swap_yz}
swap_zx: {swap_zx}
n_count: {NB_STREAMLINES}
hdr_size: {hdr_size}""".format(**vars)
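
A minimal usage sketch (the path and the use of nibabel's streamlines API are assumptions for illustration): print() on a loaded TrkFile invokes the __str__ method above.

import nibabel as nib

trk = nib.streamlines.load('bundle.trk', lazy_load=True)  # hypothetical path
print(trk)  # calls __str__: magic number, dims, voxel sizes, counts, ...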
Code Example #2
File: test_openers.py Project: yarikoptic/nibabel
def test_iter():
    # Check we can iterate over lines, if the underlying file object allows it
    lines = \
        """On the
blue ridged mountains
of
virginia
""".split('\n')
    with InTemporaryDirectory():
        sobj = BytesIO()
        for input, does_t in (('test.txt', True), ('test.txt.gz', False),
                              ('test.txt.bz2', False), (sobj, True)):
            with Opener(input, 'wb') as fobj:
                for line in lines:
                    fobj.write(asbytes(line + os.linesep))
            with Opener(input, 'rb') as fobj:
                for back_line, line in zip(fobj, lines):
                    assert_equal(asstr(back_line).rstrip(), line)
            if not does_t:
                continue
            with Opener(input, 'rt') as fobj:
                for back_line, line in zip(fobj, lines):
                    assert_equal(back_line.rstrip(), line)
        lobj = Opener(Lunk(''))
        assert_raises(TypeError, list, lobj)
Code Example #3
def decode_value_from_name(encoded_name):
    """ Decodes a value that has been encoded in the last bytes of a string.

    Check :func:`encode_value_in_name` to see how the value has been encoded.

    Parameters
    ----------
    encoded_name : bytes
        Name in which a value has been encoded or not.

    Returns
    -------
    name : bytes
        Name without the encoded value.
    value : int
        Value decoded from the name.
    """
    encoded_name = asstr(encoded_name)
    if len(encoded_name) == 0:
        return encoded_name, 0

    splits = encoded_name.rstrip('\x00').split('\x00')
    name = splits[0]
    value = 1

    if len(splits) == 2:
        value = int(splits[1])  # Decode value.
    elif len(splits) > 2:
        # The remaining bytes should all be \x00 padding; anything else is
        # an error.
        msg = ("Wrong scalar_name or property_name: '{0}'."
               " Unused characters should be \\x00.").format(encoded_name)
        raise HeaderError(msg)

    return name, value
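
A hedged round-trip illustration of the encoding this decoder expects (the value trails the name after a NUL byte, padded with NULs as in TRK scalar_name/property_name entries):

name, value = decode_value_from_name(b'colors\x003\x00\x00')
assert (name, value) == ('colors', 3)
assert decode_value_from_name(b'fa') == ('fa', 1)  # no separator: value is 1
assert decode_value_from_name(b'') == ('', 0)      # empty name decodes to 0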
Code Example #4
File: test_openers.py Project: jhlegarreta/nibabel
def test_iter():
    # Check we can iterate over lines, if the underlying file object allows it
    lines = \
        """On the
blue ridged mountains
of
virginia
""".split('\n')
    with InTemporaryDirectory():
        sobj = BytesIO()
        files_to_test = [('test.txt', True), ('test.txt.gz', False),
                         ('test.txt.bz2', False), (sobj, True)]
        if HAVE_ZSTD:
            files_to_test += [('test.txt.zst', False)]
        for input, does_t in files_to_test:
            with Opener(input, 'wb') as fobj:
                for line in lines:
                    fobj.write(asbytes(line + os.linesep))
            with Opener(input, 'rb') as fobj:
                for back_line, line in zip(fobj, lines):
                    assert asstr(back_line).rstrip() == line
            if not does_t:
                continue
            with Opener(input, 'rt') as fobj:
                for back_line, line in zip(fobj, lines):
                    assert back_line.rstrip() == line
        lobj = Opener(Lunk(''))
        with pytest.raises(TypeError):
            list(lobj)
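
Beyond the test, a minimal standalone sketch of Opener (the file name is hypothetical): compression is chosen transparently from the suffix.

from nibabel.openers import Opener

with Opener('notes.txt.gz', 'wb') as f:  # gzip picked from the '.gz' suffix
    f.write(b'hello\n')
with Opener('notes.txt.gz', 'rb') as f:
    print(f.read())  # b'hello\n'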
Code Example #5
def find_private_section(dcm_data, group_no, creator):
    """ Return start element in group `group_no` given creator name `creator`

    Private attribute tags need to announce where they will go by putting a tag
    in the private group (here `group_no`) between elements 1 and 0xFF.  The
    element number of these tags gives the start of the matching information, in the
    higher tag numbers.

    Parameters
    ----------
    dcm_data : dicom ``dataset``
        Iterating over `dcm_data` produces ``elements`` with attributes
        ``tag``, ``VR``, ``value``
    group_no : int
        Group number in which to search
    creator : str or bytes or regex
        Name of section - e.g. 'SIEMENS CSA HEADER' - or regex to search for
        section name.  Regex used via ``creator.search(element_value)`` where
        ``element_value`` is the value of the data element.

    Returns
    -------
    element_start : int
        Element number at which named section starts
    """
    is_regex = hasattr(creator, 'search')
    if not is_regex:  # assume string / bytes
        creator = asstr(creator)
    for element in dcm_data:  # Assumed ordered by tag (groupno, elno)
        grpno, elno = element.tag.group, element.tag.elem
        if grpno > group_no:
            break
        if grpno != group_no:
            continue
        if elno > 0xFF:
            break
        if element.VR not in ('LO', 'OB'):
            continue
        name = asstr(element.value)
        if is_regex:
            if creator.search(name) is not None:
                return elno * 0x100
        else:  # string - needs exact match
            if creator == name:
                return elno * 0x100
    return None
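
A hedged usage sketch (assumes pydicom and a file carrying Siemens private tags; the path is hypothetical). Since the function returns elno * 0x100, the section's data elements live at (group_no, element_start) through (group_no, element_start + 0xFF).

import pydicom

ds = pydicom.dcmread('siemens_mr.dcm')  # hypothetical path
start = find_private_section(ds, 0x29, 'SIEMENS CSA HEADER')
# e.g. start == 0x1000 when the private creator sits at element 0x10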
Code Example #6
File: tck.py Project: wannabe2020/nibabel
    def _write_header(fileobj, header):
        """ Write TCK header to file-like object.

        Parameters
        ----------
        fileobj : file-like object
            An open file-like object in binary mode pointing to TCK file (and
            ready to write from the beginning of the TCK header).
        header : dict
            Metadata associated with this tractogram file, serialized as the
            TCK header.
        """
        # Fields to exclude
        exclude = [
            Field.MAGIC_NUMBER,  # Handled separately.
            Field.NB_STREAMLINES,  # Handled separately.
            Field.ENDIANNESS,  # Handled separately.
            Field.VOXEL_TO_RASMM,  # Streamlines are always in RAS+ mm.
            "count",
            "datatype",
            "file"
        ]  # Fields being replaced.

        lines = []
        lines.append(asstr(header[Field.MAGIC_NUMBER]))
        lines.append("count: {0:010}".format(header[Field.NB_STREAMLINES]))
        lines.append("datatype: Float32LE")  # Always Float32LE.
        lines.extend([
            "{0}: {1}".format(k, v) for k, v in header.items()
            if k not in exclude and not k.startswith("_")
        ])
        lines.append("file: . ")  # Manually add this last field.
        out = "\n".join(lines)

        # Check the header is well formatted.
        if out.count("\n") > len(lines) - 1:  # \n only allowed between lines.
            msg = "Key-value pairs cannot contain '\\n':\n{}".format(out)
            raise HeaderError(msg)

        if out.count(":") > len(lines) - 1:
            # : only one per line (except the last one which contains END).
            msg = "Key-value pairs cannot contain ':':\n{}".format(out)
            raise HeaderError(msg)

        # Write header to file.
        fileobj.write(asbytes(out))

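        # The "+ 5" below covers the "\n" after the offset digits and "END\n".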
        hdr_len_no_offset = len(out) + 5
        # Need to add number of bytes to store offset as decimal string. We
        # start with estimate without string, then update if the
        # offset-as-decimal-string got longer after adding length of the
        # offset string.
        new_offset = -1
        old_offset = hdr_len_no_offset
        while new_offset != old_offset:
            old_offset = new_offset
            new_offset = hdr_len_no_offset + len(str(old_offset))

        fileobj.write(asbytes(str(new_offset) + "\n"))
        fileobj.write(asbytes("END\n"))
Code Example #7
File: netcdf.py Project: yarikoptic/nibabel
    def _read_att_array(self):
        header = self.fp.read(4)
        if header not in [ZERO, NC_ATTRIBUTE]:
            raise ValueError("Unexpected header.")
        count = self._unpack_int()

        attributes = {}
        for attr in range(count):
            name = asstr(self._unpack_string())
            attributes[name] = self._read_values()
        return attributes
Code Example #8
File: netcdf.py Project: yarikoptic/nibabel
    def _read_dim_array(self):
        header = self.fp.read(4)
        if header not in [ZERO, NC_DIMENSION]:
            raise ValueError("Unexpected header.")
        count = self._unpack_int()

        for dim in range(count):
            name = asstr(self._unpack_string())
            length = self._unpack_int() or None  # None for record dimension
            self.dimensions[name] = length
            self._dims.append(name)  # preserve order
Code Example #9
    def gets(self, addr, length):
        """Get string of bytes from given address. If any entries are blank
        from addr through addr+length, a NotEnoughDataError exception will
        be raised. Padding is not used."""
        a = array('B', asbytes('\0' * length))
        try:
            for i in range(length):
                a[i] = self._buf[addr + i]
        except KeyError:
            raise NotEnoughDataError(address=addr, length=length)
        return asstr(a.tostring())
Code Example #10
    def _from_bytes(bytes):
        """Takes a list of bytes, computes the checksum, and outputs the entire
        record as a string. bytes should be the hex record without the colon
        or final checksum.

        @param  bytes   list of byte values so far to pack into record.
        @return         String representation of one HEX record
        """
        assert len(bytes) >= 4
        # calculate checksum
        s = (-sum(bytes)) & 0x0FF
        bin = array('B', bytes + [s])
        return ':' + asstr(hexlify(bin.tostring())).upper()
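
A worked example of the checksum rule used above: record type 00 with data bytes 01 02 03 at offset 0x0030 sums to 0x39, so the checksum is (-0x39) & 0xFF == 0xC7.

print(_from_bytes([0x03, 0x00, 0x30, 0x00, 0x01, 0x02, 0x03]))
# -> ':03003000010203C7'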
Code Example #11
File: tck.py Project: wannabe2020/nibabel
    def is_correct_format(cls, fileobj):
        """ Check if the file is in TCK format.

        Parameters
        ----------
        fileobj : string or file-like object
            If string, a filename; otherwise an open file-like object in
            binary mode pointing to TCK file (and ready to read from the
            beginning of the TCK header). Note that calling this function
            does not change the file position.

        Returns
        -------
        is_correct_format : {True, False}
            Returns True if `fileobj` is compatible with TCK format,
            otherwise returns False.
        """
        with Opener(fileobj) as f:
            magic_number = asstr(f.fobj.readline())
            f.seek(-len(magic_number), os.SEEK_CUR)

        return magic_number.strip() == cls.MAGIC_NUMBER
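
A hedged usage sketch (the path is hypothetical; cls.MAGIC_NUMBER is the 'mrtrix tracks' signature in nibabel's TckFile):

from nibabel.streamlines.tck import TckFile

if TckFile.is_correct_format('streamlines.tck'):  # hypothetical path
    tck = TckFile.load('streamlines.tck')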
Code Example #12
File: netcdf.py Project: yarikoptic/nibabel
    def _read_var(self):
        name = asstr(self._unpack_string())
        dimensions = []
        shape = []
        dims = self._unpack_int()

        for i in range(dims):
            dimid = self._unpack_int()
            dimname = self._dims[dimid]
            dimensions.append(dimname)
            dim = self.dimensions[dimname]
            shape.append(dim)
        dimensions = tuple(dimensions)
        shape = tuple(shape)

        attributes = self._read_att_array()
        nc_type = self.fp.read(4)
        vsize = self._unpack_int()
        begin = [self._unpack_int, self._unpack_int64][self.version_byte - 1]()

        typecode, size = TYPEMAP[nc_type]
        dtype_ = '>%s' % typecode

        return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize
Code Example #13
File: tck.py Project: wannabe2020/nibabel
    def _read_header(fileobj):
        """ Reads a TCK header from a file.

        Parameters
        ----------
        fileobj : string or file-like object
            If string, a filename; otherwise an open file-like object in
            binary mode pointing to TCK file (and ready to read from the
            beginning of the TCK header). Note that calling this function
            does not change the file position.

        Returns
        -------
        header : dict
            Metadata associated with this tractogram file.
        """
        # Record start position if this is a file-like object
        start_position = fileobj.tell() if hasattr(fileobj, 'tell') else None

        with Opener(fileobj) as f:
            # Read magic number
            magic_number = f.fobj.readline().strip()

            # Read all key-value pairs contained in the header.
            buf = asstr(f.fobj.readline())
            while not buf.rstrip().endswith("END"):
                buf += asstr(f.fobj.readline())

            offset_data = f.tell()

        # Build header dictionary from the buffer.
        hdr = dict(item.split(': ') for item in buf.rstrip().split('\n')[:-1])
        hdr[Field.MAGIC_NUMBER] = magic_number

        # Check integrity of TCK header.
        if 'datatype' not in hdr:
            msg = ("Missing 'datatype' attribute in TCK header."
                   " Assuming it is Float32LE.")
            warnings.warn(msg, HeaderWarning)
            hdr['datatype'] = "Float32LE"

        if not hdr['datatype'].startswith('Float32'):
            msg = ("TCK only supports float32 dtype but 'datatype: {}' was"
                   " specified in the header.").format(hdr['datatype'])
            raise HeaderError(msg)

        if 'file' not in hdr:
            msg = ("Missing 'file' attribute in TCK header."
                   " Will try to guess it.")
            warnings.warn(msg, HeaderWarning)
            hdr['file'] = '. {}'.format(offset_data)

        if hdr['file'].split()[0] != '.':
            msg = ("TCK only supports single-file - in other words the"
                   " filename part must be specified as '.' but '{}' was"
                   " specified.").format(hdr['file'].split()[0])
            raise HeaderError("Missing 'file' attribute in TCK header.")

        # Set endianness and _dtype attributes in the header.
        hdr[Field.ENDIANNESS] = '>' if hdr['datatype'].endswith('BE') else '<'

        hdr['_dtype'] = np.dtype(hdr[Field.ENDIANNESS] + 'f4')

        # Keep the file position where the data begin.
        hdr['_offset_data'] = int(hdr['file'].split()[1])

        # Set the file position where it was, if it was previously open.
        if start_position is not None:
            fileobj.seek(start_position, os.SEEK_SET)

        return hdr
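
A hedged illustration of a minimal header this parser accepts; 67 is the total byte length of the five lines, so 'file: . 67' points just past 'END\n'.

from io import BytesIO
from nibabel.streamlines.tck import TckFile

buf = BytesIO(b'mrtrix tracks\n'
              b'datatype: Float32LE\n'
              b'count: 0000000001\n'
              b'file: . 67\n'
              b'END\n')
hdr = TckFile._read_header(buf)
# hdr['datatype'] == 'Float32LE'; hdr['_offset_data'] == 67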
Code Example #14
    def write_hex_file(self, f, write_start_addr=True):
        """Write data to file f in HEX format.

        @param  f                   filename or file-like object for writing
        @param  write_start_addr    enable or disable writing start address
                                    record to file (enabled by default).
                                    If there is no start address in obj, nothing
                                    will be written regardless of this setting.
        """
        fwrite = getattr(f, "write", None)
        if fwrite:
            fobj = f
            fclose = None
        else:
            fobj = open(f, 'w')
            fwrite = fobj.write
            fclose = fobj.close

        # Translation table for uppercasing hex ascii string.
        # timeit shows that using hexstr.translate(table)
        # is faster than hexstr.upper():
        # 0.452ms vs. 0.652ms (translate vs. upper)
        if sys.version_info[0] >= 3:
            table = bytes(range(256)).upper()
        else:
            table = ''.join(chr(i).upper() for i in range(256))

        # start address record if any
        if self.start_addr and write_start_addr:
            keys = sorted(self.start_addr.keys())
            bin = array('B', asbytes('\0' * 9))
            if keys == ['CS', 'IP']:
                # Start Segment Address Record
                bin[0] = 4  # reclen
                bin[1] = 0  # offset msb
                bin[2] = 0  # offset lsb
                bin[3] = 3  # rectyp
                cs = self.start_addr['CS']
                bin[4] = (cs >> 8) & 0x0FF
                bin[5] = cs & 0x0FF
                ip = self.start_addr['IP']
                bin[6] = (ip >> 8) & 0x0FF
                bin[7] = ip & 0x0FF
                bin[8] = (-sum(bin)) & 0x0FF  # chksum
                fwrite(':' + asstr(hexlify(bin.tostring()).translate(table)) +
                       '\n')
            elif keys == ['EIP']:
                # Start Linear Address Record
                bin[0] = 4  # reclen
                bin[1] = 0  # offset msb
                bin[2] = 0  # offset lsb
                bin[3] = 5  # rectyp
                eip = self.start_addr['EIP']
                bin[4] = (eip >> 24) & 0x0FF
                bin[5] = (eip >> 16) & 0x0FF
                bin[6] = (eip >> 8) & 0x0FF
                bin[7] = eip & 0x0FF
                bin[8] = (-sum(bin)) & 0x0FF  # chksum
                fwrite(':' + asstr(hexlify(bin.tostring()).translate(table)) +
                       '\n')
            else:
                if fclose:
                    fclose()
                raise InvalidStartAddressValueError(start_addr=self.start_addr)

        # data
        addresses = sorted(self._buf.keys())
        addr_len = len(addresses)
        if addr_len:
            minaddr = addresses[0]
            maxaddr = addresses[-1]

            if maxaddr > 65535:
                need_offset_record = True
            else:
                need_offset_record = False
            high_ofs = 0

            cur_addr = minaddr
            cur_ix = 0

            while cur_addr <= maxaddr:
                if need_offset_record:
                    bin = array('B', asbytes('\0' * 7))
                    bin[0] = 2  # reclen
                    bin[1] = 0  # offset msb
                    bin[2] = 0  # offset lsb
                    bin[3] = 4  # rectyp
                    high_ofs = int(cur_addr >> 16)
                    b = divmod(high_ofs, 256)
                    bin[4] = b[0]  # msb of high_ofs
                    bin[5] = b[1]  # lsb of high_ofs
                    bin[6] = (-sum(bin)) & 0x0FF  # chksum
                    fwrite(':' +
                           asstr(hexlify(bin.tostring()).translate(table)) +
                           '\n')

                while True:
                    # produce one record
                    low_addr = cur_addr & 0x0FFFF
                    # chain_len off by 1
                    chain_len = min(15, 65535 - low_addr, maxaddr - cur_addr)

                    # search continuous chain
                    stop_addr = cur_addr + chain_len
                    if chain_len:
                        ix = bisect_right(
                            addresses, stop_addr, cur_ix,
                            min(cur_ix + chain_len + 1, addr_len))
                        chain_len = ix - cur_ix  # real chain_len
                        # there could be small holes in the chain
                        # but we will catch them by try-except later
                        # so for big continuous files we will work
                        # at maximum possible speed
                    else:
                        chain_len = 1  # real chain_len

                    bin = array('B', asbytes('\0' * (5 + chain_len)))
                    b = divmod(low_addr, 256)
                    bin[1] = b[0]  # msb of low_addr
                    bin[2] = b[1]  # lsb of low_addr
                    bin[3] = 0  # rectype
                    try:  # if there is small holes we'll catch them
                        for i in range(chain_len):
                            bin[4 + i] = self._buf[cur_addr + i]
                    except KeyError:
                        # we catch a hole so we should shrink the chain
                        chain_len = i
                        bin = bin[:5 + i]
                    bin[0] = chain_len
                    bin[4 + chain_len] = (-sum(bin)) & 0x0FF  # chksum
                    fwrite(':' +
                           asstr(hexlify(bin.tostring()).translate(table)) +
                           '\n')

                    # adjust cur_addr/cur_ix
                    cur_ix += chain_len
                    if cur_ix < addr_len:
                        cur_addr = addresses[cur_ix]
                    else:
                        cur_addr = maxaddr + 1
                        break
                    high_addr = int(cur_addr >> 16)
                    if high_addr > high_ofs:
                        break

        # end-of-file record
        fwrite(":00000001FF\n")
        if fclose:
            fclose()
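
A hedged illustration of the Extended Linear Address record this writer emits for data above 0xFFFF (here the 64 KiB page high_ofs = 0x0001):

from array import array
from binascii import hexlify

rec = array('B', [0x02, 0x00, 0x00, 0x04, 0x00, 0x01])
rec.append((-sum(rec)) & 0xFF)  # checksum: (-0x07) & 0xFF == 0xF9
print(':' + hexlify(rec.tobytes()).decode().upper())  # ':020000040001F9'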
Code Example #15
    def _tobinstr_really(self, start, end, pad, size):
        return asstr(self._tobinarray_really(start, end, pad, size).tostring())
Code Example #16
def _check_hdr_points_space(hdr, points_space):
    """ Check header `hdr` for consistency with transform `points_space`

    Parameters
    ----------
    hdr : ndarray
        trackvis header as structured ndarray
    points_space : {None, 'voxmm', 'voxel', 'rasmm'}
        nature of transform that we will (elsewhere) apply to streamlines
        paired with `hdr`.  None or 'voxmm' means pass through with no further
        checks.  'voxel' checks for all ``hdr['voxel_size']`` being <= zero
        (error) or any being zero (warning).  'rasmm' checks for presence of
        non-zeros affine in ``hdr['vox_to_ras']``, and that the affine therein
        corresponds to ``hdr['voxel_order']`` and ``hdr['voxel_size']`` - and
        raises an error otherwise.

    Returns
    -------
    None

    """
    if points_space is None or points_space == 'voxmm':
        return
    if points_space == 'voxel':
        voxel_size = hdr['voxel_size']
        if np.any(voxel_size < 0):
            raise HeaderError('Negative voxel sizes %s not valid for voxel - '
                              'voxmm conversion' % voxel_size)
        if np.all(voxel_size == 0):
            raise HeaderError('Cannot convert between voxels and voxmm when '
                              '"voxel_sizes" all 0')
        if np.any(voxel_size == 0):
            warnings.warn('zero values in "voxel_size" - %s' % voxel_size)
        return
    elif points_space == 'rasmm':
        try:
            affine = hdr['vox_to_ras']
        except ValueError:
            raise HeaderError('Need "vox_to_ras" field to get '
                              'affine with which to convert points; '
                              'this is present for headers >= version 2')
        if np.all(affine == 0) or affine[3, 3] == 0:
            raise HeaderError('Need non-zero affine to convert between '
                              'rasmm points and voxmm')
        zooms = hdr['voxel_size']
        aff_zooms = np.sqrt(np.sum(affine[:3, :3]**2, axis=0))
        if not np.allclose(aff_zooms, zooms):
            raise HeaderError('Affine zooms %s differ from voxel_size '
                              'field value %s' % (aff_zooms, zooms))
        aff_order = ''.join(aff2axcodes(affine))
        voxel_order = asstr(hdr['voxel_order'].item())
        if voxel_order == '':
            voxel_order = 'LPS'  # trackvis default
        if voxel_order != aff_order:
            raise HeaderError('Affine implies voxel_order %s but '
                              'header voxel_order is %s' %
                              (aff_order, voxel_order))
    else:
        raise ValueError('Painfully confusing "points_space" value of "%s"' %
                         points_space)
Code Example #17
def aff_from_hdr(trk_hdr, atleast_v2=True):
    ''' Return voxel to mm affine from trackvis header

    Affine is mapping from voxel space to Nifti (RAS) output coordinate
    system convention; x: Left -> Right, y: Posterior -> Anterior, z:
    Inferior -> Superior.

    Parameters
    ----------
    trk_hdr : mapping
       Mapping with trackvis header keys ``version``. If ``version == 2``, we
       also expect ``vox_to_ras``.
    atleast_v2 : bool, optional
        If True (the default), require that there is a valid 'vox_to_ras'
        affine and raise a HeaderError otherwise.  If False, look for a valid
        'vox_to_ras' affine, but fall back to a best guess from the version 1
        fields otherwise.

    Returns
    -------
    aff : (4,4) array
       affine giving mapping from voxel coordinates (affine applied on
       the left to points on the right) to millimeter coordinates in the
       RAS coordinate system

    Notes
    -----
    Our initial idea was to try and work round the deficiencies of the version
    1 format by using the DICOM orientation fields to store the affine.  This
    proved difficult in practice because trackvis (the application) doesn't
    allow negative voxel sizes (needed for recording axis flips) and sets the
    origin field to 0. In future, we'll raise an error rather than try to
    estimate the affine from version 1 fields.
    '''
    if trk_hdr['version'] == 2:
        aff = trk_hdr['vox_to_ras']
        if aff[3, 3] != 0:
            return aff
        if atleast_v2:
            raise HeaderError('Requiring version 2 affine and this affine is '
                              'not valid')
    # Now we are in the dark world of the DICOM fields.  We might have made
    # this one ourselves, in which case the origin might be set, and it might
    # have negative voxel sizes
    aff = np.eye(4)
    # The IOP field has only two of the three columns we need
    iop = trk_hdr['image_orientation_patient'].reshape(2, 3).T
    # R might be a rotation matrix (and so completed by the cross product of
    # the first two columns), or it might be an orthogonal matrix with negative
    # determinant. We try pure rotation first
    R = np.c_[iop, np.cross(*iop.T)]
    vox = trk_hdr['voxel_size']
    aff[:3, :3] = R * vox
    aff[:3, 3] = trk_hdr['origin']
    aff = np.dot(DPCS_TO_TAL, aff)
    # Next we check against the 'voxel_order' field if present and not empty.
    try:
        voxel_order = asstr(trk_hdr['voxel_order'].item())
    except (KeyError, ValueError):
        voxel_order = ''
    if voxel_order == '':
        return aff
    # If the voxel_order conflicts with the affine by one flip, this may have
    # been a negative determinant affine saved with positive voxel sizes
    exp_order = ''.join(aff2axcodes(aff))
    if voxel_order != exp_order:
        # If first pass doesn't match, try flipping the (estimated) third
        # column
        aff[:, 2] *= -1
        exp_order = ''.join(aff2axcodes(aff))
        if voxel_order != exp_order:
            raise HeaderError('Estimate of header affine does not match '
                              'voxel_order of %s' % exp_order)
    return aff
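
A hedged usage sketch with nibabel's (since-deprecated) trackvis module; the path is hypothetical.

from nibabel import trackvis

streams, hdr = trackvis.read('bundle.trk')  # hypothetical path
aff = aff_from_hdr(hdr, atleast_v2=True)    # (4, 4) voxel -> RAS mm affine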