Example #1
0
def read(fileobj, as_generator=False, points_space=None):
    ''' Read trackvis file, return streamlines, header

    Parameters
    ----------
    fileobj : string or file-like object
       If string, a filename; otherwise an open file-like object
       pointing to trackvis file (and ready to read from the beginning
       of the trackvis header data)
    as_generator : bool, optional
       Whether to return tracks as sequence (False, default) or as a generator
       (True).
    points_space : {None, 'voxel', 'rasmm'}, optional
        The coordinates in which you want the points in the *output* streamlines
        expressed.  If None, then return the points exactly as they are stored
        in the trackvis file. The points will probably be in trackviz voxmm
        space - see Notes for ``write`` function.  If 'voxel', we convert the
        points to voxel space simply by dividing by the recorded voxel size.  If
        'rasmm' we'll convert the points to RAS mm space (real space). For
        'rasmm' we check if the affine is set and matches the voxel sizes and
        voxel order.

    Returns
    -------
    streamlines : sequence or generator
       Returns sequence if `as_generator` is False, generator if True.  Value is
       sequence or generator of 3 element sequences with elements:

       #. points : ndarray shape (N,3)
          where N is the number of points
       #. scalars : None or ndarray shape (N, M)
          where M is the number of scalars per point
       #. properties : None or ndarray shape (P,)
          where P is the number of properties

    hdr : structured array
       structured array with trackvis header fields

    Notes
    -----
    The endianness of the input data can be deduced from the endianness
    of the returned `hdr` or `streamlines`

    Points are in trackvis *voxel mm*.  Each track has N points, each with 3
    coordinates, ``x, y, z``, where ``x`` is the floating point voxel coordinate
    along the first image axis, multiplied by the voxel size for that axis.
    '''
    fileobj = allopen(fileobj, mode='rb')
    hdr_str = fileobj.read(header_2_dtype.itemsize)
    # Try defaulting to version 2 format
    hdr = np.ndarray(shape=(),
                     dtype=header_2_dtype,
                     buffer=hdr_str)
    # ``.item()`` replaces ``np.asscalar`` (deprecated numpy 1.16, removed in
    # 1.23); ``asscalar`` was defined as exactly ``a.item()``.
    if hdr['id_string'].item()[:5] != asbytes('TRACK'):
        raise HeaderError('Expecting TRACK as first '
                          '5 characters of id_string')
    if hdr['hdr_size'] == 1000:
        endianness = native_code
    else:
        # hdr_size is wrong in native order; try the byte-swapped reading
        hdr = hdr.newbyteorder()
        if hdr['hdr_size'] != 1000:
            raise HeaderError('Invalid hdr_size of %s'
                              % hdr['hdr_size'])
        endianness = swapped_code
    # Check version and adapt structure accordingly
    version = hdr['version']
    if version not in (1, 2):
        raise HeaderError('Reader only supports versions 1 and 2')
    if version == 1:  # make a new header with the same data
        hdr = np.ndarray(shape=(),
                         dtype=header_1_dtype,
                         buffer=hdr_str)
        if endianness == swapped_code:
            hdr = hdr.newbyteorder()
    # Do points_space checks
    _check_hdr_points_space(hdr, points_space)
    # Prepare transforms for later use
    if points_space == 'voxel':
        zooms = hdr['voxel_size'][None, :].astype('f4')
    elif points_space == 'rasmm':
        zooms = hdr['voxel_size']
        affine = hdr['vox_to_ras']
        tv2vx = np.diag((1. / zooms).tolist() + [1])
        tv2mm = np.dot(affine, tv2vx).astype('f4')
    n_s = hdr['n_scalars']
    n_p = hdr['n_properties']
    f4dt = np.dtype(endianness + 'f4')
    pt_cols = 3 + n_s
    pt_size = int(f4dt.itemsize * pt_cols)
    ps_size = int(f4dt.itemsize * n_p)
    i_fmt = endianness + 'i'
    stream_count = hdr['n_count']
    if stream_count < 0:
        raise HeaderError('Unexpected negative n_count')

    def track_gen():
        # Generator yielding (points, scalars, properties) per track.
        n_streams = 0
        # For case where there are no scalars or no properties
        scalars = None
        ps = None
        while True:
            n_str = fileobj.read(4)
            if len(n_str) < 4:
                if stream_count:
                    raise HeaderError(
                        'Expecting %s points, found only %s' % (
                            stream_count, n_streams))
                break
            n_pts = struct.unpack(i_fmt, n_str)[0]
            pts_str = fileobj.read(n_pts * pt_size)
            pts = np.ndarray(shape=(n_pts, pt_cols),
                             dtype=f4dt,
                             buffer=pts_str)
            if n_p:
                ps_str = fileobj.read(ps_size)
                ps = np.ndarray(shape=(n_p,),
                                dtype=f4dt,
                                buffer=ps_str)
            xyz = pts[:, :3]
            if points_space == 'voxel':
                xyz = xyz / zooms
            elif points_space == 'rasmm':
                xyz = apply_affine(tv2mm, pts)
            if n_s:
                scalars = pts[:, 3:]
            yield (xyz, scalars, ps)
            n_streams += 1
            # deliberately misses case where stream_count is 0
            if n_streams == stream_count:
                # PEP 479: ``raise StopIteration`` inside a generator becomes
                # a RuntimeError from Python 3.7; ``return`` ends it cleanly.
                return

    streamlines = track_gen()
    if not as_generator:
        streamlines = list(streamlines)
    return streamlines, hdr
Example #2
0
 def get_fileobj(nibimage):
     """Return an open file-like object onto *nibimage*'s stored filename."""
     return allopen(nibimage.get_filename())
Example #3
0
def write(fileobj, streamlines,  hdr_mapping=None, endianness=None,
          points_space=None):
    ''' Write header and `streamlines` to trackvis file `fileobj`

    The parameters from the streamlines override conflicting parameters
    in the `hdr_mapping` information.  In particular, the number of
    streamlines, the number of scalars, and the number of properties are
    written according to `streamlines` rather than `hdr_mapping`.

    Parameters
    ----------
    fileobj : filename or file-like
       If filename, open file as 'wb', otherwise `fileobj` should be an
       open file-like object, with a ``write`` method.
    streamlines : iterable
       iterable returning 3 element sequences with elements:

       #. points : ndarray shape (N,3)
          where N is the number of points
       #. scalars : None or ndarray shape (N, M)
          where M is the number of scalars per point
       #. properties : None or ndarray shape (P,)
          where P is the number of properties

       If `streamlines` has a ``len`` (for example, it is a list or a tuple),
       then we can write the number of streamlines into the header.  Otherwise
       we write 0 for the number of streamlines (a valid trackvis header) and
       write streamlines into the file until the iterable is exhausted.
       M - the number of scalars - has to be the same for each streamline in
       `streamlines`.  Similarly for P. See `points_space` and Notes for more
       detail on the coordinate system for ``points`` above.
    hdr_mapping : None, ndarray or mapping, optional
       Information for filling header fields.  Can be something
       dict-like (implementing ``items``) or a structured numpy array
    endianness : {None, '<', '>'}, optional
       Endianness of file to be written.  '<' is little-endian, '>' is
       big-endian.  None (the default) is to use the endianness of the
       `streamlines` data.
    points_space : {None, 'voxel', 'rasmm'}, optional
        The coordinates in which the points in the input streamlines are
        expressed.  If None, then assume the points are as you want them
        (probably trackviz voxmm space - see Notes).  If 'voxel', the points are
        in voxel space, and we will transform them to trackviz voxmm space.  If
        'rasmm' the points are in RAS mm space (real space).  We transform them
        to trackvis voxmm space.  If 'voxel' or 'rasmm' we insist that the voxel
        sizes and ordering are set to non-default values.  If 'rasmm' we also
        check if the affine is set and matches the voxel sizes

    Returns
    -------
    None

    Examples
    --------
    >>> from StringIO import StringIO #23dt : BytesIO
    >>> file_obj = StringIO() #23dt : BytesIO
    >>> pts0 = np.random.uniform(size=(10,3))
    >>> pts1 = np.random.uniform(size=(10,3))
    >>> streamlines = ([(pts0, None, None), (pts1, None, None)])
    >>> write(file_obj, streamlines)
    >>> _ = file_obj.seek(0) # returns 0 in python 3
    >>> streams, hdr = read(file_obj)
    >>> len(streams)
    2

    If there are too many streamlines to fit in memory, you can pass an iterable
    thing instead of a list

    >>> file_obj = StringIO() #23dt : BytesIO
    >>> def gen():
    ...     yield (pts0, None, None)
    ...     yield (pts0, None, None)
    >>> write(file_obj, gen())
    >>> _ = file_obj.seek(0)
    >>> streams, hdr = read(file_obj)
    >>> len(streams)
    2

    Notes
    -----
    Trackvis (the application) expects the ``points`` in the streamlines be in
    what we call *trackviz voxmm* coordinates.  If we have a point (x, y, z) in
    voxmm coordinates, and ``voxel_size`` has the voxel sizes for each of the 3
    dimensions, then x, y, z refer to mm in voxel space. Thus if i, j, k is a
    point in voxel coordinates, then ``x = i * voxel_size[0]; y = j *
    voxel_size[1]; z = k * voxel_size[2]``.   The spatial direction of x, y and
    z are defined with the "voxel_order" field.  For example, if the original
    image had RAS voxel ordering then "voxel_order" would be "RAS".  RAS here
    refers to the spatial direction of the voxel axes: "R" means that moving
    along first voxel axis moves from left to right in space, "A" -> second axis
    goes from posterior to anterior, "S" -> inferior to superior.  If
    "voxel_order" is empty we assume "LPS".

    This information comes from some helpful replies on the trackviz forum about
    `interpreting point coordiantes
    <http://trackvis.org/blog/forum/diffusion-toolkit-usage/interpretation-of-track-point-coordinates>`_
    '''
    stream_iter = iter(streamlines)
    try:
        # The ``next()`` builtin works on Python 2 and 3; the ``.next()``
        # method is Python 2 only.
        streams0 = next(stream_iter)
    except StopIteration:  # empty sequence or iterable
        # write header without streams
        hdr = _hdr_from_mapping(None, hdr_mapping, endianness)
        fileobj = allopen(fileobj, mode='wb')
        fileobj.write(hdr.tobytes())
        return
    if endianness is None:
        endianness = endian_codes[streams0[0].dtype.byteorder]
    # fill in a new header from mapping-like
    hdr = _hdr_from_mapping(None, hdr_mapping, endianness)
    # Try and get number of streams from streamlines.  If this is an iterable,
    # we don't have a len, so we write 0 for length.  The 0 is a valid trackvis
    # value with meaning - keep reading until you run out of data.
    try:
        n_streams = len(streamlines)
    except TypeError:  # iterable; we don't know the number of streams
        n_streams = 0
    hdr['n_count'] = n_streams
    # Get number of scalars and properties
    pts, scalars, props = streams0
    # calculate number of scalars
    if scalars is not None:
        n_s = scalars.shape[1]
    else:
        n_s = 0
    hdr['n_scalars'] = n_s
    # calculate number of properties
    if props is not None:
        n_p = props.size
    else:
        n_p = 0
    # Always record n_properties so the header cannot disagree with the data
    # we actually write (previously a stale value from `hdr_mapping` survived
    # when props was None).
    hdr['n_properties'] = n_p
    # do points_space checks
    _check_hdr_points_space(hdr, points_space)
    # prepare transforms for later use
    if points_space == 'voxel':
        zooms = hdr['voxel_size'][None, :].astype('f4')
    elif points_space == 'rasmm':
        zooms = hdr['voxel_size']
        affine = hdr['vox_to_ras']
        vx2tv = np.diag(zooms.tolist() + [1])
        mm2vx = npl.inv(affine)
        mm2tv = np.dot(vx2tv, mm2vx).astype('f4')
    # write header
    fileobj = allopen(fileobj, mode='wb')
    # ``tobytes`` replaces ``tostring`` (deprecated since numpy 1.19, removed
    # in numpy 2.0); the bytes written are identical.
    fileobj.write(hdr.tobytes())
    # track preliminaries
    f4dt = np.dtype(endianness + 'f4')
    i_fmt = endianness + 'i'
    # Add back the read first streamline to the sequence
    for pts, scalars, props in itertools.chain([streams0], stream_iter):
        n_pts, n_coords = pts.shape
        if n_coords != 3:
            raise ValueError('pts should have 3 columns')
        fileobj.write(struct.pack(i_fmt, n_pts))
        if points_space == 'voxel':
            pts = pts * zooms
        elif points_space == 'rasmm':
            pts = apply_affine(mm2tv, pts)
        # This call ensures that the data are 32-bit floats, and that
        # the endianness is OK.
        if pts.dtype != f4dt:
            pts = pts.astype(f4dt)
        if n_s == 0:
            if not (scalars is None or len(scalars) == 0):
                raise DataError('Expecting 0 scalars per point')
        else:
            if scalars.shape != (n_pts, n_s):
                raise DataError('Scalars should be shape (%s, %s)'
                                 % (n_pts, n_s))
            if scalars.dtype != f4dt:
                scalars = scalars.astype(f4dt)
            pts = np.c_[pts, scalars]
        fileobj.write(pts.tobytes())
        if n_p == 0:
            if not (props is None or len(props) == 0):
                raise DataError('Expecting 0 properties per point')
        else:
            if props.size != n_p:
                raise DataError('Properties should be size %s' % n_p)
            if props.dtype != f4dt:
                props = props.astype(f4dt)
            fileobj.write(props.tobytes())
Example #4
0
def read(fileobj, as_generator=False, points_space=None):
    ''' Read trackvis file, return streamlines, header

    Parameters
    ----------
    fileobj : string or file-like object
       If string, a filename; otherwise an open file-like object
       pointing to trackvis file (and ready to read from the beginning
       of the trackvis header data)
    as_generator : bool, optional
       Whether to return tracks as sequence (False, default) or as a generator
       (True).
    points_space : {None, 'voxel', 'rasmm'}, optional
        The coordinates in which you want the points in the *output* streamlines
        expressed.  If None, then return the points exactly as they are stored
        in the trackvis file. The points will probably be in trackviz voxmm
        space - see Notes for ``write`` function.  If 'voxel', we convert the
        points to voxel space simply by dividing by the recorded voxel size.  If
        'rasmm' we'll convert the points to RAS mm space (real space). For
        'rasmm' we check if the affine is set and matches the voxel sizes and
        voxel order.

    Returns
    -------
    streamlines : sequence or generator
       Returns sequence if `as_generator` is False, generator if True.  Value is
       sequence or generator of 3 element sequences with elements:

       #. points : ndarray shape (N,3)
          where N is the number of points
       #. scalars : None or ndarray shape (N, M)
          where M is the number of scalars per point
       #. properties : None or ndarray shape (P,)
          where P is the number of properties

    hdr : structured array
       structured array with trackvis header fields

    Notes
    -----
    The endianness of the input data can be deduced from the endianness
    of the returned `hdr` or `streamlines`

    Points are in trackvis *voxel mm*.  Each track has N points, each with 3
    coordinates, ``x, y, z``, where ``x`` is the floating point voxel coordinate
    along the first image axis, multiplied by the voxel size for that axis.
    '''
    fileobj = allopen(fileobj, mode='rb')
    hdr_str = fileobj.read(header_2_dtype.itemsize)
    # try defaulting to version 2 format
    hdr = np.ndarray(shape=(), dtype=header_2_dtype, buffer=hdr_str)
    # ``.item()`` replaces ``np.asscalar`` (deprecated numpy 1.16, removed in
    # 1.23); ``asscalar`` was defined as exactly ``a.item()``.
    if hdr['id_string'].item()[:5] != asbytes('TRACK'):
        raise HeaderError('Expecting TRACK as first '
                          '5 characters of id_string')
    if hdr['hdr_size'] == 1000:
        endianness = native_code
    else:
        # hdr_size is wrong in native order; try the byte-swapped reading
        hdr = hdr.newbyteorder()
        if hdr['hdr_size'] != 1000:
            raise HeaderError('Invalid hdr_size of %s' % hdr['hdr_size'])
        endianness = swapped_code
    # Check version and adapt structure accordingly
    version = hdr['version']
    if version not in (1, 2):
        raise HeaderError('Reader only supports versions 1 and 2')
    if version == 1:  # make a new header with the same data
        hdr = np.ndarray(shape=(), dtype=header_1_dtype, buffer=hdr_str)
        if endianness == swapped_code:
            hdr = hdr.newbyteorder()
    # Do points_space checks
    _check_hdr_points_space(hdr, points_space)
    # prepare transforms for later use
    if points_space == 'voxel':
        zooms = hdr['voxel_size'][None, :].astype('f4')
    elif points_space == 'rasmm':
        zooms = hdr['voxel_size']
        affine = hdr['vox_to_ras']
        tv2vx = np.diag((1. / zooms).tolist() + [1])
        tv2mm = np.dot(affine, tv2vx).astype('f4')
    n_s = hdr['n_scalars']
    n_p = hdr['n_properties']
    f4dt = np.dtype(endianness + 'f4')
    pt_cols = 3 + n_s
    pt_size = int(f4dt.itemsize * pt_cols)
    ps_size = int(f4dt.itemsize * n_p)
    i_fmt = endianness + 'i'
    stream_count = hdr['n_count']
    if stream_count < 0:
        raise HeaderError('Unexpected negative n_count')

    def track_gen():
        # Generator yielding (points, scalars, properties) per track.
        n_streams = 0
        # For case where there are no scalars or no properties
        scalars = None
        ps = None
        while True:
            n_str = fileobj.read(4)
            if len(n_str) < 4:
                if stream_count:
                    raise HeaderError('Expecting %s points, found only %s' %
                                      (stream_count, n_streams))
                break
            n_pts = struct.unpack(i_fmt, n_str)[0]
            pts_str = fileobj.read(n_pts * pt_size)
            pts = np.ndarray(shape=(n_pts, pt_cols),
                             dtype=f4dt,
                             buffer=pts_str)
            if n_p:
                ps_str = fileobj.read(ps_size)
                ps = np.ndarray(shape=(n_p, ), dtype=f4dt, buffer=ps_str)
            xyz = pts[:, :3]
            if points_space == 'voxel':
                xyz = xyz / zooms
            elif points_space == 'rasmm':
                xyz = apply_affine(tv2mm, pts)
            if n_s:
                scalars = pts[:, 3:]
            yield (xyz, scalars, ps)
            n_streams += 1
            # deliberately misses case where stream_count is 0
            if n_streams == stream_count:
                # PEP 479: ``raise StopIteration`` inside a generator becomes
                # a RuntimeError from Python 3.7; ``return`` ends it cleanly.
                return

    streamlines = track_gen()
    if not as_generator:
        streamlines = list(streamlines)
    return streamlines, hdr
Example #5
0
def write(fileobj,
          streamlines,
          hdr_mapping=None,
          endianness=None,
          points_space=None):
    ''' Write header and `streamlines` to trackvis file `fileobj`

    The parameters from the streamlines override conflicting parameters
    in the `hdr_mapping` information.  In particular, the number of
    streamlines, the number of scalars, and the number of properties are
    written according to `streamlines` rather than `hdr_mapping`.

    Parameters
    ----------
    fileobj : filename or file-like
       If filename, open file as 'wb', otherwise `fileobj` should be an
       open file-like object, with a ``write`` method.
    streamlines : iterable
       iterable returning 3 element sequences with elements:

       #. points : ndarray shape (N,3)
          where N is the number of points
       #. scalars : None or ndarray shape (N, M)
          where M is the number of scalars per point
       #. properties : None or ndarray shape (P,)
          where P is the number of properties

       If `streamlines` has a ``len`` (for example, it is a list or a tuple),
       then we can write the number of streamlines into the header.  Otherwise
       we write 0 for the number of streamlines (a valid trackvis header) and
       write streamlines into the file until the iterable is exhausted.
       M - the number of scalars - has to be the same for each streamline in
       `streamlines`.  Similarly for P. See `points_space` and Notes for more
       detail on the coordinate system for ``points`` above.
    hdr_mapping : None, ndarray or mapping, optional
       Information for filling header fields.  Can be something
       dict-like (implementing ``items``) or a structured numpy array
    endianness : {None, '<', '>'}, optional
       Endianness of file to be written.  '<' is little-endian, '>' is
       big-endian.  None (the default) is to use the endianness of the
       `streamlines` data.
    points_space : {None, 'voxel', 'rasmm'}, optional
        The coordinates in which the points in the input streamlines are
        expressed.  If None, then assume the points are as you want them
        (probably trackviz voxmm space - see Notes).  If 'voxel', the points are
        in voxel space, and we will transform them to trackviz voxmm space.  If
        'rasmm' the points are in RAS mm space (real space).  We transform them
        to trackvis voxmm space.  If 'voxel' or 'rasmm' we insist that the voxel
        sizes and ordering are set to non-default values.  If 'rasmm' we also
        check if the affine is set and matches the voxel sizes

    Returns
    -------
    None

    Examples
    --------
    >>> from StringIO import StringIO #23dt : BytesIO
    >>> file_obj = StringIO() #23dt : BytesIO
    >>> pts0 = np.random.uniform(size=(10,3))
    >>> pts1 = np.random.uniform(size=(10,3))
    >>> streamlines = ([(pts0, None, None), (pts1, None, None)])
    >>> write(file_obj, streamlines)
    >>> _ = file_obj.seek(0) # returns 0 in python 3
    >>> streams, hdr = read(file_obj)
    >>> len(streams)
    2

    If there are too many streamlines to fit in memory, you can pass an iterable
    thing instead of a list

    >>> file_obj = StringIO() #23dt : BytesIO
    >>> def gen():
    ...     yield (pts0, None, None)
    ...     yield (pts0, None, None)
    >>> write(file_obj, gen())
    >>> _ = file_obj.seek(0)
    >>> streams, hdr = read(file_obj)
    >>> len(streams)
    2

    Notes
    -----
    Trackvis (the application) expects the ``points`` in the streamlines be in
    what we call *trackviz voxmm* coordinates.  If we have a point (x, y, z) in
    voxmm coordinates, and ``voxel_size`` has the voxel sizes for each of the 3
    dimensions, then x, y, z refer to mm in voxel space. Thus if i, j, k is a
    point in voxel coordinates, then ``x = i * voxel_size[0]; y = j *
    voxel_size[1]; z = k * voxel_size[2]``.   The spatial direction of x, y and
    z are defined with the "voxel_order" field.  For example, if the original
    image had RAS voxel ordering then "voxel_order" would be "RAS".  RAS here
    refers to the spatial direction of the voxel axes: "R" means that moving
    along first voxel axis moves from left to right in space, "A" -> second axis
    goes from posterior to anterior, "S" -> inferior to superior.  If
    "voxel_order" is empty we assume "LPS".

    This information comes from some helpful replies on the trackviz forum about
    `interpreting point coordiantes
    <http://trackvis.org/blog/forum/diffusion-toolkit-usage/interpretation-of-track-point-coordinates>`_
    '''
    stream_iter = iter(streamlines)
    try:
        # The ``next()`` builtin works on Python 2 and 3; the ``.next()``
        # method is Python 2 only.
        streams0 = next(stream_iter)
    except StopIteration:  # empty sequence or iterable
        # write header without streams
        hdr = _hdr_from_mapping(None, hdr_mapping, endianness)
        fileobj = allopen(fileobj, mode='wb')
        fileobj.write(hdr.tobytes())
        return
    if endianness is None:
        endianness = endian_codes[streams0[0].dtype.byteorder]
    # fill in a new header from mapping-like
    hdr = _hdr_from_mapping(None, hdr_mapping, endianness)
    # Try and get number of streams from streamlines.  If this is an iterable,
    # we don't have a len, so we write 0 for length.  The 0 is a valid trackvis
    # value with meaning - keep reading until you run out of data.
    try:
        n_streams = len(streamlines)
    except TypeError:  # iterable; we don't know the number of streams
        n_streams = 0
    hdr['n_count'] = n_streams
    # Get number of scalars and properties
    pts, scalars, props = streams0
    # calculate number of scalars
    if scalars is not None:
        n_s = scalars.shape[1]
    else:
        n_s = 0
    hdr['n_scalars'] = n_s
    # calculate number of properties
    if props is not None:
        n_p = props.size
    else:
        n_p = 0
    # Always record n_properties so the header cannot disagree with the data
    # we actually write (previously a stale value from `hdr_mapping` survived
    # when props was None).
    hdr['n_properties'] = n_p
    # do points_space checks
    _check_hdr_points_space(hdr, points_space)
    # prepare transforms for later use
    if points_space == 'voxel':
        zooms = hdr['voxel_size'][None, :].astype('f4')
    elif points_space == 'rasmm':
        zooms = hdr['voxel_size']
        affine = hdr['vox_to_ras']
        vx2tv = np.diag(zooms.tolist() + [1])
        mm2vx = npl.inv(affine)
        mm2tv = np.dot(vx2tv, mm2vx).astype('f4')
    # write header
    fileobj = allopen(fileobj, mode='wb')
    # ``tobytes`` replaces ``tostring`` (deprecated since numpy 1.19, removed
    # in numpy 2.0); the bytes written are identical.
    fileobj.write(hdr.tobytes())
    # track preliminaries
    f4dt = np.dtype(endianness + 'f4')
    i_fmt = endianness + 'i'
    # Add back the read first streamline to the sequence
    for pts, scalars, props in itertools.chain([streams0], stream_iter):
        n_pts, n_coords = pts.shape
        if n_coords != 3:
            raise ValueError('pts should have 3 columns')
        fileobj.write(struct.pack(i_fmt, n_pts))
        if points_space == 'voxel':
            pts = pts * zooms
        elif points_space == 'rasmm':
            pts = apply_affine(mm2tv, pts)
        # This call ensures that the data are 32-bit floats, and that
        # the endianness is OK.
        if pts.dtype != f4dt:
            pts = pts.astype(f4dt)
        if n_s == 0:
            if not (scalars is None or len(scalars) == 0):
                raise DataError('Expecting 0 scalars per point')
        else:
            if scalars.shape != (n_pts, n_s):
                raise DataError('Scalars should be shape (%s, %s)' %
                                (n_pts, n_s))
            if scalars.dtype != f4dt:
                scalars = scalars.astype(f4dt)
            pts = np.c_[pts, scalars]
        fileobj.write(pts.tobytes())
        if n_p == 0:
            if not (props is None or len(props) == 0):
                raise DataError('Expecting 0 properties per point')
        else:
            if props.size != n_p:
                raise DataError('Properties should be size %s' % n_p)
            if props.dtype != f4dt:
                props = props.astype(f4dt)
            fileobj.write(props.tobytes())
Example #6
0
 def _read_data(self):
     # Read and return the image data array, delegating the actual parsing
     # to the header's ``data_from_fileobj`` method.
     # ``self.file_like`` may be a filename or an already-open file object.
     fileobj = allopen(self.file_like)
     data = self.header.data_from_fileobj(fileobj)
     if isinstance(self.file_like, basestring):  # filename
         # We opened the file ourselves (from a filename string), so close
         # it; caller-supplied file objects are left open for the caller.
         # NOTE(review): ``basestring`` exists only on Python 2 -- this line
         # raises NameError on Python 3; confirm the target interpreter.
         fileobj.close()
     return data
Example #7
0
 def get_fileobj(nibimage):
     """Open the file backing *nibimage* and return the file-like object."""
     fname = nibimage.get_filename()
     return allopen(fname)
Example #8
0
 def _read_data(self):
     # Read and return the image data array, delegating the actual parsing
     # to the header's ``data_from_fileobj`` method.
     # ``self.file_like`` may be a filename or an already-open file object.
     fileobj = allopen(self.file_like)
     data = self.header.data_from_fileobj(fileobj)
     if isinstance(self.file_like, basestring):  # filename
         # We opened the file ourselves (from a filename string), so close
         # it; caller-supplied file objects are left open for the caller.
         # NOTE(review): ``basestring`` exists only on Python 2 -- this line
         # raises NameError on Python 3; confirm the target interpreter.
         fileobj.close()
     return data