Example #1
    def test_load_file_with_wrong_information(self):
        with open(DATA['simple_tck_fname'], 'rb') as f:
            tck_file = f.read()

        # Simulate a TCK file where `datatype` does not have the right endianness.
        new_tck_file = tck_file.replace(asbytes("Float32LE"),
                                        asbytes("Float32BE"))

        with pytest.raises(DataError):
            TckFile.load(BytesIO(new_tck_file))

        # Simulate a TCK file with unsupported `datatype`.
        new_tck_file = tck_file.replace(asbytes("Float32LE"), asbytes("int32"))
        with pytest.raises(HeaderError):
            TckFile.load(BytesIO(new_tck_file))

        # Simulate a TCK file with no `datatype` field.
        new_tck_file = tck_file.replace(b"datatype: Float32LE\n", b"")
        # Need to adjust data offset.
        new_tck_file = new_tck_file.replace(b"file: . 67\n", b"file: . 47\n")
        with clear_and_catch_warnings(record=True, modules=[tck_module]) as w:
            tck = TckFile.load(BytesIO(new_tck_file))
            assert len(w) == 1
            assert issubclass(w[0].category, HeaderWarning)
            assert "Missing 'datatype'" in str(w[0].message)
            assert_array_equal(tck.header['datatype'], "Float32LE")

        # Simulate a TCK file with no `file` field.
        new_tck_file = tck_file.replace(b"\nfile: . 67", b"")
        with clear_and_catch_warnings(record=True, modules=[tck_module]) as w:
            tck = TckFile.load(BytesIO(new_tck_file))
            assert len(w) == 1
            assert issubclass(w[0].category, HeaderWarning)
            assert "Missing 'file'" in str(w[0].message)
            assert_array_equal(tck.header['file'], ". 56")

        # Simulate a TCK file with `file` field pointing to another file.
        new_tck_file = tck_file.replace(b"file: . 67\n",
                                        b"file: dummy.mat 75\n")
        with pytest.raises(HeaderError):
            TckFile.load(BytesIO(new_tck_file))

        # Simulate a TCK file which is missing a streamline delimiter.
        eos = TckFile.FIBER_DELIMITER.tostring()
        eof = TckFile.EOF_DELIMITER.tostring()
        new_tck_file = tck_file[:-(len(eos) + len(eof))] + tck_file[-len(eof):]

        # Force TCK loading to use buffering.
        buffer_size = 1. / 1024**2  # 1 byte (buffer_size is expressed in MB)
        hdr = TckFile._read_header(BytesIO(new_tck_file))
        tck_reader = TckFile._read(BytesIO(new_tck_file), hdr, buffer_size)
        with pytest.raises(DataError):
            list(tck_reader)

        # Simulate a TCK file which is missing the end-of-file delimiter.
        new_tck_file = tck_file[:-len(eof)]
        with pytest.raises(DataError):
            TckFile.load(BytesIO(new_tck_file))
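
The corruption technique used throughout this test generalizes well: read a binary file once, then patch bytes in memory to fabricate invalid variants without touching the file on disk. A minimal standalone sketch (`sample.bin` is a hypothetical input file):

from io import BytesIO

# Read the original file once; every corrupted variant is built in memory.
with open("sample.bin", "rb") as f:
    original = f.read()

# Patch the datatype tag to simulate a wrong-endianness header.
corrupted = original.replace(b"Float32LE", b"Float32BE")

# Hand the parser an in-memory file object instead of a real file.
stream = BytesIO(corrupted)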
Example #2
    def _write_header(fileobj, header):
        """ Write TCK header to file-like object.

        Parameters
        ----------
        fileobj : file-like object
            An open file-like object in binary mode pointing to a TCK file
            (and ready to write at the beginning of the TCK header).
        header : dict
            Mapping of TCK header fields to their values.
        """
        # Fields to exclude
        exclude = [
            Field.MAGIC_NUMBER,  # Handled separately.
            Field.NB_STREAMLINES,  # Handled separately.
            Field.ENDIANNESS,  # Handled separately.
            Field.VOXEL_TO_RASMM,  # Streamlines are always in RAS+ mm.
            "count",
            "datatype",
            "file"
        ]  # Fields being replaced.

        lines = []
        lines.append(asstr(header[Field.MAGIC_NUMBER]))
        lines.append("count: {0:010}".format(header[Field.NB_STREAMLINES]))
        lines.append("datatype: Float32LE")  # Always Float32LE.
        lines.extend([
            "{0}: {1}".format(k, v) for k, v in header.items()
            if k not in exclude and not k.startswith("_")
        ])
        lines.append("file: . ")  # Manually add this last field.
        out = "\n".join(lines)

        # Check the header is well formatted.
        if out.count("\n") > len(lines) - 1:  # \n only allowed between lines.
            msg = "Key-value pairs cannot contain '\\n':\n{}".format(out)
            raise HeaderError(msg)

        if out.count(":") > len(lines) - 1:
            # : only one per line (except the last one which contains END).
            msg = "Key-value pairs cannot contain ':':\n{}".format(out)
            raise HeaderError(msg)

        # Write header to file.
        fileobj.write(asbytes(out))

        hdr_len_no_offset = len(out) + 5
        # The data offset is itself written inside the header, so the number
        # of bytes needed to store the offset as a decimal string is part of
        # the offset. Start from the header length without the offset string
        # and iterate until the value stabilizes.
        new_offset = -1
        old_offset = hdr_len_no_offset
        while new_offset != old_offset:
            old_offset = new_offset
            new_offset = hdr_len_no_offset + len(str(old_offset))

        fileobj.write(asbytes(str(new_offset) + "\n"))
        fileobj.write(asbytes("END\n"))
Example #3
    def test_load_file_with_wrong_information(self):
        with open(DATA['simple_tck_fname'], 'rb') as f:
            tck_file = f.read()

        # Simulate a TCK file where `datatype` does not have the right endianness.
        new_tck_file = tck_file.replace(asbytes("Float32LE"),
                                        asbytes("Float32BE"))
        assert_raises(DataError, TckFile.load, BytesIO(new_tck_file))

        # Simulate a TCK file with unsupported `datatype`.
        new_tck_file = tck_file.replace(asbytes("Float32LE"),
                                        asbytes("int32"))
        assert_raises(HeaderError, TckFile.load, BytesIO(new_tck_file))

        # Simulate a TCK file with no `datatype` field.
        new_tck_file = tck_file.replace(b"datatype: Float32LE\n", b"")
        # Need to adjust data offset.
        new_tck_file = new_tck_file.replace(b"file: . 67\n", b"file: . 47\n")
        with clear_and_catch_warnings(record=True, modules=[tck_module]) as w:
            tck = TckFile.load(BytesIO(new_tck_file))
            assert_equal(len(w), 1)
            assert_true(issubclass(w[0].category, HeaderWarning))
            assert_true("Missing 'datatype'" in str(w[0].message))
            assert_array_equal(tck.header['datatype'], "Float32LE")

        # Simulate a TCK file with no `file` field.
        new_tck_file = tck_file.replace(b"\nfile: . 67", b"")
        with clear_and_catch_warnings(record=True, modules=[tck_module]) as w:
            tck = TckFile.load(BytesIO(new_tck_file))
            assert_equal(len(w), 1)
            assert_true(issubclass(w[0].category, HeaderWarning))
            assert_true("Missing 'file'" in str(w[0].message))
            assert_array_equal(tck.header['file'], ". 56")

        # Simulate a TCK file with `file` field pointing to another file.
        new_tck_file = tck_file.replace(b"file: . 67\n",
                                        b"file: dummy.mat 75\n")
        assert_raises(HeaderError, TckFile.load, BytesIO(new_tck_file))

        # Simulate a TCK file which is missing a streamline delimiter.
        eos = TckFile.FIBER_DELIMITER.tostring()
        eof = TckFile.EOF_DELIMITER.tostring()
        new_tck_file = tck_file[:-(len(eos) + len(eof))] + tck_file[-len(eof):]

        # Force TCK loading to use buffering.
        buffer_size = 1. / 1024**2  # 1 byte (buffer_size is expressed in MB)
        hdr = TckFile._read_header(BytesIO(new_tck_file))
        tck_reader = TckFile._read(BytesIO(new_tck_file), hdr, buffer_size)
        assert_raises(DataError, list, tck_reader)

        # Simulate a TCK file which is missing the end-of-file delimiter.
        new_tck_file = tck_file[:-len(eof)]
        assert_raises(DataError, TckFile.load, BytesIO(new_tck_file))
Example #4
    def _write_header(fileobj, header):
        """ Write TCK header to file-like object.

        Parameters
        ----------
        fileobj : file-like object
            An open file-like object in binary mode pointing to a TCK file
            (and ready to write at the beginning of the TCK header).
        header : dict
            Mapping of TCK header fields to their values.
        """
        # Fields to exclude
        exclude = [Field.MAGIC_NUMBER,  # Handled separately.
                   Field.NB_STREAMLINES,  # Handled separately.
                   Field.ENDIANNESS,  # Handled separately.
                   Field.VOXEL_TO_RASMM,  # Streamlines are always in RAS+ mm.
                   "count", "datatype", "file"]  # Fields being replaced.

        lines = []
        lines.append(asstr(header[Field.MAGIC_NUMBER]))
        lines.append("count: {0:010}".format(header[Field.NB_STREAMLINES]))
        lines.append("datatype: Float32LE")  # Always Float32LE.
        lines.extend(["{0}: {1}".format(k, v)
                      for k, v in header.items()
                      if k not in exclude and not k.startswith("_")])
        lines.append("file: . ")  # Manually add this last field.
        out = "\n".join(lines)

        # Check the header is well formatted.
        if out.count("\n") > len(lines) - 1:  # \n only allowed between lines.
            msg = "Key-value pairs cannot contain '\\n':\n{}".format(out)
            raise HeaderError(msg)

        if out.count(":") > len(lines) - 1:
            # : only one per line (except the last one which contains END).
            msg = "Key-value pairs cannot contain ':':\n{}".format(out)
            raise HeaderError(msg)

        # Write header to file.
        fileobj.write(asbytes(out))

        hdr_len_no_offset = len(out) + 5
        # The data offset is itself written inside the header, so the number
        # of bytes needed to store the offset as a decimal string is part of
        # the offset. Start from the header length without the offset string
        # and iterate until the value stabilizes.
        new_offset = -1
        old_offset = hdr_len_no_offset
        while new_offset != old_offset:
            old_offset = new_offset
            new_offset = hdr_len_no_offset + len(str(old_offset))

        fileobj.write(asbytes(str(new_offset) + "\n"))
        fileobj.write(asbytes("END\n"))
Example #5
def test_is_supported_detect_format():
    # Test is_supported and detect_format functions
    # Empty file/string
    f = BytesIO()
    assert_false(nib.streamlines.is_supported(f))
    assert_false(nib.streamlines.is_supported(""))
    assert_true(nib.streamlines.detect_format(f) is None)
    assert_true(nib.streamlines.detect_format("") is None)

    # Valid file without extension
    for tfile_cls in FORMATS.values():
        f = BytesIO()
        f.write(asbytes(tfile_cls.MAGIC_NUMBER))
        f.seek(0, os.SEEK_SET)
        assert_true(nib.streamlines.is_supported(f))
        assert_true(nib.streamlines.detect_format(f) is tfile_cls)

    # Wrong extension but right magic number
    for tfile_cls in FORMATS.values():
        with tempfile.TemporaryFile(mode="w+b", suffix=".txt") as f:
            f.write(asbytes(tfile_cls.MAGIC_NUMBER))
            f.seek(0, os.SEEK_SET)
            assert_true(nib.streamlines.is_supported(f))
            assert_true(nib.streamlines.detect_format(f) is tfile_cls)

    # Good extension but wrong magic number
    for ext, tfile_cls in FORMATS.items():
        with tempfile.TemporaryFile(mode="w+b", suffix=ext) as f:
            f.write(b"pass")
            f.seek(0, os.SEEK_SET)
            assert_false(nib.streamlines.is_supported(f))
            assert_true(nib.streamlines.detect_format(f) is None)

    # Wrong extension, string only
    f = "my_tractogram.asd"
    assert_false(nib.streamlines.is_supported(f))
    assert_true(nib.streamlines.detect_format(f) is None)

    # Good extension, string only
    for ext, tfile_cls in FORMATS.items():
        f = "my_tractogram" + ext
        assert_true(nib.streamlines.is_supported(f))
        assert_equal(nib.streamlines.detect_format(f), tfile_cls)

    # Extension should not be case-sensitive.
    for ext, tfile_cls in FORMATS.items():
        f = "my_tractogram" + ext.upper()
        assert_true(nib.streamlines.detect_format(f) is tfile_cls)
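
For reference, a minimal sketch of the two detection paths this test exercises, by filename extension and by file content (assuming nibabel's public streamlines API, and that the TCK magic number is b"mrtrix tracks"):

import nibabel as nib
from io import BytesIO

# Detection by extension: only the filename string is inspected.
print(nib.streamlines.detect_format("bundle.tck"))

# Detection by content: the magic number is read from the file object.
f = BytesIO(b"mrtrix tracks\n")
print(nib.streamlines.detect_format(f))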
Example #6
def _hdr_from_mapping(hdr=None, mapping=None, endianness=native_code):
    ''' Fill `hdr` from mapping `mapping`, with given endianness '''
    if hdr is None:
        # passed a valid mapping as header?  Copy and return
        if isinstance(mapping, np.ndarray):
            test_dtype = mapping.dtype.newbyteorder('=')
            if test_dtype in (header_1_dtype, header_2_dtype):
                return mapping.copy()
        # otherwise make a new empty header. If no version specified,
        # go for default (2)
        if mapping is None:
            version = 2
        else:
            version = mapping.get('version', 2)
        hdr = empty_header(endianness, version)
    if mapping is None:
        return hdr
    if isinstance(mapping, np.ndarray):
        mapping = rec2dict(mapping)
    for key, value in mapping.items():
        hdr[key] = value
    # check header values
    if np.asscalar(hdr['id_string'])[:5] != asbytes('TRACK'):
        raise HeaderError('Expecting TRACK as first '
                          '5 characters of id_string')
    if hdr['version'] not in (1, 2):
        raise HeaderError('Reader only supports versions 1 and 2')
    if hdr['hdr_size'] != 1000:
        raise HeaderError('hdr_size should be 1000')
    return hdr
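
The helper fills a NumPy structured scalar field by field from a plain dict. A toy sketch of that pattern (the dtype below is made up, standing in for trackvis's header_1_dtype/header_2_dtype):

import numpy as np

toy_dtype = np.dtype([('id_string', 'S6'), ('version', 'i2'),
                      ('hdr_size', 'i4')])
hdr = np.zeros((), dtype=toy_dtype)  # zero-filled 0-d "header" record

# Fill header fields from a mapping, exactly as in the loop above.
for key, value in {'id_string': b'TRACK', 'version': 2,
                   'hdr_size': 1000}.items():
    hdr[key] = value

assert hdr['hdr_size'] == 1000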
Example #7
def test_formula_inputs():
    # Check we can send in fields of type 'S', 'U', 'O' for factor levels
    level_names = ['red', 'green', 'blue']
    for field_type in ('S', 'U', 'O'):
        levels = np.array(level_names, dtype=field_type)
        f = F.Factor('myname', levels)
        assert_equal(f.levels, level_names)
    # Sending in byte objects
    levels = [asbytes(L) for L in level_names]
    f = F.Factor('myname', levels)
    assert_equal(f.levels, level_names)
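
The bytes round-trip the test relies on can be shown in isolation: NumPy 'S' arrays hold bytes, and the test expects Factor to normalize such levels back to str. A small sketch of that conversion:

import numpy as np

names = ['red', 'green', 'blue']
as_bytes = np.array(names, dtype='S')          # [b'red', b'green', b'blue']
back = [b.decode('latin1') for b in as_bytes]  # decode as asstr() would
assert back == names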
Example #8
    def record_events_to_file(self, filename="record.log"):
        """ Records events during the interaction.

        The recording is represented as a list of VTK events
        that happened during the interaction. The recording is
        going to be saved into `filename`.

        Parameters
        ----------
        filename : str
            Name of the file that will contain the recording (.log|.log.gz).
        """
        events = self.record_events()

        # Compress file if needed
        if filename.endswith(".gz"):
            with gzip.open(filename, 'wb') as f:
                f.write(asbytes(events))
        else:
            with open(filename, 'w') as f:
                f.write(events)
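
A complementary sketch for reading a recording back, using the same gzip-vs-plain dispatch on the file extension (the filename is hypothetical):

import gzip

filename = "record.log.gz"
if filename.endswith(".gz"):
    with gzip.open(filename, 'rb') as f:
        events = f.read().decode('latin1')
else:
    with open(filename, 'r') as f:
        events = f.read()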
Example #9
def read(fileobj, as_generator=False, points_space=None):
    ''' Read trackvis file, return streamlines, header

    Parameters
    ----------
    fileobj : string or file-like object
       If string, a filename; otherwise an open file-like object
       pointing to trackvis file (and ready to read from the beginning
       of the trackvis header data)
    as_generator : bool, optional
       Whether to return tracks as sequence (False, default) or as a generator
       (True).
    points_space : {None, 'voxel', 'rasmm'}, optional
        The coordinates in which you want the points in the *output* streamlines
        expressed.  If None, then return the points exactly as they are stored
        in the trackvis file. The points will probably be in trackvis voxmm
        space - see Notes for ``write`` function.  If 'voxel', we convert the
        points to voxel space simply by dividing by the recorded voxel size.  If
        'rasmm' we'll convert the points to RAS mm space (real space). For
        'rasmm' we check if the affine is set and matches the voxel sizes and
        voxel order.

    Returns
    -------
    streamlines : sequence or generator
       Returns sequence if `as_generator` is False, generator if True.  Value is
       sequence or generator of 3 element sequences with elements:

       #. points : ndarray shape (N,3)
          where N is the number of points
       #. scalars : None or ndarray shape (N, M)
          where M is the number of scalars per point
       #. properties : None or ndarray shape (P,)
          where P is the number of properties

    hdr : structured array
       structured array with trackvis header fields

    Notes
    -----
    The endianness of the input data can be deduced from the endianness
    of the returned `hdr` or `streamlines`.

    Points are in trackvis *voxel mm*.  Each track has N points, each with 3
    coordinates, ``x, y, z``, where ``x`` is the floating point voxel coordinate
    along the first image axis, multiplied by the voxel size for that axis.
    '''
    fileobj = allopen(fileobj, mode='rb')
    hdr_str = fileobj.read(header_2_dtype.itemsize)
    # try defaulting to version 2 format
    hdr = np.ndarray(shape=(), dtype=header_2_dtype, buffer=hdr_str)
    if np.asscalar(hdr['id_string'])[:5] != asbytes('TRACK'):
        raise HeaderError('Expecting TRACK as first '
                          '5 characters of id_string')
    if hdr['hdr_size'] == 1000:
        endianness = native_code
    else:
        hdr = hdr.newbyteorder()
        if hdr['hdr_size'] != 1000:
            raise HeaderError('Invalid hdr_size of %s' % hdr['hdr_size'])
        endianness = swapped_code
    # Check version and adapt structure accordingly
    version = hdr['version']
    if version not in (1, 2):
        raise HeaderError('Reader only supports versions 1 and 2')
    if version == 1:  # make a new header with the same data
        hdr = np.ndarray(shape=(), dtype=header_1_dtype, buffer=hdr_str)
        if endianness == swapped_code:
            hdr = hdr.newbyteorder()
    # Do points_space checks
    _check_hdr_points_space(hdr, points_space)
    # prepare transforms for later use
    if points_space == 'voxel':
        zooms = hdr['voxel_size'][None, :].astype('f4')
    elif points_space == 'rasmm':
        zooms = hdr['voxel_size']
        affine = hdr['vox_to_ras']
        tv2vx = np.diag((1. / zooms).tolist() + [1])
        tv2mm = np.dot(affine, tv2vx).astype('f4')
    n_s = hdr['n_scalars']
    n_p = hdr['n_properties']
    f4dt = np.dtype(endianness + 'f4')
    pt_cols = 3 + n_s
    pt_size = int(f4dt.itemsize * pt_cols)
    ps_size = int(f4dt.itemsize * n_p)
    i_fmt = endianness + 'i'
    stream_count = hdr['n_count']
    if stream_count < 0:
        raise HeaderError('Unexpected negative n_count')

    def track_gen():
        n_streams = 0
        # For case where there are no scalars or no properties
        scalars = None
        ps = None
        while True:
            n_str = fileobj.read(4)
            if len(n_str) < 4:
                if stream_count:
                    raise HeaderError('Expecting %s streamlines, found only %s'
                                      % (stream_count, n_streams))
                break
            n_pts = struct.unpack(i_fmt, n_str)[0]
            pts_str = fileobj.read(n_pts * pt_size)
            pts = np.ndarray(shape=(n_pts, pt_cols),
                             dtype=f4dt,
                             buffer=pts_str)
            if n_p:
                ps_str = fileobj.read(ps_size)
                ps = np.ndarray(shape=(n_p, ), dtype=f4dt, buffer=ps_str)
            xyz = pts[:, :3]
            if points_space == 'voxel':
                xyz = xyz / zooms
            elif points_space == 'rasmm':
                xyz = apply_affine(tv2mm, pts)
            if n_s:
                scalars = pts[:, 3:]
            yield (xyz, scalars, ps)
            n_streams += 1
            # deliberately misses case where stream_count is 0
            if n_streams == stream_count:
                # `return` ends the generator; `raise StopIteration` here
                # becomes a RuntimeError under PEP 479 (Python 3.7+).
                return

    streamlines = track_gen()
    if not as_generator:
        streamlines = list(streamlines)
    return streamlines, hdr
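
A minimal usage sketch, assuming a trackvis file named 'tracks.trk' exists and that `read` is imported from this module: request the points in RAS+ mm rather than raw voxel-mm.

streamlines, hdr = read('tracks.trk', points_space='rasmm')
for points, scalars, properties in streamlines:
    # points: (N, 3) coordinates mapped through hdr['vox_to_ras']
    print(points.shape, scalars is None, properties is None)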
Example #10
    def save(self, fileobj):
        """ Save tractogram to a filename or file-like object using TCK format.

        Parameters
        ----------
        fileobj : string or file-like object
            If string, a filename; otherwise an open file-like object in
            binary mode pointing to TCK file (and ready to write from the
            beginning of the TCK header data).
        """
        # Enforce float32 in little-endian byte order for data.
        dtype = np.dtype('<f4')
        header = self.create_empty_header()

        # Override hdr's fields by those contained in `header`.
        header.update(self.header)

        # Keep count so inconsistent header fields can be corrected or warned about.
        nb_streamlines = 0

        with Opener(fileobj, mode="wb") as f:
            # Keep track of the beginning of the header.
            beginning = f.tell()

            # Write temporary header that we will update at the end
            self._write_header(f, header)

            # Make sure streamlines are in rasmm.
            tractogram = self.tractogram.to_world(lazy=True)
            # Assume looping over the streamlines can be done only once.
            tractogram = iter(tractogram)

            try:
                # Use the first element to check
                #  1) the tractogram is not empty;
                #  2) quantity of information saved along each streamline.
                first_item, tractogram = peek_next(tractogram)
            except StopIteration:
                # Empty tractogram
                header[Field.NB_STREAMLINES] = 0
                self._finalize_header(f, header, offset=beginning)

                # Add the EOF_DELIMITER.
                f.write(asbytes(self.EOF_DELIMITER.tostring()))
                return

            data_for_streamline = first_item.data_for_streamline
            if len(data_for_streamline) > 0:
                keys = ", ".join(data_for_streamline.keys())
                msg = ("TCK format does not support saving additional data"
                       " alongside streamlines. Dropping: {}".format(keys))
                warnings.warn(msg, DataWarning)

            data_for_points = first_item.data_for_points
            if len(data_for_points) > 0:
                keys = ", ".join(data_for_points.keys())
                msg = ("TCK format does not support saving additional data"
                       " alongside points. Dropping: {}".format(keys))
                warnings.warn(msg, DataWarning)

            for t in tractogram:
                data = np.r_[t.streamline, self.FIBER_DELIMITER]
                f.write(data.astype(dtype).tostring())
                nb_streamlines += 1

            header[Field.NB_STREAMLINES] = nb_streamlines

            # Add the EOF_DELIMITER.
            f.write(asbytes(self.EOF_DELIMITER.tostring()))
            self._finalize_header(f, header, offset=beginning)
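
A short usage sketch, assuming nibabel's public streamlines API (the output filename is hypothetical): wrap a Tractogram whose affine maps its points to RAS+ mm, then save it.

import numpy as np
import nibabel as nib

points = [np.random.rand(10, 3).astype('f4')]  # one 10-point streamline
t = nib.streamlines.Tractogram(points, affine_to_rasmm=np.eye(4))
nib.streamlines.TckFile(t).save("example.tck")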
Example #11
def test_to_str():
    # Test routine to convert to string
    assert_equal("1", to_str(1))
    assert_equal("1.0", to_str(1.0))
    assert_equal("from", to_str(asstr("from")))
    assert_equal("from", to_str(asbytes("from")))
Example #12
def test_to_str():
    # Test routine to convert to string
    assert_equal('1', to_str(1))
    assert_equal('1.0', to_str(1.0))
    assert_equal('from', to_str(asstr('from')))
    assert_equal('from', to_str(asbytes('from')))
Example #13
def read(fileobj, as_generator=False, points_space=None):
    ''' Read trackvis file, return streamlines, header

    Parameters
    ----------
    fileobj : string or file-like object
       If string, a filename; otherwise an open file-like object
       pointing to trackvis file (and ready to read from the beginning
       of the trackvis header data)
    as_generator : bool, optional
       Whether to return tracks as sequence (False, default) or as a generator
       (True).
    points_space : {None, 'voxel', 'rasmm'}, optional
        The coordinates in which you want the points in the *output* streamlines
        expressed.  If None, then return the points exactly as they are stored
        in the trackvis file. The points will probably be in trackvis voxmm
        space - see Notes for ``write`` function.  If 'voxel', we convert the
        points to voxel space simply by dividing by the recorded voxel size.  If
        'rasmm' we'll convert the points to RAS mm space (real space). For
        'rasmm' we check if the affine is set and matches the voxel sizes and
        voxel order.

    Returns
    -------
    streamlines : sequence or generator
       Returns sequence if `as_generator` is False, generator if True.  Value is
       sequence or generator of 3 element sequences with elements:

       #. points : ndarray shape (N,3)
          where N is the number of points
       #. scalars : None or ndarray shape (N, M)
          where M is the number of scalars per point
       #. properties : None or ndarray shape (P,)
          where P is the number of properties

    hdr : structured array
       structured array with trackvis header fields

    Notes
    -----
    The endianness of the input data can be deduced from the endianness
    of the returned `hdr` or `streamlines`.

    Points are in trackvis *voxel mm*.  Each track has N points, each with 3
    coordinates, ``x, y, z``, where ``x`` is the floating point voxel coordinate
    along the first image axis, multiplied by the voxel size for that axis.
    '''
    fileobj = allopen(fileobj, mode='rb')
    hdr_str = fileobj.read(header_2_dtype.itemsize)
    # try defaulting to version 2 format
    hdr = np.ndarray(shape=(),
                     dtype=header_2_dtype,
                     buffer=hdr_str)
    if np.asscalar(hdr['id_string'])[:5] != asbytes('TRACK'):
        raise HeaderError('Expecting TRACK as first '
                          '5 characters of id_string')
    if hdr['hdr_size'] == 1000:
        endianness = native_code
    else:
        hdr = hdr.newbyteorder()
        if hdr['hdr_size'] != 1000:
            raise HeaderError('Invalid hdr_size of %s'
                              % hdr['hdr_size'])
        endianness = swapped_code
    # Check version and adapt structure accordingly
    version = hdr['version']
    if version not in (1, 2):
        raise HeaderError('Reader only supports versions 1 and 2')
    if version == 1:  # make a new header with the same data
        hdr = np.ndarray(shape=(),
                         dtype=header_1_dtype,
                         buffer=hdr_str)
        if endianness == swapped_code:
            hdr = hdr.newbyteorder()
    # Do points_space checks
    _check_hdr_points_space(hdr, points_space)
    # prepare transforms for later use
    if points_space == 'voxel':
        zooms = hdr['voxel_size'][None, :].astype('f4')
    elif points_space == 'rasmm':
        zooms = hdr['voxel_size']
        affine = hdr['vox_to_ras']
        tv2vx = np.diag((1. / zooms).tolist() + [1])
        tv2mm = np.dot(affine, tv2vx).astype('f4')
    n_s = hdr['n_scalars']
    n_p = hdr['n_properties']
    f4dt = np.dtype(endianness + 'f4')
    pt_cols = 3 + n_s
    pt_size = int(f4dt.itemsize * pt_cols)
    ps_size = int(f4dt.itemsize * n_p)
    i_fmt = endianness + 'i'
    stream_count = hdr['n_count']
    if stream_count < 0:
        raise HeaderError('Unexpected negative n_count')

    def track_gen():
        n_streams = 0
        # For case where there are no scalars or no properties
        scalars = None
        ps = None
        while True:
            n_str = fileobj.read(4)
            if len(n_str) < 4:
                if stream_count:
                    raise HeaderError('Expecting %s streamlines, found only %s'
                                      % (stream_count, n_streams))
                break
            n_pts = struct.unpack(i_fmt, n_str)[0]
            pts_str = fileobj.read(n_pts * pt_size)
            pts = np.ndarray(shape=(n_pts, pt_cols),
                             dtype=f4dt,
                             buffer=pts_str)
            if n_p:
                ps_str = fileobj.read(ps_size)
                ps = np.ndarray(shape=(n_p,), dtype=f4dt, buffer=ps_str)
            xyz = pts[:, :3]
            if points_space == 'voxel':
                xyz = xyz / zooms
            elif points_space == 'rasmm':
                xyz = apply_affine(tv2mm, pts)
            if n_s:
                scalars = pts[:, 3:]
            yield (xyz, scalars, ps)
            n_streams += 1
            # deliberately misses case where stream_count is 0
            if n_streams == stream_count:
                # `return` ends the generator; `raise StopIteration` here
                # becomes a RuntimeError under PEP 479 (Python 3.7+).
                return

    streamlines = track_gen()
    if not as_generator:
        streamlines = list(streamlines)
    return streamlines, hdr