Example #1
    def _read_channel_data(self, channel, offset=0, length=None):
        if offset < 0:
            raise ValueError("offset must be non-negative")
        if length is not None and length < 0:
            raise ValueError("length must be non-negative")
        reader = self._get_reader()

        with Timer(log, "Allocate space for channel"):
            # Allocate space for data
            if length is None:
                num_values = len(channel) - offset
            else:
                num_values = min(length, len(channel) - offset)
            num_values = max(0, num_values)
            channel_data = get_data_receiver(channel, num_values,
                                             self._memmap_dir)

        with Timer(log, "Read data for channel"):
            # Now actually read all the data
            for chunk in reader.read_raw_data_for_channel(
                    channel.path, offset, length):
                if chunk.data is not None:
                    channel_data.append_data(chunk.data)
                if chunk.scaler_data is not None:
                    for scaler_id, scaler_data in chunk.scaler_data.items():
                        channel_data.append_scaler_data(scaler_id, scaler_data)

        return channel_data
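For context, a minimal usage sketch showing how this private method is typically reached through npTDMS's public streaming API (assuming the npTDMS 1.x API; the file, group, and channel names are placeholders):

    from nptdms import TdmsFile

    with TdmsFile.open("my_data.tdms") as tdms_file:
        channel = tdms_file["group_name"]["channel_name"]
        # read_data() validates its arguments and delegates to
        # _read_channel_data() on the underlying file object
        data = channel.read_data(offset=100, length=50)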
Example #2
    def _read_segments(self, f):
        with Timer(log, "Read metadata"):
            # Read metadata first to work out how much space we need
            previous_segment = None
            while True:
                try:
                    segment = _TdmsSegment(f, self)
                except EOFError:
                    # We've finished reading the file
                    break
                segment.read_metadata(f, self.objects, previous_segment)

                self.segments.append(segment)
                previous_segment = segment
                if segment.next_segment_pos is None:
                    break
                else:
                    f.seek(segment.next_segment_pos)

        with Timer(log, "Allocate space"):
            # Allocate space for data
            for obj in self.objects.values():
                obj._initialise_data(memmap_dir=self.memmap_dir)

        with Timer(log, "Read data"):
            # Now actually read all the data
            for segment in self.segments:
                segment.read_raw_data(f)
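This older code path was driven by the eager-loading constructor. A sketch under the assumption of the pre-1.0 npTDMS API (file and object names are placeholders):

    from nptdms import TdmsFile

    tdms_file = TdmsFile("my_data.tdms")  # _read_segments() runs here
    channel = tdms_file.object("group_name", "channel_name")
    print(channel.data)

The two-pass structure, metadata first, then allocation, then data, avoids repeatedly resizing arrays while reading.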
Example #3
    def _read_data(self, tdms_reader):
        with Timer(log, "Allocate space"):
            # Allocate space for data
            for group in self.groups():
                for channel in group.channels():
                    self._channel_data[channel.path] = get_data_receiver(
                        channel, len(channel), self._memmap_dir)

        with Timer(log, "Read data"):
            # Now actually read all the data
            for chunk in tdms_reader.read_raw_data():
                for (path, data) in chunk.channel_data.items():
                    channel_data = self._channel_data[path]
                    if data.data is not None:
                        channel_data.append_data(data.data)
                    elif data.scaler_data is not None:
                        for scaler_id, scaler_data in data.scaler_data.items():
                            channel_data.append_scaler_data(
                                scaler_id, scaler_data)

            for group in self.groups():
                for channel in group.channels():
                    channel_data = self._channel_data[channel.path]
                    if channel_data is not None:
                        channel._set_raw_data(channel_data)

        self.data_read = True
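A sketch of the public entry point that triggers this method, assuming the npTDMS 1.x API (the file name is a placeholder):

    from nptdms import TdmsFile

    tdms_file = TdmsFile.read("my_data.tdms")  # _read_data() runs during this call
    for group in tdms_file.groups():
        for channel in group.channels():
            print(channel.path, len(channel))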
Example #4
    def read_channel_chunk_for_index(self, channel_path, index):
        """ Read the chunk containing the given index

        :returns: Tuple of raw channel data chunk and the integer offset to the beginning of the chunk
        :rtype: (RawChannelDataChunk, int)
        """
        self._ensure_open()
        if self._segments is None:
            raise RuntimeError(
                "Cannot read data unless metadata has first been read")

        if self._segment_channel_offsets is None:
            with Timer(log, "Build data index"):
                self._build_index()
        segment_offsets = self._segment_channel_offsets[channel_path]

        # Binary search to find the segment to read
        segment_index = np.searchsorted(segment_offsets, index, side='right')
        segment = self._segments[segment_index]
        chunk_size = self._segment_chunk_sizes[channel_path][segment_index]
        segment_start_index = (
            segment_offsets[segment_index - 1] if segment_index > 0 else 0)

        index_in_segment = index - segment_start_index
        chunk_index = index_in_segment // chunk_size

        self._verify_segment_start(segment)
        chunk_data = next(
            segment.read_raw_data_for_channel(self._file, channel_path,
                                              chunk_index, 1))
        chunk_offset = segment_start_index + chunk_index * chunk_size
        return chunk_data, chunk_offset
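A hedged sketch of calling this method directly on the reader (TdmsReader is an internal class, assumed importable from nptdms.reader; the path and index are placeholders):

    from nptdms.reader import TdmsReader

    with open("my_data.tdms", "rb") as f:
        reader = TdmsReader(f)
        reader.read_metadata()
        chunk, chunk_offset = reader.read_channel_chunk_for_index(
            "/'group_name'/'channel_name'", 1234)
        # Position of the requested value within the returned chunk
        index_in_chunk = 1234 - chunk_offset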
Example #5
    def _read_segments(self, tdms_file):
        with Timer(log, "Read metadata"):
            # Read metadata first to work out how much space we need
            previous_segment = None
            while True:
                try:
                    segment = _TdmsSegment(tdms_file)
                except EOFError:
                    # We've finished reading the file
                    break
                segment.read_metadata(tdms_file, self.objects,
                                      previous_segment)

                self.segments.append(segment)
                previous_segment = segment
                if segment.next_segment_pos is None:
                    break
                else:
                    tdms_file.seek(segment.next_segment_pos)

        with Timer(log, "Allocate space"):
            # Allocate space for data
            if self.read_data:
                for obj in self.objects.values():
                    obj._initialise_data(memmap_dir=self.memmap_dir)

        with Timer(log, "Read data"):
            # Now actually read all the data
            if self.read_data:
                for segment in self.segments:
                    segment.read_raw_data(tdms_file)

        props_of_interest = {
            "niRF_acquisition_timestamp", "niRF_gain", "name",
            "niRF_IQ_sample_rate", "niRF_RF_center_frequency"
        }

        for segment in self.segments:
            for obj in segment.get_ordered_objects():
                properties = obj.get_properties()
                # dict.has_key was removed in Python 3, so use "in" instead
                for prop_name in props_of_interest:
                    if prop_name in properties:
                        self.props[prop_name] = str(properties[prop_name])
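A hypothetical usage sketch for this forked reader (the class name RFTdmsFile and the file name are invented for illustration; the fork collects the RF properties of interest into self.props):

    reader = RFTdmsFile("iq_capture.tdms")  # hypothetical class name
    print(reader.props.get("niRF_IQ_sample_rate"))
    print(reader.props.get("niRF_RF_center_frequency"))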
Example #6
    def read_metadata(self):
        """ Read all metadata and structure information from a TdmsFile
        """
        self._ensure_open()

        if self._index_file_path is not None:
            reading_index_file = True
            file = open(self._index_file_path, 'rb')
        else:
            reading_index_file = False
            file = self._file

        self._segments = []
        segment_position = 0
        try:
            with Timer(log, "Read metadata"):
                # Read metadata first to work out how much space we need
                previous_segment = None
                while True:
                    start_position = file.tell()
                    try:
                        segment = self._read_segment_metadata(
                            file, segment_position, previous_segment,
                            reading_index_file)
                    except EOFError:
                        # We've finished reading the file
                        break

                    self._update_object_metadata(segment)
                    self._update_object_properties(segment)
                    self._segments.append(segment)
                    previous_segment = segment

                    segment_position = segment.next_segment_pos
                    if reading_index_file:
                        # A segment lead-in is 28 bytes (7 * 4); index files
                        # hold only lead-in and metadata, so skip just past
                        # the metadata rather than to the next raw data position
                        lead_size = 7 * 4
                        file.seek(
                            start_position + lead_size +
                            segment.raw_data_offset, os.SEEK_SET)
                    else:
                        file.seek(segment.next_segment_pos, os.SEEK_SET)
        finally:
            if reading_index_file:
                file.close()
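A sketch of the corresponding public call, assuming the npTDMS 1.x API (the file name is a placeholder); it reads structure and properties without loading any raw data:

    from nptdms import TdmsFile

    tdms_file = TdmsFile.read_metadata("my_data.tdms")
    for group in tdms_file.groups():
        for channel in group.channels():
            print(channel.path, channel.properties)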
Example #7
    def read_raw_data_for_channel(self, channel_path, offset=0, length=None):
        """ Read raw data for a single channel, chunk by chunk

        :param channel_path: The path of the channel object to read data for
        :param offset: Initial position to read data from.
        :param length: Number of values to attempt to read.
            If None, then all values starting from the offset will be read.
            Fewer values will be returned if attempting to read beyond the end of the available data.
        :returns: A generator that yields RawChannelDataChunk objects
        """
        self._ensure_open()
        if self._segments is None:
            raise RuntimeError(
                "Cannot read data unless metadata has first been read")

        if self._segment_channel_offsets is None:
            with Timer(log, "Build data index"):
                self._build_index()
        segment_offsets = self._segment_channel_offsets[channel_path]
        chunk_sizes = self._segment_chunk_sizes[channel_path]

        object_metadata = self.object_metadata[channel_path]
        if length is None:
            length = object_metadata.num_values - offset
        end_index = offset + length

        # Binary search to find first and last segments to read
        start_segment = np.searchsorted(segment_offsets, offset, side='right')
        end_segment = np.searchsorted(segment_offsets, end_index, side='left')

        segment_index = start_segment
        for segment in self._segments[start_segment:end_segment + 1]:
            self._verify_segment_start(segment)
            # By default, read all chunks in a segment
            chunk_offset = 0
            num_chunks = segment.num_chunks
            chunk_size = chunk_sizes[segment_index]
            segment_start_index = (
                0 if segment_index == 0 else segment_offsets[segment_index - 1])
            remaining_values_to_skip = 0
            remaining_values_to_trim = 0

            # For the first and last segments, we may not need to read all chunks,
            # and may need to trim some data from the beginning or end of the chunk.
            if segment_index == start_segment:
                num_values_to_skip = offset - segment_start_index
                chunk_offset = num_values_to_skip // chunk_size
                remaining_values_to_skip = num_values_to_skip % chunk_size
                num_chunks -= chunk_offset
            if segment_index == end_segment:
                # Note: segment_index may be both start and end
                segment_end_index = segment_offsets[segment_index]
                num_values_to_trim = segment_end_index - end_index

                # Account for segments where the final chunk is truncated
                final_chunk_size = (segment_end_index -
                                    segment_start_index) % chunk_size
                final_chunk_size = chunk_size if final_chunk_size == 0 else final_chunk_size
                if num_values_to_trim >= final_chunk_size:
                    num_chunks -= 1
                    num_values_to_trim -= final_chunk_size

                num_chunks -= num_values_to_trim // chunk_size
                remaining_values_to_trim = num_values_to_trim % chunk_size

            for i, chunk in enumerate(
                    segment.read_raw_data_for_channel(self._file, channel_path,
                                                      chunk_offset,
                                                      num_chunks)):
                skip = remaining_values_to_skip if i == 0 else 0
                trim = remaining_values_to_trim if i == (num_chunks - 1) else 0
                yield _trim_channel_chunk(chunk, skip, trim)

            segment_index += 1
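A sketch of consuming this generator directly (names assumed; per Example #1, the public channel.read_data(offset, length) ultimately drives the same call):

    from nptdms.reader import TdmsReader

    with open("my_data.tdms", "rb") as f:
        reader = TdmsReader(f)
        reader.read_metadata()
        total_values = 0
        for chunk in reader.read_raw_data_for_channel(
                "/'group_name'/'channel_name'", offset=1000, length=5000):
            if chunk.data is not None:
                total_values += len(chunk.data)
        print("values read:", total_values)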