Example #1
 def test_load_save_load(self):
     c1 = Calibration().load(CAL1, keys=[])
     self.assertFalse(c1.signed)
     data = c1.save(private_key=PRIVATE_KEY)
     c2 = Calibration().load(data, keys=[PUBLIC_KEY])
     np.testing.assert_allclose(CAL1_VK, c2.voltage_offset)
     np.testing.assert_allclose(CAL1_VG, c2.voltage_gain)
     np.testing.assert_allclose(CAL1_IK, c2.current_offset)
     np.testing.assert_allclose(CAL1_IG, c2.current_gain)
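Note: compared with Example #5 below, loading the same CAL1 blob with keys=[] leaves signed False, while loading with keys=[PUBLIC_KEY] yields signed True; the signed flag evidently reports whether a signature was verified against a supplied key, not whether the blob carries one.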
Example #2
    def open(self, filehandle):
        self.close()
        self.calibration = Calibration()  # default calibration
        self.config = None
        self.footer = None
        self._data_start_position = 0

        if isinstance(filehandle, str):
            log.info('DataReader(%s)', filehandle)
            self._fh = open(filehandle, 'rb')
            self._fh_close = True
        else:
            self._fh = filehandle
            self._fh_close = False
        self._f = datafile.DataFileReader(self._fh)
        while True:
            tag, value = self._f.peek()
            if tag is None:
                raise ValueError('could not read file')
            elif tag == datafile.TAG_SUBFILE:
                name, data = datafile.subfile_split(value)
                if name == 'calibration':
                    self.calibration = Calibration().load(data)
            elif tag == datafile.TAG_COLLECTION_START:
                self._data_start_position = self._f.tell()
            elif tag == datafile.TAG_META_JSON:
                meta = json.loads(value.decode('utf-8'))
                type_ = meta.get('type')
                if type_ == 'config':
                    self.config = meta
                elif type_ == 'footer':
                    self.footer = meta
                    break
                else:
                    log.warning('Unknown JSON section type=%s', type_)
            self._f.skip()
        if self._data_start_position == 0 or self.config is None or self.footer is None:
            raise ValueError('could not read file')
        log.info('DataReader with %d samples:\n%s', self.footer['size'],
                 json.dumps(self.config, indent=2))
        if self.config['data_recorder_format_version'] != DATA_RECORDER_FORMAT_VERSION:
            raise ValueError('Invalid file format')
        self.config.setdefault('reduction_fields',
                               ['current', 'voltage', 'power'])
        cal = self.calibration
        self.raw_processor.calibration_set(cal.current_offset,
                                           cal.current_gain,
                                           cal.voltage_offset,
                                           cal.voltage_gain)
        return self
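This open() variant extends the one in Example #9 below: after validating the format version it also defaults reduction_fields to ['current', 'voltage', 'power'] and pushes the loaded calibration into self.raw_processor.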
Example #3
    def create_sinusoid_file(self, sample_rate, samples):
        cal = Calibration()
        cal.current_offset[:7] = -3000
        cal.current_gain[:7] = [1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9]
        cal.voltage_offset[:2] = -3000
        cal.voltage_gain[:2] = [1e-3, 1e-4]
        cal.data = cal.save(bytes([0] * 32))

        fh = io.BytesIO()
        d = DataRecorder(fh, calibration=cal)

        stream_buffer = StreamBuffer(1.0, [100], sample_rate)
        stream_buffer.calibration_set(cal.current_offset, cal.current_gain,
                                      cal.voltage_offset, cal.voltage_gain)
        d.stream_notify(stream_buffer)
        data = self.create_sinusoid_data(sample_rate, samples)

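        # raw data interleaves two uint16 values per sample, so chunk_size must stay even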
        chunk_size = (sample_rate // 2) * 2
        for i in range(0, 2 * samples, chunk_size):
            stream_buffer.insert_raw(data[i:(i + chunk_size)])
            stream_buffer.process()
            d.stream_notify(stream_buffer)

        d.close()
        fh.seek(0)
        return fh
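Examples #3 and #4 call self.create_sinusoid_data(), which is not shown in this listing. A minimal sketch of what such a helper might look like, assuming raw samples are interleaved (current, voltage) uint16 pairs as implied by insert_raw() and the reshape((-1, 2)) in DataReader.raw(); the real Joulescope raw sample format may pack additional range bits, so treat this purely as an illustration:

    import numpy as np

    def create_sinusoid_data(sample_rate, samples, frequency=1000.0):
        # Hypothetical stand-in for the helper used above: returns
        # 2 * samples interleaved uint16 values, even indices = current,
        # odd indices = voltage, both mid-scale sinusoids.
        t = np.arange(samples, dtype=np.float64) / sample_rate
        sine = np.sin(2.0 * np.pi * frequency * t)
        raw = np.empty(2 * samples, dtype=np.uint16)
        raw[0::2] = np.round(32768 + 16384 * sine).astype(np.uint16)  # current
        raw[1::2] = np.round(32768 + 16384 * sine).astype(np.uint16)  # voltage
        return raw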
Example #4
    def create_sinusoid_file(self, file_duration, input_sample_rate, output_sample_rate,
                             stream_buffer_duration=None, chunk_size=None):
        stream_buffer_duration = 1.0 if stream_buffer_duration is None else float(stream_buffer_duration)
        min_duration = 400000 / output_sample_rate
        stream_buffer_duration = max(stream_buffer_duration, min_duration)
        chunk_size = 1024 if chunk_size is None else int(chunk_size)
        cal = Calibration()
        cal.current_offset[:7] = -3000
        cal.current_gain[:7] = [1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9]
        cal.voltage_offset[:2] = -3000
        cal.voltage_gain[:2] = [1e-3, 1e-4]
        cal.data = cal.save(bytes([0] * 32))

        fh = io.BytesIO()
        d = DataRecorder(fh, calibration=cal)

        buffer = DownsamplingStreamBuffer(stream_buffer_duration, [100], input_sample_rate, output_sample_rate)
        buffer.calibration_set(cal.current_offset, cal.current_gain, cal.voltage_offset, cal.voltage_gain)
        d.stream_notify(buffer)
        input_samples = int(file_duration * input_sample_rate)
        data = self.create_sinusoid_data(input_sample_rate, input_samples)

        i = 0
        while i < input_samples:
            i_next = min(i + chunk_size, input_samples)
            buffer.insert_raw(data[i:i_next])
            buffer.process()
            d.stream_notify(buffer)
            i = i_next

        d.close()
        fh.seek(0)
        return fh
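A hedged usage sketch connecting the file produced here to the DataReader in Example #9, assuming the recorded file is readable by that class; the rates and durations are arbitrary, and self refers to the surrounding test class:

    fh = self.create_sinusoid_file(2.0, 1000000, 100000)  # duration, in rate, out rate
    r = DataReader().open(fh)
    print(r.summary_string())
    i, v = r.get_calibrated()            # calibrated current and voltage, float32
    stats = r.statistics_get(0.0, 1.0)   # statistics over the first second
    r.close()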
Example #5
 def test_cal1(self):
     c = Calibration().load(CAL1, keys=[PUBLIC_KEY])
     self.assertTrue(c.signed)
     np.testing.assert_allclose(CAL1_VK, c.voltage_offset)
     np.testing.assert_allclose(CAL1_VG, c.voltage_gain)
     np.testing.assert_allclose(CAL1_IK, c.current_offset)
     np.testing.assert_allclose(CAL1_IG, c.current_gain)
Example #6
 def _calibration_read(self) -> Calibration:
     cal = Calibration()
     serial_number = self.serial_number
     cal.serial_number = serial_number
     try:
         cal_data = self._calibration_read_raw()
         if cal_data is None:
             log.info('no calibration present')
         else:
             cal.load(cal_data)
     except (ValueError, IOError):
         log.info('failed reading calibration')
     if cal.serial_number != serial_number:
         log.info('calibration serial number mismatch')
         return None
     self.calibration = cal
     self.stream_buffer.calibration_set(cal.current_offset, cal.current_gain, cal.voltage_offset, cal.voltage_gain)
     return cal
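Note that despite the -> Calibration return annotation, this helper returns None on a serial-number mismatch, so callers should check the result before use.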
Example #7
 def test_save_load_unsigned(self):
     c1 = Calibration()
     c1.current_offset = CAL1_IK[:7]
     c1.current_gain = CAL1_IG[:7]
     c1.voltage_offset = CAL1_VK
     c1.voltage_gain = CAL1_VG
     data = c1.save()
     c2 = Calibration().load(data, keys=[])
     self.assertFalse(c2.signed)
     np.testing.assert_allclose(CAL1_VK, c2.voltage_offset)
     np.testing.assert_allclose(CAL1_VG, c2.voltage_gain)
     np.testing.assert_allclose(CAL1_IK, c2.current_offset)
     np.testing.assert_allclose(CAL1_IG, c2.current_gain)
Example #8
 def test_save_load_signed(self):
     c1 = Calibration()
     c1.current_offset = CAL1_IK[:7]
     c1.current_gain = CAL1_IG[:7]
     c1.voltage_offset = CAL1_VK
     c1.voltage_gain = CAL1_VG
     data = c1.save(PRIVATE_KEY)
     # with open(os.path.join(MYPATH, 'calibration_01.dat'), 'wb') as f:
     #     f.write(data)
     c2 = Calibration().load(data, keys=[PUBLIC_KEY])
     self.assertTrue(c2.signed)
     np.testing.assert_allclose(CAL1_VK, c2.voltage_offset)
     np.testing.assert_allclose(CAL1_VG, c2.voltage_gain)
     np.testing.assert_allclose(CAL1_IK, c2.current_offset)
     np.testing.assert_allclose(CAL1_IG, c2.current_gain)
Example #9
class DataReader:

    def __init__(self):
        self.calibration = None
        self.config = None
        self.footer = None
        self._fh_close = False
        self._fh = None
        self._f = None  # type: datafile.DataFileReader
        self._data_start_position = 0

    def __str__(self):
        if self._f is not None:
            return 'DataReader %.2f seconds (%d samples)' % (self.duration, self.footer['size'])
        return 'DataReader (closed)'  # __str__ must return a str, never None

    def close(self):
        if self._fh_close:
            self._fh.close()
        self._fh_close = False
        self._fh = None
        self._f = None

    def open(self, filehandle):
        self.close()
        self.calibration = Calibration()  # default calibration
        self.config = None
        self.footer = None
        self._data_start_position = 0

        if isinstance(filehandle, str):
            log.info('DataReader(%s)', filehandle)
            self._fh = open(filehandle, 'rb')
            self._fh_close = True
        else:
            self._fh = filehandle
            self._fh_close = False
        self._f = datafile.DataFileReader(self._fh)
        while True:
            tag, value = self._f.peek()
            if tag is None:
                raise ValueError('could not read file')
            elif tag == datafile.TAG_SUBFILE:
                name, data = datafile.subfile_split(value)
                if name == 'calibration':
                    self.calibration = Calibration().load(data)
            elif tag == datafile.TAG_COLLECTION_START:
                self._data_start_position = self._f.tell()
            elif tag == datafile.TAG_META_JSON:
                meta = json.loads(value.decode('utf-8'))
                type_ = meta.get('type')
                if type_ == 'config':
                    self.config = meta
                elif type_ == 'footer':
                    self.footer = meta
                    break
                else:
                    log.warning('Unknown JSON section type=%s', type_)
            self._f.skip()
        if self._data_start_position == 0 or self.config is None or self.footer is None:
            raise ValueError('could not read file')
        log.info('DataReader with %d samples:\n%s', self.footer['size'], json.dumps(self.config, indent=2))
        if self.config['data_recorder_format_version'] != DATA_RECORDER_FORMAT_VERSION:
            raise ValueError('Invalid file format')
        return self

    @property
    def sample_id_range(self):
        if self._f is not None:
            s_start = 0
            s_end = int(s_start + self.footer['size'])
            return [s_start, s_end]
        return [0, 0]  # closed file: empty range

    @property
    def sampling_frequency(self):
        if self._f is not None:
            return float(self.config['sampling_frequency'])
        return 0.0

    @property
    def reduction_frequency(self):
        if self._f is not None:
            return self.config['sampling_frequency'] / self.config['samples_per_reduction']
        return 0.0

    @property
    def duration(self):
        f = self.sampling_frequency
        if f > 0:
            r = self.sample_id_range
            return (r[1] - r[0]) / f
        return 0.0

    def _validate_range(self, start, stop, increment=None):
        idx_start = 0
        idx_end = idx_start + self.footer['size']
        if increment is not None:
            idx_end = ((idx_end + increment - 1) // increment) * increment
        log.debug('[%d, %d] : [%d, %d]', start, stop, idx_start, idx_end)
        if not idx_start <= start < idx_end:
            raise ValueError('start out of range: %d <= %d < %d' % (idx_start, start, idx_end))
        if not idx_start <= stop <= idx_end:
            raise ValueError('stop out of range: %d <= %d <= %d: %s' %
                             (idx_start, stop, idx_end, increment))

    def raw(self, start=None, stop=None, calibrated=None, out=None):
        """Get the raw data.

        :param start: The starting sample identifier.
        :param stop: The ending sample identifier.
        :param calibrated: When true, return calibrated np.float32 data.
            When false, return raw np.uint16 data.
        :param out: The optional Nx2 output array.
            N must be >= (stop - start).
        :return: The output array, either newly allocated or (when provided) out.
        """
        r_start, r_stop = self.sample_id_range
        if start is None:
            start = r_start
        if stop is None:
            stop = r_stop        
        self._fh.seek(self._data_start_position)
        self._validate_range(start, stop)
        length = stop - start
        if length <= 0:
            # match the dtype of the populated return path
            return np.empty((0, 2), dtype=np.float32 if calibrated else np.uint16)
        if out is None:
            if calibrated:
                out = np.empty((length, 2), dtype=np.float32)
            else:
                out = np.empty((length, 2), dtype=np.uint16)

        sample_idx = 0
        samples_per_tlv = self.config['samples_per_tlv']
        samples_per_block = self.config['samples_per_block']
        block_start = start // samples_per_block
        block_counter = 0
        out_idx = 0
        if self._f.advance() != datafile.TAG_COLLECTION_START:
            raise ValueError('data section must be single collection')
        while True:
            tag, _ = self._f.peek_tag_length()
            if tag is None:
                break
            if tag == datafile.TAG_COLLECTION_START:
                if block_counter < block_start:
                    self._f.skip()
                    block_counter += 1
                else:
                    tag, collection_bytes = next(self._f)
                    c = datafile.Collection.decode(collection_bytes)
                    if c.data is None:
                        v_range = 0
                    else:
                        collection_start_meta = json.loads(c.data)
                        v_range = collection_start_meta.get('v_range', 0)
                    sample_idx = block_counter * samples_per_block
            elif tag == datafile.TAG_COLLECTION_END:
                block_counter += 1
                self._f.advance()
            elif tag == datafile.TAG_DATA_BINARY:
                tlv_stop = sample_idx + samples_per_tlv
                if start < tlv_stop:
                    tag, value = next(self._f)
                    data = np.frombuffer(value, dtype=np.uint16).reshape((-1, 2))
                    idx_start = 0
                    idx_stop = samples_per_tlv
                    if start > sample_idx:
                        idx_start = start - sample_idx
                    if stop < tlv_stop:
                        idx_stop = stop - sample_idx
                    length = idx_stop - idx_start
                    if calibrated:
                        v, i, _ = self.calibration.transform(data[idx_start:idx_stop, :],
                                                             v_range=v_range)
                        # store as column 0 = current, column 1 = voltage to match
                        # the raw sample layout and get_calibrated()
                        out[out_idx:(out_idx + length), 0] = i
                        out[out_idx:(out_idx + length), 1] = v
                    else:
                        out[out_idx:(out_idx + length), :] = data[idx_start:idx_stop, :]
                    out_idx += length
                else:
                    self._f.advance()
                sample_idx = tlv_stop
                if sample_idx > stop:
                    break
            else:
                self._f.advance()
        return out[:out_idx, :]

    def get_reduction(self, start=None, stop=None, out=None):
        """Get the fixed reduction with statistics.

        :param start: The starting sample identifier (inclusive).
        :param stop: The ending sample identifier (exclusive).
        :param out: The optional Nx3x4 output array.
        :return: The Nx3x4 sample data.
        """
        if start is None:
            start = self.sample_id_range[0]
        if stop is None:
            stop = self.sample_id_range[1]

        sz = self.config['samples_per_reduction']
        incr = self.config['samples_per_block'] // sz
        self._fh.seek(self._data_start_position)
        self._validate_range(start, stop)
        r_start = start // sz
        length = (stop - start) // sz
        r_stop = r_start + length
        log.info('DataReader.get_reduction(r_start=%r,r_stop=%r)', r_start, r_stop)
        if length <= 0:
            return np.empty((0, 3, 4), dtype=np.float32)
        if out is None:
            out = np.empty((length, 3, 4), dtype=np.float32)
        elif len(out) < length:
            raise ValueError('out too small')

        out_idx = 0
        r_idx = 0

        if self._f.advance() != datafile.TAG_COLLECTION_START:
            raise ValueError('data section must be single collection')
        while True:
            tag, _ = self._f.peek_tag_length()
            if tag is None or tag == datafile.TAG_COLLECTION_END:
                break
            elif tag != datafile.TAG_COLLECTION_START:
                raise ValueError('invalid file format: not collection start')
            r_idx_next = r_idx + incr
            if r_start >= r_idx_next:
                self._f.skip()
                r_idx = r_idx_next
                continue
            self._f.collection_goto_end()
            tag, value = next(self._f)
            if tag != datafile.TAG_COLLECTION_END:
                raise ValueError('invalid file format: not collection end')
            data = np.frombuffer(value, dtype=np.float32).reshape((-1, 3, 4))
            r_idx_start = 0
            r_idx_stop = incr
            if r_idx < r_start:
                r_idx_start = r_start - r_idx
            if r_idx_next > r_stop:
                r_idx_stop = r_stop - r_idx
            if r_idx_stop > len(data):
                r_idx_stop = len(data)
            copy_len = r_idx_stop - r_idx_start
            out[out_idx:(out_idx + copy_len), :, :] = data[r_idx_start:r_idx_stop, :, :]
            out_idx += copy_len
            r_idx = r_idx_next
            if r_idx_next >= r_stop:
                break
        if out_idx != length:
            log.warning('DataReader length mismatch: out_idx=%s, length=%s', out_idx, length)
            length = min(out_idx, length)
        return out[:length, :]

    def _get_reduction_stats(self, start, stop):
        """Get statistics over the reduction

        :param start: The starting sample identifier (inclusive).
        :param stop: The ending sample identifier (exclusive).
        :return: The tuple of ((sample_start, sample_stop), :class:`Statistics`).
        """
        log.debug('_get_reduction_stats(%s, %s)', start, stop)
        s = Statistics()
        sz = self.config['samples_per_reduction']
        incr = self.config['samples_per_block'] // sz
        r_start = start // sz
        if (r_start * sz) < start:
            r_start += 1
        r_stop = stop // sz
        if r_start >= r_stop:  # no complete reduction lies within the range
            s_start = r_start * sz
            return (s_start, s_start), s
        r_idx = 0

        self._fh.seek(self._data_start_position)
        if self._f.advance() != datafile.TAG_COLLECTION_START:
            raise ValueError('data section must be single collection')
        while True:
            tag, _ = self._f.peek_tag_length()
            if tag is None or tag == datafile.TAG_COLLECTION_END:
                break
            elif tag != datafile.TAG_COLLECTION_START:
                raise ValueError('invalid file format: not collection start')
            r_idx_next = r_idx + incr
            if r_start >= r_idx_next:
                self._f.skip()
                r_idx = r_idx_next
                continue
            self._f.collection_goto_end()
            tag, value = next(self._f)
            if tag != datafile.TAG_COLLECTION_END:
                raise ValueError('invalid file format: not collection end')
            data = np.frombuffer(value, dtype=np.float32).reshape((-1, 3, 4))
            r_idx_start = 0
            r_idx_stop = incr
            if r_idx < r_start:
                r_idx_start = r_start - r_idx
            if r_idx_next > r_stop:
                r_idx_stop = r_stop - r_idx
            if r_idx_stop > len(data):
                r_idx_stop = len(data)
            length = r_idx_stop - r_idx_start
            r = reduction_downsample(data, r_idx_start, r_idx_stop, length)
            s.combine(Statistics(length=length * sz, stats=r[0, :, :]))
            r_idx = r_idx_next
            if r_idx_next >= r_stop:
                break
        return (r_start * sz, r_stop * sz), s

    def get_calibrated(self, start=None, stop=None):
        """Get the calibrated data (no statistics).

        :param start: The starting sample identifier (inclusive).
        :param stop: The ending sample identifier (exclusive).
        :return: The tuple of (current, voltage), each as np.ndarray
            with dtype=np.float32.
        """
        if start is None:
            start = self.sample_id_range[0]
        if stop is None:
            stop = self.sample_id_range[1]
        d = self.raw(start, stop, calibrated=True)
        i, v = d[:, 0], d[:, 1]
        return i, v

    def get(self, start=None, stop=None, increment=None):
        """Get the data with statistics.

        :param start: The starting sample identifier (inclusive).
        :param stop: The ending sample identifier (exclusive).
        :param increment: The number of raw samples per output sample.
        :return: The Nx3x4 sample data.
        """
        r_start, r_stop = self.sample_id_range
        if start is None:
            start = r_start
        if stop is None:
            stop = r_stop
        if increment is None:
            increment = 1
        log.debug('DataReader.get(start=%r,stop=%r,increment=%r)', start, stop, increment)
        if self._fh is None:
            raise IOError('file not open')
        increment = max(1, int(np.round(increment)))
        start = max(start, r_start)
        stop = min(stop, r_stop)
        out_len = (stop - start) // increment
        if out_len <= 0:
            return np.empty((0, 3, 4), dtype=np.float32)
        out = np.empty((out_len, 3, 4), dtype=np.float32)

        if increment == 1:
            d = self.raw(start, stop, calibrated=True)
            i, v = d[:, 0], d[:, 1]
            out[:, 0, 0] = i
            out[:, 1, 0] = v
            out[:, 2, 0] = i * v
            out[:, :, 1] = 0.0  # zero variance, only one sample!
            out[:, :, 2] = np.nan  # min
            out[:, :, 3] = np.nan  # max
        elif increment == self.config['samples_per_reduction']:
            out = self.get_reduction(start, stop, out=out)
        elif increment > self.config['samples_per_reduction']:
            r_out = self.get_reduction(start, stop)
            increment = int(increment / self.config['samples_per_reduction'])
            out = reduction_downsample(r_out, 0, len(r_out), increment)
        else:
            z = self.raw(start, stop, calibrated=True)
            i, v = z[:, 0], z[:, 1]
            p = i * v
            for idx in range(out_len):
                idx_start = idx * increment
                idx_stop = (idx + 1) * increment
                i_view = i[idx_start:idx_stop]
                zi = np.isfinite(i_view)
                i_view = i_view[zi]
                if len(i_view):
                    v_view = v[idx_start:idx_stop][zi]
                    p_view = p[idx_start:idx_stop][zi]
                    out[idx, 0, :] = np.vstack((np.mean(i_view, axis=0), np.var(i_view, axis=0),
                                                np.amin(i_view, axis=0), np.amax(i_view, axis=0))).T
                    out[idx, 1, :] = np.vstack((np.mean(v_view, axis=0), np.var(v_view, axis=0),
                                                np.amin(v_view, axis=0), np.amax(v_view, axis=0))).T
                    out[idx, 2, :] = np.vstack((np.mean(p_view, axis=0), np.var(p_view, axis=0),
                                                np.amin(p_view, axis=0), np.amax(p_view, axis=0))).T
                else:
                    out[idx, :, :] = np.full((1, 3, 4), np.nan, dtype=np.float32)
        return out

    def summary_string(self):
        s = [str(self)]
        config_fields = ['sampling_frequency', 'samples_per_reduction', 'samples_per_tlv', 'samples_per_block']
        for field in config_fields:
            s.append('    %s = %r' % (field, self.config[field]))
        return '\n'.join(s)

    def time_to_sample_id(self, t):
        s_min, s_max = self.sample_id_range
        s = int(t * self.sampling_frequency)
        if s < s_min or s > s_max:
            return None
        return s

    def statistics_get(self, t1, t2):
        """Get the statistics for the collected sample data over a time range.

        :param t1: The starting time in seconds relative to the streaming start time.
        :param t2: The ending time in seconds.
        :return: The statistics data structure.  See :meth:`joulescope.driver.Driver.statistics_get`
            for details.
        """
        log.debug('statistics_get(%s, %s)', t1, t2)
        s1 = self.time_to_sample_id(t1)
        s2 = self.time_to_sample_id(t2)
        if s1 is None or s2 is None:
            return None

        (k1, k2), s = self._get_reduction_stats(s1, s2)
        if s1 < k1:
            length = k1 - s1
            s_start = self.get(s1, k1, increment=length)
            s.combine(Statistics(length=length, stats=s_start[0, :, :]))
        if s2 > k2:
            length = s2 - k2
            s_stop = self.get(k2, s2, increment=length)
            s.combine(Statistics(length=length, stats=s_stop[0, :, :]))

        t_start = s1 / self.sampling_frequency
        t_stop = s2 / self.sampling_frequency
        return stats_to_api(s.value, t_start, t_stop)
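Putting the reader together, a brief usage sketch (the file name is hypothetical):

    r = DataReader().open('capture.jls')  # or any open binary file handle
    print(r.summary_string())
    s0, s1 = r.sample_id_range
    d = r.get(s0, s1, increment=r.config['samples_per_reduction'])
    # d is Nx3x4: rows are (current, voltage, power); columns are
    # (mean, variance, min, max), per the layout used in get() above
    r.close()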