Example #1
    def _get_pytable_desc(self, col_list, col_names):
        # Map each column's NumPy dtype to the matching tables.Col
        # subclass and wrap the result in a table Description.
        d = {}
        for c, cn in zip(col_list, col_names):
            col_type = tables.Col.from_dtype(c.dtype)
            d[cn] = col_type

        return tables.Description(d)
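A minimal usage sketch of the same mapping, outside the class (the loop body is inlined; the array contents and names are made up):

import numpy as np
import tables

cols = [np.zeros(10, dtype=np.int32), np.zeros(10, dtype=np.float64)]
names = ['counts', 'values']
# Same dtype -> Col translation the method performs.
desc = tables.Description({cn: tables.Col.from_dtype(c.dtype)
                           for c, cn in zip(cols, names)})
print(desc._v_names)  # column names, e.g. ['counts', 'values']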
Example #2
def __descr_from_dtype__(dtype_, shape_=None):
    """
    Get a description instance from a (nested) NumPy dtype.

    Parameters
    ----------
    dtype_ : dtype
        dtype of an ndarray

    shape_ : dict, optional
        a dictionary mapping column names to their shapes

    Returns
    -------
    r : tables.Description
        Description of a new table
    """
    if shape_ is None:  # avoid a mutable default argument
        shape_ = {}
    fields = {}
    fbyteorder = "|"
    for (name, (dtype, pos)) in list(dtype_.fields.items()):
        kind = dtype.base.kind
        shape = shape_.get(name, ())
        byteorder = dtype.base.byteorder
        if byteorder in "><=":
            if fbyteorder not in ["|", byteorder]:
                raise NotImplementedError(
                    "record arrays with mixed byteorders "
                    "are not supported yet, sorry")
            fbyteorder = byteorder
        # Non-nested column
        if kind in "biufSc":
            # col = tables.Col.from_dtype(dtype, pos=pos)
            if len(shape) > 1:
                col = _typeDict[dtype.base.type](shape=shape)
            else:
                col = _typeDict[dtype.type]()
        # Nested column
        elif kind == "V" and dtype.shape in [(), (1, )]:
            # This helper returns a bare Description, not a tuple.
            col = __descr_from_dtype__(dtype)
            col._v_pos = pos
        else:
            raise NotImplementedError(
                "record arrays with columns with type description ``%s`` "
                "are not supported yet, sorry" % dtype)
        fields[name] = col

    return tables.Description(fields)
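A quick sketch of calling it, with a hypothetical _typeDict mapping NumPy scalar types to tables.Col classes (the real mapping lives elsewhere in the source module):

import numpy as np
import tables

# Hypothetical stand-in for the module-level _typeDict.
_typeDict = {np.int32: tables.Int32Col, np.float64: tables.Float64Col}

dt = np.dtype([('x', np.int32), ('y', np.float64)])
desc = __descr_from_dtype__(dt)
print(desc._v_names)  # e.g. ['x', 'y']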
Example #3
    def descr_from_dtype(self, dtype_, shape_=None):
        """
        Get a description instance from a (nested) NumPy dtype.

        INPUTS:

            dtype_  dtype   dtype of an ndarray
            shape_  dict    a dictionary of column shapes

        OUTPUTS:
            Description of a new table
        """
        if shape_ is None:  # avoid a mutable default argument
            shape_ = {}
        fields = {}
        fbyteorder = '|'
        for (name, (dtype, pos)) in dtype_.fields.items():
            kind = dtype.base.kind
            shape = shape_.get(name, ())
            byteorder = dtype.base.byteorder
            if byteorder in '><=':
                if fbyteorder not in ['|', byteorder]:
                    raise NotImplementedError(
                        "record arrays with mixed byteorders "
                        "are not supported yet, sorry")
                fbyteorder = byteorder
            # Non-nested column
            if kind in 'biufSc':
                #col = tables.Col.from_dtype(dtype, pos=pos)
                if len(shape) > 1:
                    col = typeDict[dtype.type](shape=shape)
                else:
                    col = typeDict[dtype.type]()
            # Nested column
            elif kind == 'V' and dtype.shape in [(), (1, )]:
                # This method returns a bare Description, not a tuple.
                col = self.descr_from_dtype(dtype)
                col._v_pos = pos
            else:
                raise NotImplementedError(
                    "record arrays with columns with type description ``%s`` "
                    "are not supported yet, sorry" % dtype)
            fields[name] = col

        return tables.Description(fields)
Example #4
def dtype_to_pytables(dtype):
    """Convert a NumPy dtype to a PyTables descriptor.

    Examples
    --------
    >>> from tables import Int32Col, StringCol, Time64Col
    >>> dt = np.dtype([('name', 'S7'), ('amount', 'i4'), ('time', 'M8[us]')])
    >>> dtype_to_pytables(dt)  # doctest: +SKIP
    {'amount': Int32Col(shape=(), dflt=0, pos=1),
     'name': StringCol(itemsize=7, shape=(), dflt='', pos=0),
     'time': Time64Col(shape=(), dflt=0.0, pos=2)}
    """
    d = {}
    for pos, name in enumerate(dtype.names):
        dt, _ = dtype.fields[name]
        if issubclass(dt.type, np.datetime64):
            # PyTables has no datetime64 column type; map it to Time64Col.
            tdtype = tb.Description({name: tb.Time64Col(pos=pos)})
        else:
            # descr_from_dtype returns a (Description, byteorder) pair.
            tdtype, _ = tb.descr_from_dtype(np.dtype([(name, dt)]))
        getattr(tdtype, name)._v_pos = pos
        d.update(tdtype._v_colobjects)
    return d
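The returned dict of Col objects can be handed straight to create_table; a sketch (the file and node names are made up):

import numpy as np
import tables as tb

dt = np.dtype([('name', 'S7'), ('amount', 'i4'), ('time', 'M8[us]')])
desc = dtype_to_pytables(dt)
with tb.open_file('example.h5', mode='w') as h5:
    table = h5.create_table(h5.root, 'records', desc)
    print(table.colnames)  # ['name', 'amount', 'time']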
Example #5
def dtype_to_table(dtype):
    """ Convert a NumPy dtype to a PyTables Table description

    Essentially just :func:`tables.descr_from_dtype`, but it also works on
    :class:`numpy.datetime64`

    Args:
        dtype (np.dtype): NumPy data type

    Returns:
        dict: PyTables description
    """
    desc = {}

    for idx, name in enumerate(dtype.names):
        _dt, _ = dtype.fields[name]
        if issubclass(_dt.type, np.datetime64):
            tb_dtype = tb.Description({name: tb.Time64Col(pos=idx)})
        else:
            tb_dtype, _ = tb.descr_from_dtype(np.dtype([(name, _dt)]))
        _tb_dtype = tb_dtype._v_colobjects
        _tb_dtype[name]._v_pos = idx
        desc.update(_tb_dtype)
    return desc
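This helper and the previous one special-case datetime64 because tables.descr_from_dtype has no mapping for it; routing the field through Time64Col stores the value as a 64-bit POSIX timestamp instead.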
Example #6
def createFile(filename, totalrows, recsize, verbose):

    # Open a new file
    dd = db.DB()
    if recsize == "big":
        isrec = tb.Description(Big)
    elif recsize == "medium":
        isrec = tb.Description(Medium)
    else:
        isrec = tb.Description(Small)
    # dd.set_re_len(struct.calcsize(isrec._v_fmt))  # fixed length records
    dd.open(filename, db.DB_RECNO, db.DB_CREATE | db.DB_TRUNCATE)

    rowswritten = 0
    # Get the record object associated with the new table
    if recsize == "big":
        isrec = Big()
        arr = np.arange(32, dtype=np.float64)
        arr2 = np.arange(32, dtype=np.float64)
    elif recsize == "medium":
        isrec = Medium()
        arr = np.arange(2, dtype=np.float64)
    else:
        isrec = Small()
    # print d
    # Fill the table
    if recsize == "big" or recsize == "medium":
        d = {
            "name": " ",
            "float1": 1.0,
            "float2": 2.0,
            "ADCcount": 12,
            "grid_i": 1,
            "grid_j": 1,
            "pressure": 1.9,
            "energy": 1.8,
        }
        for i in range(totalrows):
            # d['name'] = 'Particle: %6d' % (i)
            # d['TDCcount'] = i % 256
            d['ADCcount'] = (i * 256) % (1 << 16)
            if recsize == "big":
                # d.float1 = np.array([i] * 32, np.float64)
                # d.float2 = np.array([i ** 2] * 32, np.float64)
                arr[0] = 1.1
                d['float1'] = arr
                arr2[0] = 2.2
                d['float2'] = arr2
            else:
                d['float1'] = float(i)
                d['float2'] = float(i)
            d['grid_i'] = i
            d['grid_j'] = 10 - i
            d['pressure'] = float(i * i)
            d['energy'] = d['pressure']
            dd.append(cPickle.dumps(d))
            # dd.append(struct.pack(isrec._v_fmt,
            #                       d['name'], d['float1'], d['float2'],
            #                       d['ADCcount'],
            #                       d['grid_i'], d['grid_j'],
            #                       d['pressure'], d['energy']))
    else:
        d = {"var1": " ", "var2": 1, "var3": 12.1e10}
        for i in range(totalrows):
            d['var1'] = str(i)
            d['var2'] = i
            d['var3'] = 12.1e10
            dd.append(cPickle.dumps(d))
            # dd.append(
            #     struct.pack(isrec._v_fmt, d['var1'], d['var2'], d['var3']))

    rowswritten += totalrows

    # Close the file
    dd.close()
    return (rowswritten, struct.calcsize(isrec._v_fmt))
Example #7
    def __init__(self,
                 h5file,
                 reconstructor,
                 cam_id2camns=None,
                 min_observations_to_save=0,
                 textlog_save_lines=None,
                 dynamic_model_name=None,
                 dynamic_model=None,
                 fake_timestamp=None,
                 debug=False):
        self.cam_id2camns = cam_id2camns
        self.min_observations_to_save = min_observations_to_save
        self.debug = debug

        self.kalman_saver_info_instance = flydra_kalman_utils.KalmanSaveInfo(
            name=dynamic_model_name)
        kalman_estimates_description = (
            self.kalman_saver_info_instance.get_description())

        filters = tables.Filters(1, complib='zlib')  # compress

        self.h5file = h5file
        reconstructor.save_to_h5file(self.h5file)
        self.h5_xhat = self.h5file.create_table(
            self.h5file.root,
            'kalman_estimates',
            kalman_estimates_description,
            "Kalman a posteriori estimates of tracked object",
            filters=filters)
        self.h5_xhat.attrs.dynamic_model_name = dynamic_model_name
        self.h5_xhat.attrs.dynamic_model = dynamic_model

        self.h5_obs = self.h5file.create_table(
            self.h5file.root,
            'ML_estimates',
            FilteredObservations,
            "observations of tracked object",
            filters=filters)

        self.h5_2d_obs_next_idx = 0

        # Note that ML_estimates_2d_idxs_type() should
        # match dtype with tro.observations_2d.

        self.h5_2d_obs = self.h5file.create_vlarray(
            self.h5file.root, 'ML_estimates_2d_idxs',
            ML_estimates_2d_idxs_type(), "camns and idxs")

        self.obj_id = 0

        self.h5textlog = self.h5file.create_table(self.h5file.root, 'textlog',
                                                  TextLogDescription,
                                                  'text log')

        textlog_row = self.h5textlog.row
        cam_id = 'mainbrain'
        if fake_timestamp is None:
            timestamp = time.time()
        else:
            timestamp = fake_timestamp

        list_of_textlog_data = [(timestamp, cam_id, timestamp, text)
                                for text in textlog_save_lines]
        for textlog_data in list_of_textlog_data:
            (mainbrain_timestamp, cam_id, host_timestamp,
             message) = textlog_data
            textlog_row['mainbrain_timestamp'] = mainbrain_timestamp
            textlog_row['cam_id'] = cam_id
            textlog_row['host_timestamp'] = host_timestamp
            textlog_row['message'] = message
            textlog_row.append()

        self.h5textlog.flush()

        self.h5_xhat_names = PT.Description(
            kalman_estimates_description().columns)._v_names
        self.h5_obs_names = PT.Description(
            FilteredObservations().columns)._v_names
        self.all_kalman_calibration_data = []
Example #8
from distutils.version import LooseVersion
import tables as PT
import tables
import flydra_core.data_descriptions
from flydra_core.reconstruct import Reconstructor
import time
import flydra_analysis.version

Info2D = flydra_core.data_descriptions.Info2D
Info2DCol_description = tables.Description(Info2D().columns)._v_nested_descr
CamSyncInfo = flydra_core.data_descriptions.CamSyncInfo
TextLogDescription = flydra_core.data_descriptions.TextLogDescription


def startup_message(h5textlog, fps):
    textlog_row = h5textlog.row
    cam_id = 'mainbrain'
    timestamp = time.time()

    # This line is important (including the formatting). It is
    # read by flydra_analysis.a2.check_atmel_clock.

    list_of_textlog_data = [
        (timestamp, cam_id, timestamp,
         ('MainBrain running at %s fps, '
          '(flydra_version %s, '
          'time_tzname0 %s)' % (
              str(fps),
              flydra_analysis.version.__version__,
              time.tzname[0],
          ))),
    ]
Example #9
import os
import sys
import tables
import numpy as np
# Prefer simplejson; fall back to the standard-library json module.
try:
    import simplejson as json
except ImportError:
    import json

import data_format

AnalogInputWordstreamDescription = data_format.AnalogInputWordstreamDescription
AnalogInputWordstream_dtype = tables.Description(
    AnalogInputWordstreamDescription().columns)._v_nested_descr

TimeDataDescription = data_format.TimeDataDescription
TimeData_dtype = tables.Description(
    TimeDataDescription().columns)._v_nested_descr


def doit(filename):
    base, ext = os.path.splitext(filename)
    output_fname = base + '.h5'
    print('converting %s to %s' % (filename, output_fname))
    with open(filename) as f:
        contents = f.read()
    input = json.loads(contents)

    h5 = tables.open_file(output_fname, mode='w')
    stream_ain_table = h5.create_table(h5.root, 'ain_wordstream',
                                       AnalogInputWordstreamDescription,
                                       "AIN data")
Example #10
def convert_to_flydrah5(bag_file,
                        topic_name="pointcloud",
                        out_h5=None,
                        reconstructor=None):
    import rosbag

    if out_h5 is None:
        out_h5 = bag_file + ".h5"

    bag = rosbag.Bag(bag_file, "r")

    h5file = tables.open_file(out_h5,
                              mode="w",
                              title="Flydra data file (from ROS bag)")
    ct = h5file.create_table  # shorthand
    root = h5file.root  # shorthand

    # save data as both "observations" (ML estimates) and "kalman estimates" (MAP estimates)

    FilteredObservations = flydra_kalman_utils.FilteredObservations
    h5data3d_ML_estimates = ct(root, "ML_estimates", FilteredObservations,
                               "3d data (input to Kalman filter)")
    h5_obs_names = tables.Description(FilteredObservations().columns)._v_names

    # We're not actually doing any Kalman filtering, so just get a
    # model with position.
    kalman_saver_info_instance = flydra_kalman_utils.KalmanSaveInfo(
        name="mamarama, units: mm")
    KalmanEstimatesDescription = kalman_saver_info_instance.get_description()
    h5data3d_kalman_estimates = ct(root, "kalman_estimates",
                                   KalmanEstimatesDescription, "3d data")
    h5_xhat_names = tables.Description(
        KalmanEstimatesDescription().columns)._v_names

    obj_id = 0
    for topic, cloud, t in bag.read_messages(topics=[topic_name]):

        pts = []
        for p in read_points(cloud):
            pts.append((p[0], p[1], p[2], 0, 0, 0))  # velocity = 0
        obj_id += 1
        pts = np.array(pts)

        # save observations
        shape1d = (len(pts), )
        shape2d_6 = (len(pts), 6)
        this_idxs = np.zeros(shape1d, dtype=np.uint64)
        # this_idxs = numpy.array( this_idxs, dtype=numpy.uint64 ) # becomes obs_2d_idx (index into 'ML_estimates_2d_idxs')

        observations_frames = np.arange(len(pts), dtype=np.uint64)
        obj_id_array = np.empty(observations_frames.shape, dtype=np.uint32)
        obj_id_array.fill(obj_id)
        observations_data = np.array(pts[:, :3], dtype=np.float32)
        observations_Lcoords = np.zeros(shape2d_6, dtype=np.float32)
        list_of_obs = [
            observations_data[:, i] for i in range(observations_data.shape[1])
        ]
        list_of_lines = [
            observations_Lcoords[:, i]
            for i in range(observations_Lcoords.shape[1])
        ]
        array_list = ([obj_id_array, observations_frames] + list_of_obs +
                      [this_idxs] + list_of_lines)
        obs_recarray = np.rec.fromarrays(array_list, names=h5_obs_names)

        h5data3d_ML_estimates.append(obs_recarray)
        h5data3d_ML_estimates.flush()

        # save xhat info (kalman estimates)
        shape3d_6x6 = (len(pts), 6, 6)
        frames = np.arange(len(pts), dtype=np.uint64)
        timestamps = np.zeros(shape1d, dtype=np.float64)
        xhat_data = np.array(pts, dtype=np.float32)
        P_data_full = np.zeros(shape3d_6x6, dtype=np.float32)
        obj_id_array = np.empty(frames.shape, dtype=np.uint32)
        obj_id_array.fill(obj_id)
        list_of_xhats = [xhat_data[:, i] for i in range(xhat_data.shape[1])]
        ksii = kalman_saver_info_instance
        list_of_Ps = ksii.covar_mats_to_covar_entries(P_data_full)
        xhats_recarray = np.rec.fromarrays(
            [obj_id_array, frames, timestamps] + list_of_xhats + list_of_Ps,
            names=h5_xhat_names,
        )

        h5data3d_kalman_estimates.append(xhats_recarray)
        h5data3d_kalman_estimates.flush()

    if reconstructor is not None:
        R = flydra_core.reconstruct.Reconstructor(reconstructor)
        R.save_to_h5file(h5file)
    h5file.close()
    bag.close()
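Note the pattern shared with example #7: instantiating the description class and wrapping its .columns dict in tables.Description exposes _v_names, the position-ordered column names, which is exactly the names argument np.rec.fromarrays needs when assembling rows to append.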