Code example #1
File: daq.py  Project: rogerlew/undaqTools
    def write_hd5(self, filename=None):
        """
        write_hd5(filename=None)
        
        writes Daq object to HDF5 container

        Parameters
        ----------
        filename : None or string (optional)
            None : file written to daq.filename.replace('.daq', '.hdf5')
            string : specify output file (will overwrite)

        Return
        ------
        None
        """
        info = self.info
        frame = self.frame
        _header = self._rebuild_header()
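        # variable-length string dtype so Python strings round-trip through HDF5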
        str_type = h5py.new_vlen(str)
        
        if filename is None:
            filename = info.filename[:-4] + '.hdf5'

        if filename.endswith('.daq'):
            # yes, I did this accidentally
            msg = 'Writing Daq in HDF5 with a .daq extension is not allowed'
            raise ValueError(msg)
        
        root = h5py.File(filename, 'w')

        # info
        root.create_group('info')
        root['info'].attrs['run'] = info.run
        root['info'].attrs['runinst'] = info.runinst
        root['info'].attrs['title'] = info.title
        root['info'].attrs['numentries'] = info.numentries
        root['info'].attrs['frequency'] = info.frequency
        root['info'].attrs['date'] = info.date
        root['info'].attrs['magic'] = info.magic
        root['info'].attrs['subject'] = info.subject
        root['info'].attrs['filename'] = info.filename

        # frame
        root.create_group('frame')
        root['frame'].create_dataset('frame', data=frame.frame)
        root['frame'].create_dataset('count', data=frame.count)
        root['frame'].create_dataset('code', data=frame.code)

        # header
        root.create_group('header')        
        root['header'].create_dataset('id', data=_header.id)
        root['header'].create_dataset('numvalues', data=_header.numvalues)
        root['header'].create_dataset('name', data=_header.name)
        root['header'].create_dataset('units', data=_header.units)
        root['header'].create_dataset('rate', data=_header.rate)
        root['header'].create_dataset('type', data=_header.type)
        root['header'].create_dataset('varrateflag', data=_header.varrateflag)
        root['header'].create_dataset('bytes', data=_header.bytes)

        nptype_asstr = np.array(_header.nptype, dtype=str_type)
        root['header'].create_dataset('nptype', data=nptype_asstr)

        # data
        root.create_group('data')
        for name, elem in self.items():
            root['data'].create_dataset(name, data=elem.toarray())

            if elem.isCSSDC():
                root['data'].create_dataset(name+'_Frames',
                                            data=elem.frames)
        # misc
        root.create_dataset('elemlist',
                            data=np.array(self.elemlist, dtype=str_type))
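        # hdf5 attrs don't have a None type, so None is stored as an
        # empty string here and mapped back to None in read_hd5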
        root.attrs['f0'] = (self.f0, '')[self.f0 is None]
        root.attrs['fend'] = (self.fend, '')[self.fend is None]
        root.attrs['cursor'] = self.cursor

        # dynobjects
        root.create_group('dynobjs')
        for (name, do) in self.dynobjs.items():
            root.create_group('dynobjs/%s'%name)
            do.write_hd5(root=root['dynobjs/%s'%name])
            
        # etc
        root.create_group('etc')
        for (name, obj) in self.etc.items():
            s = _literal_repr(obj)
            
            try:
                _literal_eval(s)
            except:
                root.close()
                msg = "'%s' is not a Python literal, "\
                      "FrameSlice, or FrameIndex"%str(obj)
                raise ValueError(msg)
            root['etc'].attrs[name] = s
            
        root.close()
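
A minimal usage sketch (not part of the original listing; the file name is hypothetical): assuming the undaqTools Daq class also provides read_daq(), as the docstring above implies, converting a .daq file to an HDF5 container would look roughly like this.

# hedged sketch -- assumes Daq exposes read_daq() alongside write_hd5()
from undaqTools import Daq

daq = Daq()
daq.read_daq('drive_data.daq')   # hypothetical input file
daq.write_hd5()                  # filename=None -> 'drive_data.hdf5'

With filename=None the output name is derived from the original .daq name, as described in the docstring above.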
Code example #2
File: daq.py  Project: rogerlew/undaqTools
    def read_hd5(self, filename, f0=None, fend=None):
        """
        read_hd5(filename[, f0=None][, fend=None])

        reads Daq object from an HDF5 container

        f0 and fend specify the frame range to read.
        Also reads daq.dynobjs from the HDF5 container.
        
        Parameters
        ----------
        filename : string
            file to read
        
        f0 : None or int
            None -> read from beginning of file
            int -> read from this frame

        fend : None or int
            None -> read to end of file
            int -> read to this frame

        Return
        ------
        None
        """
        if filename.endswith('daq'):
            # The usual Python mantra advises against this sort of
            # checking, but when you make this mistake the traceback
            # is somewhat obtuse. It can be difficult to tell whether
            # the file is in the wrong format or something else is wrong.
            raise ValueError('You are trying to open a .daq as .hd5')
        
        root = h5py.File(filename, 'r')

        # info
        self.info = \
            Info(run = root['info'].attrs['run'], 
                 runinst = root['info'].attrs['runinst'], 
                 title = root['info'].attrs['title'], 
                 numentries = root['info'].attrs['numentries'], 
                 frequency = root['info'].attrs['frequency'], 
                 date = root['info'].attrs['date'],  
                 magic = root['info'].attrs['magic'], 
                 subject = root['info'].attrs['subject'],
                 filename = root['info'].attrs['filename'])

        # header
        # The [:] unpacks the data from an h5py.dataset.Dataset
        # object to a numpy.ndarray object
        try:
            _header = \
                Header(id = root['header/id'][:],
                       numvalues = root['header/numvalues'][:],
                       name = root['header/name'][:],
                       units = root['header/units'][:],
                       rate = root['header/rate'][:],
                       type = root['header/type'][:],
                       nptype = root['header/nptype'][:],
                       varrateflag = root['header/varrateflag'][:],
                       bytes = root['header/bytes'][:])
        except:
            _header = \
                Header(id = array('i'),
                       numvalues = array('i'),
                       name = [],
                       units = [],
                       rate = array('i'),
                       type = array('c'),
                       nptype = [],
                       varrateflag = array('i'),
                       bytes = array('i'))
        
        # Find the indices corresponding to the first and last
        # frames requested. We can use these indices to
        # slice out all the non-CSSDC elements.
        #
        # For the CSSDC elements we will have to find the
        # appropriate indices as we go.
        i0, iend = None, None
    
        try:
            all_frames = root['frame/frame'][:]
        except:
            all_frames = None

        if all_frames is not None:
            if f0 is not None or fend is not None:
                if f0 is not None:
                    i0 = _searchsorted(all_frames, f0)
                if fend is not None:
                    iend = _searchsorted(all_frames, fend)
                    if iend < len(all_frames):
                        iend += 1
                    
        indx = slice(i0,iend)

        # frame
        try:
            self.frame = \
                Frame(code = root['frame/code'][indx],
                      frame = root['frame/frame'][indx],
                      count = root['frame/count'][indx])
        except:
            self.frame = \
                Frame(code = array('i'),
                      frame = array('i'),
                      count = array('i'))

        # elemlist
        try:
            # Fails if slice is zero-length
            self.elemlist = root['elemlist'][:] 
        except:
            self.elemlist = None
            
        # data
        # Procedure is similar to read_daq. Data is unpacked
        # to tmpdata dict and then the Elements are instantiated.
        _elemid_lookup = dict(zip(_header.name, _header.id))
        
        tmpdata = {}
        for k, v in root['data'].iteritems():
            
            if self.elemlist is not None:
                if not any(fnmatch(k, wc) for wc in self.elemlist):
                    continue
                
            i = _elemid_lookup[k.replace('_Frames','')]
            
            if _header.rate[i] == 1:
                tmpdata[k] = v[:,i0:iend]
                
            else: #CSSDC measure
                if len(v.shape) == 1:
                    v = np.array(v, ndmin=2) # _Frames
                    
                # Need to find indices
                _i0 = 0
                _iend = v.shape[1]

                if f0 is not None or fend is not None:
                    _name = k.replace('_Frames','')
                    _all_frames = root['data/%s_Frames'%_name][:].flatten()

                    if f0 is not None:
                        _i0 = _searchsorted(_all_frames, f0)
                    if fend is not None:
                        _iend = _searchsorted(_all_frames, fend)
                        if _iend < len(_all_frames):
                            _iend += 1

                # Now we can slice the data
                tmpdata[k] = v[:,_i0:_iend]

        # hdf5 doesn't have a None type (or at least, I don't know how
        # to use it) so None is stored as an empty string in the hdf5 file
        self.f0 = (root.attrs['f0'], None)[root.attrs['f0'] == '']
        self.fend = (root.attrs['fend'], None)[root.attrs['fend'] == '']
        self.cursor = root.attrs['cursor']

        # read dynobjs
        self.dynobjs = OrderedDict()  
        for (name, dynobj) in root['dynobjs'].iteritems():
            do = DynObj()
            do.read_hd5(root=dynobj)
            self.dynobjs[name] = do
                    
        # read etc dict
        self.etc = {}  
        for (name, obj) in root['etc'].attrs.iteritems():
            self.etc[name] = _literal_eval(obj)
            
        root.close()

        # cast as Element objects
        # 'varrateflag' variables remain lists of lists
        #
        # There are obviously more compact ways to write this, but I'm
        # paranoid about reference counting and garbage collection not
        # functioning properly
        for name, i, rate in zip(_header.name, _header.id, _header.rate):
            if rate != 1:
                self[name] = \
                    Element(tmpdata[name],
                            tmpdata[name+'_Frames'].flatten(),
                            rate=_header.rate[i],
                            name=_header.name[i],
                            dtype=_header.type[i],
                            varrateflag=_header.varrateflag[i],
                            elemid=_header.id[i],
                            units=_header.units[i])
                
                del tmpdata[name+'_Frames']
            else:
                self[name] = \
                    Element(tmpdata[name],
                            self.frame.frame[:],
                            rate=_header.rate[i],
                            name=_header.name[i],
                            dtype=_header.type[i],
                            varrateflag=_header.varrateflag[i],
                            elemid=_header.id[i],
                            units=_header.units[i])
                            
                # delete tmpdata arrays as we go to save memory
                del tmpdata[name]
                
        del _header
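
A complementary sketch for the reading side (hypothetical file name and frame numbers, same Daq class assumption as above): loading a previously written container back, restricted to a frame range via f0 and fend.

from undaqTools import Daq

daq = Daq()
# load only frames 1000 through 5000 from the HDF5 container
daq.read_hd5('drive_data.hdf5', f0=1000, fend=5000)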