Example 1
    def test02_NewFileWithExpectedSize(self):
        """Creation of a new file node with 'expectedsize' argument."""

        try:
            filenode.new_node(
                self.h5file, where='/', name='test', expectedsize=100000)
        except TypeError:
            self.fail("filenode.new_node() failed to accept 'expectedsize'"
                      " argument.")
Example 2
    def test02_NewFileWithExpectedSize(self):
        "Creation of a new file node with 'expectedsize' argument."

        try:
            filenode.new_node(
                self.h5file, where='/', name='test', expectedsize=100000)
        except TypeError:
            self.fail("\
filenode.new_node() failed to accept 'expectedsize' argument.")
Example 3
    def setUp(self):
        """
        This method sets the following instance attributes:

        * ``h5fname``: the name of the temporary HDF5 file.
        * ``h5file``: the writable, temporary HDF5 file with a ``/test`` node.
        * ``fnode``: the readable file node in ``/test``, with text in it.
        """

        super(ReadlineTestCase, self).setUp()

        linesep = self.line_separator

        # Fill the node file with some text.
        fnode = filenode.new_node(self.h5file, where='/', name='test')
        #fnode.line_separator = linesep
        fnode.write(linesep)
        data = 'short line%sshort line%s%s' % ((linesep.decode('ascii'),) * 3)
        data = data.encode('ascii')
        fnode.write(data)
        fnode.write(b'long line ' * 20 + linesep)
        fnode.write(b'unterminated')
        fnode.close()

        # Re-open it for reading.
        self.fnode = filenode.open_node(self.h5file.get_node('/test'))
Example 4
    def setUp(self):
        """This method sets the following instance attributes:

        * ``h5fname``: the name of the temporary HDF5 file.
        * ``h5file``: the writable, temporary HDF5 file with a ``/test`` node.
        * ``fnode``: the readable file node in ``/test``, with text in it.

        """

        super(ReadlineTestCase, self).setUp()

        linesep = self.line_separator

        # Fill the node file with some text.
        fnode = filenode.new_node(self.h5file, where='/', name='test')
        #fnode.line_separator = linesep
        fnode.write(linesep)
        data = 'short line%sshort line%s%s' % ((linesep.decode('ascii'),) * 3)
        data = data.encode('ascii')
        fnode.write(data)
        fnode.write(b'long line ' * 20 + linesep)
        fnode.write(b'unterminated')
        fnode.close()

        # Re-open it for reading.
        self.fnode = filenode.open_node(self.h5file.get_node('/test'))
Example 5
    def stash_current(self):
        """
        Save the current protocol in the history group and delete the node

        Typically this is called when assigning a new protocol.

        Stored as the date that it was changed followed by its name if it has one
        """
        h5f = self.open_hdf()
        try:
            protocol_name = h5f.get_node_attr('/current', 'protocol_name')
            archive_name = '_'.join([self.get_timestamp(simple=True), protocol_name])
        except AttributeError:
            warnings.warn("protocol_name attribute couldn't be accessed, using timestamp to stash protocol")
            archive_name = self.get_timestamp(simple=True)

        # TODO: When would we want to prefer the .h5f copy over the live one?
        #current_node = filenode.open_node(h5f.root.current)
        #old_protocol = current_node.readall()

        archive_node = filenode.new_node(h5f, where='/history/past_protocols', name=archive_name)
        archive_node.write(json.dumps(self.current))

        h5f.remove_node('/current')
        self.close_hdf(h5f)
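
A possible read-back for these stashed protocols, sketched in the same style. unstash_protocol is a hypothetical name; only open_hdf/close_hdf and the node layout come from the example above.

    def unstash_protocol(self, archive_name):
        # Hypothetical counterpart to stash_current(): read a stashed
        # protocol back from /history/past_protocols and decode its JSON.
        h5f = self.open_hdf()
        node = h5f.get_node('/history/past_protocols', archive_name)
        with filenode.open_node(node, 'r') as fnode:
            protocol = json.loads(fnode.read().decode('utf-8'))
        self.close_hdf(h5f)
        return protocol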
Example 6
def saveDataAsH5(fName, objSaveD):
    """Save data in a pandas/pytables HDF5 store
    Note: blosc is best compression tradeoff, complevel 9,8, or 5 may be best"""

    # complete file extension
    (fName0, fExt0) = os.path.splitext(fName)
    if fExt0 == '':
        fExt0 = '.h5'
    if fExt0 != '.h5':
        raise RuntimeError('only .h5 extension is allowed on save file')
    fName = fName0 + fExt0

    # this warning happens in pytables when close is called on a closed file: safe to ignore
    # avoid the catch_warnings context manager: it is not thread-safe and makes for ugly extra indents
    warnings.filterwarnings('ignore', 'host PyTables file is already closed')


    # move any existing old file to trash
    if os.path.exists(fName):
        os.system('trash %s' % fName) # needs 'brew install trash', also can use os.remove()
    with pd.HDFStore(fName, complib='blosc', complevel=9) as store:
        # initialize node for saving pickles directly
        store._handle.create_group('/', 'objects')
        for (tF,tVal) in iteritems(objSaveD):
            ## for future ref: should add an elif for np arrays here? pickling them is very fast
            if type(tVal) == pd.DataFrame:
                # save DataFrames directly
                store[tF] = tVal
            else:
                # save non-DataFrames by pickling with dill
                fnode = filenode.new_node(store._handle, where='/objects', name=tF)
                fnode.write(dill.dumps(tVal)) 
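
A matching loader, as a minimal sketch under the same conventions (DataFrames at top level, dill pickles under /objects). loadDataFromH5 is a hypothetical name, and store._handle is the same private pandas attribute the saver above relies on.

import dill
import pandas as pd
from tables.nodes import filenode

def loadDataFromH5(fName):
    """Hypothetical inverse of saveDataAsH5(): return a dict of objects."""
    objLoadD = {}
    with pd.HDFStore(fName, mode='r') as store:
        # un-dill everything stored as a filenode under /objects
        for node in store._handle.list_nodes('/objects'):
            fnode = filenode.open_node(node, 'r')
            objLoadD[node._v_name] = dill.loads(fnode.read())
            fnode.close()
        # DataFrames were stored directly under their own keys
        for key in store.keys():
            objLoadD[key.lstrip('/')] = store[key]
    return objLoadD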
Example 7
def saveDataAsH5(fName, objSaveD):
    """Save data in a pandas/pytables HDF5 store
    Note: blosc is best compression tradeoff, complevel 9,8, or 5 may be best"""

    # complete file extension
    (fName0, fExt0) = os.path.splitext(fName)
    if fExt0 == "":
        fExt0 = ".h5"
    if not (fExt0 == ".h5"):
        raise RuntimeError("only .h5 extension is allowed on save file")
    fName = fName0 + fExt0

    # move any existing old file to trash
    if os.path.exists(fName):
        os.system("trash %s" % fName)  # needs 'brew install trash', also can use os.remove()
    with pd.HDFStore(fName, complib="blosc", complevel=9) as store:
        # initialize node for saving pickles directly
        store._handle.create_group("/", "objects")
        for (tF, tVal) in iteritems(objSaveD):
            ## for future ref: should add an elif for np arrays here? pickling them is very fast
            if type(tVal) == pd.DataFrame:
                # save DataFrames directly
                store[tF] = tVal
            else:
                # save non-DataFrames by pickling with dill
                fnode = filenode.new_node(store._handle, where="/objects", name=tF)
                fnode.write(dill.dumps(tVal))
Example 8
def add_raw(db, source, dest):
    '''Imports a raw file into the appropriate group
    for later handling.'''
    fsize = os.path.getsize(source)
    fnode = filenode.new_node(db, where=db.root.raws.segy.files, name=dest,
                              expectedsize=fsize, filters=filter_)
    with open(source, 'rb') as f:  # binary mode: SEG-Y is binary data
        fnode.write(f.read())
    fnode.attrs.tape_format = "segy"
    fnode.close()
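
Reading the stored file back out is symmetric; a minimal sketch assuming the db layout used by add_raw() above (read_raw is a hypothetical name).

def read_raw(db, name):
    # Hypothetical counterpart to add_raw(): return the stored SEG-Y bytes.
    node = db.get_node(db.root.raws.segy.files, name)
    assert node.attrs.tape_format == "segy"
    with filenode.open_node(node, 'r') as fnode:
        return fnode.read()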
Example 9
    def setUp(self):
        """setUp() -> None

        This method sets the following instance attributes:
          * 'h5fname', the name of the temporary HDF5 file
          * 'h5file', the writable, temporary HDF5 file with a '/test' node
          * 'fnode', the writable file node in '/test'
        """
        super(AttrsTestCase, self).setUp()
        self.fnode = filenode.new_node(self.h5file, where='/', name='test')
Example 10
    def _create_pyobject_node(cls, pyt_file, node_path, data=None):
        if data is None:
            data = {}

        # Stash the array values in their own h5 nodes and return a dictionary
        # which is appropriate for JSON serialization.
        out_data = cls._handle_array_values(pyt_file, node_path, data)

        kwargs = dict(where=node_path, name=cls._pyobject_data_node)
        with closing(filenode.new_node(pyt_file, **kwargs)) as f:
            f.write(json.dumps(out_data).encode("ascii"))
Example 11
    def _create_pyobject_node(cls, pyt_file, node_path, data=None):
        if data is None:
            data = {}

        # Stash the array values in their own h5 nodes and return a dictionary
        # which is appropriate for JSON serialization.
        out_data = cls._handle_array_values(pyt_file, node_path, data)

        kwargs = dict(where=node_path, name=cls._pyobject_data_node)
        with closing(filenode.new_node(pyt_file, **kwargs)) as f:
            # Note: file nodes are binary streams under Python 3; there,
            # json.dumps(...).encode(...) is needed (see the previous example).
            json.dump(out_data, f)
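
A sketch of the corresponding reader, mirroring the fragment above with the same json/closing/filenode imports; _read_pyobject_node is a hypothetical name.

    def _read_pyobject_node(cls, pyt_file, node_path):
        # Hypothetical inverse of _create_pyobject_node(): load the JSON blob.
        node = pyt_file.get_node(node_path, cls._pyobject_data_node)
        with closing(filenode.open_node(node, 'r')) as f:
            return json.loads(f.read().decode('ascii'))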
Example 12
    def test00_NewFile(self):
        "Creation of a brand new file node."

        try:
            fnode = filenode.new_node(self.h5file, where='/', name='test')
            node = self.h5file.get_node('/test')
        except LookupError:
            self.fail("filenode.new_node() failed to create a new node.")
        else:
            self.assertEqual(
                fnode.node, node,
                "filenode.new_node() created a node in the wrong place.")
Example 13
    def test00_NewFile(self):
        """Creation of a brand new file node."""

        try:
            fnode = filenode.new_node(self.h5file, where='/', name='test')
            node = self.h5file.get_node('/test')
        except LookupError:
            self.fail("filenode.new_node() failed to create a new node.")
        else:
            self.assertEqual(
                fnode.node, node,
                "filenode.new_node() created a node in the wrong place.")
Example 14
def _write_json_blob(path: Path, entity_key: EntityKey, data: Any):
    """Writes a Python object as json to the HDF file at the given path."""
    with tables.open_file(str(path), "a") as store:

        if entity_key.group_prefix not in store:
            store.create_group('/', entity_key.type)

        if entity_key.group not in store:
            store.create_group(entity_key.group_prefix, entity_key.group_name)

        with filenode.new_node(store,
                               where=entity_key.group,
                               name=entity_key.measure) as fnode:
            fnode.write(bytes(json.dumps(data), "utf-8"))
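
The matching reader, sketched under the same layout. _read_json_blob is a hypothetical name; EntityKey, the type hints, and the group structure come from _write_json_blob above, with the same imports assumed.

def _read_json_blob(path: Path, entity_key: EntityKey) -> Any:
    """Hypothetical inverse of _write_json_blob(): load the stored object."""
    with tables.open_file(str(path), "r") as store:
        node = store.get_node(entity_key.group, entity_key.measure)
        with filenode.open_node(node, "r") as fnode:
            return json.loads(fnode.read().decode("utf-8"))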
Example 15
    def flush_current(self):
        """
        Flushes the 'current' attribute in the subject object to the current filenode
        in the .h5

        Used to make sure the stored .json representation of the current task stays up to date
        with the params set in the subject object
        """
        h5f = self.open_hdf()
        h5f.remove_node('/current')
        current_node = filenode.new_node(h5f, where='/', name='current')
        current_node.write(json.dumps(self.current))
        current_node.attrs['step'] = self.step
        current_node.attrs['protocol_name'] = self.protocol_name
        self.close_hdf(h5f)
Example 16
                    def store_data(h5path, name, data):
                        h5path = h5path.replace('//', '/')

                        h5path_splits = [x for x in h5path.split('/') if x != '']

                        for i in range(len(h5path_splits)):
                            try:
                                h5.create_group('/' + '/'.join(h5path_splits[:i]), h5path_splits[i])
                            except NodeError:
                                pass

                        f = filenode.new_node(h5, where=h5path, name=name, filters=compression_filter)
                        if type(data) == str:
                            data = data.encode('utf-8')
                        f.write(data)
                        f.close()
Example 17
    def store_internal_object(self, obj, h5name, where='/'):
        """
        store a python object into the hdf5 file
        object are then retrieve with retrieve_object()

        uses JSON to serialize obj
            - so works only on values, lists, dictionary, etc... but not functions or methods
        """
        node = filenode.new_node(self.hf,
                                 where=where,
                                 name=h5name,
                                 filters=tables.Filters(complevel=1,
                                                        complib='zlib'))
        js = json.dumps(obj, ensure_ascii=True)
        node.write(js.encode())
        node.close()
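
The docstring above refers to retrieve_object(); a minimal sketch of how that reader might look, assuming the same JSON serialization and the self.hf handle used above.

    def retrieve_object(self, h5name, where='/'):
        """Hypothetical reader matching store_internal_object() above."""
        node = self.hf.get_node(where=where, name=h5name)
        fnode = filenode.open_node(node, 'r')
        obj = json.loads(fnode.read().decode('ascii'))
        fnode.close()
        return obj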
Example 18
    def filenode_write(self, path, data='', obj=None, mode='w'):
        # TODO: better use of the mode param
        if self.test is False:
            return False
        n = False
        attrs = {}
        self.log.debug('CoreFile.filenode_write', path)
        if self.has_node(path) and mode == 'w':
            self.log.debug('removing old node', path)
            t0 = time()
            attrs = self.get_attributes(path)
            self.log.debug('saved attributes', attrs)
            self.remove_node(path)
        self.log.debug('filenode_write lock')
        self._lock.acquire()
        where = os.path.dirname(path)
        name = os.path.basename(path)
        self.log.debug('newNode', path, where, name)
        try:
            node = filenode.new_node(self.test, where=where, name=name)
        except Exception:  # log the traceback and bail out rather than crash the caller
            self._lock.release()
            print_exc()
            return False
        self.log.debug('newNode done', where, name)
        if obj:
            t0 = time()
            data = dumps(obj)
            t1 = time()
            self.log.debug('dumping', t1 - t0)
            node.write(data)
            t2 = time()
            self.log.debug('writing', t2 - t1)
            self.log.debug('total', t2 - t0)
        else:
            node.write(data)
        # node.close()
        # Restore attributes
        self.test.flush()
        self._lock.release()
        if len(attrs) > 0:
            self.log.debug('restoring attrs', attrs)
            self.set_attributes(path, attrs=attrs)

        self.log.debug('DONE CoreFile.filenode_write', path)
        return len(data)
Example 19
    def setUp(self):
        """setUp() -> None

        This method sets the following instance attributes:
          * 'datafile', the opened data file
          * 'h5fname', the name of the temporary HDF5 file
          * 'h5file', the writable, temporary HDF5 file with a '/test' node
          * 'fnode', the readable file node in '/test', with data in it
        """

        self.datafname = self._testFilename(self.datafname)
        self.datafile = open(self.datafname, 'rb')

        super(ReadFileTestCase, self).setUp()

        fnode = filenode.new_node(self.h5file, where='/', name='test')
        copyFileToFile(self.datafile, fnode)
        fnode.close()

        self.datafile.seek(0)
        self.fnode = filenode.open_node(self.h5file.get_node('/test'))
Example 20
    def open_internal_file(self, h5name, access='r', where='/attached'):
        """
        opens a node called h5name in the file, which can be accessed as a file.
        returns a file stram which can be used as a classical file.
        
        access is either 
            'r' : for reading an existing node
            'w' : create a node for writing into it
            'a' : for appending in an existing node
        file is stored in a h5 group called h5name

        eg.
        F = h5.open_internal_file('myfile.txt', 'w', where='/files')
        # create a node called '/files/myfile.txt' (node 'myfile.txt' in the group '/files')
        F.writelines(text)
        F.close()
        # and write some text into it

        # then, latter on
        F = h5.open_internal_file('myfile.txt', 'r', where='/files')
        textback = F.read()
        F.close()

        This is used to add parameter files, audit_trail, etc... to spike/hdf5 files
        
        it is based on the filenode module from pytables
        """
        import warnings
        if access == 'r':
            v = self.hf.get_node(where=where, name=h5name)
            F = filenode.open_node(v, 'r')
        elif access == 'a':
            v = self.hf.get_node(where=where, name=h5name)
            F = filenode.open_node(v, 'a+')
        elif access == 'w':
            with warnings.catch_warnings(
            ):  # remove warnings, as the dot in name activates them
                warnings.simplefilter("ignore")
                F = filenode.new_node(self.hf, where=where, name=h5name)
        return F
Example 21
    def addFontAtlas(self, font_atlas):
        style_group = None
        family_group = self.getFamilyGroup(font_atlas.font_info.family_name)
        if family_group:
            style_group = self.getStyleGroup(font_atlas, family_group)
        if style_group:
            size = font_atlas.size
            dpi = font_atlas.dpi

            # save the original font file to the hdf5 file
            ttf_file_name = os.path.split(font_atlas.font_info.path)[-1]
            ttf_node_name = ttf_file_name.replace(u'.', u'_')
            try:
                ttf_exists = style_group._f_get_child(ttf_node_name)
            except tb.NoSuchNodeError:
                import tables
                from tables.nodes import filenode
                f = open(font_atlas.font_info.path, 'rb')
                ttf_node = filenode.new_node(self._tables,
                                             where=style_group,
                                             name=ttf_node_name,
                                             title=ttf_file_name)
                ttf_node.write(f.read())
                f.close()
                ttf_node.close()

            # Create a group for this font size, dpi combo.
            font_size_group = None
            for a in self._tables.list_nodes(style_group.sizes,
                                             classname='Group'):
                if a._v_attrs.TITLE == "%d PT, %d DPI Data" % (size, dpi):
                    font_size_group = a
                    break

            if font_size_group is None:
                font_size_group = self._tables.create_group(
                    style_group.sizes, "D_%d_%d" % (size, dpi),
                    "%d PT, %d DPI Data" % (size, dpi))

                #Save some atlas info for later use..
                font_size_group._v_attrs[
                    'max_ascender'] = font_atlas.max_ascender
                font_size_group._v_attrs[
                    'max_descender'] = font_atlas.max_descender
                font_size_group._v_attrs[
                    'max_tile_width'] = font_atlas.max_tile_width
                font_size_group._v_attrs[
                    'max_tile_height'] = font_atlas.max_tile_height
                font_size_group._v_attrs[
                    'max_bitmap_size'] = font_atlas.max_bitmap_size
                font_size_group._v_attrs[
                    'total_bitmap_area'] = font_atlas.total_bitmap_area

                # create 2d array to store the fontatlas bitmap data in.
                atlas_bmp = tb.Array(
                    font_size_group,
                    "FontGlyphAtlas",
                    obj=font_atlas.atlas.data,
                    title='Array Holding the Font Face Glyph Bitmaps')

                # Save the info for each glyph so atlas data array and glyph
                # location can be used to generate display lists when the font
                # store is retrieved.

                chr_glyph_table = self._tables.create_table(
                    font_size_group,
                    'UnicharGlyphData',
                    FontGlyphData,
                    "Data regarding one char/glyph within the font set.",
                    expectedrows=400)
                tdata = []
                for charcode, gfinfo in font_atlas.charcode2glyph.items():
                    x, y, w, h = gfinfo['atlas_coords']
                    x1, y1, x2, y2 = gfinfo['texcoords']
                    tdata.append((gfinfo['index'], charcode,
                                  gfinfo['unichar'].encode('utf-8'),
                                  gfinfo['offset'][0], gfinfo['offset'][1],
                                  gfinfo['size'][0], gfinfo['size'][1], x, y,
                                  w, h, x1, y1, x2, y2))
                chr_glyph_table.append(tdata)
                chr_glyph_table.flush()
            else:
                print('Font Size Group already exists!!', '%d pt, %d dpi' % (
                    size, dpi))
Example 22
    def _store_model(self, extractor):
        fn = filenode.new_node(self.root._v_file,
                               where=self.root, name='model')
        pickle.dump(extractor, fn, pickle.HIGHEST_PROTOCOL)
        fn.close()
        return extractor
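
A matching loader sketch; _load_model is a hypothetical name, but filenode.open_node plus pickle.load mirrors the write path above.

    def _load_model(self):
        # Hypothetical counterpart to _store_model(): un-pickle the extractor.
        node = self.root._v_file.get_node(self.root, 'model')
        fn = filenode.open_node(node, 'r')
        extractor = pickle.load(fn)
        fn.close()
        return extractor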
Example 23
#
#       You should have received a copy of the GNU General Public License
#       along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
#       Author:  Vicent Mas - [email protected]

#
#       This script is based on a script by Ivan Vilata.
"""How to use the filenode module."""

from tables.nodes import filenode
import tables

h5file = tables.open_file('fnode.h5', 'w')

fnode = filenode.new_node(h5file, where='/', name='fnode_test')
print >> fnode, "This is a test text line."
print >> fnode, "And this is another one."
print >> fnode
fnode.write("Of course, file methods can also be used.")
fnode.close()

node = h5file.root.fnode_test
fnode = filenode.open_node(node, 'a+')
print >> fnode, "This is a new line."

fnode.attrs.content_type = 'text/plain; charset=us-ascii'

fnode.attrs.author = "Ivan Vilata i Balaguer"
fnode.attrs.creation_date = '2004-10-20T13:25:25+0200'
fnode.attrs.keywords_en = ["FileNode", "test", "metadata"]
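
For completeness, a read-back sketch continuing the script above (the node is still open, so it is closed first).

fnode.close()
h5file.close()

# Later: re-open the file and read the node back like an ordinary file.
h5file = tables.open_file('fnode.h5', 'r')
fnode = filenode.open_node(h5file.root.fnode_test, 'r')
print(fnode.read().decode('ascii'))
print(h5file.root.fnode_test.attrs.author)
fnode.close()
h5file.close()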
Example 24
    def addFontAtlas(self,font_atlas):
        style_group=None
        family_group=self.getFamilyGroup(font_atlas.font_info.family_name)
        if family_group:
            style_group=self.getStyleGroup(font_atlas,family_group)  
        if style_group:
            size=font_atlas.size
            dpi=font_atlas.dpi        
            
            
            # save the original font file to the hdf5 file
            ttf_file_name=os.path.split(font_atlas.font_info.path)[-1]
            ttf_node_name=ttf_file_name.replace(u'.',u'_')
            try:
                ttf_exists=style_group._f_get_child(ttf_node_name) 
            except tb.NoSuchNodeError:
                import tables
                from tables.nodes import filenode
                f = open(font_atlas.font_info.path, 'rb')
                ttf_node=filenode.new_node(self._tables, where=style_group,name=ttf_node_name,title=ttf_file_name)
                ttf_node.write(f.read())
                f.close()
                ttf_node.close()
            
            # Create a group for this font size, dpi combo.
            font_size_group=None
            for a in self._tables.list_nodes(style_group.sizes, classname='Group'):
                if a._v_attrs.TITLE=="%d PT, %d DPI Data"%(size,dpi):
                    font_size_group=a
                    break
            
            if font_size_group is None:
                font_size_group=self._tables.create_group(style_group.sizes, "D_%d_%d"%(size,dpi), "%d PT, %d DPI Data"%(size,dpi))

                #Save some atlas info for later use..
                font_size_group._v_attrs['max_ascender']=font_atlas.max_ascender
                font_size_group._v_attrs['max_descender']=font_atlas.max_descender
                font_size_group._v_attrs['max_tile_width']=font_atlas.max_tile_width
                font_size_group._v_attrs['max_tile_height']=font_atlas.max_tile_height
                font_size_group._v_attrs['max_bitmap_size']=font_atlas.max_bitmap_size
                font_size_group._v_attrs['total_bitmap_area']=font_atlas.total_bitmap_area
        
                # create 2d array to store the fontatlas bitmap data in.
                atlas_bmp=tb.Array(font_size_group, "FontGlyphAtlas", obj=font_atlas.atlas.data, title='Array Holding the Font Face Glyph Bitmaps')
                
                # Save the info for each glyph so atlas data array and glyph 
                # location can be used to generate display lists when the font
                # store is retrieved.


                chr_glyph_table = self._tables.create_table(font_size_group, 'UnicharGlyphData', FontGlyphData, "Data regarding one char/glyph within the font set.",expectedrows = 400)                
                tdata=[]
                for charcode,gfinfo in font_atlas.charcode2glyph.items():
                    x,y,w,h=gfinfo['atlas_coords']
                    x1,y1,x2,y2 = gfinfo['texcoords']
                    tdata.append((gfinfo['index'], 
                                  charcode,
                                 gfinfo['unichar'].encode('utf-8'),
                                 gfinfo['offset'][0],
                                 gfinfo['offset'][1],
                                 gfinfo['size'][0],
                                 gfinfo['size'][1],
                                 x,y,w,h,
                                 x1,y1,x2,y2))
                chr_glyph_table.append(tdata)
                chr_glyph_table.flush()
            else:
                print('Font Size Group already exists!!','%d pt, %d dpi'%(size,dpi))
Example 25
# Fill the table with 10 particles
particle = table.row
for i in range(10):
    # First, assign the values to the Particle record
    particle['name']  = 'Particle: {:6d}'.format(i)
    particle['lati'] = i
    particle['longi'] = 10 - i
    particle['pressure'] = float(i*i)
    particle['temperature'] = float(i**2)
    # This injects the row values.
    particle.append()

# We need to flush the buffers in table in order to get an
# accurate number of records on it.
table.flush()

# Create a filenode
fnode = filenode.new_node(h5file, where='/', name='filenode')

# Fill the filenode
counter = 0
while counter < 10:
    l = "This is a line inserted programmatically at position {}\n".format(counter)
    fnode.write(l.encode("utf-8"))
    counter += 1
fnode.write(bytearray("This is the last line.\n", "utf-8"))
fnode.attrs.author = "Vicent Mas"
fnode.close()

h5file.close()
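
Read-back sketch: once the file is re-opened, the node supports normal line iteration. The path below is a placeholder, since the fragment above never shows how h5file was opened.

import tables
from tables.nodes import filenode

h5file = tables.open_file('example.h5', 'r')  # hypothetical path
fnode = filenode.open_node(h5file.get_node('/filenode'), 'r')
for line in fnode:  # file nodes iterate line by line, like ordinary files
    print(line.decode('utf-8'), end='')
fnode.close()
h5file.close()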
Example 26
    def assign_protocol(self, protocol, step_n=0):
        """
        Assign a protocol to the subject.

        If the subject has a currently assigned task, stashes it with :meth:`~.Subject.stash_current`

        Creates groups and tables according to the data descriptions in the task class being assigned.
        eg. as described in :class:`.Task.TrialData`.

        Updates the history table.

        Args:
            protocol (str): the protocol to be assigned. Can be one of

                * the name of the protocol (its filename minus .json) if it is in `prefs.PROTOCOLDIR`
                * filename of the protocol (its filename with .json) if it is in the `prefs.PROTOCOLDIR`
                * the full path and filename of the protocol.

            step_n (int): Which step is being assigned?
        """
        # Protocol will be passed as a .json filename in prefs.PROTOCOLDIR

        h5f = self.open_hdf()

        ## Assign new protocol
        if not protocol.endswith('.json'):
            protocol = protocol + '.json'

        # try prepending the protocoldir if we were passed just the name
        if not os.path.exists(protocol):
            fullpath = os.path.join(prefs.PROTOCOLDIR, protocol)
            if not os.path.exists(fullpath):
                raise Exception('Could not find either {} or {}'.format(protocol, fullpath))
            protocol = fullpath  # use the resolved path for the open() below

        # Set name and step
        # Strip off path and extension to get the protocol name
        protocol_name = os.path.splitext(protocol)[0].split(os.sep)[-1]

        # Load protocol to dict
        with open(protocol) as protocol_file:
            prot_dict = json.load(protocol_file)

        # Check if there is an existing protocol, archive it if there is.
        if "/current" in h5f:
            _ = self.close_hdf(h5f)
            self.update_history(type='protocol', name=protocol_name, value = prot_dict)
            self.stash_current()
            h5f = self.open_hdf()

        # Make filenode and save as serialized json
        current_node = filenode.new_node(h5f, where='/', name='current')
        current_node.write(json.dumps(prot_dict))
        h5f.flush()

        # save some protocol attributes
        self.current = prot_dict

        current_node.attrs['protocol_name'] = protocol_name
        self.protocol_name = protocol_name

        current_node.attrs['step'] = step_n
        self.step = int(step_n)

        # Make file group for protocol
        if "/data/{}".format(protocol_name) not in h5f:
            current_group = h5f.create_group('/data', protocol_name)
        else:
            current_group = h5f.get_node('/data', protocol_name)

        # Create groups for each step
        # There are two types of data - continuous and trialwise.
        # Each gets a single table within a group: since each step should have
        # consistent data requirements over time and hdf5 doesn't need to be in
        # memory, we can just keep appending to keep things simple.
        for i, step in enumerate(self.current):
            # First we get the task class for this step
            task_class = TASK_LIST[step['task_type']]
            step_name = step['step_name']
            # group name is S##_'step_name'
            group_name = "S{:02d}_{}".format(i, step_name)

            if group_name not in current_group:
                step_group = h5f.create_group(current_group, group_name)
            else:
                step_group = current_group._f_get_child(group_name)

            # The task class *should* have at least one PyTables DataTypes descriptor
            try:
                if hasattr(task_class, "TrialData"):
                    trial_descriptor = task_class.TrialData
                    # add a session column, everyone needs a session column
                    if 'session' not in trial_descriptor.columns.keys():
                        trial_descriptor.columns.update({'session': tables.Int32Col()})
                    # same thing with trial_num
                    if 'trial_num' not in trial_descriptor.columns.keys():
                        trial_descriptor.columns.update({'trial_num': tables.Int32Col()})
                    # if this task has sounds, make columns for them
                    # TODO: Make stim managers return a list of properties for their sounds
                    if 'stim' in step.keys():
                        if 'manager' in step['stim'].keys():
                            # managers have stim nested within groups, but this is still really ugly
                            sound_params = {}
                            for g in step['stim']['groups']:
                                for side, sounds in g['sounds'].items():
                                    for sound in sounds:
                                        for k, v in sound.items():
                                            if k in STRING_PARAMS:
                                                sound_params[k] = tables.StringCol(1024)
                                            else:
                                                sound_params[k] = tables.Float64Col()
                            trial_descriptor.columns.update(sound_params)

                        elif 'sounds' in step['stim'].keys():
                            # for now we just assume they're floats
                            sound_params = {}
                            for side, sounds in step['stim']['sounds'].items():
                                # each side has a list of sounds
                                for sound in sounds:
                                    for k, v in sound.items():
                                        if k in STRING_PARAMS:
                                            sound_params[k] = tables.StringCol(1024)
                                        else:
                                            sound_params[k] = tables.Float64Col()
                            trial_descriptor.columns.update(sound_params)

                    h5f.create_table(step_group, "trial_data", trial_descriptor)
            except tables.NodeError:
                # we already have made this table, that's fine
                pass
            try:
                if hasattr(task_class, "ContinuousData"):
                    cont_descriptor = task_class.ContinuousData
                    cont_descriptor.columns.update({'session': tables.Int32Col()})
                    h5f.create_table(step_group, "continuous_data", cont_descriptor)
            except tables.NodeError:
                # already made it
                pass

        _ = self.close_hdf(h5f)

        # Update history
        self.update_history('protocol', protocol_name, self.current)
Example 27
#       You should have received a copy of the GNU General Public License
#       along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
#       Author:  Vicent Mas - [email protected]

#
#       This script is based on a script by Ivan Vilata.

"""How to use the filenode module."""

from tables.nodes import filenode
import tables

h5file = tables.open_file('fnode.h5', 'w')

fnode = filenode.new_node(h5file, where='/', name='fnode_test')
print >> fnode, "This is a test text line."
print >> fnode, "And this is another one."
print >> fnode
fnode.write("Of course, file methods can also be used.")
fnode.close()

node = h5file.root.fnode_test
fnode = filenode.open_node(node, 'a+')
print >> fnode, "This is a new line."

fnode.attrs.content_type = 'text/plain; charset=us-ascii'

fnode.attrs.author = "Ivan Vilata i Balaguer"
fnode.attrs.creation_date = '2004-10-20T13:25:25+0200'
fnode.attrs.keywords_en = ["FileNode", "test", "metadata"]
Example 28
particle = table.row
for i in range(10):
    # First, assign the values to the Particle record
    particle['name'] = 'Particle: {:6d}'.format(i)
    particle['lati'] = i
    particle['longi'] = 10 - i
    particle['pressure'] = float(i * i)
    particle['temperature'] = float(i**2)
    # This injects the row values.
    particle.append()

# We need to flush the buffers in table in order to get an
# accurate number of records on it.
table.flush()

# Create a filenode
fnode = filenode.new_node(h5file, where='/', name='filenode')

# Fill the filenode
counter = 0
while counter < 10:
    l = "This is a line inserted programmatically at position {}\n".format(
        counter)
    fnode.write(l.encode("utf-8"))
    counter += 1
fnode.write(bytearray("This is the last line.\n", "utf-8"))
fnode.attrs.author = "Vicent Mas"
fnode.close()

h5file.close()
Example 29
    def _init_meta(self):
        class MetaAccess(object):
            def __init__(self, meta_attributes=None):
                self._meta_attributes = meta_attributes

            def __getitem__(self, item):
                if self._meta_attributes is None:
                    raise KeyError("No such key: {}".format(item))
                return self._meta_attributes[item]

            def __getattr__(self, item):
                if item == '_meta_attributes':
                    return object.__getattribute__(self, item)
                if self._meta_attributes is None:
                    raise AttributeError("No such attribute: {}".format(item))
                return getattr(self._meta_attributes, item)

            def __setattr__(self, key, value):
                if key == '_meta_attributes':
                    object.__setattr__(self, key, value)
                    return

                if self._meta_attributes is None:
                    raise t.FileModeError(
                        "File not writable, attribute cannot be set!")
                self._meta_attributes[key] = value

            def __setitem__(self, key, value):
                if self._meta_attributes is None:
                    raise t.FileModeError(
                        "File not writable, item cannot be set!")
                setattr(self._meta_attributes, key, value)

            def __contains__(self, item):
                if self._meta_attributes is None:
                    return False
                try:
                    _ = self._meta_attributes[item]
                    return True
                except KeyError:
                    return False

            def keys(self):
                return vars(self._meta_attributes).keys()

            def values(self):
                return vars(self._meta_attributes).values()

        # Use the existing meta group if present; otherwise try to create it.
        try:
            meta_group = self.file.get_node('/' + self._meta_group_name)
        except t.NoSuchNodeError:
            try:
                meta_group = self.file.create_group('/', self._meta_group_name)
            except t.FileModeError:
                logger.debug(
                    "File not open for writing, not creating meta group.")
                self.meta = MetaAccess()
                return

        try:
            meta_node = self.file.get_node(meta_group, 'meta_node')
        except t.NoSuchNodeError:
            try:
                meta_node = filenode.new_node(self.file,
                                              where=meta_group,
                                              name='meta_node')
            except t.FileModeError:
                logger.debug(
                    "File not open for writing, not creating meta node.")
                self.meta = MetaAccess()
                return

        self.meta = MetaAccess(meta_node.attrs)
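
Usage then looks like plain attribute or item access on .meta; a short illustration under the class above (obj is a hypothetical instance whose _init_meta() has run on a writable file).

obj.meta['created_by'] = 'pipeline-v2'  # item write -> __setattr__ on the attrs
print(obj.meta.created_by)              # attribute read -> __getattr__
print('created_by' in obj.meta)         # membership test via __contains__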