def __init__(self, path: str,
                 event_table_desc: Type[tables.IsDescription]):
        """
        Writer for a collection of Photoelectrons to HDF5 file using pytables

        Stores the photoelectron info as variable length arrays

        Parameters
        ----------
        path : str
            Path to store the file (overwrites if already exists)
        event_table_desc : Type[tables.IsDescription]
            Uninstantiated `tables.IsDescription` class describing the columns of
            the event metadata table
        """
        self._file = tables.File(path, mode='w', filters=FILTERS)
        group = self._file.create_group(self._file.root, "data", "Event Data")
        self._event_metadata_table = self._file.create_table(
            group, "event_metadata", event_table_desc, "Event Metadata")
        self._pixel_column = self._file.create_vlarray(
            group, "photoelectron_arrival_pixel", tables.UInt16Atom(shape=()),
            "Pixel hit by the photoelectron")
        self._time_column = self._file.create_vlarray(
            group, "photoelectron_arrival_time", tables.Float64Atom(shape=()),
            "Arrival time of the photoelectrons")
        self._charge_column = self._file.create_vlarray(
            group, "photoelectron_measured_charge",
            tables.Float64Atom(shape=()),
            "Charge reported by photosensor for each photoelectron")
Example #2
def compute_particles(h5file,
                      centers,
                      nr_particles=10,
                      avg_dist=10.0,
                      avg_radial_vel=1.0,
                      avg_rand_vel=0.1,
                      min_mass=0.01,
                      max_mass=10.0):
    '''compute nr_particles around the given center of gravity,
       with average distance avg_dist and average radial/random velocities
       avg_radial_vel and avg_rand_vel'''
    particles = h5file.create_group(h5file.root, 'particles')
    position = h5file.create_earray(particles, 'position',
                                    tables.Float64Atom(), (0, 3),
                                    'particle positions')
    velocity = h5file.create_earray(particles, 'velocity',
                                    tables.Float64Atom(), (0, 3),
                                    'particle velocities')
    mass = h5file.create_earray(particles, 'mass', tables.Float64Atom(), (0, ),
                                'particle masses')
    center = np.mean(centers, axis=0)
    pos = center + avg_dist * np.random.randn(nr_particles, 3)
    position.append(pos)
    vel = (avg_rand_vel * np.random.randn(nr_particles, 3) + avg_radial_vel *
           (pos - center))
    velocity.append(vel)
    mass.append(np.random.uniform(min_mass, max_mass, nr_particles))
    h5file.flush()
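
A possible driver for the function above (file name and centers are invented for illustration):

import numpy as np
import tables

h5file = tables.open_file('particles.h5', mode='w')
centers = np.array([[0.0, 0.0, 0.0], [5.0, 0.0, 0.0]])
compute_particles(h5file, centers, nr_particles=100)
print(h5file.root.particles.position.nrows)  # -> 100
h5file.close()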
Example #3
    def init_db(self, db, size):
        """ Takes a PyTables File object (db) and the total number of samples
        expected, and creates the necessary arrays.
        """
        D = self.dist0.D
        objroot = db.root.object
        db.create_earray(objroot.objfxn, 'mu0', t.Float64Atom(shape=(D,)), (0,), expectedrows=size)
        db.create_earray(objroot.objfxn, 'mu1', t.Float64Atom(shape=(D,)), (0,), expectedrows=size)
        db.create_earray(objroot.objfxn, 'sigma0', t.Float64Atom(shape=(D, D)), (0,), expectedrows=size)
        db.create_earray(objroot.objfxn, 'sigma1', t.Float64Atom(shape=(D, D)), (0,), expectedrows=size)
        db.create_earray(objroot.objfxn, 'c', t.Float64Atom(), (0,), expectedrows=size)
Example #4
    def __init__(self, n_dict, spk, dt, debug=False, LPU_id=None):
        self.num_neurons = len(n_dict['id'])
        self.LPU_id = None
        super(Olfactory_receptor, self).__init__(n_dict, spk, dt, debug,
                                                 LPU_id)
        self.debug = debug

        self.dt = dt
        #self.steps = max(int(round(dt / 1e-5)),1)
        #self.ddt = dt / self.steps

        self.I_drive = garray.to_gpu(
            np.asarray(np.zeros((self.num_neurons, 1)), dtype=np.double))

        binding_rate_data = np.random.rand(self.num_neurons, 1)
        self.binding_rate = garray.to_gpu(
            np.asarray(binding_rate_data, dtype=np.double))
        init_state = np.zeros((self.num_neurons, 13))
        init_state[:, 4] = 1e-5
        init_state[:, 7] = 1
        self.state = garray.to_gpu(np.asarray(init_state, dtype=np.double))

        #self.V = V
        self.spk = spk
        self.spk_flag = 0
        self.V = garray.to_gpu(np.asarray(n_dict['V'], dtype=np.double))
        #self.spk = garray.to_gpu(np.asarray(np.zeros((self.num_neurons,1)), dtype=np.int32))
        self.V_prev = garray.to_gpu(
            np.asarray(n_dict['V_prev'], dtype=np.double))
        self.X_1 = garray.to_gpu(np.asarray(n_dict['X_1'], dtype=np.double))
        self.X_2 = garray.to_gpu(np.asarray(n_dict['X_2'], dtype=np.double))
        self.X_3 = garray.to_gpu(np.asarray(n_dict['X_3'], dtype=np.double))
        #self.I_ext = garray.to_gpu(np.asarray([1]*self.num_neurons, dtype=np.double))

        #cuda.memcpy_htod(int(self.V), np.asarray(n_dict['V'], dtype=np.double))
        #cuda.memcpy_htod(int(self.spk), np.asarray(np.zeros((self.num_neurons,1)), dtype=np.double))
        self.update_olfactory_transduction = self.get_olfactory_transduction_kernel()
        self.update_hhn = self.get_multi_step_hhn_kernel()

        if self.debug:
            if self.LPU_id is None:
                self.LPU_id = "anon"
            self.I_file = tables.open_file(self.LPU_id + "_I.h5", mode="w")
            self.I_file.create_earray("/", "array",
                                      tables.Float64Atom(), (0, self.num_neurons))

            self.V_file = tables.open_file(self.LPU_id + "_V.h5", mode="w")
            self.V_file.create_earray("/", "array",
                                      tables.Float64Atom(), (0, self.num_neurons))
Example #5
def initUV(L,M,N):

    f = tb.open_file('./matrix-pt.h5', 'w')

    filters = tb.Filters(complevel=5, complib='blosc')
    ad = f.create_carray(f.root, 'a', tb.Float64Atom(), (L,M), filters=filters)
    a = np.random.normal(0, 0.1, M) * 0.1
    for i in range(L):
        ad[i,:] = a

    bd = f.create_carray(f.root, 'b', tb.Float64Atom(), (N, M), filters=filters)
    for i in range(N):
        bd[i,:] = a

    return ad, bd
Example #6
def write_new_file(f_name, Phi, m, n):
    file_name = f_name + '.h5'
    #f = tb.open_file('Phi.h5', 'w')
    f = tb.open_file(file_name, 'w')
    filters = tb.Filters(complevel=5, complib='blosc')

    #because by default numpy (in other scripts) operates using float64, this is necessary
    out = f.create_carray(f.root,
                          'data',
                          tb.Float64Atom(),
                          shape=(m, n),
                          filters=filters)

    print "h5 file created, now putting Phi from memory to file..."

    step = 1000  #this is the number of columns we copy each loop (example was using bl)
    #this may not be the most efficient value
    #look into buffersize usage in PyTables and adopt the buffersite of the
    #carray accordingly to improve specifically fetching performance

    #b = b.tocsc() #we slice b on columns, csc improves performance #not necessary in this case

    #this can also be changed to slice on rows instead of columns (which is what will be done for Phi)
    for i in range(0, n, step):
        try:
            # initially, the example used (a.dot(b[:,i:min(i+bl, l)])).toarray()
            out[:, i:min(i + step, n)] = Phi[:, i:min(i + step, n)]
        except Exception as e:
            print(e)
            break
        print(i)
    print("Phi saving done, closing file...")

    f.close()
Example #7
def contact_maps_from_traj(pdb_file,
                           traj_file,
                           contact_cutoff=8.0,
                           savefile=None):
    """
    Get contact map from trajectory.
    """

    mda_traj = mda.Universe(pdb_file, traj_file)
    traj_length = len(mda_traj.trajectory)
    ca = mda_traj.select_atoms('name CA')

    if savefile:
        savefile = os.path.abspath(savefile)
        outfile = tables.open_file(savefile, 'w')
        atom = tables.Float64Atom()
        cm_table = outfile.create_earray(outfile.root,
                                         'contact_maps',
                                         atom,
                                         shape=(traj_length, 0))

    contact_matrices = []
    for frame in mda_traj.trajectory:
        cm_matrix = (distances.self_distance_array(ca.positions) <
                     contact_cutoff) * 1.0
        contact_matrices.append(cm_matrix)

    if savefile:
        cm_table.append(contact_matrices)
        outfile.close()

    return contact_matrices
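
Reading the saved maps back is straightforward; since the EArray grows along its second axis, a full slice returns one row of pairwise contacts per frame. A small sketch, assuming savefile was 'contact_maps.h5':

import tables

with tables.open_file('contact_maps.h5', 'r') as f:
    cms = f.root.contact_maps[:]   # shape: (traj_length, n_pairs)
    print(cms.shape, cms.dtype)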
Example #8
def save_data(main_path, exp_path, data):
    """
    save_data(main_path, exp_path, data)

    Parameters
    ----------
    main_path : str
    exp_path : str
    data : ndarray

    Returns
    -------
    int
        1 if the data was saved, 0 otherwise
    """

    try:
        # Saving Parameters
        atom = tables.Float64Atom()  # declare data type
        fsave = tables.open_file(os.path.join(main_path, exp_path + '.h5'),
                                 mode='w')  # open tables object
        ds = fsave.create_earray(
            fsave.root,
            'data',
            atom,  # create data store 
            [0, data.shape[1], data.shape[2]])
        ds.append(data)  # append data
        fsave.close()  # close tables object
        return 1

    except Exception:
        print('File could not be saved')
        return 0
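
A matching read-back sketch (the paths are illustrative placeholders):

import os
import tables

main_path, exp_path = '/tmp', 'experiment1'   # illustrative paths
with tables.open_file(os.path.join(main_path, exp_path + '.h5'), mode='r') as f:
    data = f.root.data[:]   # the EArray reads back as a plain ndarray
    print(data.shape)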
Example #9
    def write_jointState(self, topic_group, data):

        fields = ['position', 'velocity', 'effort', 'time']
        self.pytable_writer_helper(topic_group, fields, tables.Float64Atom(),
                                   data)
        self.pytable_writer_helper(topic_group, ['name'],
                                   tables.StringAtom(itemsize=20), data)
Example #10
def write_zlib(data):
    zlib_filter = tables.Filters(complib='zlib', complevel=9)
    zlib_out = tables.open_file('zlib.h5', mode='w')
    atom = tables.Float64Atom()
    ca = zlib_out.create_carray(zlib_out.root, 'ca', atom, data.shape, filters=zlib_filter)
    ca[:] = data
    zlib_out.close()
Example #11
    def _draw_frame(self, framedata):
        self.framedata = framedata
        try:
            try:
                myGUI.ppathh
            except AttributeError:
                myGUI.set_path()
            spectrum = myGUI.acquireData() - self.background
            self.wv_ax = myGUI.wv
            self.plot_spectrum(spectrum)

            if myGUI.measure_chkBox.isChecked():
                self.dt = myGUI.measure_SpinBox.value()
                if time.time() > self.timestamp + self.dt:
                    self.filename = str(myGUI.ppathh + "sp_array_" +
                                        str(self.save_file_num) + '.h5')
                    if os.path.exists(self.filename):
                        f = tables.open_file(self.filename, mode='a')
                        f.root.data.append(spectrum)
                        f.close()
                    else:
                        # first write stores the wavelength axis, then the spectrum
                        f = tables.open_file(self.filename, mode='w')
                        data_format = tables.Float64Atom()
                        array_c = f.create_earray(f.root, 'data', data_format,
                                                  (0, ))
                        array_c.append(self.wv_ax)
                        array_c.append(spectrum)
                        f.close()
                    self.timestamp = time.time()
        except Exception:
            pass
Example #12
    def _initialize(self):
        if not self._disabled:
            self._logger.debug('Initializing.')
            self._logger.debug('Disabled = %s' % self._disabled)
            if self._out_file:  # for resets
                self._out_file.close()

            info = self.traverse_back_and_find("mne_info")
            col_size = info["nchan"]
            self._out_file = tables.open_file(self.output_path, mode="w")
            atom = tables.Float64Atom()

            self.output_array = self._out_file.create_earray(
                self._out_file.root, "data", atom, (col_size, 0))
            self.timestamps_array = self._out_file.create_earray(
                self._out_file.root, "timestamps", atom, (1, 0))
            self.ch_names = self._out_file.create_array(
                self._out_file.root,
                "ch_names",
                np.array(info["ch_names"]),
                "Channel names in data",
            )
            self._out_file.root.data.attrs.sfreq = info["sfreq"]
            try:
                fwd = self.traverse_back_and_find("_fwd")
                self._out_file.create_array(
                    self._out_file.root,
                    "src_xyz",
                    fwd['source_rr'],
                    "Source space coordinates",
                )
            except Exception as e:
                self._logger.exception(e)
                self._logger.warning('Forward model not found.'
                                     ' Skip adding source coordinates.')
Example #13
def generate_dataset():
    FOLDER = "car_images/"
    FILENAME = "car_dataset_%dx%d.h5" % (IMG_WIDTH, IMG_HEIGHT)
    TARGET_SIZE = 50000
    os.chdir(ROOT)
    fd = tables.open_file(FILENAME, mode='w')
    atom = tables.Float64Atom()
    filters = tables.Filters(complevel=5, complib='blosc')
    dataset = fd.create_earray(fd.root,
                               'data',
                               atom, (0, 3, IMG_WIDTH, IMG_HEIGHT),
                               filters=filters,
                               expectedrows=TARGET_SIZE)

    os.chdir(ROOT + "/" + FOLDER)
    count = 0
    for f in glob.glob("*.jpg"):
        img = Image.open(f)
        count += 1
        print("%d : %s" % (count, f))
        img = img.resize((IMG_WIDTH, IMG_HEIGHT))
        arr = np.asarray(img)
        arr = np.transpose(arr, (2, 1, 0))[np.newaxis, ...]  # (H, W, 3) -> (1, 3, W, H)
        dataset.append(arr)

    fd.close()
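
A hedged read-back sketch for the generated file (the constants must match whatever generate_dataset() used; the values here are illustrative):

import tables

IMG_WIDTH, IMG_HEIGHT = 64, 64   # illustrative; must match the writer
with tables.open_file("car_dataset_%dx%d.h5" % (IMG_WIDTH, IMG_HEIGHT), 'r') as fd:
    print(fd.root.data.nrows)     # number of images appended
    img0 = fd.root.data[0]        # one (3, IMG_WIDTH, IMG_HEIGHT) array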
Example #14
    def create_data_file(self):
        total_lines = self.filesize
        repeats = 1000
        batch_size = int(total_lines / repeats)
        self.filesize = batch_size * repeats
        filename = self.filename
        # initial creation of the data file (the if statement is added for
        # multiprocessing). To be safe, don't have a preexisting file with the
        # same filename in the database folder.
        if not os.path.exists(filename):
            ROW_SIZE = self.architecture[0] + self.architecture[-1]
            f = tables.open_file(filename, mode='w')
            atom = tables.Float64Atom()
            array_c = f.create_earray(f.root, 'data', atom, (0, ROW_SIZE))
            f.close()
            f = tables.open_file(filename,
                                 mode='a')  # file opened for concatenation
            for i in range(repeats):
                print(int(i / repeats * 100),
                      "% complete (data file generation)          ",
                      end="\r")
                x_input = self.generate_input(batch_size=batch_size)
                y_output = self.predict(x_input)
                data_rows = np.concatenate((x_input, y_output), axis=1)
                f.root.data.append(data_rows)
            f.close()
        else:
            print(
                'Data file exists. Waiting for 200 seconds before proceeding.')
            time.sleep(200)
Example #15
def unify_channel(out_fname, in_fnames, process_signs):
    """
    do unification for one channel
    """

    # find total number of spikes
    num_pos, num_neg, nsamp = count_spikes(in_fnames)
    print('Positive: {}, Negative: {}'.format(num_pos, num_neg))
    print('opening ' + out_fname)

    outfile = tables.open_file(out_fname, 'w')
    print(num_pos, num_neg, nsamp)

    start = {}
    stop = {}

    operate_signs = []

    for sign, nspk in zip(('pos', 'neg'), (num_pos, num_neg)):
        if sign not in process_signs:
            print('Skipping {} as requested'.format(sign))
            continue
        if nspk:
            operate_signs.append(sign)
            start[sign] = 0
        else:
            print('{} has no {} spikes'.format(out_fname, sign))
            continue

        outfile.create_group('/', sign)
        outfile.create_array('/' + sign,
                             'times',
                             atom=tables.Float64Atom(),
                             shape=(nspk, ))
        outfile.create_array('/' + sign,
                             'spikes',
                             atom=tables.Float32Atom(),
                             shape=(nspk, nsamp))

    for h5file in in_fnames:
        fid = tables.open_file(h5file, 'r')

        for sign in operate_signs:
            # times
            data = fid.get_node('/' + sign + '/times')
            stop[sign] = start[sign] + data.shape[0]
            out = outfile.get_node('/' + sign + '/times')
            print("Copying {} spikes to {}-{}".format(data.shape[0],
                                                      start[sign], stop[sign]))
            out[start[sign]:stop[sign]] = data[:]

            data = fid.get_node('/' + sign + '/spikes')
            out = outfile.get_node('/' + sign + '/spikes')
            out[start[sign]:stop[sign]] = data[:]

            start[sign] = stop[sign]

        fid.close()

    outfile.close()
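
The count_spikes helper is not shown in this snippet; a minimal stand-in consistent with how it is used might look like this (an assumption, not the project's actual implementation):

import tables

def count_spikes(in_fnames):
    """Hypothetical stand-in: total pos/neg spike counts and samples per spike."""
    num_pos = num_neg = nsamp = 0
    for h5file in in_fnames:
        with tables.open_file(h5file, 'r') as fid:
            for sign in ('pos', 'neg'):
                try:
                    spikes = fid.get_node('/' + sign + '/spikes')
                except tables.NoSuchNodeError:
                    continue
                nsamp = spikes.shape[1]
                if sign == 'pos':
                    num_pos += spikes.shape[0]
                else:
                    num_neg += spikes.shape[0]
    return num_pos, num_neg, nsamp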
Example #16
def check_and_write(array, filename, NUM_ENTRIES):
    if not os.path.isfile(filename):
        f = tables.open_file(filename, mode='w')
        atom = tables.Float64Atom()
        array_c = f.create_earray(f.root, 'data', atom, (0, 8))
        print(array)
        array_c.append(array)
        f.close()
        print("new file")
        return True
    f = tables.open_file(filename, mode='r')
    check = False
    # compare the candidate block against each stored block of NUM_ENTRIES rows
    for i in range(0, f.root.data.nrows, NUM_ENTRIES):
        c = np.array(f.root.data[i:i + NUM_ENTRIES, 0:])
        idx = np.where(abs(c[:, np.newaxis, :] - array).sum(axis=2) == 0)
        if len(idx[0]) == NUM_ENTRIES:
            check = True
            break
    f.close()
    if check:
        print("Duplicate Solution")
        return False
    else:
        print("Unique Solution")
        f = tables.open_file(filename, mode='a')
        f.root.data.append(array)
        f.close()
        return True
Example #17
def check_and_write(array, filename, NUM_ENTRIES, count2):
    if not os.path.isfile(filename):
        f = tables.open_file(filename, mode='w')
        atom = tables.Float64Atom()
        array_c = f.create_earray(f.root, 'data', atom, (0, 8))
        print(array)
        array_c.append(array)
        f.close()
        print("new file")
        return True
    f = tables.open_file(filename, mode='r')
    i = 0
    check = 0
    count = 0
    for arr in f.root.data:
        if count % NUM_ENTRIES == 0:  # check one stored block at a time
            c = np.array(f.root.data[count:count + NUM_ENTRIES, 0:])
            idx = np.where(abs((c[:, np.newaxis, :] - array)).sum(axis=2) == 0)
            if len(idx[0]) == NUM_ENTRIES:
                check += 1

        count += 1
    f.close()
    if check > 0:
        print("Duplicate Solution")
        print(check)
        return False, check
    else:
        print("Unique Solution")
        f = tables.open_file(filename, mode='a')
        f.root.data.append(array)
        f.close()
        return True, 0
Example #18
File: hdf5.py Project: nalamat/ears
    def __init__(self,
                 hdf5Node,
                 compLib='zlib',
                 compLevel=1,
                 expectedRows=300,
                 **kwargs):
        '''
        Args:
            hdf5Node (str): Path of the node to store data in HDF5 file.
            compLib (str): Compression library, should be one of the following:
                zlib, lzo, bzip2, blosc, blosc:blosclz, blosc:lz4,
                blosc:lz4hc, blosc:snappy, blosc:zlib, blosc:zstd
            compLevel (int): Level of compression, from 0 (no compression)
                to 9 (maximum compression).
        '''
        if not isinstance(hdf5Node, str):
            raise TypeError('`hdf5Node` should be a string')
        if contains(hdf5Node):
            raise NameError('HDF5 node %s already exists' % hdf5Node)

        self._hdf5Node = hdf5Node
        self._hdf5Filters = tb.Filters(complib=compLib, complevel=compLevel)
        self._lock = threading.Lock()
        self._partial = None

        createEArray(hdf5Node,
                     tb.Float64Atom(), (0, 2),
                     '',
                     self._hdf5Filters,
                     expectedrows=expectedRows)

        super().__init__(**kwargs)
Example #19
def write_bz2(data):
    bz2_filter = tables.Filters(complib='bzip2', complevel=9)
    bz2_out = tables.open_file('bzip2.h5', mode='w')
    atom = tables.Float64Atom()
    ca = bz2_out.create_carray(bz2_out.root, 'ca', atom, data.shape, filters=bz2_filter)
    ca[:] = data
    bz2_out.close()
Example #20
        def assign_attribute(obj_attr, attr_name, path, node):
            """ subfunction to serialize a given attribute """
            if isinstance(obj_attr, pq.Quantity) or isinstance(obj_attr, np.ndarray):
                if not lazy:
                    # we need to simplify custom quantities
                    if isinstance(obj_attr, pq.Quantity):
                        for un in obj_attr.dimensionality.keys():
                            if un.name not in pq.units.__dict__ or \
                                    not isinstance(pq.units.__dict__[un.name], pq.Quantity):
                                obj_attr = obj_attr.simplified
                                break

                    # we try to create the new array first, so as not to lose
                    # the data in case of any failure
                    if obj_attr.size == 0:
                        atom = tb.Float64Atom(shape=(1,))
                        new_arr = self._data.create_earray(path, attr_name + "__temp", atom, shape=(0,), expectedrows=1)
                    else:
                        new_arr = self._data.create_array(path, attr_name + "__temp", obj_attr)

                    if hasattr(obj_attr, "dimensionality"):
                        for un in obj_attr.dimensionality.items():
                            new_arr._f_setAttr("unit__" + un[0].name, un[1])
                    try:
                        self._data.remove_node(path, attr_name)
                    except Exception:
                        pass  # there is no array yet or the object is new
                    self._data.rename_node(path, attr_name, name=attr_name + "__temp")
            elif obj_attr is not None:
                node._f_setAttr(attr_name, obj_attr)
Example #21
def write_lzo(data):
    lzo_filter = tables.Filters(complib='lzo', complevel=9)
    lzo_out = tables.open_file('lzo.h5', mode='w')
    atom = tables.Float64Atom()
    ca = lzo_out.create_carray(lzo_out.root, 'ca', atom, data.shape, filters=lzo_filter)
    ca[:] = data
    lzo_out.close()
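
The three writers above differ only in the filter. An illustrative driver to compare the resulting file sizes (note that the lzo and bzip2 filters are only usable if the corresponding libraries are available in your PyTables build):

import os
import numpy as np

data = np.random.rand(1000, 1000)
write_zlib(data)
write_bz2(data)
write_lzo(data)
for fname in ('zlib.h5', 'bzip2.h5', 'lzo.h5'):
    print(fname, os.path.getsize(fname), 'bytes')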
Example #22
def compute_vector_field(h5file,
                         centers,
                         xs,
                         ys,
                         zs,
                         max_field=50.0,
                         group='vector'):
    '''compute a vector field that decreases quadratically with the
       distance to the center'''
    vector = h5file.create_group(h5file.root, group)
    dims = ['x', 'y', 'z']
    for i, dim in enumerate(dims):
        comp = h5file.create_earray(vector, dim, tables.Float64Atom(),
                                    (len(xs), len(ys), 0),
                                    'field {0}-component'.format(dim))
        for z in zs:
            coords = np.meshgrid(xs, ys, np.array(z))
            field_slice = np.zeros((len(xs), len(ys), 1))
            for center in centers:
                dist = np.sqrt((coords[0] - center[0])**2 +
                               (coords[1] - center[1])**2 +
                               (coords[2] - center[2])**2 + 1.0)
                field_slice += max_field * (coords[i] - center[i]) / dist**2
            comp.append(field_slice)
        h5file.flush()
Example #23
def _make_float_vlarray(h5file: tables.File, name: str,
                        attribute: np.ndarray) -> None:
    vlarray = h5file.create_vlarray(h5file.root,
                                    name=name,
                                    atom=tables.Float64Atom(shape=()))
    for a in attribute:
        vlarray.append(a)
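
Reading such a VLArray back yields one ndarray per appended row, so ragged data round-trips; a small sketch under that assumption (file name invented):

import numpy as np
import tables

ragged = np.empty(2, dtype=object)
ragged[0], ragged[1] = np.arange(3.0), np.arange(5.0)
with tables.open_file('vl_demo.h5', 'w') as f:
    _make_float_vlarray(f, 'ragged', ragged)
with tables.open_file('vl_demo.h5', 'r') as f:
    rows = f.root.ragged.read()
    print([r.shape for r in rows])   # [(3,), (5,)]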
Example #24
    def resize(h5file, start, stop):
        ensure_tables()
        # TODO is there any smarter and more efficient way to do this?

        data = h5file.get_node('/', "Data")
        try:
            gcolumns = h5file.create_group('/', "Data_", "Data")
        except tables.exceptions.NodeError:
            h5file.remove_node('/', "Data_", 1)
            gcolumns = h5file.create_group('/', "Data_", "Data")

        start = 0 if start is None else start
        stop = data.X.nrows if stop is None else stop

        atom = (tables.Float32Atom()
                if config.floatX == 'float32' else tables.Float64Atom())
        filters = DenseDesignMatrixPyTables.filters
        x = h5file.create_carray(gcolumns, 'X', atom=atom,
                                 shape=(stop - start, data.X.shape[1]),
                                 title="Data values", filters=filters)
        y = h5file.create_carray(gcolumns, 'y', atom=atom,
                                 shape=(stop - start, 10),
                                 title="Data targets", filters=filters)
        x[:] = data.X[start:stop]
        y[:] = data.y[start:stop]

        h5file.remove_node('/', "Data", 1)
        h5file.rename_node('/', "Data", "Data_")
        h5file.flush()
        return h5file, gcolumns
Example #25
    def init_hdf5(path, shapes):
        """
        Initialize hdf5 file to be used by the dataset
        """

        x_shape, y_shape = shapes
        # make pytables
        ensure_tables()
        h5file = tables.open_file(path, mode="w", title="SVHN Dataset")
        gcolumns = h5file.create_group(h5file.root, "Data", "Data")
        atom = (tables.Float32Atom()
                if config.floatX == 'float32' else tables.Float64Atom())
        filters = DenseDesignMatrixPyTables.filters
        h5file.create_carray(gcolumns,
                            'X',
                            atom=atom,
                            shape=x_shape,
                            title="Data values",
                            filters=filters)
        h5file.create_carray(gcolumns,
                            'y',
                            atom=atom,
                            shape=y_shape,
                            title="Data targets",
                            filters=filters)
        return h5file, gcolumns
Example #26
    def __init__(self, store_shape, file):
        try:
            import tables
        except ImportError:
            raise Exception('Use of DiskBackedStorage requires PyTables to be installed (try pip install tables).')

        self.__store_shape__ = store_shape
        self.__array__ = np.empty(store_shape)
        self.__max_rows__ = store_shape[0]

        self.__current_index__ = 0

        if isinstance(file, str):
            self.__f__ = tables.open_file(file, 'a')
        else:
            self.__f__ = file

        # check whether samples already exist in the file
        try:
            self.__f__.get_node('/samples')
        except tables.NoSuchNodeError:
            pass
        else:
            raise Exception('File already contains samples!')

        self.__dest_table__ = self.__f__.create_earray('/', 'samples', tables.Float64Atom(), (0, store_shape[1]))
Example #27
    def save_hdf5(self, filepath):
        import tables
        import datetime

        h5f = tables.open_file(filepath, 'w')
        filters = tables.Filters(complevel=5, complib='zlib', shuffle=True)
        h5f.set_node_attr(h5f.root, 'version', self._version)
        h5f.set_node_attr(h5f.root, 'created_at', self.created_at)
        h5f.set_node_attr(
            h5f.root, 'saved_at',
            datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC"))
        h5f.set_node_attr(h5f.root, 'label', self.label)
        h5f.set_node_attr(h5f.root, 'units', self.units)

        metadata_node = h5f.create_group(h5f.root, 'metadata')
        self.metadata.save_pytables(metadata_node)

        params = h5f.create_carray(h5f.root,
                                   'values',
                                   tables.Float64Atom(),
                                   self.values.shape,
                                   filters=filters)
        params[:] = self.values

        h5f.close()
Example #28
    def init_hdf5(self, path, shapes):
        """
        .. todo::

            WRITEME properly

        Initialize hdf5 file to be used by the dataset
        """

        x_shape, y_shape = shapes
        # make pytables
        ensure_tables()
        h5file = tables.open_file(path, mode="w", title="SVHN Dataset")
        gcolumns = h5file.create_group(h5file.root, "Data", "Data")
        atom = (tables.Float32Atom()
                if config.floatX == 'float32' else tables.Float64Atom())
        h5file.create_carray(gcolumns,
                            'X',
                            atom=atom,
                            shape=x_shape,
                            title="Data values",
                            filters=self.filters)
        h5file.create_carray(gcolumns,
                            'y',
                            atom=atom,
                            shape=y_shape,
                            title="Data targets",
                            filters=self.filters)
        return h5file, gcolumns
Example #29
    def write_hist(group_name: 'string with folder name to save histograms',
                   table_name: 'histogram name',
                   entries: 'np.array with bin content',
                   bins: 'list of np.array of bins',
                   out_of_range: 'np.array length=2 with events out of range',
                   errors: 'np.array with bins uncertainties',
                   labels: 'list with labels of the histogram',
                   scales: 'list with the scales of the histogram'):

        try:
            hist_group = getattr(file.root, group_name)
        except tb.NoSuchNodeError:
            hist_group = file.create_group(file.root, group_name)

        if table_name in hist_group:
            raise ValueError(f"Histogram {table_name} already exists")

        vlarray = file.create_vlarray(hist_group,
                                      table_name + '_bins',
                                      atom=tb.Float64Atom(shape=()),
                                      filters=tbl.filters(compression))
        for ibin in bins:
            vlarray.append(ibin)
        add_carray(hist_group, table_name, entries)
        add_carray(hist_group, table_name + '_outRange', out_of_range)
        add_carray(hist_group, table_name + '_errors', errors)
        file.create_array(hist_group, table_name + '_labels', labels)
        file.create_array(hist_group, table_name + '_scales', scales)
Example #30
def store_datadist(data_source, model, batch_size=10, fname='datamatrix.h5'):
    """
       Store the log-probability matrix for a given method.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    if args.model == 'QRNN': model.reset()
    hidden = model.init_hidden(batch_size)

    # Initialize a data matrix structure which can be stored directly to the disk.
    f = tables.open_file(fname, mode='w')
    atom = tables.Float64Atom()
    array_c = f.create_earray(f.root, 'data', atom, (0, 10000))

    # Add a row sequentially to the matrix for each different context.
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args, evaluation=True)
        output, weight, bias, hidden = model(data, hidden)
        pred_targets = torch.mm(output, weight.t()) + bias
        hidden = repackage_hidden(hidden)
        datadist = nn.LogSoftmax()(pred_targets)
        array_c.append(datadist.detach().cpu().numpy())

    # Close file.
    f.close()