Code example #1
0
File: hdf5.py — Project: jakeogh/SDF-Python
def _create_group(gobj, datasets):
    """Create an sdf.Group from an h5py group.

    Recursively converts the h5py group *gobj* (and all of its child groups
    and datasets) into the corresponding sdf objects.  *datasets* is passed
    through unchanged to ``_create_dataset`` for every dataset encountered.
    """

    # Copy every attribute except COMMENT, which maps to the group comment.
    group_attrs = {key: gobj.attrs[key] for key in gobj.attrs.keys() if key != 'COMMENT'}
    comment = gobj.attrs.get('COMMENT')

    ds_obj_list = []
    g_obj_list = []

    for ds_name in gobj.keys():
        # Use the public h5py classes rather than the private h5py._hl
        # module paths, and look each child up only once.
        child = gobj[ds_name]
        if isinstance(child, h5py.Dataset):
            ds_obj_list.append(child)
        elif isinstance(child, h5py.Group):
            g_obj_list.append(child)

    # Recurse into sub-groups, then convert this group's datasets.
    child_groups = [_create_group(cgobj, datasets) for cgobj in g_obj_list]
    ds_list = [_create_dataset(dsobj, datasets) for dsobj in ds_obj_list]

    # The group's own name is the last component of its absolute HDF5 path.
    name = gobj.name.split('/')[-1]

    return sdf.Group(name=name, comment=comment, attributes=group_attrs,
                     groups=child_groups, datasets=ds_list)
Code example #2
0
File: test.py — Project: PyWilhelm/Converter
    def test_validate_group(self):
        """Group names must be identifiers that start with a letter."""
        group = sdf.Group('8')

        # A name starting with a digit is rejected with exactly one error.
        expected = [
            'Object names must only contain letters, digits and underscores ("_") and must start with a letter'
        ]
        self.assertEqual(expected, sdf._validate_group(group, is_root=False))

        # Renaming to a valid identifier clears the error list.
        group.name = 'G1'
        self.assertEqual([], sdf._validate_group(group, is_root=False))
Code example #3
0
File: test_sdf.py — Project: jakeogh/SDF-Python
    def test_hierarchy(self):
        """Round-trip a group tree containing a nested sub-group."""

        # a time scale
        time_scale = sdf.Dataset(
            'Time',
            comment="A scale",
            data=np.linspace(0, 10, 101),
            unit='s',
            is_scale=True)

        # a 1-d signal attached to the time scale
        sine = sdf.Dataset(
            'sine',
            comment="A 1-d dataset /w attached scale",
            data=np.sin(time_scale.data),
            scales=[time_scale])

        # root group holding both datasets
        root = sdf.Group(
            name='/',
            comment="A test file",
            attributes={'A1': "my string"},
            datasets=[time_scale, sine])

        # a scalar with unit / display metadata
        alpha = sdf.Dataset(
            'alpha',
            comment="A scalar /w unit, display unit and display name",
            data=np.pi,
            display_name='Angle',
            unit='rad',
            display_unit='deg')

        # nested sub-group containing the scalar
        sub_group = sdf.Group(
            name='g1',
            comment="A sub-group",
            attributes={'A2': "Attribute in sub group"},
            datasets=[alpha])
        root.groups.append(sub_group)

        # write the hierarchy, then read it back from the file
        sdf.save('roundtrip.sdf', root)
        g2 = sdf.load('roundtrip.sdf', '/')
Code example #4
0
File: test_sdf.py — Project: jakeogh/SDF-Python
    def test_roundtrip(self):
        """A dataset and its scale compare equal after save and reload."""

        # scale dataset
        scale = sdf.Dataset('DS1',
                            comment="dataset 1",
                            data=np.array([0.1, 0.2, 0.3]),
                            display_name='Scale 1',
                            unit='U1',
                            display_unit='DU1',
                            is_scale=True)

        # 1D dataset attached to the scale
        signal = sdf.Dataset('DS2',
                             comment="dataset 2",
                             data=np.array([1, 2, 3]),
                             display_name='Dataset 2',
                             relative_quantity=True,
                             unit='U2',
                             display_unit='DU2',
                             scales=[scale])

        # root group plus an empty sub-group
        root = sdf.Group(name='/',
                         comment="my comment",
                         attributes={'A1': 'my string'},
                         datasets=[scale, signal])
        root.groups.append(sdf.Group(name='G2'))

        # write the group, then read only DS2 back
        sdf.save('test.sdf', root)
        ds2r = sdf.load('test.sdf', '/DS2')

        # the dataset and its attached scale survived the round trip
        self.assertDatasetsEqual(signal, ds2r)
        self.assertDatasetsEqual(signal.scales[0], ds2r.scales[0])
Code example #5
0
File: test_sdf.py — Project: jakeogh/SDF-Python
    def test_data_types(self):
        """float32, float64 and int32 dtypes survive a save/load cycle."""

        name_to_dtype = (('f', np.float32), ('d', np.float64), ('i', np.int32))

        # one small dataset per dtype
        datasets = [
            sdf.Dataset(name=name, data=np.asarray([1, 2, 3], dtype=dtype))
            for name, dtype in name_to_dtype
        ]

        sdf.save('data_types.sdf', sdf.Group(name='/', datasets=datasets))

        # reload and verify each dtype was preserved
        g = sdf.load('data_types.sdf')
        for name, dtype in name_to_dtype:
            self.assertEqual(g[name].data.dtype, dtype)
Code example #6
0
File: test.py — Project: PyWilhelm/Converter
    def test_roundtrip(self):
        """A dataset loaded back from a file equals the one that was saved."""

        # scale dataset
        scale = sdf.Dataset('DS1',
                            comment='dataset 1',
                            data=np.array([0.1, 0.2, 0.3]),
                            display_name='Scale 1',
                            scale_name='Scale 1',
                            quantity='Q1',
                            unit='U1',
                            display_unit='DU1',
                            is_scale=True)

        # 1D dataset riding on that scale
        signal = sdf.Dataset('DS2',
                             comment='dataset 2',
                             data=np.array([1, 2, 3]),
                             display_name='Dataset 2',
                             quantity='Q2',
                             unit='U2',
                             display_unit='DU2',
                             scales=[scale])

        # root group with string, float and int attributes
        attributes = {
            'A1': 'my string',
            'A2': 0.1,
            'A3': 1
        }
        root = sdf.Group(name='/',
                         comment='my comment',
                         attributes=attributes,
                         datasets=[scale, signal])

        # write, then read DS2 back
        sdf.save('test.sdf', root)
        ds2r = sdf.load('test.sdf', '/DS2')

        # content unchanged
        self.assertEqual(signal, ds2r)
Code example #7
0
#   Q is number of pressure divisions
lenG = len(G)
lenx = len(x)
lenP = len(P)

# Reorder the raw 2-D table into a (G, x, P) cube: row i + k * lenG of
# q_raw holds the values for mass-flux index i at pressure index k.
# NOTE: the original used xrange, which is Python 2 only; range works on
# both Python 2 and Python 3.
q = np.zeros((lenG, lenx, lenP))
for i in range(lenG):
    for j in range(lenx):
        for k in range(lenP):
            q[i, j, k] = q_raw[i + k * lenG, j]

# Create the scale datasets for each axis of the lookup table:
ds_G = sdf.Dataset('G',
                   data=G,
                   unit='kg/(m2.s)',
                   is_scale=True,
                   display_name='Mass Flux')
ds_x = sdf.Dataset('x',
                   data=x,
                   unit='1',
                   is_scale=True,
                   display_name='Quality')
ds_P = sdf.Dataset('P',
                   data=P,
                   unit='Pa',
                   is_scale=True,
                   display_name='Pressure')

# The heat-flux cube itself, attached to the three scales:
ds_q = sdf.Dataset('q', data=q, unit='W/m2', scales=[ds_G, ds_x, ds_P])

# Create the root group and write the file:
g = sdf.Group('/', comment='2006 CHF LUT', datasets=[ds_G, ds_x, ds_P, ds_q])
sdf.save('../Data/2006LUT.sdf', g)
Code example #8
0
# get the first sheet of the workbook
sheet = book.sheet_by_index(0)

# names are in row 0, units in row 1 (columns 1 and 2)
n_t = sheet.cell_value(0, 1)
u_t = sheet.cell_value(1, 1)
n_u = sheet.cell_value(0, 2)
u_u = sheet.cell_value(1, 2)

# the data starts in row 3 (index 2)
t = array(sheet.col_values(1, 2, sheet.nrows))
u = array(sheet.col_values(2, 2, sheet.nrows))

# build the datasets: t is the scale, u is attached to it
ds_t = sdf.Dataset(n_t, data=t, unit=u_t, is_scale=True, display_name='Time')
ds_u = sdf.Dataset(n_u, data=u, unit=u_u, scales=[ds_t])

# root group recording where the data came from
g = sdf.Group('/', comment='Imported from ' + filename, datasets=[ds_t, ds_u])

# write next to the input file, swapping the extension for .sdf
outfile = os.path.splitext(filename)[0] + '.sdf'
sdf.save(outfile, g)
Code example #9
0
File: sine.py — Project: jakeogh/SDF-Python
"""
Create a simple SDF file
"""

import sdf
import numpy as np

# create the data arrays
t = np.linspace(0, 10, 100)
v = np.sin(t)

# create the datasets
ds_t = sdf.Dataset('t', data=t, unit='s', is_scale=True, display_name='Time')
ds_v = sdf.Dataset('v', data=v, unit='V', scales=[ds_t])

# create the root group
g = sdf.Group('/', comment='A sine voltage', datasets=[ds_t, ds_v])

# write the SDF file
sdf.save('sine.sdf', g)

# read the SDF file
ds_v2 = sdf.load('sine.sdf', '/v', unit='V', scale_units=['s'])
ds_t2 = ds_v2.scales[0]

t2 = ds_t2.data
v2 = ds_v2.data
Code example #10
0
File: save_file.py — Project: PyWilhelm/Converter
def save_to_sdf(dataset_list, output_filename):
    """Wrap *dataset_list* in a root group and write it to *output_filename*."""
    root = sdf.Group('/', comment='converted file', datasets=dataset_list)
    sdf.save(output_filename, root)
Code example #11
0
File: test.py — Project: PyWilhelm/Converter
    def test_3D_example(self):
        """Build, validate and save a 3-D loss table for an e-machine."""

        RPM2RADS = 2 * math.pi / 60

        # model parameters
        kfric = 1  # [Ws/rad] angular damping coefficient [0;100]
        kfric3 = 1.5e-6  # [Ws3/rad3] angular damping coefficient (3rd order) [0;10-3]
        psi = 0.2  # [Vs] flux linkage [0.001;10]
        res = 5e-3  # [Ohm] resistance [0;100]
        u_ref = 200  # [V] reference DC voltage [0;1000]
        k_u = 5  # linear voltage coefficient [-100;100]

        # independent variables: torque, speed (rad/s) and DC voltage
        tau = np.arange(0, 230 + 10, 10)
        w = np.concatenate((np.arange(
            0, 500, 100), np.arange(500, 12e3 + 500, 500))) * RPM2RADS
        u = np.asarray([200, 300, 400])

        # evaluate the power losses on the full 3-D grid
        TAU, W, U = np.meshgrid(tau, w, u, indexing='ij')
        P_loss = kfric * W + kfric3 * W**3 + (
            res * (TAU / psi)**2) + k_u * (U - u_ref)

        # the three scale datasets
        torque_scale = sdf.Dataset(
            'tau',
            comment='Torque',
            data=tau,
            scale_name='Torque',
            quantity='Torque',
            unit='N.m',
            is_scale=True)

        speed_scale = sdf.Dataset(
            'w',
            comment='Speed',
            data=w,
            scale_name='Speed',
            quantity='AngularVelocity',
            unit='rad/s',
            display_unit='rpm',
            is_scale=True)

        voltage_scale = sdf.Dataset(
            'u',
            comment='DC voltage',
            data=u,
            scale_name='DC voltage',
            quantity='Voltage',
            unit='V',
            is_scale=True)

        # the loss table itself, attached to all three scales
        # NOTE(review): quantity/unit look copy-pasted from the voltage
        # scale (losses are usually expressed in W) — preserved as-is.
        loss_ds = sdf.Dataset(
            'P_loss',
            comment='Power losses',
            data=P_loss,
            quantity='Voltage',
            unit='V',
            scales=[torque_scale, speed_scale, voltage_scale])

        # root group for the whole table
        root = sdf.Group(
            name='/',
            comment=
            'Example loss characteristics of an e-machine w.r.t. torque, speed and DC voltage',
            attributes={'AUTHOR': 'John Doe'},
            datasets=[torque_scale, speed_scale, voltage_scale, loss_ds])

        # the group must validate cleanly before saving
        self.assertEqual([], sdf.validate(root))

        sdf.save('emachine.sdf', root)
Code example #12
0
                   data=x,
                   unit='1',
                   is_scale=True,
                   display_name='Quality')
# Scale datasets for the remaining axes of the 2001 FBCoef lookup table:
ds_dT = sdf.Dataset('dT',
                    data=dT,
                    unit='K',
                    is_scale=True,
                    display_name='deltaT')
ds_G = sdf.Dataset('G',
                   data=G,
                   unit='kg/(m2.s)',
                   is_scale=True,
                   display_name='Mass Flux')
ds_P = sdf.Dataset('P',
                   data=P,
                   unit='Pa',
                   is_scale=True,
                   display_name='Pressure')

# The heat-transfer-coefficient table, attached to its four scales:
ds_alpha = sdf.Dataset('alpha',
                       data=alpha,
                       unit='W/(m2.K)',
                       scales=[ds_x, ds_dT, ds_G, ds_P])

# Create the root group and write the file:
g = sdf.Group('/',
              comment='2001 FBCoef LUT',
              datasets=[ds_x, ds_dT, ds_G, ds_P, ds_alpha])
sdf.save('../Data/2001LUTFB.sdf', g)
# NOTE: removed a stray duplicate save to '../Data/2006LUT.sdf' — this group
# holds the 2001 FBCoef LUT and would have overwritten the 2006 LUT file
# with the wrong table (copy-paste leftover from the 2006 LUT script).