Example #1
    def test_validate_dataset(self):
        ds1 = sdf.Dataset('DS1')

        ds1.data = 1
        errors = sdf._validate_dataset(ds1)
        self.assertEqual(["Dataset.data must be a numpy.ndarray"], errors)

        ds1.data = np.array([])
        errors = sdf._validate_dataset(ds1)
        self.assertEqual(["Dataset.data must not be empty"], errors)

        ds1.data = np.array(1).astype(np.float32)
        errors = sdf._validate_dataset(ds1)
        self.assertEqual(["Dataset.data.dtype must be numpy.float64"], errors)

        ds1.data = np.array([1.0, 2.0])
        errors = sdf._validate_dataset(ds1)
        self.assertEqual(
            ["The number of scales does not match the number of dimensions"],
            errors)

        ds2 = sdf.Dataset('DS2', data=np.array([0.0, 1.0, 2.0]), is_scale=True)
        ds1.scales = [ds2]
        errors = sdf._validate_dataset(ds1)
        self.assertEqual([], errors)

        ds2.data = np.array([[1.0, 2.0], [3.0, 4.0]])
        errors = sdf._validate_dataset(ds2)
        self.assertEqual(["Scales must be one-dimensional"], errors)

        ds2.data = np.array([0, 1.0, 1.0])
        errors = sdf._validate_dataset(ds2)
        self.assertEqual(["Scales must be strictly monotonic increasing"],
                         errors)
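
The assertions above double as a specification of the validation rules. As a reading aid, here is a minimal sketch of a validator that would satisfy exactly these error strings; it is an illustration, not the library's actual _validate_dataset:

import numpy as np

def validate_dataset_sketch(ds):
    # Dataset.data must be a non-empty float64 ndarray
    if not isinstance(ds.data, np.ndarray):
        return ["Dataset.data must be a numpy.ndarray"]
    if ds.data.size < 1:
        return ["Dataset.data must not be empty"]
    if ds.data.dtype != np.float64:
        return ["Dataset.data.dtype must be numpy.float64"]
    if ds.is_scale:
        # scales must be 1-d and strictly increasing
        if ds.data.ndim != 1:
            return ["Scales must be one-dimensional"]
        if np.any(np.diff(ds.data) <= 0):
            return ["Scales must be strictly monotonic increasing"]
    elif ds.data.ndim > 0 and len(ds.scales) != ds.data.ndim:
        return ["The number of scales does not match the number of dimensions"]
    return []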
Example #2
File: hdf5.py Project: jakeogh/SDF-Python
def _create_dataset(dsobj, datasets):
    """ Create a dataset from an h5py dataset """

    _, name = os.path.split(dsobj.name)
    ds = sdf.Dataset(name, data=dsobj.value)

    for attr in dsobj.attrs:
        if attr == 'COMMENT':
            ds.comment = _to_python_str(dsobj.attrs[attr])
        elif attr == 'NAME':
            ds.display_name = _to_python_str(dsobj.attrs[attr])
        elif attr == 'RELATIVE_QUANTITY' and _to_python_str(dsobj.attrs[attr]) == 'TRUE':
            ds.relative_quantity = True
        elif attr == 'UNIT':
            ds.unit = _to_python_str(dsobj.attrs[attr])
        elif attr == 'DISPLAY_UNIT':
            ds.display_unit = _to_python_str(dsobj.attrs[attr])
        elif attr == 'CLASS' and _to_python_str(dsobj.attrs[attr]) == 'DIMENSION_SCALE':
            ds.is_scale = True
        elif attr == 'REFERENCE_LIST':
            ds.is_scale = True
        elif attr == 'DIMENSION_LIST':
            # dimension links are resolved separately via ds.scales
            pass
        else:
            ds.attributes[attr] = _to_python_str(dsobj.attrs[attr])

    ds.scales = [None] * ds.data.ndim

    datasets[dsobj] = ds

    return ds
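
The excerpt calls a module-private helper _to_python_str that is not shown. A plausible minimal sketch, assuming its only job is to decode h5py byte attributes into Python strings:

def _to_python_str(value):
    # h5py attributes may come back as bytes; decode them to str
    if isinstance(value, bytes):
        return value.decode('utf-8')
    return str(value)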
Example #3
    def test_data_types(self):

        ds_f = sdf.Dataset(name='f',
                           data=np.asarray([1, 2, 3], dtype=np.float32))
        ds_d = sdf.Dataset(name='d',
                           data=np.asarray([1, 2, 3], dtype=np.float64))
        ds_i = sdf.Dataset(name='i',
                           data=np.asarray([1, 2, 3], dtype=np.int32))

        g = sdf.Group(name='/', datasets=[ds_f, ds_d, ds_i])

        sdf.save('data_types.sdf', g)

        g = sdf.load('data_types.sdf')

        self.assertEqual(g['f'].data.dtype, np.float32)
        self.assertEqual(g['d'].data.dtype, np.float64)
        self.assertEqual(g['i'].data.dtype, np.int32)
Example #4
    def test_hierarchy(self):

        # create a scale
        ds_time = sdf.Dataset('Time',
                              comment="A scale",
                              data=np.linspace(0, 10, 101),
                              unit='s',
                              is_scale=True)

        ds_sine = sdf.Dataset('sine',
                              comment="A 1-d dataset /w attached scale",
                              data=np.sin(ds_time.data),
                              scales=[ds_time])

        # create the root group
        g = sdf.Group(name='/',
                      comment="A test file",
                      attributes={'A1': "my string"},
                      datasets=[ds_time, ds_sine])

        # create a scalar dataset
        ds_alpha = sdf.Dataset(
            'alpha',
            comment="A scalar /w unit, display unit and display name",
            data=np.pi,
            display_name='Angle',
            unit='rad',
            display_unit='deg')

        # create a sub group
        g1 = sdf.Group(name='g1',
                       comment="A sub-group",
                       attributes={'A2': "Attribute in sub group"},
                       datasets=[ds_alpha])

        g.groups.append(g1)

        # save the group
        sdf.save('roundtrip.sdf', g)

        # load the group from the file
        g2 = sdf.load('roundtrip.sdf', '/')
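
The excerpt stops right after the reload. A natural continuation (an assumption, not part of the original test) would assert that the reloaded group matches what was saved:

        self.assertEqual(g.comment, g2.comment)
        self.assertEqual(g.attributes['A1'], g2.attributes['A1'])
        self.assertEqual(len(g.datasets), len(g2.datasets))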
Example #5
File: test.py Project: PyWilhelm/Converter
    def test_roundtrip(self):

        # create a scale
        ds1 = sdf.Dataset('DS1',
                          comment='dataset 1',
                          data=np.array([0.1, 0.2, 0.3]),
                          display_name='Scale 1',
                          scale_name='Scale 1',
                          quantity='Q1',
                          unit='U1',
                          display_unit='DU1',
                          is_scale=True)

        # create a 1D dataset
        ds2 = sdf.Dataset('DS2',
                          comment='dataset 2',
                          data=np.array([1, 2, 3]),
                          display_name='Dataset 2',
                          quantity='Q2',
                          unit='U2',
                          display_unit='DU2',
                          scales=[ds1])

        # create a group
        g = sdf.Group(name='/',
                      comment='my comment',
                      attributes={
                          'A1': 'my string',
                          'A2': 0.1,
                          'A3': 1
                      },
                      datasets=[ds1, ds2])

        # save the group
        sdf.save('test.sdf', g)

        # load DS2 from the file
        ds2r = sdf.load('test.sdf', '/DS2')

        # make sure the content is still the same
        self.assertEqual(ds2, ds2r)
Example #6
import sdf
from scipy.io import loadmat


def load_from_mat(input_filename, schema_data=None):
    mat_data = loadmat(input_filename)
    # drop the metadata entries that scipy.io.loadmat adds to every MAT-file
    for meta_key in ('__header__', '__globals__', '__version__'):
        mat_data.pop(meta_key, None)
    data_list = [sdf.Dataset(key, data=mat_data[key][0].tolist())
                 for key in mat_data.keys()]
    if schema_data is None:
        return data_list
    return load_from_mat_padding(data_list, schema_data)
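
A minimal usage sketch (the file name is hypothetical):

# convert every variable in a MAT-file into sdf.Dataset objects
datasets = load_from_mat('measurements.mat')
for ds in datasets:
    print(ds.name, ds.data)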
Example #7
    def test_roundtrip(self):

        # create a scale
        ds1 = sdf.Dataset('DS1',
                          comment="dataset 1",
                          data=np.array([0.1, 0.2, 0.3]),
                          display_name='Scale 1',
                          unit='U1',
                          display_unit='DU1',
                          is_scale=True)

        # create a 1D dataset
        ds2 = sdf.Dataset('DS2',
                          comment="dataset 2",
                          data=np.array([1, 2, 3]),
                          display_name='Dataset 2',
                          relative_quantity=True,
                          unit='U2',
                          display_unit='DU2',
                          scales=[ds1])

        # create a group
        g = sdf.Group(name='/',
                      comment="my comment",
                      attributes={'A1': 'my string'},
                      datasets=[ds1, ds2])

        g2 = sdf.Group(name='G2')
        g.groups.append(g2)

        # save the group
        sdf.save('test.sdf', g)

        # load DS2 from the file
        ds2r = sdf.load('test.sdf', '/DS2')

        # make sure the content is still the same
        self.assertDatasetsEqual(ds2, ds2r)
        self.assertDatasetsEqual(ds2.scales[0], ds2r.scales[0])
Example #8
File: test.py Project: PyWilhelm/Converter
    def test_validate_dataset(self):
        ds1 = sdf.Dataset('DS1')

        ds1.data = 1
        errors = sdf._validate_dataset(ds1)
        self.assertEqual(['Dataset.data must be a numpy.ndarray'], errors)

        ds1.data = np.array([])
        errors = sdf._validate_dataset(ds1)
        self.assertEqual(['Dataset.data must not be empty'], errors)

        ds1.data = np.array(1).astype(np.float32)
        errors = sdf._validate_dataset(ds1)
        self.assertEqual(
            ['Dataset.data.dtype must be one of numpy.float64 or numpy.int32'],
            errors)

        ds1.data = np.array([1.0, 2.0])
        errors = sdf._validate_dataset(ds1)
        self.assertEqual(
            ['The number of scales does not match the number of dimensions'],
            errors)

        ds2 = sdf.Dataset('DS2', data=np.array([0, 1, 2]), is_scale=True)
        ds1.scales = [ds2]
        errors = sdf._validate_dataset(ds1)
        self.assertEqual([], errors)

        ds2.data = np.array([[1, 2], [3, 4]])
        errors = sdf._validate_dataset(ds2)
        self.assertEqual(['Scales must be one-dimensional'], errors)

        ds2.data = np.array([0, 1, 1])
        errors = sdf._validate_dataset(ds2)
        self.assertEqual(['Scales must be strictly monotonic increasing'],
                         errors)
Example #9
import numpy as np
import sdf

# G, x, P and the raw table q_raw are assumed to be loaded earlier in the script
#   lenG is the number of mass flux divisions
#   lenx is the number of quality divisions
#   lenP is the number of pressure divisions
lenG = len(G)
lenx = len(x)
lenP = len(P)
q = np.zeros((lenG, lenx, lenP))
for i in range(lenG):
    for j in range(lenx):
        for k in range(lenP):
            q[i, j, k] = q_raw[i + k * lenG, j]

# Create the datasets:
ds_G = sdf.Dataset('G',
                   data=G,
                   unit='kg/(m2.s)',
                   is_scale=True,
                   display_name='Mass Flux')
ds_x = sdf.Dataset('x',
                   data=x,
                   unit='1',
                   is_scale=True,
                   display_name='Quality')
ds_P = sdf.Dataset('P',
                   data=P,
                   unit='Pa',
                   is_scale=True,
                   display_name='Pressure')
ds_q = sdf.Dataset('q', data=q, unit='W/m2', scales=[ds_G, ds_x, ds_P])

# Create the root group and write the file:
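
The excerpt ends at this comment. Following the pattern of the other examples, the missing lines would look roughly like this (the group comment and file name are assumptions):

g = sdf.Group('/',
              comment='Heat flux as a function of mass flux, quality and pressure',
              datasets=[ds_G, ds_x, ds_P, ds_q])
sdf.save('heat_flux.sdf', g)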
Example #10
import os

import sdf
import xlrd
from numpy import array

# 'filename' is assumed to be defined earlier in the script
book = xlrd.open_workbook(filename)

# get the first sheet
sh = book.sheet_by_index(0)

# get the names, quantities and units
n_t = sh.cell_value(0, 1)
u_t = sh.cell_value(1, 1)

n_u = sh.cell_value(0, 2)
u_u = sh.cell_value(1, 2)

# get the data
col_t = sh.col_values(1, 2, sh.nrows)
col_u = sh.col_values(2, 2, sh.nrows)

# create the data arrays
t = array(col_t)
u = array(col_u)

# create the datasets
ds_t = sdf.Dataset(n_t, data=t, unit=u_t, is_scale=True, display_name='Time')
ds_u = sdf.Dataset(n_u, data=u, unit=u_u, scales=[ds_t])

# create the root group
g = sdf.Group('/', comment='Imported from ' + filename, datasets=[ds_t, ds_u])

# change the file extension
outfile = os.path.splitext(filename)[0] + '.sdf'

# write the SDF file
sdf.save(outfile, g)
Example #11
File: sine.py Project: jakeogh/SDF-Python
"""
Create a simple SDF file
"""

import sdf
import numpy as np

# create the data arrays
t = np.linspace(0, 10, 100)
v = np.sin(t)

# create the datasets
ds_t = sdf.Dataset('t', data=t, unit='s', is_scale=True, display_name='Time')
ds_v = sdf.Dataset('v', data=v, unit='V', scales=[ds_t])

# create the root group
g = sdf.Group('/', comment='A sine voltage', datasets=[ds_t, ds_v])

# write the SDF file
sdf.save('sine.sdf', g)

# read the SDF file
ds_v2 = sdf.load('sine.sdf', '/v', unit='V', scale_units=['s'])
ds_t2 = ds_v2.scales[0]

t2 = ds_t2.data
v2 = ds_v2.data
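
A quick sanity check of the roundtrip (a sketch, not part of the original script):

# the reloaded arrays should match the originals
assert np.allclose(t, t2)
assert np.allclose(v, v2)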
Example #12
File: test.py Project: PyWilhelm/Converter
    def test_3D_example(self):

        RPM2RADS = 2 * math.pi / 60

        kfric = 1  # [Ws/rad] angular damping coefficient [0;100]
        kfric3 = 1.5e-6  # [Ws3/rad3] angular damping coefficient (3rd order) [0;1e-3]
        psi = 0.2  # [Vs] flux linkage [0.001;10]
        res = 5e-3  # [Ohm] resistance [0;100]
        u_ref = 200  # [V] reference DC voltage [0;1000]
        k_u = 5  # linear voltage coefficient [-100;100]

        tau = np.arange(0, 230 + 10, 10)
        w = np.concatenate((np.arange(0, 500, 100),
                            np.arange(500, 12e3 + 500, 500))) * RPM2RADS
        u = np.asarray([200, 300, 400])

        # calculate the power losses
        TAU, W, U = np.meshgrid(tau, w, u, indexing='ij')

        P_loss = kfric * W + kfric3 * W**3 + (
            res * (TAU / psi)**2) + k_u * (U - u_ref)

        # create the scales
        ds_tau = sdf.Dataset('tau',
                             comment='Torque',
                             data=tau,
                             scale_name='Torque',
                             quantity='Torque',
                             unit='N.m',
                             is_scale=True)

        ds_w = sdf.Dataset('w',
                           comment='Speed',
                           data=w,
                           scale_name='Speed',
                           quantity='AngularVelocity',
                           unit='rad/s',
                           display_unit='rpm',
                           is_scale=True)

        ds_u = sdf.Dataset('u',
                           comment='DC voltage',
                           data=u,
                           scale_name='DC voltage',
                           quantity='Voltage',
                           unit='V',
                           is_scale=True)

        # create the dataset
        ds_P_loss = sdf.Dataset('P_loss',
                                comment='Power losses',
                                data=P_loss,
                                quantity='Power',
                                unit='W',
                                scales=[ds_tau, ds_w, ds_u])

        # create a group
        g = sdf.Group(
            name='/',
            comment='Example loss characteristics of an e-machine '
                    'w.r.t. torque, speed and DC voltage',
            attributes={'AUTHOR': 'John Doe'},
            datasets=[ds_tau, ds_w, ds_u, ds_P_loss])

        errors = sdf.validate(g)
        self.assertEqual([], errors)

        sdf.save('emachine.sdf', g)
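
As a follow-up, the file could be read back with explicit units, mirroring the sdf.load call in example #11 (a sketch; the unit strings come from the datasets above):

        ds_P_loss2 = sdf.load('emachine.sdf', '/P_loss',
                              unit='W',
                              scale_units=['N.m', 'rad/s', 'V'])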
Example #13
import numpy as np
import sdf
import xlrd


def load_from_xlsx(input_filename):
    # __get_workbook_dict is a private helper defined elsewhere in the module
    workbook = xlrd.open_workbook(input_filename)
    xlsx_data_list = __get_workbook_dict(workbook)
    return [sdf.Dataset(**data) for data in xlsx_data_list]

# x, dT, G, P and the raw table alpha_raw are assumed to be loaded earlier
lenx = len(x)
lendT = len(dT)
lenG = len(G)
lenP = len(P)
alpha = np.zeros((lenx, lendT, lenG, lenP))

for i in range(lenx):
    for j in range(lendT):
        for k in range(lenG):
            for r in range(lenP):
                alpha[i, j, k, r] = alpha_raw[i + k * lenx + r * lenG * lenx, j]

# Create the datasets:
ds_x = sdf.Dataset('x',
                   data=x,
                   unit='1',
                   is_scale=True,
                   display_name='Quality')
ds_dT = sdf.Dataset('dT',
                    data=dT,
                    unit='K',
                    is_scale=True,
                    display_name='deltaT')
ds_G = sdf.Dataset('G',
                   data=G,
                   unit='kg/(m2.s)',
                   is_scale=True,
                   display_name='Mass Flux')
ds_P = sdf.Dataset('P',
                   data=P,
                   unit='Pa',
                   is_scale=True,
                   display_name='Pressure')
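
The excerpt is cut off here. By analogy with example #9, the remaining steps would attach the four scales to the 4-D dataset and write the file (a sketch; the unit and file name are assumptions):

ds_alpha = sdf.Dataset('alpha',
                       data=alpha,
                       unit='W/(m2.K)',
                       scales=[ds_x, ds_dT, ds_G, ds_P])

g = sdf.Group('/', datasets=[ds_x, ds_dT, ds_G, ds_P, ds_alpha])
sdf.save('alpha.sdf', g)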