Example #1
    def test_roundtrip_load_sdf(self):
        
        # create two scales
        ds0 = sdf.Dataset('DS0',
                          comment='dataset 0',
                          data=np.array([0.1, 0.2, 0.3, 0.4]),
                          display_name='Scale 1',
                          scale_name='Scale 1',
                          quantity='Q1',
                          unit='U1',
                          display_unit='DU1',
                          is_scale=True)
        
        ds1 = sdf.Dataset('DS1',
                          comment='dataset 1',
                          data=np.array([0.1, 0.2, 0.3]),
                          display_name='Scale 1',
                          scale_name='Scale 1',
                          quantity='Q1',
                          unit='U1',
                          display_unit='DU1',
                          is_scale=True)
        
        # create a 2D dataset
        ds2 = sdf.Dataset('DS2',
                          comment='dataset 2',
                          data=np.array([[0.1, 0.2, 0.3, 1], [0.1, 0.2, 0.3, 1], [0.1, 0.2, 0.3, 1]]),
                          display_name='Dataset 2',
                          quantity='Q2',
                          unit='U2',
                          display_unit='DU2',
                          scales=[ds0, ds1])
        
        # create a group
        g = sdf.Group(name='/',
                      comment='my comment',
                      attributes={'A1': 'my string', 'A2': 0.1, 'A3': 1},
                      datasets=[ds2, ds1, ds0])
        
        
        # save the group
        sdf.save('test.sdf', g)
        

        # load the file back
        sdf1 = sdf.load_sdf('test.sdf')
        
        
        sdf.save('test1.sdf', sdf1)
        
        sdf2 = sdf.load_sdf('test1.sdf')
        
        # pickle both loaded groups and compare hashes to check that the
        # save/load roundtrip is stable
        with open('p1.txt', 'wb') as f:
            pickle.dump(sdf1, f)
        with open('p2.txt', 'wb') as f:
            pickle.dump(sdf2, f)
            
        self.assertEqual(hashlib.md5(open('p1.txt', 'rb').read()).hexdigest(), 
                         hashlib.md5(open('p2.txt', 'rb').read()).hexdigest())
Example #2
    def save_as_sdf(self):
        # TODO: plausibility checks

        datasets = self.get_sdf_datasets()
        _group = sdf.Group(name='/', comment='my comment',
                           attributes=self._attributes, datasets=datasets)

        filename = os.path.join(__conf__['outputPath'], 'tmp_sdf', 'report-%f.sdf' % (time.time(),))
        sdf.save(filename, _group)
        return filename
Example #3
    def test_roundtrip(self):
        
        # create a scale
        ds1 = sdf.Dataset('DS1',
                          comment='dataset 1',
                          data=np.array([0.1, 0.2, 0.3]),
                          display_name='Scale 1',
                          scale_name='Scale 1',
                          quantity='Q1',
                          unit='U1',
                          display_unit='DU1',
                          is_scale=True)
        
        # create a 1D dataset
        ds2 = sdf.Dataset('DS2',
                          comment='dataset 2',
                          data=np.array([1, 2, 3]),
                          display_name='Dataset 2',
                          quantity='Q2',
                          unit='U2',
                          display_unit='DU2',
                          scales=[ds1])

        # create another scale
        ds3 = sdf.Dataset('DS3',
                          comment='dataset 3',
                          data=np.array([0.1, 0.2, 0.3]),
                          display_name='Scale 1',
                          scale_name='Scale 1',
                          quantity='Q1',
                          unit='U1',
                          display_unit='DU1',
                          is_scale=True)
        
        # create a group
        g = sdf.Group(name='/DS2',
                      comment='my comment',
                      attributes={'A1': 'my string', 'A2': 0.1, 'A3': 1},
                      datasets=[ds1, ds2])
        
        g2 = sdf.Group(name='/G2',
                       comment='my comment111',
                       attributes={'A1': 'my string', 'A2': 0.1, 'A3': 1},
                       datasets=[ds3])
        g.groups.append(g2)
        
        # save the group
        sdf.save('test.sdf', g)

        # load DS2 from the file        
        ds2r = sdf.load('test.sdf', '/DS2')
        
        print(ds2r.name)
        

        # make sure the content is still the same
        self.assertEqual(ds2, ds2r)
Example #4
    def merge_and_save_sdf(self, other_sde, modify_ds=lambda x: x, modify_attrib=lambda x: x, comment=""):
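        # combine the datasets and attributes of both objects; the caller can
        # post-process them through the modify_ds and modify_attrib hooks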
        datasets_new = modify_ds(self.get_sdf_datasets() + other_sde.get_sdf_datasets())
        attributes = copy.deepcopy(self._attributes)
        attributes.update(other_sde._attributes)
        attributes_new = modify_attrib(attributes)

        _group = sdf.Group(name='/', comment=comment,
                           attributes=attributes_new, datasets=datasets_new)

        filename = os.path.join(__conf__['outputPath'], 'tmp_sdf', 'report-%f.sdf' % (time.time(),))
        sdf.save(filename, _group)
        return filename
Example #5
    def test_data_types(self):

        ds_f = sdf.Dataset(name='f',
                           data=np.asarray([1, 2, 3], dtype=np.float32))
        ds_d = sdf.Dataset(name='d',
                           data=np.asarray([1, 2, 3], dtype=np.float64))
        ds_i = sdf.Dataset(name='i',
                           data=np.asarray([1, 2, 3], dtype=np.int32))

        g = sdf.Group(name='/', datasets=[ds_f, ds_d, ds_i])

        sdf.save('data_types.sdf', g)

        g = sdf.load('data_types.sdf')

        self.assertEqual(g['f'].data.dtype, np.float32)
        self.assertEqual(g['d'].data.dtype, np.float64)
        self.assertEqual(g['i'].data.dtype, np.int32)
Example #6
    def test_hierarchy(self):

        # create a scale
        ds_time = sdf.Dataset('Time',
                              comment="A scale",
                              data=np.linspace(0, 10, 101),
                              unit='s',
                              is_scale=True)

        ds_sine = sdf.Dataset('sine',
                              comment="A 1-d dataset /w attached scale",
                              data=np.sin(ds_time.data),
                              scales=[ds_time])

        # create the root group
        g = sdf.Group(name='/',
                      comment="A test file",
                      attributes={'A1': "my string"},
                      datasets=[ds_time, ds_sine])

        # create a scalar dataset
        ds_alpha = sdf.Dataset(
            'alpha',
            comment="A scalar /w unit, display unit and display name",
            data=np.pi,
            display_name='Angle',
            unit='rad',
            display_unit='deg')

        # create a sub group
        g1 = sdf.Group(name='g1',
                       comment="A sub-group",
                       attributes={'A2': "Attribute in sub group"},
                       datasets=[ds_alpha])

        g.groups.append(g1)

        # save the group
        sdf.save('roundtrip.sdf', g)

        # load the group from the file
        g2 = sdf.load('roundtrip.sdf', '/')
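
        # A sketch of reading the scalar back by its path (assumes nested
        # paths work like the top-level paths used in the other examples)
        ds_alpha2 = sdf.load('roundtrip.sdf', '/g1/alpha')
        print(ds_alpha2.data, ds_alpha2.unit)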
Example #7
    def test_roundtrip(self):

        # create a scale
        ds1 = sdf.Dataset('DS1',
                          comment='dataset 1',
                          data=np.array([0.1, 0.2, 0.3]),
                          display_name='Scale 1',
                          scale_name='Scale 1',
                          quantity='Q1',
                          unit='U1',
                          display_unit='DU1',
                          is_scale=True)

        # create a 1D dataset
        ds2 = sdf.Dataset('DS2',
                          comment='dataset 2',
                          data=np.array([1, 2, 3]),
                          display_name='Dataset 2',
                          quantity='Q2',
                          unit='U2',
                          display_unit='DU2',
                          scales=[ds1])

        # create a group
        g = sdf.Group(name='/',
                      comment='my comment',
                      attributes={
                          'A1': 'my string',
                          'A2': 0.1,
                          'A3': 1
                      },
                      datasets=[ds1, ds2])

        # save the group
        sdf.save('test.sdf', g)

        # load DS2 from the file
        ds2r = sdf.load('test.sdf', '/DS2')

        # make sure the content is still the same
        self.assertEqual(ds2, ds2r)
Example #8
    def test_roundtrip(self):

        # create a scale
        ds1 = sdf.Dataset('DS1',
                          comment="dataset 1",
                          data=np.array([0.1, 0.2, 0.3]),
                          display_name='Scale 1',
                          unit='U1',
                          display_unit='DU1',
                          is_scale=True)

        # create a 1D dataset
        ds2 = sdf.Dataset('DS2',
                          comment="dataset 2",
                          data=np.array([1, 2, 3]),
                          display_name='Dataset 2',
                          relative_quantity=True,
                          unit='U2',
                          display_unit='DU2',
                          scales=[ds1])

        # create a group
        g = sdf.Group(name='/',
                      comment="my comment",
                      attributes={'A1': 'my string'},
                      datasets=[ds1, ds2])

        g2 = sdf.Group(name='G2')
        g.groups.append(g2)

        # save the group
        sdf.save('test.sdf', g)

        # load DS2 from the file
        ds2r = sdf.load('test.sdf', '/DS2')

        # make sure the content is still the same
        self.assertDatasetsEqual(ds2, ds2r)
        self.assertDatasetsEqual(ds2.scales[0], ds2r.scales[0])
Example #9
def save_to_sdf(dataset_list, output_filename):
    group = sdf.Group('/', comment='converted file', datasets=dataset_list)
    sdf.save(output_filename, group)
Example #10
lenG = len(G)
lenx = len(x)
lenP = len(P)
q = np.zeros((lenG, lenx, lenP))
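# q_raw holds one block of lenG mass-flux rows per pressure level, with
# quality varying across the columns; unpack it into a 3-D array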
for i in range(lenG):
    for j in range(lenx):
        for k in range(lenP):
            q[i, j, k] = q_raw[i + k * lenG, j]

# Create the datasets:
ds_G = sdf.Dataset('G',
                   data=G,
                   unit='kg/(m2.s)',
                   is_scale=True,
                   display_name='Mass Flux')
ds_x = sdf.Dataset('x',
                   data=x,
                   unit='1',
                   is_scale=True,
                   display_name='Quality')
ds_P = sdf.Dataset('P',
                   data=P,
                   unit='Pa',
                   is_scale=True,
                   display_name='Pressure')
ds_q = sdf.Dataset('q', data=q, unit='W/m2', scales=[ds_G, ds_x, ds_P])

# Create the root group and write the file:
g = sdf.Group('/', comment='2006 CHF LUT', datasets=[ds_G, ds_x, ds_P, ds_q])
sdf.save('../Data/2006LUT.sdf', g)
Example #11
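The snippet below assumes an already-opened xlrd workbook; a minimal preamble (with a hypothetical input file name) might look like this:

import os
import xlrd
import sdf
from numpy import array

filename = 'measurements.xls'  # hypothetical input file
book = xlrd.open_workbook(filename)
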
# get the first sheet
sh = book.sheet_by_index(0)

# get the names, quantities and units
n_t = sh.cell_value(0, 1)
u_t = sh.cell_value(1, 1)

n_u = sh.cell_value(0, 2)
u_u = sh.cell_value(1, 2)

# get the data
col_t = sh.col_values(1, 2, sh.nrows)
col_u = sh.col_values(2, 2, sh.nrows)

# create the data arrays
t = array(col_t)
u = array(col_u)

# create the datasets
ds_t = sdf.Dataset(n_t, data=t, unit=u_t, is_scale=True, display_name='Time')
ds_u = sdf.Dataset(n_u, data=u, unit=u_u, scales=[ds_t])

# create the root group
g = sdf.Group('/', comment='Imported from ' + filename, datasets=[ds_t, ds_u])

# change the file extension
outfile = os.path.splitext(filename)[0] + '.sdf'

# write the SDF file
sdf.save(outfile, g)
Example #12
    def test_3D_example(self):
        
        RPM2RADS = 2 * math.pi / 60
        
        kfric = 1        # [Ws/rad] angular damping coefficient [0;100]
        kfric3 = 1.5e-6  # [Ws3/rad3] angular damping coefficient (3rd order) [0;10-3]
        psi = 0.2        # [Vs] flux linkage [0.001;10]
        res = 5e-3       # [Ohm] resistance [0;100]
        u_ref = 200      # [V] reference DC voltage [0;1000]
        k_u = 5          # linear voltage coefficient [-100;100]
    
        tau = np.arange(0, 230+10, 10)
        w = np.concatenate((np.arange(0, 500, 100), np.arange(500, 12e3+500, 500))) * RPM2RADS
        u = np.asarray([200, 300, 400])
    
        # calculate the power losses
        TAU, W, U = np.meshgrid(tau, w, u, indexing='ij')
    
        P_loss = kfric * W + kfric3 * W ** 3 + (res * (TAU / psi) ** 2) + k_u * (U - u_ref)
    
        # create the scales
        ds_tau = sdf.Dataset('tau',
                             comment='Torque',
                             data=tau,
                             scale_name='Torque',
                             quantity='Torque',
                             unit='N.m',
                             is_scale=True)

        ds_w = sdf.Dataset('w',
                           comment='Speed',
                           data=w,
                           scale_name='Speed',
                           quantity='AngularVelocity',
                           unit='rad/s',
                           display_unit='rpm',
                           is_scale=True)

        ds_u = sdf.Dataset('u',
                           comment='DC voltage',
                           data=u,
                           scale_name='DC voltage',
                           quantity='Voltage',
                           unit='V',
                           is_scale=True)

        # create the dataset
        ds_P_loss = sdf.Dataset('P_loss',
                                comment='Power losses',
                                data=P_loss,
                                quantity='Voltage',
                                unit='V',
                                scales=[ds_tau, ds_w, ds_u])
        
        # create a group
        g = sdf.Group(name='/',
                      comment='Example loss characteristics of an e-machine w.r.t. torque, speed and DC voltage',
                      attributes={'AUTHOR': 'John Doe'},
                      datasets=[ds_tau, ds_w, ds_u, ds_P_loss])

        errors = sdf.validate(g)
        self.assertEqual([], errors)
        
        sdf.save('emachine.sdf', g)
Example #13
# Mass flux range from 2006 LUT
G = np.array((0, 50, 100, 300, 500, 750, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 4500, 5000, 5500, 6000, 6500, 7000, 7500, 8000))

# Quality range from 2006 LUT
x = np.array((-0.50, -0.40, -0.30, -0.20, -0.15, -0.10, -0.05, 0.00, 0.05, 0.10, 0.15, 0.20, 0.25, 0.30, 0.35, 0.40, 0.45, 0.50, 0.60, 0.70, 0.80, 0.90, 1.00))

# Critical heat flux [kW/m^2] from 2006 LUT, converted to [W/m^2]
q_raw = np.loadtxt('../Data/2006LUTdata.txt') * 1e3

# Convert the imported array into a (MxNxQ) where:
#   M is number of mass flux divisions
#   N is number of quality divisions
#   Q is number of pressure divisions
lenG = len(G)
lenx = len(x)
lenP = len(P)
q = np.zeros((lenG, lenx, lenP))
for i in range(lenG):
    for j in range(lenx):
        for k in range(lenP):
            q[i, j, k] = q_raw[i + k * lenG, j]

# Create the datasets:
ds_G = sdf.Dataset('G', data=G, unit='kg/(m2.s)', is_scale=True, display_name='Mass Flux')
ds_x = sdf.Dataset('x', data=x, unit='1', is_scale=True, display_name='Quality')
ds_P = sdf.Dataset('P', data=P, unit='Pa', is_scale=True, display_name='Pressure')
ds_q = sdf.Dataset('q', data=q, unit='W/m2', scales=[ds_G, ds_x, ds_P])

# Create the root group and write the file:
g = sdf.Group('/', comment='2006 CHF LUT', datasets=[ds_G, ds_x, ds_P, ds_q])
sdf.save('../Data/2006LUT.sdf', g)
Example #14
"""
Create a simple SDF file
"""

import sdf
import numpy as np

# create the data arrays
t = np.linspace(0, 10, 100)
v = np.sin(t)

# create the datasets
ds_t = sdf.Dataset('t', data=t, unit='s', is_scale=True, display_name='Time')
ds_v = sdf.Dataset('v', data=v, unit='V', scales=[ds_t])

# create the root group
g = sdf.Group('/', comment='A sine voltage', datasets=[ds_t, ds_v])

# write the SDF file
sdf.save('sine.sdf', g)

# read the SDF file
ds_v2 = sdf.load('sine.sdf', '/v', unit='V', scale_units=['s'])
ds_t2 = ds_v2.scales[0]

t2 = ds_t2.data
v2 = ds_v2.data
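
# a quick check (continuing the script above) that the data survived the
# save/load roundtrip
assert np.allclose(t, t2)
assert np.allclose(v, v2)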
Example #15
    def test_3D_example(self):

        RPM2RADS = 2 * math.pi / 60

        kfric = 1  # [Ws/rad] angular damping coefficient [0;100]
        kfric3 = 1.5e-6  # [Ws3/rad3] angular damping coefficient (3rd order) [0;10-3]
        psi = 0.2  # [Vs] flux linkage [0.001;10]
        res = 5e-3  # [Ohm] resistance [0;100]
        u_ref = 200  # [V] reference DC voltage [0;1000]
        k_u = 5  # linear voltage coefficient [-100;100]

        tau = np.arange(0, 230 + 10, 10)
        w = np.concatenate((np.arange(
            0, 500, 100), np.arange(500, 12e3 + 500, 500))) * RPM2RADS
        u = np.asarray([200, 300, 400])

        # calculate the power losses
        TAU, W, U = np.meshgrid(tau, w, u, indexing='ij')

        P_loss = kfric * W + kfric3 * W**3 + (
            res * (TAU / psi)**2) + k_u * (U - u_ref)

        # create the scales
        ds_tau = sdf.Dataset('tau',
                             comment='Torque',
                             data=tau,
                             scale_name='Torque',
                             quantity='Torque',
                             unit='N.m',
                             is_scale=True)

        ds_w = sdf.Dataset('w',
                           comment='Speed',
                           data=w,
                           scale_name='Speed',
                           quantity='AngularVelocity',
                           unit='rad/s',
                           display_unit='rpm',
                           is_scale=True)

        ds_u = sdf.Dataset('u',
                           comment='DC voltage',
                           data=u,
                           scale_name='DC voltage',
                           quantity='Voltage',
                           unit='V',
                           is_scale=True)

        # create the dataset
        ds_P_loss = sdf.Dataset('P_loss',
                                comment='Power losses',
                                data=P_loss,
                                quantity='Voltage',
                                unit='V',
                                scales=[ds_tau, ds_w, ds_u])

        # create a group
        g = sdf.Group(
            name='/',
            comment=
            'Example loss characteristics of an e-machine w.r.t. torque, speed and DC voltage',
            attributes={'AUTHOR': 'John Doe'},
            datasets=[ds_tau, ds_w, ds_u, ds_P_loss])

        errors = sdf.validate(g)
        self.assertEqual([], errors)

        sdf.save('emachine.sdf', g)
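
        # A sketch of reading the 3-D characteristic back from the file
        # written above; the scales come back attached to the loaded dataset
        ds = sdf.load('emachine.sdf', '/P_loss')
        print(ds.data.shape)  # (24, 29, 3)
        for scale in ds.scales:
            print(scale.name, scale.unit, scale.data.shape)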
Example #16
        if (item_index is not None) and (item_index[0].size > 0): 
            return item_index[0][0]
        elif (item_index_rough is not None) and (item_index_rough[0].size > 0): 
            return item_index_rough[0][0]
        else:
            raise Exception('cannot find the value ' + str(value) + ' in scale ' + scale.name + ': ' + str(scale.data))

    def _get_scale_index_from_name(self, scale_name):
        for i, scale in enumerate(self._dataset.scales):
            if scale_name == scale.name:
                return i
        raise Exception('cannot find the scale with name ' + scale_name)


if __name__ == "__main__":
    
    sds = SplittableDataset()
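    # load a dataset together with its scales, then extract a sub-dataset by
    # keeping two of the scales and fixing the third at a given value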
    sds.load_ds_and_scales('test.sdf', '/P_cont')
    ds = sds.sub_dataset(['RCI_P_cont', 'T_P_cont'], {'w_P_cont': 314.159265352})


    # create a group
    g = sdf.Group(name='/',
                  comment='my comment',
                  attributes={},
                  datasets=ds.scales + [ds])
    
    # save the group
    sdf.save('test_2.sdf', g)

Example #17
def save_to_sdf(dataset_list, output_filename):
    group = sdf.Group('/', comment='converted file', datasets=dataset_list)
    sdf.save(output_filename, group)