Example #1
    def test_dsres_load_dataset(self):

        path, _ = os.path.split(sdf.__file__)
        filename = os.path.join(path, 'examples', 'IntegerNetwork1.mat')

        ds = sdf.load(filename, objectname='/booleanPulse2/period')

        self.assertEqual(ds.data, 2.0)
        self.assertEqual(ds.unit, 's')
        self.assertEqual(ds.comment, 'Time for one period')

        ds = sdf.load(filename, objectname='/booleanPulse2/y')
        self.assertEqual(ds.data.dtype, np.dtype(np.int32))
        self.assertEqual(ds.data.size, 552)
        self.assertEqual(ds.data[0], True)
        self.assertEqual(ds.data[93], False)

        s = ds.scales[0]
        self.assertEqual(s.data.size, 552)
        self.assertEqual(s.data.dtype, np.dtype(np.float32))
        self.assertEqual(s.unit, 's')
        self.assertEqual(s.comment, 'Simulation time')

        ds = sdf.load(filename, objectname='/integerConstant/k')
        self.assertEqual(ds.data.dtype, np.dtype(np.int32))
        self.assertEqual(ds.data, 1)
Example #2
    def test_dsres_load_all(self):
        path, _ = os.path.split(sdf.__file__)
        filename = os.path.join(path, 'examples', 'IntegerNetwork1.mat')

        g = sdf.load(filename)

        s = g['Time']
        self.assertEqual(s.data.size, 552)
        self.assertEqual(s.data.dtype, np.dtype(np.float32))
        self.assertEqual(s.unit, 's')
        self.assertEqual(s.comment, 'Simulation time')

        ds = g['booleanPulse2']['period']
        self.assertEqual(ds.data, 2.0)
        self.assertEqual(ds.unit, 's')
        self.assertEqual(ds.comment, 'Time for one period')

        ds = g['booleanPulse2']['y']
        self.assertEqual(ds.data.dtype, np.int32)
        self.assertEqual(ds.data.size, 552)
        self.assertEqual(ds.data[0], True)
        self.assertEqual(ds.data[93], False)
        self.assertEqual(ds.scales[0], s)

        ds = g['integerConstant']['k']
        self.assertEqual(ds.data.dtype, np.int32)
        self.assertEqual(ds.data, 1)
Example #3
File: test.py Project: PyWilhelm/EDRIS_DS
    def test_roundtrip(self):
        
        # create a scale
        ds1 = sdf.Dataset('DS1',
                          comment='dataset 1',
                          data=np.array([0.1, 0.2, 0.3]),
                          display_name='Scale 1',
                          scale_name='Scale 1',
                          quantity='Q1',
                          unit='U1',
                          display_unit='DU1',
                          is_scale=True)
        
        # create a 1D dataset
        ds2 = sdf.Dataset('DS2',
                          comment='dataset 2',
                          data=np.array([1, 2, 3]),
                          display_name='Dataset 2',
                          quantity='Q2',
                          unit='U2',
                          display_unit='DU2',
                          scales=[ds1])
        ds3 = sdf.Dataset('DS3',
                          comment='dataset 3',
                          data=np.array([0.1, 0.2, 0.3]),
                          display_name='Scale 1',
                          scale_name='Scale 1',
                          quantity='Q1',
                          unit='U1',
                          display_unit='DU1',
                          is_scale=True)
        
        # create a group
        g = sdf.Group(name='/DS2',
                      comment='my comment',
                      attributes={'A1': 'my string', 'A2': 0.1, 'A3': 1},
                      datasets=[ds1, ds2])
        
        g2 = sdf.Group(name='/G2',
                      comment='my comment111',
                      attributes={'A1': 'my string', 'A2': 0.1, 'A3': 1},
                      datasets=[ds3])
        g.groups.append(g2)
        
        # save the group
        sdf.save('test.sdf', g)

        # load DS2 from the file        
        ds2r = sdf.load('test.sdf', '/DS2')
        
        print(ds2r.name)
        

        # make sure the content is still the same
        self.assertEqual(ds2, ds2r)
Example #4
# imports needed by this snippet
import numpy as np
import pylab
import sdf
import matplotlib.pyplot as plt


def plot_time_series(filename, datasets):

    params = {
        # 'legend.fontsize': 'medium',
        'figure.figsize': (10, 8),
        'axes.labelsize': 'small',
        #   'axes.titlesize': 'medium',
        'xtick.labelsize': 'small',
        'ytick.labelsize': 'small'
    }

    pylab.rcParams.update(params)

    figure, axes = plt.subplots(len(datasets), sharex=True)

    # figure = plt.figure()
    figure.canvas.set_window_title(filename)
    figure.patch.set_facecolor('white')

    for ax, path in zip(axes, datasets):

        dataset = sdf.load(filename, path)

        scale = dataset.scales[0]

        y = dataset.data
        x = scale.data if scale is not None else range(len(y))

        # ax = figure.add_subplot(len(datasets), 1, i + 1)

        ax.plot(x, y, 'b')

        ax.grid(b=True, which='both', color='0.8', linestyle='-')

        ax.set_xlim([np.nanmin(x), np.nanmax(x)])
        ylabel = path
        if dataset.unit is not None:
            ylabel += " / %s" % dataset.unit
        ax.set_ylabel(ylabel)
        ax.margins(y=0.05)

    # x-axis label (use the scale of the last plotted dataset, or the index)
    if scale is not None:
        xlabel = scale.name
        if scale.unit is not None:
            xlabel += " / %s" % scale.unit
    else:
        xlabel = 'Index'
    ax.set_xlabel(xlabel)

    figure.tight_layout()

    plt.show()
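
A hypothetical call for illustration; the file name and dataset paths below are placeholders, not taken from the example:

plot_time_series('results.mat', ['/pendulum/phi', '/pendulum/w'])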
Example #5
    def test_data_types(self):

        ds_f = sdf.Dataset(name='f',
                           data=np.asarray([1, 2, 3], dtype=np.float32))
        ds_d = sdf.Dataset(name='d',
                           data=np.asarray([1, 2, 3], dtype=np.float64))
        ds_i = sdf.Dataset(name='i',
                           data=np.asarray([1, 2, 3], dtype=np.int32))

        g = sdf.Group(name='/', datasets=[ds_f, ds_d, ds_i])

        sdf.save('data_types.sdf', g)

        g = sdf.load('data_types.sdf')

        self.assertEqual(g['f'].data.dtype, np.float32)
        self.assertEqual(g['d'].data.dtype, np.float64)
        self.assertEqual(g['i'].data.dtype, np.int32)
Example #6
    def test_hierarchy(self):

        # create a scale
        ds_time = sdf.Dataset('Time',
                              comment="A scale",
                              data=np.linspace(0, 10, 101),
                              unit='s',
                              is_scale=True)

        ds_sine = sdf.Dataset('sine',
                              comment="A 1-d dataset /w attached scale",
                              data=np.sin(ds_time.data),
                              scales=[ds_time])

        # create the root group
        g = sdf.Group(name='/',
                      comment="A test file",
                      attributes={'A1': "my string"},
                      datasets=[ds_time, ds_sine])

        # create a scalar dataset
        ds_alpha = sdf.Dataset(
            'alpha',
            comment="A scalar /w unit, display unit and display name",
            data=np.pi,
            display_name='Angle',
            unit='rad',
            display_unit='deg')

        # create a sub group
        g1 = sdf.Group(name='g1',
                       comment="A sub-group",
                       attributes={'A2': "Attribute in sub group"},
                       datasets=[ds_alpha])

        g.groups.append(g1)

        # save the group
        sdf.save('roundtrip.sdf', g)

        # load the group from the file
        g2 = sdf.load('roundtrip.sdf', '/')
Example #7
File: test.py Project: PyWilhelm/Converter
    def test_roundtrip(self):

        # create a scale
        ds1 = sdf.Dataset('DS1',
                          comment='dataset 1',
                          data=np.array([0.1, 0.2, 0.3]),
                          display_name='Scale 1',
                          scale_name='Scale 1',
                          quantity='Q1',
                          unit='U1',
                          display_unit='DU1',
                          is_scale=True)

        # create a 1D dataset
        ds2 = sdf.Dataset('DS2',
                          comment='dataset 2',
                          data=np.array([1, 2, 3]),
                          display_name='Dataset 2',
                          quantity='Q2',
                          unit='U2',
                          display_unit='DU2',
                          scales=[ds1])

        # create a group
        g = sdf.Group(name='/',
                      comment='my comment',
                      attributes={
                          'A1': 'my string',
                          'A2': 0.1,
                          'A3': 1
                      },
                      datasets=[ds1, ds2])

        # save the group
        sdf.save('test.sdf', g)

        # load DS2 from the file
        ds2r = sdf.load('test.sdf', '/DS2')

        # make sure the content is still the same
        self.assertEqual(ds2, ds2r)
Example #8
    def test_roundtrip(self):

        # create a scale
        ds1 = sdf.Dataset('DS1',
                          comment="dataset 1",
                          data=np.array([0.1, 0.2, 0.3]),
                          display_name='Scale 1',
                          unit='U1',
                          display_unit='DU1',
                          is_scale=True)

        # create a 1D dataset
        ds2 = sdf.Dataset('DS2',
                          comment="dataset 2",
                          data=np.array([1, 2, 3]),
                          display_name='Dataset 2',
                          relative_quantity=True,
                          unit='U2',
                          display_unit='DU2',
                          scales=[ds1])

        # create a group
        g = sdf.Group(name='/',
                      comment="my comment",
                      attributes={'A1': 'my string'},
                      datasets=[ds1, ds2])

        g2 = sdf.Group(name='G2')
        g.groups.append(g2)

        # save the group
        sdf.save('test.sdf', g)

        # load DS2 from the file
        ds2r = sdf.load('test.sdf', '/DS2')

        # make sure the content is still the same
        self.assertDatasetsEqual(ds2, ds2r)
        self.assertDatasetsEqual(ds2.scales[0], ds2r.scales[0])
Example #9
def get_x(filename, ds_name):
    print(ds_name)
    ds = sdf.load(filename, ds_name)
    return PIModelData.convert_from_si(ds_name, ds.data)
Example #10
# imports needed by this snippet
import numpy as np
import pylab
import sdf
import matplotlib.pyplot as plt
from matplotlib import colors


def create_plot(filename, datasets):

    params = {
        # 'legend.fontsize': 'medium',
        # 'figure.figsize': (10, 8),
        'legend.fontsize': 'small',
        'axes.labelsize': 'small',
        'axes.titlesize': 'small',
        'xtick.labelsize': 'small',
        'ytick.labelsize': 'small'
    }

    pylab.rcParams.update(params)

    figure = plt.figure(figsize=(12, 8))
    figure.canvas.set_window_title(filename)
    figure.patch.set_facecolor('white')

    nrows = len(datasets)

    for row, dataset in enumerate(datasets):

        # load the datasets
        C1 = sdf.load(filename, dataset)

        ncols = C1.scales[2].data.size

        norm = colors.Normalize(vmin=np.nanmin(C1.data),
                                vmax=np.nanmax(C1.data))

        subs = [0] * C1.data.ndim
        subs[0] = slice(None)
        subs[1] = slice(None)

        ax0 = None

        for i in range(ncols):

            x = C1.scales[1].data
            y = C1.scales[0].data

            subs[2] = i

            Z = C1.data[tuple(subs)].T

            X, Y = np.meshgrid(x, y, indexing='ij')

            ax = figure.add_subplot(nrows, ncols,
                                    (row * ncols) + i + 1,
                                    sharex=ax0,
                                    sharey=ax0)

            if i == 0:
                ax0 = ax
                ax.set_ylabel(C1.display_name + " / " + C1.unit + "\n" +
                              C1.scales[0].display_name + " / " +
                              C1.scales[0].unit)
            # else:
            #     ax.get_yaxis().set_ticklabels([])

            ax.grid(True)

            CSF = plt.contourf(X, Y, Z, 10, cmap=plt.cm.viridis, norm=norm)

            CS = plt.contour(X, Y, Z, 10, colors='k')

            plt.clabel(CS, fontsize=9, inline=1, colors='k')

            scale3 = C1.scales[2]

            ax.set_title(scale3.display_name + "=" + ("%g" % scale3.data[i]) +
                         " " + scale3.unit)
            ax.set_xlabel(C1.scales[1].display_name + " / " +
                          C1.scales[1].unit)

        cbar = figure.colorbar(CSF)

    plt.tight_layout()

    plt.show()
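
For illustration only, a hypothetical call; create_plot expects each named dataset to be three-dimensional with three attached scales, and the file name and path below are placeholders:

create_plot('characteristic_map.sdf', ['/map/efficiency'])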
"""


import pandas as pd

df = pd.read_csv("data/Series3_6.15.17_padel.csv")
df.head()

# Which have missing values? How do they show when clustering?
df[df.IC50.isnull()]


df = pd.read_csv("data/Selleck_filtered_padel_corrected.csv", encoding='cp1252')
akt1 = pd.read_csv("data/Akt1_decoys_padel.csv")

akt1.head()

# Read sdf file to view contents
import sdf
sdf_file = sdf.load("data/malaria-2018-04-16.sdf", '/v')
# SDF File parser https://github.com/Actelion/openchemlib/tree/4633da5f2bfc3fbd59e2e01897c42d7be2b22b2d

df = pd.read_csv("data/result-2018-04-16.txt", delimiter="\t")
df.head()


len(df)
Example #12
File: sine.py Project: jakeogh/SDF-Python
"""
Create a simple SDF file
"""

import sdf
import numpy as np

# create the data arrays
t = np.linspace(0, 10, 100)
v = np.sin(t)

# create the datasets
ds_t = sdf.Dataset('t', data=t, unit='s', is_scale=True, display_name='Time')
ds_v = sdf.Dataset('v', data=v, unit='V', scales=[ds_t])

# create the root group
g = sdf.Group('/', comment='A sine voltage', datasets=[ds_t, ds_v])

# write the SDF file
sdf.save('sine.sdf', g)

# read the SDF file
ds_v2 = sdf.load('sine.sdf', '/v', unit='V', scale_units=['s'])
ds_t2 = ds_v2.scales[0]

t2 = ds_t2.data
v2 = ds_v2.data
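
As an optional sanity check (not part of the original script), the round-tripped arrays can be compared against what was written:

# the data read back from the file should match the original arrays
assert np.allclose(t, t2)
assert np.allclose(v, v2)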
Example #13
 def load_ds_and_scales(self, filename, dsname):
     # sdf.load returns the dataset with its scales already attached (ds.scales)
     ds = sdf.load(filename, dsname)
     self.dataset = ds
Example #14
 def assert_results(self, sdf_filename, variable_name, position, value, message='assertion failed'):
     ds = sdf.load(sdf_filename, variable_name)
     # compare the value at the given position against the expected value
     self.assertAlmostEqual(ds.data[position], value, places=7, msg=message)
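
A hypothetical invocation of this helper; the file name, variable path, and expected value below are placeholders, not from the original test:

self.assert_results('result.sdf', '/pendulum/phi', position=0, value=0.0,
                    message='initial angle should be zero')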
Example #15
 def test_dsres_inverted_signals(self):
     path = os.path.dirname(__file__)
     filename = os.path.join(path, 'DoublePendulum.mat')
     rvisobj = sdf.load(filename, '/world/y_label/cylinders[2]/rvisobj[1]')
     self.assertTrue(rvisobj.data < 0)