Example #1
 def test_nd_array_to_df(self):
     data = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])
     col = VectorData(name='name', description='desc', data=data)
     df = DynamicTable('test', 'desc', np.arange(3, dtype='int'), (col, )).to_dataframe()
     df2 = pd.DataFrame({'name': [x for x in data]},
                        index=pd.Index(name='id', data=[0, 1, 2]))
     assert_frame_equal(df, df2)
Example #2
 def with_table_columns(self):
     cols = [TableColumn(**d) for d in self.spec]
     table = DynamicTable("with_table_columns",
                          'PyNWB unit test',
                          'a test table',
                          columns=cols)
     return table
Example #3
def infer_columns_to_plot(dynamic_table: DynamicTable):
    """Infer which columns can be plotted in summary widgets

    Parameters
    ----------
    dynamic_table : DynamicTable
        Table whose columns will be inspected

    Returns
    -------
    column_names_to_plot : list
        Columns that can be plotted with the dynamic table summary
    categorical_columns : list
        The subset of those columns whose values are categorical
        (str, bool, or bytes)
    """
    categorical_cols = infer_categorical_columns(dynamic_table)

    column_names_to_plot = []

    df = dynamic_table.to_dataframe()
    categorical_columns = []
    for name in dynamic_table.colnames:
        # if name not in categorical_cols.keys():  # categorical columns can always be plotted
        value = df[name].values[0]

        if isinstance(value, (int, float, np.integer)):
            column_names_to_plot.append(name)
        elif isinstance(value, (str, bool, bytes)):
            column_names_to_plot.append(name)
            categorical_columns.append(name)

    return column_names_to_plot, categorical_columns
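
For context, a minimal usage sketch of the helper above (the table, column
names, and data are hypothetical, and infer_columns_to_plot is assumed to be
importable alongside infer_categorical_columns):

import numpy as np
from hdmf.common import DynamicTable, VectorData

cols = [
    VectorData(name='rate', description='firing rate', data=np.array([1.0, 2.0, 3.0])),
    VectorData(name='label', description='unit label', data=['a', 'b', 'a']),
]
table = DynamicTable(name='units_summary', description='demo table', columns=cols)
plottable, categorical = infer_columns_to_plot(table)
# expected: plottable == ['rate', 'label'], categorical == ['label']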
Example #4
 def test_constructor_ids_default(self):
     columns = [
         VectorData(name=s['name'], description=s['description'], data=d)
         for s, d in zip(self.spec, self.data)
     ]
     table = DynamicTable("with_spec", 'a test table', columns=columns)
     self.check_table(table)
Example #5
 def addContainer(self, nwbfile):
     nwbfile.units = DynamicTable.from_dataframe(pd.DataFrame({
         'a': [1, 2, 3],
         'b': ['4', '5', '6']
     }), 'units')
     # keep a reference to the added table for round-trip comparison
     self.container = nwbfile.units
Example #6
    def setUpContainer(self):
        self.timeseries = TimeSeries(name='dummy timeseries',
                                     description='desc',
                                     data=np.ones((3, 3)),
                                     unit='flibs',
                                     timestamps=np.ones((3, )))
        bands = DynamicTable(name='bands',
                             description='band info for LFPSpectralAnalysis',
                             columns=[
                                 VectorData(name='band_name',
                                            description='name of bands',
                                            data=['alpha', 'beta', 'gamma']),
                                 VectorData(
                                     name='band_limits',
                                     description='low and high cutoffs in Hz',
                                     data=np.ones((3, 2)))
                             ])
        spec_anal = DecompositionSeries(name='LFPSpectralAnalysis',
                                        description='my description',
                                        data=np.ones((3, 3, 3)),
                                        timestamps=np.ones((3, )),
                                        source_timeseries=self.timeseries,
                                        metric='amplitude',
                                        bands=bands)

        return spec_anal
Example #7
 def setUpContainer(self):
     # this will get ignored
     return DynamicTable.from_dataframe(
         pd.DataFrame({
             'a': [[1, 2, 3], [1, 2, 3], [1, 2, 3]],
             'b': ['4', '5', '6']
         }), 'test_table')
Example #8
 def with_columns_and_data(self):
     columns = [
         VectorData(name=s['name'], description=s['description'], data=d)
         for s, d in zip(self.spec, self.data)
     ]
     return DynamicTable("with_columns_and_data",
                         'a test table',
                         columns=columns)
Example #9
    def test_from_dataframe(self):
        df = pd.DataFrame({
            'foo': [1, 2, 3, 4, 5],
            'bar': [10.0, 20.0, 30.0, 40.0, 50.0],
            'baz': ['cat', 'dog', 'bird', 'fish', 'lizard']
        }).loc[:, ('foo', 'bar', 'baz')]

        obtained_table = DynamicTable.from_dataframe(df, 'test')
        self.check_table(obtained_table)
Example #10
    def test_pandas_roundtrip(self):
        df = pd.DataFrame({
            'a': [1, 2, 3, 4],
            'b': ['a', 'b', 'c', '4']
        }, index=pd.Index(name='an_index', data=[2, 4, 6, 8]))

        table = DynamicTable.from_dataframe(df, 'foo')
        obtained = table.to_dataframe()

        assert df.equals(obtained)
Example #11
 def test_constructor_ids(self):
     columns = [
         TableColumn(name=s['name'], description=s['description'], data=d)
         for s, d in zip(self.spec, self.data)
     ]
     table = DynamicTable("with_columns",
                          'a test table',
                          id=[0, 1, 2, 3, 4],
                          columns=columns)
     self.check_table(table)
Example #12
 def test_constructor_ids_bad_ids(self):
     columns = [
         VectorData(name=s['name'], description=s['description'], data=d)
         for s, d in zip(self.spec, self.data)
     ]
     msg = "must provide same number of ids as length of columns"
     with self.assertRaisesRegex(ValueError, msg):
         DynamicTable("with_columns",
                      'a test table',
                      id=[0, 1],
                      columns=columns)
Example #13
 def test_constructor_ElementIdentifier_ids(self):
     columns = [
         VectorData(name=s['name'], description=s['description'], data=d)
         for s, d in zip(self.spec, self.data)
     ]
     ids = ElementIdentifiers('ids', [0, 1, 2, 3, 4])
     table = DynamicTable("with_columns",
                          'a test table',
                          id=ids,
                          columns=columns)
     self.check_table(table)
Example #14
    def test_roundtrip(self):
        """Test writing and reading the ontology_terms and ontology_objects tables."""

        container = TimeSeries(
            name='test_ts',
            data=[1, 2, 3],
            unit='si_unit',
            timestamps=[0.1, 0.2, 0.3],
        )
        self.nwbfile.add_acquisition(container)

        table = DynamicTable(name='test_table',
                             description='test table description')
        table.add_column(name='test_col',
                         description='test column description')
        table.add_row(test_col='Mouse')

        self.nwbfile.add_acquisition(table)

        self.nwbfile.ontology_terms.add_row(
            id=np.uint64(1),
            key='meter',
            ontology='si_ontology',
            uri='si_ontology:m',
        )
        self.nwbfile.ontology_objects.add_row(
            id=np.uint64(5),
            object_id=container.object_id,
            field='unit',
            item=np.uint64(1),
        )
        self.nwbfile.ontology_terms.add_row(
            id=np.uint64(2),
            key='Mouse',
            ontology='species_ontology',
            uri='species_ontology:Mus musculus',
        )
        self.nwbfile.ontology_objects.add_row(
            id=np.uint64(6),
            object_id=table.object_id,
            field='test_col',
            item=np.uint64(2),
        )

        with NWBHDF5IO(self.path, mode='w') as io:
            io.write(self.nwbfile)

        with NWBHDF5IO(self.path, mode='r', load_namespaces=True) as io:
            read_nwbfile = io.read()
            self.assertContainerEqual(self.nwbfile.ontology_objects,
                                      read_nwbfile.ontology_objects)
            self.assertContainerEqual(self.nwbfile.ontology_terms,
                                      read_nwbfile.ontology_terms)
Example #15
def test_infer_categorical_columns():
    data1 = np.array([1, 2, 2, 3, 1, 1, 3, 2, 3])
    data2 = np.array([3, 4, 2, 4, 3, 2, 2, 4, 4])

    vd1 = VectorData('Data1',
                     'vector data for creating a DynamicTable',
                     data=data1)
    vd2 = VectorData('Data2',
                     'vector data for creating a DynamicTable',
                     data=data2)
    vd = [vd1, vd2]

    dynamic_table = DynamicTable(name='test table',
                                 description='This is a test table',
                                 columns=vd,
                                 colnames=['Data1', 'Data2'])

    assert dicts_exact_equal(infer_categorical_columns(dynamic_table), {
        'Data1': np.array([1, 2, 3]),
        'Data2': np.array([2, 3, 4])
    })
Example #16
def test_infer_categorical_columns():
    data1 = np.array([1, 2, 2, 3, 1, 1, 3, 2, 3])
    data2 = np.array([3, 4, 2, 4, 3, 2, 2, 4, 4])
    device = Device(name="device")
    eg_1 = ElectrodeGroup(name="electrodegroup1",
                          description="desc",
                          location="brain",
                          device=device)
    eg_2 = ElectrodeGroup(name="electrodegroup2",
                          description="desc",
                          location="brain",
                          device=device)
    data3 = [eg_1, eg_2, eg_1, eg_1, eg_1, eg_1, eg_1, eg_1, eg_1]
    vd1 = VectorData("Data1",
                     "vector data for creating a DynamicTable",
                     data=data1)
    vd2 = VectorData("Data2",
                     "vector data for creating a DynamicTable",
                     data=data2)
    vd3 = VectorData("ElectrodeGroup",
                     "vector data for creating a DynamicTable",
                     data=data3)
    vd = [vd1, vd2, vd3]

    dynamic_table = DynamicTable(
        name="test table",
        description="This is a test table",
        columns=vd,
        colnames=["Data1", "Data2", "ElectrodeGroup"],
    )
    assert dicts_exact_equal(
        infer_categorical_columns(dynamic_table),
        {
            "Data1": data1,
            "Data2": data2,
            "ElectrodeGroup": [i.name for i in data3]
        },
    )
Example #17
def transform(block_path, filter='default', bands_vals=None):
    """
    Takes raw LFP data and applies the standard Hilbert algorithm:
    1) CAR
    2) notch filters
    3) Hilbert transform on different bands

    Takes about 20 minutes to run on one 10-minute block.

    Parameters
    ----------
    block_path : str
        subject file path
    filter: str, optional
        Frequency bands to filter the signal.
        'default' for Chang lab default values (Gaussian filters)
        'high_gamma' for the subset of default bands between 70 and 150 Hz
        'custom' for user defined (Gaussian filters)
    bands_vals: 2D array, necessary only if filter='custom'
        [2,nBands] numpy array with Gaussian filter parameters, where:
        bands_vals[0,:] = filter centers [Hz]
        bands_vals[1,:] = filter sigmas [Hz]

    Returns
    -------
    Saves preprocessed signals (LFP) and spectral power (DecompositionSeries)
    in the current NWB file, but only if containers for these data do not
    already exist in the file.
    """
    write_file = 1
    rate = 400.

    # Define filter parameters
    if filter == 'default':
        band_param_0 = bands.chang_lab['cfs']
        band_param_1 = bands.chang_lab['sds']
    elif filter == 'high_gamma':
        band_param_0 = bands.chang_lab['cfs'][(bands.chang_lab['cfs'] > 70)
                                              & (bands.chang_lab['cfs'] < 150)]
        band_param_1 = bands.chang_lab['sds'][(bands.chang_lab['cfs'] > 70)
                                              & (bands.chang_lab['cfs'] < 150)]
        #band_param_0 = [ bands.neuro['min_freqs'][-1] ]  #for hamming window filter
        #band_param_1 = [ bands.neuro['max_freqs'][-1] ]
        #band_param_0 = bands.chang_lab['cfs'][29:37]      #for average of gaussian filters
        #band_param_1 = bands.chang_lab['sds'][29:37]
    elif filter == 'custom':
        band_param_0 = bands_vals[0, :]
        band_param_1 = bands_vals[1, :]
    else:
        raise ValueError("unknown filter option: {}".format(filter))

    block_name = os.path.splitext(block_path)[0]

    start = time.time()

    with NWBHDF5IO(block_path, 'a') as io:
        nwb = io.read()

        # Storage of processed signals on NWB file -----------------------------
        if 'ecephys' not in nwb.modules:
            # Add module to NWB file
            nwb.create_processing_module(
                name='ecephys',
                description='Extracellular electrophysiology data.')
        ecephys_module = nwb.modules['ecephys']

        # LFP: Downsampled and power line signal removed
        if 'LFP' in nwb.modules['ecephys'].data_interfaces:
            lfp_ts = nwb.modules['ecephys'].data_interfaces[
                'LFP'].electrical_series['preprocessed']
            X = lfp_ts.data[:].T
            rate = lfp_ts.rate
        else:

            # 1e6 scaling helps with numerical accuracy
            X = nwb.acquisition['ECoG'].data[:].T * 1e6
            fs = nwb.acquisition['ECoG'].rate
            bad_elects = load_bad_electrodes(nwb)
            print('Load time for h5 {}: {} seconds'.format(
                block_name,
                time.time() - start))
            print('rates {}: {} {}'.format(block_name, rate, fs))
            if not np.allclose(rate, fs):
                assert rate < fs
                start = time.time()
                X = resample(X, rate, fs)
                print('resample time for {}: {} seconds'.format(
                    block_name,
                    time.time() - start))

            if bad_elects.sum() > 0:
                X[bad_elects] = np.nan

            # Subtract CAR
            start = time.time()
            X = subtract_CAR(X)
            print('CAR subtract time for {}: {} seconds'.format(
                block_name,
                time.time() - start))

            # Apply Notch filters
            start = time.time()
            X = linenoise_notch(X, rate)
            print('Notch filter time for {}: {} seconds'.format(
                block_name,
                time.time() - start))

            lfp = LFP()
            # Add preprocessed downsampled signals as an electrical_series
            lfp_ts = lfp.create_electrical_series(
                name='preprocessed',
                data=X.T,
                electrodes=nwb.acquisition['ECoG'].electrodes,
                rate=rate,
                description='')
            ecephys_module.add_data_interface(lfp)

        # Spectral band power
        if 'Bandpower_' + filter not in nwb.modules['ecephys'].data_interfaces:

            # Apply Hilbert transform
            X = X.astype('float32')  # signal (nChannels,nSamples)
            nChannels = X.shape[0]
            nSamples = X.shape[1]
            nBands = len(band_param_0)
            Xp = np.zeros((nBands, nChannels,
                           nSamples))  # power (nBands,nChannels,nSamples)
            X_fft_h = None
            for ii, (bp0, bp1) in enumerate(zip(band_param_0, band_param_1)):
                # if filter=='high_gamma':
                #    kernel = hamming(X, rate, bp0, bp1)
                # else:
                kernel = gaussian(X, rate, bp0, bp1)
                X_analytic, X_fft_h = hilbert_transform(X,
                                                        rate,
                                                        kernel,
                                                        phase=None,
                                                        X_fft_h=X_fft_h)
                Xp[ii] = abs(X_analytic).astype('float32')

            # Scales signals back to Volt
            X /= 1e6

            band_param_0V = VectorData(
                name='filter_param_0',
                description='frequencies for bandpass filters',
                data=band_param_0)
            band_param_1V = VectorData(
                name='filter_param_1',
                description='frequencies for bandpass filters',
                data=band_param_1)
            bandsTable = DynamicTable(
                name='bands',
                description='Series of filters used for Hilbert transform.',
                columns=[band_param_0V, band_param_1V],
                colnames=['filter_param_0', 'filter_param_1'])

            # data: (ndarray) dims: num_times * num_channels * num_bands
            Xp = np.swapaxes(Xp, 0, 2)
            decs = DecompositionSeries(
                name='Bandpower_' + filter,
                data=Xp,
                description='Band power estimated with Hilbert transform.',
                metric='power',
                unit='V**2/Hz',
                bands=bandsTable,
                rate=rate,
                source_timeseries=lfp_ts)
            ecephys_module.add_data_interface(decs)
        io.write(nwb)

        print('done', flush=True)
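
A hypothetical call sketch for transform() above (the file name and the custom
filter parameters below are made up; 'custom' expects a [2, nBands] array of
Gaussian centers and sigmas):

import numpy as np

bands_vals = np.array([[10., 20.],   # filter centers [Hz]
                       [2., 4.]])    # filter sigmas [Hz]
transform('EC000_B01.nwb', filter='custom', bands_vals=bands_vals)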
Example #18
def spectral_decomposition(block_path, bands_vals):
    """
    Takes preprocessed LFP data and applies the standard Hilbert transform on
    different bands. Takes about 20 minutes to run on one 10-minute block.

    Parameters
    ----------
    block_path : str
        subject file path
    bands_vals : [2,nBands] numpy array with Gaussian filter parameters, where:
        bands_vals[0,:] = filter centers [Hz]
        bands_vals[1,:] = filter sigmas [Hz]

    Returns
    -------
    Saves spectral power (DecompositionSeries) in the current NWB file, but
    only if a container for these data does not already exist in the file.
    """

    # Get filter parameters
    band_param_0 = bands_vals[0, :]
    band_param_1 = bands_vals[1, :]

    with NWBHDF5IO(block_path, 'r+', load_namespaces=True) as io:
        nwb = io.read()
        lfp = nwb.processing['ecephys'].data_interfaces[
            'LFP'].electrical_series['preprocessed']
        rate = lfp.rate

        nBands = len(band_param_0)
        nSamples = lfp.data.shape[0]
        nChannels = lfp.data.shape[1]
        Xp = np.zeros(
            (nBands, nChannels, nSamples))  #power (nBands,nChannels,nSamples)

        # Apply Hilbert transform ----------------------------------------------
        print('Running Spectral Decomposition...')
        start = time.time()
        for ch in np.arange(nChannels):
            Xch = lfp.data[:,
                           ch] * 1e6  # 1e6 scaling helps with numerical accuracy
            Xch = Xch.reshape(1, -1)
            Xch = Xch.astype('float32')  # signal (nChannels,nSamples)
            X_fft_h = None
            for ii, (bp0, bp1) in enumerate(zip(band_param_0, band_param_1)):
                kernel = gaussian(Xch, rate, bp0, bp1)
                X_analytic, X_fft_h = hilbert_transform(Xch,
                                                        rate,
                                                        kernel,
                                                        phase=None,
                                                        X_fft_h=X_fft_h)
                Xp[ii, ch, :] = abs(X_analytic).astype('float32')
        print('Spectral Decomposition finished in {} seconds'.format(
            time.time() - start))

        # data: (ndarray) dims: num_times * num_channels * num_bands
        Xp = np.swapaxes(Xp, 0, 2)

        # Spectral band power
        # bands: (DynamicTable) frequency bands that signal was decomposed into
        band_param_0V = VectorData(
            name='filter_param_0',
            description='frequencies for bandpass filters',
            data=band_param_0)
        band_param_1V = VectorData(
            name='filter_param_1',
            description='frequencies for bandpass filters',
            data=band_param_1)
        bandsTable = DynamicTable(
            name='bands',
            description='Series of filters used for Hilbert transform.',
            columns=[band_param_0V, band_param_1V],
            colnames=['filter_param_0', 'filter_param_1'])
        decs = DecompositionSeries(
            name='DecompositionSeries',
            data=Xp,
            description='Analytic amplitude estimated with Hilbert transform.',
            metric='amplitude',
            unit='V',
            bands=bandsTable,
            rate=rate,
            source_timeseries=lfp)

        # Storage of spectral decomposition on NWB file ------------------------
        ecephys_module = nwb.processing['ecephys']
        ecephys_module.add_data_interface(decs)
        io.write(nwb)
        print('Spectral decomposition saved in ' + block_path)
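
Likewise, a hypothetical call sketch for spectral_decomposition() (the file
name and band parameters are made up):

import numpy as np

bands_vals = np.array([[70., 100., 140.],   # filter centers [Hz]
                       [10., 12., 15.]])    # filter sigmas [Hz]
spectral_decomposition('EC000_B01.nwb', bands_vals)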
Example #19
 def with_spec(self):
     table = DynamicTable("with_spec", 'a test table', columns=self.spec)
     return table
Example #20
 def setUpContainer(self):
     return DynamicTable('electrodes',
                         'metadata about extracellular electrodes',
                         'autogenerated by PyNWB API')
Example #21
 def with_table_columns(self):
     cols = [VectorData(**d) for d in self.spec]
     table = DynamicTable("with_table_columns",
                          'a test table',
                          columns=cols)
     return table
Example #22
def get_bipolar_referenced_electrodes(X,
                                      electrodes,
                                      rate,
                                      grid_size=None,
                                      grid_step=1):
    '''
    Bipolar referencing of electrodes according to the scheme of Dr. John Burke

    Each electrode (with obvious exceptions at the edges) yields two bipolar-
    referenced channels, one for the "right" and one for the "below" neighbors.
    These are returned as a data array together with a DynamicTable of metadata.

    Input arguments:
    --------
    X:
        numpy array containing (raw) electrode traces (Nelectrodes x T)
    electrodes:
        DynamicTableRegion containing the metadata for the electrodes whose
        traces are in X
    rate:
        sampling rate of X; for storage in ElectricalSeries
    grid_size:
        numpy array with the two dimensions of the grid (2, )
    grid_step:
        stride used to subsample the grid (default 1)

    Returns:
    --------
    XX:
        numpy array (Nchannels x T) of bipolar-referenced traces.  (NB that a,
        e.g., 16x16 grid with grid_step=1 yields 480 of these pseudo-channels.)
    bipolarTable:
        DynamicTable holding the metadata for the pseudo-electrodes
    bipolarTableRegion:
        DynamicTableRegion spanning the entire table
    '''

    # set mutable default argument(s)
    if grid_size is None:
        grid_size = np.array([16, 16])

    # malloc
    elec_layout = np.arange(np.prod(grid_size) - 1, -1,
                            -1).reshape(grid_size).T
    elec_layout = elec_layout[::grid_step, ::grid_step]
    grid_size = elec_layout.T.shape  # in case grid_step > 1
    Nchannels = 2 * np.prod(grid_size) - np.sum(grid_size)
    XX = np.zeros((Nchannels, X.shape[1]))

    # create a new dynamic table to hold the metadata
    column_names = ['x', 'y', 'z', 'imp', 'location', 'label', 'bad']
    columns = [
        VectorData(name=name, description=electrodes.table[name].description)
        for name in column_names
    ]
    bipolarTable = DynamicTable(
        name='bipolar-referenced metadata',
        description=('pseudo-channels derived via John Burke style'
                     ' bipolar referencing'),
        colnames=column_names,
        columns=columns,
    )

    # compute bipolar-ref'd channel and add a new row of metadata to table
    def add_new_channel(iChannel, iElectrode, jElectrode):
        jElectrode = int(jElectrode)

        # "bipolar referencing": the difference of neighboring electrodes
        XX[iChannel, :] = X[iElectrode, :] - X[jElectrode, :]

        # add a row to the table for this new pseudo-electrode
        bipolarTable.add_row({
            'location':
            '_'.join({
                electrodes.table['location'][iElectrode],
                electrodes.table['location'][jElectrode]
            }),
            'label':
            '-'.join([
                electrodes.table['label'][iElectrode],
                electrodes.table['label'][jElectrode]
            ]),
            'bad': (electrodes.table['bad'][iElectrode]
                    or electrodes.table['bad'][jElectrode]),
            **{
                name: electrodes.table[name][iElectrode]
                for name in ['x', 'y', 'z', 'imp']
            },
        })
        return iChannel + 1

    iChannel = 0

    # loop across columns and rows (remembering that grid is transposed)
    for i in range(grid_size[1]):
        for j in range(grid_size[0]):
            if j < grid_size[0] - 1:
                iChannel = add_new_channel(iChannel, elec_layout[i, j],
                                           elec_layout[i, j + 1])
            if i < grid_size[1] - 1:
                iChannel = add_new_channel(iChannel, elec_layout[i, j],
                                           elec_layout[i + 1, j])

    # create one big region for the entire table
    bipolarTableRegion = bipolarTable.create_region(
        'electrodes', [i for i in range(Nchannels)], 'all bipolar electrodes')

    return XX, bipolarTable, bipolarTableRegion
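
A usage sketch for the function above, assuming `nwbfile` is an open NWB file
whose raw traces live in an 'ECoG' acquisition (as in the transform() example
earlier); the variable names here are hypothetical:

import numpy as np

X = nwbfile.acquisition['ECoG'].data[:].T            # (Nelectrodes x T)
electrodes = nwbfile.acquisition['ECoG'].electrodes  # DynamicTableRegion
XX, bipolar_table, bipolar_region = get_bipolar_referenced_electrodes(
    X, electrodes, rate=400., grid_size=np.array([16, 16]))
# a full 16x16 grid yields 2*256 - (16 + 16) == 480 pseudo-channels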
Example #23
# a "category". This is similar to a table with "sub-headings". In the case of the
# :py:class:`~pynwb.icephys.IntracellularRecordingsTable`, we have three predefined categories,
# i.e., electrodes, stimuli, and responses. We can also dynamically add new categories to
# the table. As each category corresponds to a ``DynamicTable``, this means we have to create a
# new ``DynamicTable`` and add it to our table.

# Create a new DynamicTable for our category that contains a location column of type VectorData
location_column = VectorData(name='location',
                             data=['Mordor', 'Gondor', 'Rohan'],
                             description='Recording location in Middle Earth')

lab_category = DynamicTable(
    name='recording_lab_data',
    description='category table for lab-specific recording metadata',
    colnames=[
        'location',
    ],
    columns=[
        location_column,
    ])
# Add the table as a new category to our intracellular_recordings
nwbfile.intracellular_recordings.add_category(category=lab_category)
# Note, the name of the category is the name of the table, i.e., 'recording_lab_data'

#####################################################################
# .. note:: In an ``AlignedDynamicTable`` all category tables MUST align with the main table,
#           i.e., all tables must have the same number of rows and rows are expected to
#           correspond to each other by index

#####################################################################
# We can also add custom columns to any of the subcategory tables, as the
# sketch below illustrates.
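# A hedged sketch (the column name, data, and row count below are hypothetical;
# AlignedDynamicTable.add_column accepts a `category` argument naming the
# target sub-table):
nwbfile.intracellular_recordings.add_column(
    name='voltage_threshold',
    data=[0.1, 0.12, 0.13],
    description='example column on the electrodes category table',
    category='electrodes')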
Example #24
 def setUpContainer(self):
     # this will get ignored
     return DynamicTable('units', 'unit table integration test',
                         'a placeholder table')
Example #25
 def setUpContainer(self):
     return DynamicTable('electrodes', 'metadata about extracellular electrodes')
Example #26
    def test_auto_ragged_array(self):

        df = pd.DataFrame({'a': [[1], [1, 2]]})
        df2 = DynamicTable.from_dataframe(df, name='test').to_dataframe()
        self.assertTrue(df.equals(df2))
Example #27
 def setUpContainer(self):
     # this will get ignored
     return DynamicTable('trials', 'a placeholder table')
Example #28
def test_show_dynamic_table():
    d = {'col1': [1, 2], 'col2': [3, 4]}
    dt = DynamicTable.from_dataframe(df=pd.DataFrame(data=d),
                                     name='Test Dtable',
                                     table_description='no description')
    show_dynamic_table(dt)
Example #29
def test_show_dynamic_table():
    d = {"col1": [1, 2], "col2": [3, 4]}
    dt = DynamicTable.from_dataframe(df=pd.DataFrame(data=d),
                                     name="Test Dtable",
                                     table_description="no description")
    show_dynamic_table(dt)
Example #30
 def setUpContainer(self):
     return DynamicTable('trials', 'DynamicTable integration test',
                         'a test table')