Example #1
def test_append_simple():
    """Test appending without regard to dimensions."""
    points = 100
    data1 = np.random.random(points)
    data2 = np.random.random(points)
    coords1 = {'time': np.linspace(0, points, points)}
    coords2 = {'time': np.linspace(points, points * 2, points)}
    dims = ["time"]
    samplerate = 10.

    # Base case: everything should Just Work
    ts1 = TimeSeriesX.create(data1, samplerate, coords=coords1, dims=dims)
    ts2 = TimeSeriesX.create(data2, samplerate, coords=coords2, dims=dims)
    combined = ts1.append(ts2)
    assert combined.samplerate == samplerate
    assert (combined.data == np.concatenate([data1, data2])).all()
    assert combined.dims == ts1.dims
    assert combined.dims == ts2.dims
    assert (combined.coords['time'] == np.concatenate(
        [coords1['time'], coords2['time']])).all()

    # Incompatible sample rates
    ts1 = TimeSeriesX.create(data1, samplerate, coords=coords1, dims=dims)
    ts2 = TimeSeriesX.create(data2, samplerate + 1, coords=coords2, dims=dims)
    with pytest.raises(ConcatenationError):
        ts1.append(ts2)
Example #2
def test_coords_ops():
    data = np.arange(1000).reshape(10,10,10)

    ts_1 = TimeSeriesX.create(data, None, dims=['x', 'y', 'z'],
                              coords={'x': np.arange(10),
                                      'y': np.arange(10),
                                      'z': np.arange(10) * 2,
                                      'samplerate': 1})
    ts_2 = TimeSeriesX.create(data, None, dims=['x', 'y', 'z'],
                              coords={'x': np.arange(10),
                                      'y': np.arange(10),
                                      'z': np.arange(10),
                                      'samplerate': 1})
    ts_out = ts_1 + ts_2
    assert ts_out.z.shape[0] == 5

    ts_out_1 = ts_1 + ts_2[...,::2]

    assert (ts_out_1 == ts_out).all()

    ts_out_2 = ts_2[...,1::2] + ts_2[...,::2]

    assert ts_out_2.shape[-1] ==0

    ts_out_3 = ts_2[...,[0,2,3,4,8]] + ts_2[...,[3,4,8,9]]

    assert (ts_out_3.z.data == np.array([3,4,8])).all()
Example #3
    def read_session_data(self):
        """
        Reads an entire session's worth of data.

        :return: TimeSeriesX object (channels x events x time) with data for the entire session; the events dimension has length 1
        """
        brr = self.READER_FILETYPE_DICT[self.session_dataroot](dataroot=self.session_dataroot, channels=self.channels)
        session_array,read_ok_mask = brr.read()
        self.channel_name = brr.channel_name

        offsets_axis = session_array['offsets']
        number_of_time_points = offsets_axis.shape[0]
        samplerate = float(session_array['samplerate'])
        physical_time_array = np.arange(number_of_time_points) * (1.0 / samplerate)

        # session_array = session_array.rename({'start_offsets': 'events'})

        session_time_series = TimeSeriesX(session_array.values,
                                          dims=[self.channel_name, 'start_offsets', 'time'],
                                          coords={
                                              self.channel_name: session_array[self.channel_name],
                                              'start_offsets': session_array['start_offsets'],
                                              'time': physical_time_array,
                                              'offsets': ('time', session_array['offsets']),
                                              'samplerate': session_array['samplerate']
                                          }
                                          )
        session_time_series.attrs = session_array.attrs.copy()
        session_time_series.attrs['dataroot'] = self.session_dataroot

        return session_time_series
Example #4
    def filter(self):
        """
        Applies a Butterworth filter to the input time series and returns a filtered TimeSeriesX object.

        Returns
        -------
        filtered: TimeSeriesX
            The filtered time series

        """
        time_axis_index = get_axis_index(self.time_series, axis_name='time')
        filtered_array = buttfilt(self.time_series,
                                  self.freq_range,
                                  float(self.time_series['samplerate']),
                                  self.filt_type,
                                  self.order,
                                  axis=time_axis_index)

        coords_dict = {
            coord_name: DataArray(coord.copy())
            for coord_name, coord in list(self.time_series.coords.items())
        }
        coords_dict['samplerate'] = self.time_series['samplerate']
        dims = [dim_name for dim_name in self.time_series.dims]
        filtered_time_series = TimeSeriesX(filtered_array,
                                           dims=dims,
                                           coords=coords_dict)

        # filtered_time_series = TimeSeriesX(filtered_time_series)
        filtered_time_series.attrs = self.time_series.attrs.copy()
        return filtered_time_series
Example #5
    def filter(self):
        """
        Applies a Butterworth filter to the input time series and returns a filtered TimeSeriesX object.

        Returns
        -------
        filtered: TimeSeriesX
            The filtered time series

        """
        time_axis_index = get_axis_index(self.time_series, axis_name='time')
        filtered_array = buttfilt(self.time_series,
                                  self.freq_range, float(self.time_series['samplerate']), self.filt_type,
                                  self.order, axis=time_axis_index)

        coords_dict = {coord_name: DataArray(coord.copy()) for coord_name, coord in list(self.time_series.coords.items())}
        coords_dict['samplerate'] = self.time_series['samplerate']
        dims = [dim_name for dim_name in self.time_series.dims]
        filtered_time_series = TimeSeriesX(
            filtered_array,
            dims=dims,
            coords=coords_dict
        )

        # filtered_time_series = TimeSeriesX(filtered_time_series)
        filtered_time_series.attrs = self.time_series.attrs.copy()
        return filtered_time_series
Example #6
def test_concatenate():
    """make sure we can concatenate easily time series x - test it with rec
    array as one of the coords.

    This fails for xarray > 0.7. See https://github.com/pydata/xarray/issues/1434
    for details.

    """
    p1 = np.array([('John', 180), ('Stacy', 150), ('Dick',200)], dtype=[('name', '|S256'), ('height', int)])
    p2 = np.array([('Bernie', 170), ('Donald', 250), ('Hillary',150)], dtype=[('name', '|S256'), ('height', int)])

    data = np.arange(50, 80, 1, dtype=np.float)
    dims = ['measurement', 'participant']

    ts1 = TimeSeriesX.create(data.reshape(10, 3), None, dims=dims,
                             coords={
                                 'measurement': np.arange(10),
                                 'participant': p1,
                                 'samplerate': 1
                             })

    ts2 = TimeSeriesX.create(data.reshape(10, 3)*2, None, dims=dims,
                             coords={
                                 'measurement': np.arange(10),
                                 'participant': p2,
                                 'samplerate': 1
                             })

    combined = xr.concat((ts1, ts2), dim='participant')

    assert isinstance(combined, TimeSeriesX)
    assert (combined.participant.data['height'] ==
            np.array([180, 150, 200, 170, 250, 150])).all()
    assert (combined.participant.data['name'] ==
            np.array(['John', 'Stacy', 'Dick', 'Bernie', 'Donald', 'Hillary'])).all()
Example #7
def test_append_simple():
    """Test appending without regard to dimensions."""
    points = 100
    data1 = np.random.random(points)
    data2 = np.random.random(points)
    coords1 = {'time': np.linspace(0, points, points)}
    coords2 = {'time': np.linspace(points, points*2, points)}
    dims = ["time"]
    samplerate = 10.

    # Base case: everything should Just Work
    ts1 = TimeSeriesX.create(data1, samplerate, coords=coords1, dims=dims)
    ts2 = TimeSeriesX.create(data2, samplerate, coords=coords2, dims=dims)
    combined = ts1.append(ts2)
    assert combined.samplerate == samplerate
    assert (combined.data == np.concatenate([data1, data2])).all()
    assert combined.dims == ts1.dims
    assert combined.dims == ts2.dims
    assert (combined.coords['time'] == np.concatenate([coords1['time'], coords2['time']])).all()

    # Incompatible sample rates
    ts1 = TimeSeriesX.create(data1, samplerate, coords=coords1, dims=dims)
    ts2 = TimeSeriesX.create(data2, samplerate + 1, coords=coords2, dims=dims)
    with pytest.raises(ConcatenationError):
        ts1.append(ts2)
Example #8
def test_hdf(tempdir):
    """Test saving/loading with HDF5."""
    data = np.random.random((10, 10, 10, 10))
    dims = ('time', 'x', 'y', 'z')
    coords = {label: np.linspace(0, 1, 10) for label in dims}
    rate = 1

    ts = TimeSeriesX.create(data, rate, coords=coords, dims=dims, name="test")

    filename = osp.join(tempdir, "timeseries.h5")
    ts.to_hdf(filename)

    with h5py.File(filename, 'r') as hfile:
        assert "data" in hfile
        assert "dims" in hfile
        assert "coords" in hfile
        assert "name" in list(hfile['/'].attrs.keys())
        assert "ptsa_version" in hfile.attrs
        assert "created" in hfile.attrs

    loaded = TimeSeriesX.from_hdf(filename)
    assert (loaded.data == data).all()
    for coord in loaded.coords:
        assert (loaded.coords[coord] == ts.coords[coord]).all()
    for n, dim in enumerate(dims):
        assert loaded.dims[n] == dim
    assert loaded.name == "test"

    ts_with_attrs = TimeSeriesX.create(data, rate, coords=coords, dims=dims,
                                       name="test", attrs=dict(a=1, b=[1, 2]))
    ts_with_attrs.to_hdf(filename)
    loaded = TimeSeriesX.from_hdf(filename)
    for key in ts_with_attrs.attrs:
        assert ts_with_attrs.attrs[key] == loaded.attrs[key]
Example #9
def test_coords_ops():
    data = np.arange(1000).reshape(10, 10, 10)

    ts_1 = TimeSeriesX.create(data,
                              None,
                              dims=['x', 'y', 'z'],
                              coords={
                                  'x': np.arange(10),
                                  'y': np.arange(10),
                                  'z': np.arange(10) * 2,
                                  'samplerate': 1
                              })
    ts_2 = TimeSeriesX.create(data,
                              None,
                              dims=['x', 'y', 'z'],
                              coords={
                                  'x': np.arange(10),
                                  'y': np.arange(10),
                                  'z': np.arange(10),
                                  'samplerate': 1
                              })
    ts_out = ts_1 + ts_2
    assert ts_out.z.shape[0] == 5

    ts_out_1 = ts_1 + ts_2[..., ::2]

    assert (ts_out_1 == ts_out).all()

    ts_out_2 = ts_2[..., 1::2] + ts_2[..., ::2]

    assert ts_out_2.shape[-1] == 0

    ts_out_3 = ts_2[..., [0, 2, 3, 4, 8]] + ts_2[..., [3, 4, 8, 9]]

    assert (ts_out_3.z.data == np.array([3, 4, 8])).all()
Example #10
def test_mean():
    """tests various ways to compute mean - collapsing different combination of axes"""
    data = np.arange(100).reshape(10,10)
    ts_1 = TimeSeriesX.create(data, None, dims=['x', 'y'],
                              coords={'x': np.arange(10) * 2,
                                      'y': np.arange(10),
                                      'samplerate': 1})
    grand_mean = ts_1.mean()

    assert grand_mean == 49.5

    x_mean  = ts_1.mean(dim='x')
    assert (x_mean == np.arange(45,55,1, dtype=np.float)).all()
    # checking axes
    assert(ts_1.y == x_mean.y).all()

    y_mean = ts_1.mean(dim='y')
    assert (y_mean == np.arange(4.5,95,10, dtype=np.float)).all()
    # checking axes
    assert (y_mean.x == ts_1.x).all()

    # test mean NaN
    data_2 = np.arange(100, dtype=np.float).reshape(10,10)
    np.fill_diagonal(data_2,np.NaN)
    # data_2[9,9] = 99


    ts_2 = TimeSeriesX.create(data_2, None, dims=['x', 'y'],
                              coords={'x': np.arange(10) * 2,
                                      'y': np.arange(10),
                                      'samplerate': 1})

    grand_mean = ts_2.mean(skipna=True)
    assert grand_mean == 49.5
Example #11
    def read_session_data(self):
        """
        Reads an entire session's worth of data.

        :return: TimeSeriesX object (channels x events x time) with data for the entire session; the events dimension has length 1
        """
        brr = self.READER_FILETYPE_DICT[self.session_dataroot](
            dataroot=self.session_dataroot, channels=self.channels)
        session_array, read_ok_mask = brr.read()
        self.channel_name = brr.channel_name

        offsets_axis = session_array['offsets']
        number_of_time_points = offsets_axis.shape[0]
        samplerate = float(session_array['samplerate'])
        physical_time_array = np.arange(number_of_time_points) * (1.0 /
                                                                  samplerate)

        # session_array = session_array.rename({'start_offsets': 'events'})

        session_time_series = TimeSeriesX(
            session_array.values,
            dims=[self.channel_name, 'start_offsets', 'time'],
            coords={
                self.channel_name: session_array[self.channel_name],
                'start_offsets': session_array['start_offsets'],
                'time': physical_time_array,
                'offsets': ('time', session_array['offsets']),
                'samplerate': session_array['samplerate']
            })
        session_time_series.attrs = session_array.attrs.copy()
        session_time_series.attrs['dataroot'] = self.session_dataroot

        return session_time_series
Example #12
def test_samplerate_prop():
    data = np.arange(1000).reshape(10,10,10)
    rate = 1000

    ts_1 = TimeSeriesX.create(data, None, coords={'samplerate': 1})
    ts_2 = TimeSeriesX.create(data, None, coords={'samplerate': 2})

    with pytest.raises(AssertionError):
        ts_out = ts_1 + ts_2
Example #13
def test_addition(i, j, k, expected):
    data = np.arange(1000).reshape(10, 10, 10)
    rate = 1000

    ts_1 = TimeSeriesX.create(data, None, coords={'samplerate': 1})
    ts_2 = TimeSeriesX.create(data, None, coords={'samplerate': 1})

    ts_out = ts_1 + ts_2
    assert ts_out[i, j, k] == expected
Example #14
def test_samplerate_prop():
    data = np.arange(1000).reshape(10, 10, 10)
    rate = 1000

    ts_1 = TimeSeriesX.create(data, None, coords={'samplerate': 1})
    ts_2 = TimeSeriesX.create(data, None, coords={'samplerate': 2})

    with pytest.raises(AssertionError):
        ts_out = ts_1 + ts_2
Example #15
def test_addition(i, j, k, expected):
    data = np.arange(1000).reshape(10,10,10)
    rate = 1000

    ts_1 = TimeSeriesX.create(data, None, coords={'samplerate': 1})
    ts_2 = TimeSeriesX.create(data, None, coords={'samplerate': 1})

    ts_out = ts_1 + ts_2
    assert ts_out[i,j,k] == expected
Example #16
def test_arithmetic_operations():
    data = np.arange(1000).reshape(10,10,10)
    rate = 1000

    ts_1 =  TimeSeriesX.create(data, None, coords={'samplerate': 1})
    ts_2 =  TimeSeriesX.create(data, None, coords={'samplerate': 1})

    ts_out = ts_1 + ts_2

    print('ts_out=', ts_out)
Example #17
def test_arithmetic_operations():
    data = np.arange(1000).reshape(10, 10, 10)
    rate = 1000

    ts_1 = TimeSeriesX.create(data, None, coords={'samplerate': 1})
    ts_2 = TimeSeriesX.create(data, None, coords={'samplerate': 1})

    ts_out = ts_1 + ts_2

    print('ts_out=', ts_out)
Example #18
    def filter(self):
        """
        Turns a time series for monopolar electrodes into a time series in which the 'channels' axis is
        replaced by a 'bipolar_pairs' axis and the data is the difference between the time series of the
        two electrodes in each bipolar pair.

        :return: TimeSeriesX object
        """

        # a = np.arange(20)*2
        #
        # template = [2,4,6,6,8,2,4]
        #
        # sorter = np.argsort(a)
        # idx = sorter[np.searchsorted(a, template, sorter=sorter)]

        # idx = np.where(a == 6)

        #
        # print ch0
        #
        # print ch1
        channel_axis = self.time_series['channels']

        ch0 = self.bipolar_pairs['ch0']
        ch1 = self.bipolar_pairs['ch1']

        sel0 = channel_axis.loc[ch0]
        sel1 = channel_axis.loc[ch1]

        ts0 = self.time_series.loc[dict(channels=sel0)]
        ts1 = self.time_series.loc[dict(channels=sel1)]

        dims_bp = list(self.time_series.dims)
        channels_idx = dims_bp.index('channels')
        dims_bp[channels_idx] = 'bipolar_pairs'

        # coords_bp = [self.time_series[dim_name].copy() for dim_name in self.time_series.dims]
        # coords_bp[channels_idx] = self.bipolar_pairs

        coords_bp = {
            coord_name: coord
            for coord_name, coord in self.time_series.coords.items()
        }
        del coords_bp['channels']
        coords_bp['bipolar_pairs'] = self.bipolar_pairs

        ts = TimeSeriesX(data=ts0.values - ts1.values,
                         dims=dims_bp,
                         coords=coords_bp)
        ts['samplerate'] = self.time_series['samplerate']

        ts.attrs = self.time_series.attrs.copy()
        return ts
Example #19
def create_time_seriesX_from_superEEG(data, samplerate=500.0):
    times = {'time': np.arange(data.data.shape[0]) * 1000.0 / samplerate}
    samplerate = {'samplerate': samplerate}
    mni_coords = data.locs.to_dict('series')
    channels = {'channels': np.arange(data.data.shape[1])}
    coords = {**times, **channels, **samplerate}
    data = TimeSeriesX(data.data, coords=coords, dims=['time', 'channels'])
    data.attrs['x'] = mni_coords['x']
    data.attrs['y'] = mni_coords['y']
    data.attrs['z'] = mni_coords['z']
    return data
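
A minimal usage sketch for the helper above, assuming ptsa's TimeSeriesX and the function defined here are importable. SuperEEGLike is a hypothetical stand-in for a superEEG-style object exposing a (time x channels) data array and a locs DataFrame with 'x', 'y', 'z' columns; it is not the real superEEG API.

import numpy as np
import pandas as pd

class SuperEEGLike:
    """Hypothetical stand-in: .data is a (time x channels) array, .locs holds MNI coordinates."""
    def __init__(self, data, locs):
        self.data = data
        self.locs = locs

locs = pd.DataFrame({'x': [0.0, 10.0], 'y': [5.0, -5.0], 'z': [20.0, 25.0]})
brain = SuperEEGLike(np.random.random((1000, 2)), locs)

ts = create_time_seriesX_from_superEEG(brain, samplerate=500.0)
print(ts.dims)         # ('time', 'channels')
print(ts.attrs['x'])   # per-channel MNI x coordinates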
Example #20
    def filter(self):
        """
        Turns a time series for monopolar electrodes into a time series in which the 'channels' axis is
        replaced by a 'bipolar_pairs' axis and the data is the difference between the time series of the
        two electrodes in each bipolar pair.

        :return: TimeSeriesX object
        """

        # a = np.arange(20)*2
        #
        # template = [2,4,6,6,8,2,4]
        #
        # sorter = np.argsort(a)
        # idx = sorter[np.searchsorted(a, template, sorter=sorter)]


        # idx = np.where(a == 6)

        #
        # print ch0
        #
        # print ch1
        channel_axis = self.time_series['channels']

        ch0 = self.bipolar_pairs['ch0']
        ch1 = self.bipolar_pairs['ch1']

        sel0 = channel_axis.loc[ch0]
        sel1 = channel_axis.loc[ch1]

        ts0 = self.time_series.loc[dict(channels=sel0)]
        ts1 = self.time_series.loc[dict(channels=sel1)]

        dims_bp = list(self.time_series.dims)
        channels_idx = dims_bp.index('channels')
        dims_bp[channels_idx] = 'bipolar_pairs'

        # coords_bp = [self.time_series[dim_name].copy() for dim_name in self.time_series.dims]
        # coords_bp[channels_idx] = self.bipolar_pairs

        coords_bp = {coord_name:coord for coord_name, coord in list(self.time_series.coords.items())}
        del coords_bp['channels']
        coords_bp['bipolar_pairs'] = self.bipolar_pairs


        ts = TimeSeriesX(data=ts0.values - ts1.values, dims=dims_bp,coords=coords_bp)
        ts['samplerate'] = self.time_series['samplerate']

        ts.attrs = self.time_series.attrs.copy()
        return ts
Example #21
    def build_output_arrays(self, wavelet_pow_array, wavelet_phase_array,
                            time_axis):
        wavelet_pow_array_xray = None
        wavelet_phase_array_xray = None
        if isinstance(self.time_series, xr.DataArray):

            dims = list(self.time_series.dims[:-1] + (
                'frequency',
                'time',
            ))

            transposed_dims = []

            # NOTE: all computations up to this point assume that the frequency axis is at position -2,
            # whereas the default setting for this filter puts the frequency axis at index 0. To avoid
            # unnecessary transpositions we adjust the position of the frequency axis here.

            # getting frequency dim position as positive integer
            self.frequency_dim_pos = (len(dims) +
                                      self.frequency_dim_pos) % len(dims)
            orig_frequency_idx = dims.index('frequency')

            if self.frequency_dim_pos != orig_frequency_idx:
                transposed_dims = dims[:orig_frequency_idx] + dims[
                    orig_frequency_idx + 1:]
                transposed_dims.insert(self.frequency_dim_pos, 'frequency')

            coords = {
                dim_name: self.time_series.coords[dim_name]
                for dim_name in self.time_series.dims[:-1]
            }
            coords['frequency'] = self.freqs
            coords['time'] = time_axis
            if 'samplerate' not in coords:
                coords['samplerate'] = self.time_series.coords['samplerate']

            if 'offsets' in list(self.time_series.coords.keys()):
                coords['offsets'] = ('time', self.time_series['offsets'])

            if wavelet_pow_array is not None:
                wavelet_pow_array_xray = TimeSeriesX(wavelet_pow_array,
                                                     coords=coords,
                                                     dims=dims)
                if len(transposed_dims):
                    wavelet_pow_array_xray = wavelet_pow_array_xray.transpose(
                        *transposed_dims)

                wavelet_pow_array_xray.attrs = self.time_series.attrs.copy()

            if wavelet_phase_array is not None:
                wavelet_phase_array_xray = TimeSeriesX(wavelet_phase_array,
                                                       coords=coords,
                                                       dims=dims)
                if len(transposed_dims):
                    wavelet_phase_array_xray = wavelet_phase_array_xray.transpose(
                        *transposed_dims)

                wavelet_phase_array_xray.attrs = self.time_series.attrs.copy()

            return wavelet_pow_array_xray, wavelet_phase_array_xray
Example #22
def test_append_recarray():
    """Test appending along a dimension with a recarray."""
    p1 = np.array([('John', 180), ('Stacy', 150), ('Dick',200)], dtype=[('name', '|S256'), ('height', int)])
    p2 = np.array([('Bernie', 170), ('Donald', 250), ('Hillary',150)], dtype=[('name', '|S256'), ('height', int)])

    data = np.arange(50, 80, 1, dtype=np.float)
    dims = ['measurement', 'participant']

    ts1 = TimeSeriesX.create(data.reshape(10, 3), None, dims=dims,
                             coords={
                                 'measurement': np.arange(10),
                                 'participant': p1,
                                 'samplerate': 1
                             })

    ts2 = TimeSeriesX.create(data.reshape(10, 3)*2, None, dims=dims,
                             coords={
                                 'measurement': np.arange(10),
                                 'participant': p2,
                                 'samplerate': 1
                             })

    ts3 = TimeSeriesX.create(data.reshape(10, 3)*2, None, dims=dims,
                             coords={
                                 'measurement': np.arange(10),
                                 'participant': p2,
                                 'samplerate': 2
                             })

    ts4 = TimeSeriesX.create(data.reshape(10, 3)*2, None, dims=dims,
                             coords={
                                 'measurement': np.linspace(0, 1, 10),
                                 'participant': p2,
                                 'samplerate': 2
                             })

    combined = ts1.append(ts2, dim='participant')

    assert isinstance(combined, TimeSeriesX)
    assert (combined.participant.data['height'] == np.array([180, 150, 200, 170, 250, 150])).all()
    names = np.array([b'John', b'Stacy', b'Dick', b'Bernie', b'Donald', b'Hillary'])
    assert (combined.participant.data['name'] == names).all()

    # incompatible sample rates
    with pytest.raises(ConcatenationError):
        ts1.append(ts3)

    # incompatible other dimensions (measurement)
    with pytest.raises(ConcatenationError):
        ts1.append(ts4)
Example #23
    def filter(self):
        """
        Chops the session into chunks corresponding to events
        :return: TimeSeriesX object with the chopped session
        """
        chop_on_start_offsets_flag = bool(len(self.start_offsets))

        if chop_on_start_offsets_flag:

            start_offsets = self.start_offsets
            chopping_axis_name = 'start_offsets'
            chopping_axis_data = start_offsets
        else:

            evs = self.events[self.events.eegfile == self.session_data.attrs['dataroot']]
            start_offsets = evs.eegoffset
            chopping_axis_name = 'events'
            chopping_axis_data = evs


        # samplerate = self.session_data.attrs['samplerate']
        samplerate = float(self.session_data['samplerate'])
        offset_time_array = self.session_data['offsets']

        event_chunk_size, start_point_shift = self.get_event_chunk_size_and_start_point_shift(
            eegoffset=start_offsets[0],
            samplerate=samplerate,
            offset_time_array=offset_time_array)


        event_time_axis = np.arange(event_chunk_size)*(1.0/samplerate)+(self.start_time-self.buffer_time)

        data_list = []

        for i, eegoffset in enumerate(start_offsets):

            start_chop_pos = np.where(offset_time_array >= eegoffset)[0][0]
            start_chop_pos += start_point_shift
            selector_array = np.arange(start=start_chop_pos, stop=start_chop_pos + event_chunk_size)

            chopped_data_array = self.session_data.isel(time=selector_array)

            chopped_data_array['time'] = event_time_axis
            chopped_data_array['start_offsets'] = [i]

            data_list.append(chopped_data_array)

        ev_concat_data = xr.concat(data_list, dim='start_offsets')


        ev_concat_data = ev_concat_data.rename({'start_offsets':chopping_axis_name})
        ev_concat_data[chopping_axis_name] = chopping_axis_data

        attrs = {
            "start_time": self.start_time,
            "end_time": self.end_time,
            "buffer_time": self.buffer_time
        }
        ev_concat_data['samplerate'] = samplerate
        return TimeSeriesX.create(ev_concat_data, samplerate, attrs=attrs)
Example #24
    def filter(self):
        """
        Chops the session into chunks corresponding to events
        :return: TimeSeriesX object with the chopped session
        """
        chop_on_start_offsets_flag = bool(len(self.start_offsets))

        if chop_on_start_offsets_flag:

            start_offsets = self.start_offsets
            chopping_axis_name = 'start_offsets'
            chopping_axis_data = start_offsets
        else:

            evs = self.events[self.events.eegfile == self.session_data.attrs['dataroot']]
            start_offsets = evs.eegoffset
            chopping_axis_name = 'events'
            chopping_axis_data = evs


        # samplerate = self.session_data.attrs['samplerate']
        samplerate = float(self.session_data['samplerate'])
        offset_time_array = self.session_data['offsets']

        event_chunk_size, start_point_shift = self.get_event_chunk_size_and_start_point_shift(
            eegoffset=start_offsets[0],
            samplerate=samplerate,
            offset_time_array=offset_time_array)


        event_time_axis = np.arange(event_chunk_size)*(1.0/samplerate)+(self.start_time-self.buffer_time)

        data_list = []

        for i, eegoffset in enumerate(start_offsets):

            start_chop_pos = np.where(offset_time_array >= eegoffset)[0][0]
            start_chop_pos += start_point_shift
            selector_array = np.arange(start=start_chop_pos, stop=start_chop_pos + event_chunk_size)

            chopped_data_array = self.session_data.isel(time=selector_array)

            chopped_data_array['time'] = event_time_axis
            chopped_data_array['start_offsets'] = [i]

            data_list.append(chopped_data_array)

        ev_concat_data = xr.concat(data_list, dim='start_offsets')


        ev_concat_data = ev_concat_data.rename({'start_offsets':chopping_axis_name})
        ev_concat_data[chopping_axis_name] = chopping_axis_data

        # ev_concat_data.attrs['samplerate'] = samplerate
        ev_concat_data['samplerate'] = samplerate
        ev_concat_data.attrs['start_time'] = self.start_time
        ev_concat_data.attrs['end_time'] = self.end_time
        ev_concat_data.attrs['buffer_time'] = self.buffer_time
        return TimeSeriesX(ev_concat_data)
Example #25
    def build_output_arrays(self, wavelet_pow_array, wavelet_phase_array,
                            time_axis):
        wavelet_pow_array_xray = None
        wavelet_phase_array_xray = None

        if isinstance(self.time_series, xr.DataArray):

            dims = list(self.time_series.dims[:-1] + (
                'frequency',
                'time',
            ))

            transposed_dims = []

            # getting frequency dim position as positive integer
            self.frequency_dim_pos = (len(dims) +
                                      self.frequency_dim_pos) % len(dims)
            orig_frequency_idx = dims.index('frequency')

            if self.frequency_dim_pos != orig_frequency_idx:
                transposed_dims = dims[:orig_frequency_idx] + dims[
                    orig_frequency_idx + 1:]
                transposed_dims.insert(self.frequency_dim_pos, 'frequency')

            coords = {
                dim_name: self.time_series.coords[dim_name]
                for dim_name in self.time_series.dims[:-1]
            }
            coords['frequency'] = self.freqs
            coords['time'] = time_axis

            if wavelet_pow_array is not None:
                wavelet_pow_array_xray = self.construct_output_array(
                    wavelet_pow_array, dims=dims, coords=coords)
            if wavelet_phase_array is not None:
                wavelet_phase_array_xray = self.construct_output_array(
                    wavelet_phase_array, dims=dims, coords=coords)

            if wavelet_pow_array_xray is not None:
                wavelet_pow_array_xray = TimeSeriesX(wavelet_pow_array_xray)
                if len(transposed_dims):
                    wavelet_pow_array_xray = wavelet_pow_array_xray.transpose(
                        *transposed_dims)

                wavelet_pow_array_xray.attrs = self.time_series.attrs.copy()

            if wavelet_phase_array_xray is not None:
                wavelet_phase_array_xray = TimeSeriesX(
                    wavelet_phase_array_xray)
                if len(transposed_dims):
                    wavelet_phase_array_xray = wavelet_phase_array_xray.transpose(
                        *transposed_dims)

                wavelet_phase_array_xray.attrs = self.time_series.attrs.copy()

            return wavelet_pow_array_xray, wavelet_phase_array_xray
Example #26
def test_init():
    """Test that everything is initialized properly."""
    data = np.random.random((10, 10, 10))
    rate = 1000

    with pytest.raises(AssertionError):
        TimeSeriesX(data, {})

    with pytest.raises(AssertionError):
        TimeSeriesX.create(data, None, coords={})

    assert TimeSeriesX.create(data, None, coords={'samplerate': 1}).samplerate == 1

    ts = TimeSeriesX(data, dict(samplerate=rate))
    assert isinstance(ts, xr.DataArray)
    assert ts.shape == (10, 10, 10)
    assert ts['samplerate'] == rate
Example #27
    def filter(self):

        event_data_dict = OrderedDict()

        for eegfile_name, data in self.data_dict.items():

            evs = self.events[self.events.eegfile == eegfile_name]

            samplerate = data.attrs['samplerate']

            # used in constructing time_axis
            offset_time_array = data['time'].values['eegoffset']

            event_chunk_size, start_point_shift = self.get_event_chunk_size_and_start_point_shift(ev=evs[0],
                                                                                                  samplerate=samplerate,
                                                                                                  offset_time_array=offset_time_array)

            event_time_axis = np.linspace(-self.buffer + self.time_shift,
                                          self.event_duration + self.buffer + self.time_shift,
                                          event_chunk_size)

            data_list = []

            shape = None

            for i, ev in enumerate(evs):
                # print ev.eegoffset
                start_chop_pos = np.where(offset_time_array >= ev.eegoffset)[0][0]
                start_chop_pos += start_point_shift
                selector_array = np.arange(start=start_chop_pos, stop=start_chop_pos + event_chunk_size)

                # ev_array = eeg_session_data[:,:,selector_array] # ORIG CODE

                chopped_data_array = data.isel(time=selector_array)

                chopped_data_array['time'] = event_time_axis
                chopped_data_array['events'] = [i]

                data_list.append(chopped_data_array)

                # print i

            ev_concat_data = xray.concat(data_list, dim='events')

            # replacing simple events axis (consecutive integers) with recarray of events
            ev_concat_data['events'] = evs

            ev_concat_data.attrs['samplerate'] = samplerate
            ev_concat_data.attrs['time_shift'] = self.time_shift
            ev_concat_data.attrs['event_duration'] = self.event_duration
            ev_concat_data.attrs['buffer'] = self.buffer

            event_data_dict[eegfile_name] = TimeSeriesX(ev_concat_data)

            break  # REMOVE THIS

        return event_data_dict
Example #28
class ButterworthFilter(PropertiedObject, BaseFilter):
    """Applies Butterworth filter to a time series.

    Keyword Arguments
    -----------------

    time_series
         TimeSeriesX object
    order
         Butterworth filter order
    freq_range: list-like
       Array [min_freq, max_freq] describing the filter range

    """

    _descriptors = [
        TypeValTuple('time_series', TimeSeriesX,
                     TimeSeriesX([0.0], dict(samplerate=1.), dims=['time'])),
        TypeValTuple('order', int, 4),
        TypeValTuple('freq_range', list, [58, 62]),
        TypeValTuple('filt_type', str, 'stop'),
    ]

    def __init__(self, **kwds):
        self.init_attrs(kwds)

    def filter(self):
        """
        Applies a Butterworth filter to the input time series and returns a filtered TimeSeriesX object.

        Returns
        -------
        filtered: TimeSeriesX
            The filtered time series

        """
        time_axis_index = get_axis_index(self.time_series, axis_name='time')
        filtered_array = buttfilt(self.time_series,
                                  self.freq_range,
                                  float(self.time_series['samplerate']),
                                  self.filt_type,
                                  self.order,
                                  axis=time_axis_index)

        coords_dict = {
            coord_name: DataArray(coord.copy())
            for coord_name, coord in list(self.time_series.coords.items())
        }
        coords_dict['samplerate'] = self.time_series['samplerate']
        dims = [dim_name for dim_name in self.time_series.dims]
        filtered_time_series = TimeSeriesX(filtered_array,
                                           dims=dims,
                                           coords=coords_dict)

        # filtered_time_series = TimeSeriesX(filtered_time_series)
        filtered_time_series.attrs = self.time_series.attrs.copy()
        return filtered_time_series
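
A minimal usage sketch for the class above, based on the keyword arguments declared in its _descriptors (time_series, freq_range, order, filt_type); the synthetic 60 Hz test signal and its parameters are assumptions for illustration, not from the original source.

import numpy as np

# Synthetic 1-D series sampled at 1000 Hz with a 60 Hz component to notch out.
t = np.arange(1000) / 1000.0
signal = np.sin(2 * np.pi * 10 * t) + 0.5 * np.sin(2 * np.pi * 60 * t)
ts = TimeSeriesX.create(signal, 1000.0, coords={'time': t}, dims=['time'])

# Band-stop ("notch") filter around 58-62 Hz, matching the class defaults.
bfilter = ButterworthFilter(time_series=ts, freq_range=[58, 62], order=4, filt_type='stop')
filtered = bfilter.filter()
print(float(filtered['samplerate']))   # 1000.0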
Example #29
def test_baseline_corrected():
    t = np.linspace(0, 10, 100)
    values = np.array([1] * 50 + [2] * 50)
    coords = {"time": t}
    ts = TimeSeriesX.create(values, 10., coords, dims=("time", ))
    corrected = ts.baseline_corrected((0, 5))
    assert all(ts['time'] == corrected['time'])
    assert ts['samplerate'] == corrected['samplerate']
    assert all(corrected.data[:50] == 0)
    assert all(corrected.data[50:] == 1)
Example #30
def test_resampled():
    ts = TimeSeriesX.create(np.linspace(0, 100, 100), 10., dims=['time'])

    resampled = ts.resampled(20.)
    assert resampled.data.shape == (200, )
    assert resampled['samplerate'] == 20

    resampled = ts.resampled(5)
    assert resampled.data.shape == (50, )
    assert resampled['samplerate'] == 5
Example #31
def test_init():
    """Test that everything is initialized properly."""
    data = np.random.random((10, 10, 10))
    rate = 1000

    with pytest.raises(AssertionError):
        TimeSeriesX(data, {})

    with pytest.raises(AssertionError):
        TimeSeriesX.create(data, None, coords={})

    assert TimeSeriesX.create(data, None, coords={
        'samplerate': 1
    }).samplerate == 1

    ts = TimeSeriesX(data, dict(samplerate=rate))
    assert isinstance(ts, xr.DataArray)
    assert ts.shape == (10, 10, 10)
    assert ts['samplerate'] == rate
Example #32
def test_baseline_corrected():
    t = np.linspace(0, 10, 100)
    values = np.array([1]*50 + [2]*50)
    coords = {"time": t}
    ts = TimeSeriesX.create(values, 10., coords, dims=("time",))
    corrected = ts.baseline_corrected((0, 5))
    assert all(ts['time'] == corrected['time'])
    assert ts['samplerate'] == corrected['samplerate']
    assert all(corrected.data[:50] == 0)
    assert all(corrected.data[50:] == 1)
Example #33
def test_resampled():
    ts = TimeSeriesX.create(np.linspace(0, 100, 100), 10., dims=['time'])

    resampled = ts.resampled(20.)
    assert resampled.data.shape == (200,)
    assert resampled['samplerate'] == 20

    resampled = ts.resampled(5)
    assert resampled.data.shape == (50,)
    assert resampled['samplerate'] == 5
Example #34
def concat_time_seriesX(data_vec):
    n_events = len(data_vec)
    times = {'time': data_vec[0]['time'].values}
    channels = {'channels': data_vec[0]['channels'].values}
    samplerate = {'samplerate': data_vec[0]['samplerate']}
    events = {'event': np.arange(n_events)}
    coords = {**times, **channels, **samplerate, **events}

    data_vec_values = [x.values for x in data_vec]
    data_array = np.stack(data_vec_values)
    data = TimeSeriesX(data_array,
                       coords=coords,
                       dims=['event', 'time', 'channels'])

    data.attrs['x'] = data_vec[0].attrs['x']
    data.attrs['y'] = data_vec[0].attrs['y']
    data.attrs['z'] = data_vec[0].attrs['z']

    return data
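
A hedged sketch of how concat_time_seriesX might be used: stack several single-event (time x channels) TimeSeriesX objects that share their axes along a new 'event' dimension. The synthetic inputs and attrs below are assumptions; only the helper itself comes from the example above.

import numpy as np

# Three "events", each a (time x channels) TimeSeriesX with matching coordinates.
time = np.arange(500) / 500.0
channels = np.arange(2)
epochs = []
for _ in range(3):
    ts = TimeSeriesX(np.random.random((500, 2)),
                     coords={'time': time, 'channels': channels, 'samplerate': 500.0},
                     dims=['time', 'channels'])
    # concat_time_seriesX copies these attrs from the first element of the list.
    ts.attrs['x'] = ts.attrs['y'] = ts.attrs['z'] = np.zeros(len(channels))
    epochs.append(ts)

stacked = concat_time_seriesX(epochs)
print(stacked.dims)    # ('event', 'time', 'channels')
print(stacked.shape)   # (3, 500, 2)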
Example #35
def test_concatenate():
    """make sure we can concatenate easily time series x - test it with rec
    array as one of the coords.

    This fails for xarray > 0.7. See https://github.com/pydata/xarray/issues/1434
    for details.

    """
    p1 = np.array([('John', 180), ('Stacy', 150), ('Dick', 200)],
                  dtype=[('name', '|S256'), ('height', int)])
    p2 = np.array([('Bernie', 170), ('Donald', 250), ('Hillary', 150)],
                  dtype=[('name', '|S256'), ('height', int)])

    data = np.arange(50, 80, 1, dtype=np.float)
    dims = ['measurement', 'participant']

    ts1 = TimeSeriesX.create(data.reshape(10, 3),
                             None,
                             dims=dims,
                             coords={
                                 'measurement': np.arange(10),
                                 'participant': p1,
                                 'samplerate': 1
                             })

    ts2 = TimeSeriesX.create(data.reshape(10, 3) * 2,
                             None,
                             dims=dims,
                             coords={
                                 'measurement': np.arange(10),
                                 'participant': p2,
                                 'samplerate': 1
                             })

    combined = xr.concat((ts1, ts2), dim='participant')

    assert isinstance(combined, TimeSeriesX)
    assert (combined.participant.data['height'] == np.array(
        [180, 150, 200, 170, 250, 150])).all()
    assert (combined.participant.data['name'] == np.array(
        ['John', 'Stacy', 'Dick', 'Bernie', 'Donald', 'Hillary'])).all()
Example #36
def test_mean():
    """tests various ways to compute mean - collapsing different combination of axes"""
    data = np.arange(100).reshape(10, 10)
    ts_1 = TimeSeriesX.create(data,
                              None,
                              dims=['x', 'y'],
                              coords={
                                  'x': np.arange(10) * 2,
                                  'y': np.arange(10),
                                  'samplerate': 1
                              })
    grand_mean = ts_1.mean()

    assert grand_mean == 49.5

    x_mean = ts_1.mean(dim='x')
    assert (x_mean == np.arange(45, 55, 1, dtype=np.float)).all()
    # checking axes
    assert (ts_1.y == x_mean.y).all()

    y_mean = ts_1.mean(dim='y')
    assert (y_mean == np.arange(4.5, 95, 10, dtype=np.float)).all()
    # checking axes
    assert (y_mean.x == ts_1.x).all()

    # test mean NaN
    data_2 = np.arange(100, dtype=np.float).reshape(10, 10)
    np.fill_diagonal(data_2, np.NaN)
    # data_2[9,9] = 99

    ts_2 = TimeSeriesX.create(data_2,
                              None,
                              dims=['x', 'y'],
                              coords={
                                  'x': np.arange(10) * 2,
                                  'y': np.arange(10),
                                  'samplerate': 1
                              })

    grand_mean = ts_2.mean(skipna=True)
    assert grand_mean == 49.5
Example #37
class ButterworthFilter(PropertiedObject,BaseFilter):

    '''
    Applies Butterworth filter to a time series
    '''
    _descriptors = [
        TypeValTuple('time_series', TimeSeriesX, TimeSeriesX([0.0], dims=['time'])),
        TypeValTuple('order', int, 4),
        TypeValTuple('freq_range', list, [58, 62]),
        TypeValTuple('filt_type', str, 'stop'),
    ]

    def __init__(self, **kwds):
        '''
        Constructor
        :param kwds: allowed values are:
        -------------------------------------
        :param time_series  -  TimeSeriesX object
        :param order -  Butterworth filter order
        :param freq_range -  array of frequencies [min_freq, max_freq] to filter out
        :return: None
        '''
        self.init_attrs(kwds)

    def filter(self):
        '''
        Applies a Butterworth filter to the input time series and returns a filtered TimeSeriesX object
        :return: TimeSeriesX object
        '''

        from ptsa.filt import buttfilt

        time_axis_index = get_axis_index(self.time_series, axis_name='time')
        filtered_array = buttfilt(self.time_series,
                                  self.freq_range, float(self.time_series['samplerate']), self.filt_type,
                                  self.order, axis=time_axis_index)

        coords_dict = {coord_name: DataArray(coord.copy()) for coord_name, coord in self.time_series.coords.items()}
        coords_dict['samplerate'] = self.time_series['samplerate']
        dims = [dim_name for dim_name in self.time_series.dims]
        filtered_time_series = TimeSeriesX(
            filtered_array,
            dims=dims,
            coords=coords_dict
        )

        # filtered_time_series.attrs['samplerate'] = self.time_series.attrs['samplerate']
        # filtered_time_series.attrs['samplerate'] = self.time_series['samplerate']
        filtered_time_series = TimeSeriesX(filtered_time_series)

        return filtered_time_series
Example #38
    def test_wavelets_synthetic_data(self):
        samplerate = 1000.
        frequency = 180.0
        modulation_frequency = 80.0

        duration = 1.0

        n_points = int(np.round(duration * samplerate))
        x = np.arange(n_points, dtype=np.float)
        y = np.sin(x * (2 * np.pi * frequency / n_points))
        y_mod = np.sin(x * (2 * np.pi * frequency / n_points)) * np.sin(
            x * (2 * np.pi * modulation_frequency / n_points))

        ts = TimeSeriesX(y, dims=['time'], coords=[x])
        ts['samplerate'] = samplerate
        ts.attrs['samplerate'] = samplerate

        frequencies = [10.0, 30.0, 50.0, 80., 120., 180., 250.0, 300.0, 500.]
        for frequency in frequencies:
            wf = MorletWaveletFilter(time_series=ts,
                                     freqs=np.array([frequency]),
                                     output='both',
                                     frequency_dim_pos=0,
                                     verbose=True)

            pow_wavelet, phase_wavelet = wf.filter()

            from ptsa.wavelet import phase_pow_multi

            pow_wavelet_ptsa_orig = phase_pow_multi(freqs=[frequency],
                                                    samplerates=samplerate,
                                                    dat=ts.data,
                                                    to_return='power')

            assert_array_almost_equal(
                (pow_wavelet_ptsa_orig - pow_wavelet) / pow_wavelet_ptsa_orig,
                np.zeros_like(pow_wavelet),
                decimal=6)
Example #39
def test_hdf(tempdir):
    """Test saving/loading with HDF5."""
    data = np.random.random((10, 10, 10, 10))
    dims = ('time', 'x', 'y', 'z')
    coords = {label: np.linspace(0, 1, 10) for label in dims}
    rate = 1

    ts = TimeSeriesX.create(data, rate, coords=coords, dims=dims, name="test")

    filename = osp.join(tempdir, "timeseries.h5")
    ts.to_hdf(filename)

    with h5py.File(filename, 'r') as hfile:
        assert "data" in hfile
        assert "dims" in hfile
        assert "coords" in hfile
        assert "name" in list(hfile['/'].attrs.keys())

    loaded = TimeSeriesX.from_hdf(filename)
    assert (loaded.data == data).all()
    for coord in loaded.coords:
        assert (loaded.coords[coord] == ts.coords[coord]).all()
    for n, dim in enumerate(dims):
        assert loaded.dims[n] == dim
    assert loaded.name == "test"

    ts_with_attrs = TimeSeriesX.create(data,
                                       rate,
                                       coords=coords,
                                       dims=dims,
                                       name="test",
                                       attrs=dict(a=1, b=[1, 2]))
    ts_with_attrs.to_hdf(filename)
    loaded = TimeSeriesX.from_hdf(filename)
    for key in ts_with_attrs.attrs:
        assert ts_with_attrs.attrs[key] == loaded.attrs[key]
Example #40
    def test_wavelets_synthetic_data(self):
        samplerate = 1000.
        frequency = 180.0
        modulation_frequency = 80.0

        duration = 1.0

        n_points = int(np.round(duration*samplerate))
        x = np.arange(n_points, dtype=np.float)
        y = np.sin(x*(2*np.pi*frequency/n_points))
        y_mod = np.sin(x*(2*np.pi*frequency/n_points))* np.sin(x*(2*np.pi*modulation_frequency/n_points))

        ts = TimeSeriesX(y, dims=['time'], coords=[x])
        ts['samplerate']=samplerate
        ts.attrs['samplerate'] = samplerate

        frequencies = [ 10.0, 30.0, 50.0, 80., 120., 180., 250.0 , 300.0, 500.]
        for frequency  in frequencies:
            wf = MorletWaveletFilter(time_series=ts,
                                     freqs=np.array([frequency]),
                                     output='both',
                                     frequency_dim_pos=0,
                                     verbose=True
                                     )

            pow_wavelet, phase_wavelet = wf.filter()

            from ptsa.wavelet import phase_pow_multi


            pow_wavelet_ptsa_orig = phase_pow_multi(freqs=[frequency],samplerates=samplerate, dat=ts.data,to_return='power')


            assert_array_almost_equal(
                (pow_wavelet_ptsa_orig-pow_wavelet)/pow_wavelet_ptsa_orig,
                np.zeros_like(pow_wavelet), decimal=6)
Example #41
def test_filtered():
    data = np.random.random(1000)
    dims = ['time']

    ts = TimeSeriesX.create(data, 10, dims=dims)

    # TODO: real test (i.e., actually care about the filtering)
    with warnings.catch_warnings(record=True) as w:
        new_ts = ts.filtered([1, 2])
        assert len(w) == 1
        assert ts['samplerate'] == new_ts['samplerate']
        assert all(ts.data != new_ts.data)
        for key, attr in ts.attrs.items():
            assert attr == new_ts[key]
        assert ts.name == new_ts.name
        assert ts.dims == new_ts.dims
Example #42
def test_add_mirror_buffer():
    points = 100

    data = np.array([-1] * points + [1] * points)
    samplerate = 10.
    coords = {'time': np.linspace(-1, 1, points*2)}
    dims = ['time']
    ts = TimeSeriesX.create(data, samplerate, coords=coords, dims=dims)

    duration = 10
    buffered = ts.add_mirror_buffer(duration)
    assert len(buffered.data) == len(data) + 2 * duration * samplerate

    with pytest.raises(ValueError):
        # 100 s is longer than the length of data
        ts.add_mirror_buffer(100)
Example #43
def test_filtered():
    data = np.random.random(1000)
    dims = ['time']

    ts = TimeSeriesX.create(data, 10, dims=dims)

    # TODO: real test (i.e., actually care about the filtering)
    with warnings.catch_warnings(record=True) as w:
        new_ts = ts.filtered([1, 2])
        assert len(w) == 1
        assert ts['samplerate'] == new_ts['samplerate']
        assert all(ts.data != new_ts.data)
        for key, attr in ts.attrs.items():
            assert attr == new_ts[key]
        assert ts.name == new_ts.name
        assert ts.dims == new_ts.dims
Example #44
def test_add_mirror_buffer():
    points = 100

    data = np.array([-1] * points + [1] * points)
    samplerate = 10.
    coords = {'time': np.linspace(-1, 1, points * 2)}
    dims = ['time']
    ts = TimeSeriesX.create(data, samplerate, coords=coords, dims=dims)

    duration = 10
    buffered = ts.add_mirror_buffer(duration)
    assert len(buffered.data) == len(data) + 2 * duration * samplerate

    with pytest.raises(ValueError):
        # 100 s is longer than the length of data
        ts.add_mirror_buffer(100)
Example #45
def test_remove_buffer():
    length = 100
    data = np.array([0]*length)
    samplerate = 10.
    coords = {'time': np.linspace(-1, 1, length)}
    dims = ['time']
    ts = TimeSeriesX.create(data, samplerate, coords=coords, dims=dims)

    with pytest.raises(ValueError):
        # We can't remove this much
        ts.remove_buffer(int(samplerate * length + 1))

    buffer_dur = 0.1
    buffered = ts.add_mirror_buffer(buffer_dur)
    unbuffered = buffered.remove_buffer(buffer_dur)

    assert len(unbuffered.data) == len(ts.data)
    assert (unbuffered.data == ts.data).all()
Example #46
def test_remove_buffer():
    length = 100
    data = np.array([0] * length)
    samplerate = 10.
    coords = {'time': np.linspace(-1, 1, length)}
    dims = ['time']
    ts = TimeSeriesX.create(data, samplerate, coords=coords, dims=dims)

    with pytest.raises(ValueError):
        # We can't remove this much
        ts.remove_buffer(int(samplerate * length + 1))

    buffer_dur = 0.1
    buffered = ts.add_mirror_buffer(buffer_dur)
    unbuffered = buffered.remove_buffer(buffer_dur)

    assert len(unbuffered.data) == len(ts.data)
    assert (unbuffered.data == ts.data).all()
Example #47
    def build_output_arrays(self, wavelet_pow_array, wavelet_phase_array, time_axis):
        wavelet_pow_array_xray = None
        wavelet_phase_array_xray = None

        if isinstance(self.time_series, xr.DataArray):

            dims = list(self.time_series.dims[:-1] + ('frequency', 'time',))

            transposed_dims = []

            # NOTE: all computations up to this point assume that the frequency axis is at position -2,
            # whereas the default setting for this filter puts the frequency axis at index 0. To avoid
            # unnecessary transpositions we adjust the position of the frequency axis here.

            # getting frequency dim position as positive integer
            self.frequency_dim_pos = (len(dims) + self.frequency_dim_pos) % len(dims)
            orig_frequency_idx = dims.index('frequency')

            if self.frequency_dim_pos != orig_frequency_idx:
                transposed_dims = dims[:orig_frequency_idx] + dims[orig_frequency_idx + 1:]
                transposed_dims.insert(self.frequency_dim_pos, 'frequency')

            coords = {dim_name: self.time_series.coords[dim_name] for dim_name in self.time_series.dims[:-1]}
            coords['frequency'] = self.freqs
            coords['time'] = time_axis

            if 'offsets' in self.time_series.coords.keys():
                coords['offsets'] = ('time',  self.time_series['offsets'])


            if wavelet_pow_array is not None:
                wavelet_pow_array_xray = self.construct_output_array(wavelet_pow_array, dims=dims, coords=coords)
            if wavelet_phase_array is not None:
                wavelet_phase_array_xray = self.construct_output_array(wavelet_phase_array, dims=dims, coords=coords)

            if wavelet_pow_array_xray is not None:
                wavelet_pow_array_xray = TimeSeriesX(wavelet_pow_array_xray)
                if len(transposed_dims):
                    wavelet_pow_array_xray = wavelet_pow_array_xray.transpose(*transposed_dims)

                wavelet_pow_array_xray.attrs = self.time_series.attrs.copy()

            if wavelet_phase_array_xray is not None:
                wavelet_phase_array_xray = TimeSeriesX(wavelet_phase_array_xray)
                if len(transposed_dims):
                    wavelet_phase_array_xray = wavelet_phase_array_xray.transpose(*transposed_dims)

                wavelet_phase_array_xray.attrs = self.time_series.attrs.copy()

            return wavelet_pow_array_xray, wavelet_phase_array_xray
Example #48
    def read_session(self, eegfile_name):
        samplesize = 1.0 / self.samplerate

        bin_reader = self.bin_readers_dict[eegfile_name]

        print('reading ', eegfile_name)

        start_offset = self.offset
        end_offset = -1
        if self.event_data_only:
            # reading continuous data containing events and a small buffer
            start_offset, end_offset = self.determine_read_offset_range(eegfile_name)

        eegdata = bin_reader._load_all_data(channels=self.channels, start_offset=start_offset, end_offset=end_offset)


        # constructing time axis as a record array [(session_time_in_sec, offset)]

        number_of_time_points = eegdata.shape[2]
        start_time = start_offset * samplesize
        end_time = start_time + number_of_time_points * samplesize

        time_range = np.linspace(start_time, end_time, number_of_time_points)
        eegoffset = np.arange(start_offset, start_offset+ number_of_time_points)

        time_axis = np.rec.fromarrays([time_range, eegoffset], names='time,eegoffset')

        # constructing xray DataArray with session eeg data - note we are adding an event dimension to simplify
        # chopping of the data sample into events - single events will be concatenated along the events axis
        eegdata_xray = xray.DataArray(eegdata, coords=[self.channels, np.arange(1), time_axis],
                                      dims=['channels', 'events', 'time'])
        eegdata_xray.attrs['samplerate'] = self.samplerate

        print('last_time_stamp=', eegdata_xray['time'][-1])

        return TimeSeriesX(eegdata_xray)
Example #49
    def build_output_arrays(self, wavelet_pow_array, wavelet_phase_array, time_axis):
        wavelet_pow_array_xray = None
        wavelet_phase_array_xray = None

        if isinstance(self.time_series, xray.DataArray):

            dims = list(self.time_series.dims[:-1] + ("frequency", "time"))

            transposed_dims = []

            # getting frequency dim position as positive integer
            self.frequency_dim_pos = (len(dims) + self.frequency_dim_pos) % len(dims)
            orig_frequency_idx = dims.index("frequency")

            if self.frequency_dim_pos != orig_frequency_idx:
                transposed_dims = dims[:orig_frequency_idx] + dims[orig_frequency_idx + 1 :]
                transposed_dims.insert(self.frequency_dim_pos, "frequency")

            coords = {dim_name: self.time_series.coords[dim_name] for dim_name in self.time_series.dims[:-1]}
            coords["frequency"] = self.freqs
            coords["time"] = time_axis

            if wavelet_pow_array is not None:
                wavelet_pow_array_xray = self.construct_output_array(wavelet_pow_array, dims=dims, coords=coords)
            if wavelet_phase_array is not None:
                wavelet_phase_array_xray = self.construct_output_array(wavelet_phase_array, dims=dims, coords=coords)

            if wavelet_pow_array_xray is not None:
                wavelet_pow_array_xray = TimeSeriesX(wavelet_pow_array_xray)
                if len(transposed_dims):
                    wavelet_pow_array_xray = wavelet_pow_array_xray.transpose(*transposed_dims)

                wavelet_pow_array_xray.attrs = self.time_series.attrs.copy()

            if wavelet_phase_array_xray is not None:
                wavelet_phase_array_xray = TimeSeriesX(wavelet_phase_array_xray)
                if len(transposed_dims):
                    wavelet_phase_array_xray = wavelet_phase_array_xray.transpose(*transposed_dims)

                wavelet_phase_array_xray.attrs = self.time_series.attrs.copy()

            return wavelet_pow_array_xray, wavelet_phase_array_xray
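
The trickiest part of build_output_arrays is the dimension bookkeeping: a possibly negative frequency_dim_pos is normalized into a valid index, 'frequency' is removed from its default position and re-inserted at the requested one, and the resulting list drives the later transpose. A minimal, self-contained sketch of that index arithmetic on plain lists (hypothetical values, not the filter's actual state):

dims = ['events', 'channels', 'frequency', 'time']   # default output layout
frequency_dim_pos = -4                                # e.g. "make frequency the first axis"

# normalize a negative position into a valid index, exactly as above
frequency_dim_pos = (len(dims) + frequency_dim_pos) % len(dims)   # -> 0

orig_frequency_idx = dims.index('frequency')          # -> 2
transposed_dims = []
if frequency_dim_pos != orig_frequency_idx:
    transposed_dims = dims[:orig_frequency_idx] + dims[orig_frequency_idx + 1:]
    transposed_dims.insert(frequency_dim_pos, 'frequency')

print(transposed_dims)   # ['frequency', 'events', 'channels', 'time']
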
Example #50
0
    def read_events_data(self):
        """
        Reads eeg data for individual events

        :return: TimeSeriesX object (channels x events x time) with data for individual events
        """
        self.event_ok_mask_sorted = None  # reset self.event_ok_mask_sorted

        evs = self.events

        raw_readers, original_dataroots = self.__create_base_raw_readers()

        # used for restoring original order of the events
        ordered_indices = np.arange(len(evs))
        event_indices_list = []
        events = []

        ts_array_list = []

        event_ok_mask_list = []


        for s, (raw_reader, dataroot) in enumerate(zip(raw_readers, original_dataroots)):

            ts_array, read_ok_mask = raw_reader.read()

            event_ok_mask_list.append(np.all(read_ok_mask,axis=0))

            ind = np.atleast_1d(evs.eegfile == dataroot)
            event_indices_list.append(ordered_indices[ind])
            events.append(evs[ind])


            ts_array_list.append(ts_array)


        if not all([r.channel_name==raw_readers[0].channel_name for r in raw_readers]):
            raise IncompatibleDataError('cannot read monopolar and bipolar data together')

        self.channel_name = raw_readers[0].channel_name
        # print('raw_reader_channel_names: \n%s'%[x.channel_name for x in raw_readers])
        # print('self.channel_name: %s'%self.channel_name)


        event_indices_array = np.hstack(event_indices_list)

        event_indices_restore_sort_order_array = event_indices_array.argsort()

        start_extend_time = time.time()
        # new code
        eventdata = xr.concat(ts_array_list, dim='start_offsets')
        # tdim = np.linspace(self.start_time-self.buffer_time,self.end_time+self.buffer_time,num=eventdata['offsets'].shape[0])
        # samplerate=eventdata.attrs['samplerate'].data
        samplerate = float(eventdata['samplerate'])
        tdim = np.arange(eventdata.shape[-1]) * (1.0 / samplerate) + (self.start_time - self.buffer_time)
        cdim = eventdata[self.channel_name]
        edim = np.concatenate(events).view(np.recarray).copy()

        attrs = eventdata.attrs.copy()
        # constructing TimeSeries Object
        # eventdata = TimeSeriesX(eventdata.data,dims=['channels','events','time'],coords=[cdim,edim,tdim])
        eventdata = TimeSeriesX(eventdata.data,
                                dims=[self.channel_name, 'events', 'time'],
                                coords={self.channel_name: cdim,
                                        'events': edim,
                                        'time': tdim,
                                        'samplerate': samplerate
                                        }
                                )

        eventdata.attrs = attrs

        # restoring original order of the events
        eventdata = eventdata[:, event_indices_restore_sort_order_array, :]

        event_ok_mask = np.hstack(event_ok_mask_list)
        event_ok_mask_sorted = event_ok_mask[event_indices_restore_sort_order_array]
        #removing bad events
        if np.any(~event_ok_mask_sorted):
            self.removed_corrupt_events=True
            self.event_ok_mask_sorted = event_ok_mask_sorted

        eventdata = eventdata[:, event_ok_mask_sorted, :]

        return eventdata
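
The index bookkeeping in read_events_data (collect per-file event indices, hstack them, then argsort to restore the original event order) is a generic numpy trick worth seeing in isolation. A minimal sketch:

import numpy as np

original = np.array(['e0', 'e1', 'e2', 'e3', 'e4'])

# pretend the events were read grouped by eegfile, i.e. out of their original order
per_file_indices = [np.array([1, 3]), np.array([0, 2, 4])]
read_order = np.hstack(per_file_indices)          # [1, 3, 0, 2, 4]
data_in_read_order = original[read_order]

# argsort of the scrambled indices maps read order back to the original order
restore = read_order.argsort()                    # [2, 0, 3, 1, 4]
assert (data_in_read_order[restore] == original).all()
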
Example #51
0
class DataChopper(PropertiedObject,BaseFilter):
    """
    DataChopper converts the continuous time series of an entire session into chunks based on the events specification.
    In other words, you may read the entire eeg session first and then use DataChopper
    to divide it into chunks corresponding to events of your choice.
    """
    _descriptors = [

        TypeValTuple('start_time', float, 0.0),
        TypeValTuple('end_time', float, 0.0),
        TypeValTuple('buffer_time', float, 0.0),
        TypeValTuple('events', np.recarray, np.recarray((1,), dtype=[('x', int)])),
        TypeValTuple('start_offsets', np.ndarray, np.array([], dtype=int)),
        TypeValTuple('session_data', TimeSeriesX, TimeSeriesX([0.0], dims=['time'])),
    ]

    def __init__(self, **kwds):
        """
        Constructor:

        :param kwds: allowed values are:
        -------------------------------------
        :param start_time {float} - read start offset in seconds w.r.t. the eegoffset specified in the events recarray
        :param end_time {float} - read end offset in seconds w.r.t. the eegoffset specified in the events recarray
        :param buffer_time {float} - extra buffer in seconds (subtracted from the start read and added to the end read)
        :param events {np.recarray} - numpy recarray representing events
        :param start_offsets {np.ndarray} - numpy array with offsets at which chopping should take place
        :param session_data {TimeSeriesX} - TimeSeriesX object with eeg session data

        :return: None
        """

        self.init_attrs(kwds)

    def get_event_chunk_size_and_start_point_shift(self, eegoffset, samplerate, offset_time_array):
        """
        Computes the number of time points to read for each event and the read offset w.r.t. the event's eegoffset
        :param eegoffset: eegoffset of the single event being chopped out
        :param samplerate: samplerate of the time series
        :param offset_time_array: "offsets" axis of the DataArray returned by EEGReader. This axis represents the
        time axis, but instead of being dimensioned in seconds it simply gives the position of a given data point in the series.
        The time axis is constructed by dividing the offsets axis by the samplerate
        :return: event's read chunk size {int}, read offset w.r.t. the event's eegoffset {int}
        """
        # figuring out the read chunk size and the shift w.r.t. the eegoffset. We need this fcn in case we pass resampled session data

        original_samplerate = float((offset_time_array[-1] - offset_time_array[0])) / offset_time_array.shape[
            0] * samplerate


        start_point = eegoffset - int(np.ceil((self.buffer_time - self.start_time) * original_samplerate))
        end_point = eegoffset + int(
            np.ceil((self.end_time + self.buffer_time) * original_samplerate))

        selector_array = np.where((offset_time_array >= start_point) & (offset_time_array < end_point))[0]
        start_point_shift = selector_array[0] - np.where((offset_time_array >= eegoffset))[0][0]

        return len(selector_array), start_point_shift


    def filter(self):
        """
        Chops the session into chunks corresponding to events
        :return: TimeSeriesX object with the chopped session
        """
        chop_on_start_offsets_flag = bool(len(self.start_offsets))

        if chop_on_start_offsets_flag:

            start_offsets = self.start_offsets
            chopping_axis_name = 'start_offsets'
            chopping_axis_data = start_offsets
        else:

            evs = self.events[self.events.eegfile == self.session_data.attrs['dataroot']]
            start_offsets = evs.eegoffset
            chopping_axis_name = 'events'
            chopping_axis_data = evs


        # samplerate = self.session_data.attrs['samplerate']
        samplerate = float(self.session_data['samplerate'])
        offset_time_array = self.session_data['offsets']

        event_chunk_size, start_point_shift = self.get_event_chunk_size_and_start_point_shift(
            eegoffset=start_offsets[0],
            samplerate=samplerate,
            offset_time_array=offset_time_array)


        event_time_axis = np.arange(event_chunk_size)*(1.0/samplerate)+(self.start_time-self.buffer_time)

        data_list = []

        for i, eegoffset in enumerate(start_offsets):

            start_chop_pos = np.where(offset_time_array >= eegoffset)[0][0]
            start_chop_pos += start_point_shift
            selector_array = np.arange(start=start_chop_pos, stop=start_chop_pos + event_chunk_size)

            chopped_data_array = self.session_data.isel(time=selector_array)

            chopped_data_array['time'] = event_time_axis
            chopped_data_array['start_offsets'] = [i]

            data_list.append(chopped_data_array)

        ev_concat_data = xr.concat(data_list, dim='start_offsets')


        ev_concat_data = ev_concat_data.rename({'start_offsets':chopping_axis_name})
        ev_concat_data[chopping_axis_name] = chopping_axis_data

        # ev_concat_data.attrs['samplerate'] = samplerate
        ev_concat_data['samplerate'] = samplerate
        ev_concat_data.attrs['start_time'] = self.start_time
        ev_concat_data.attrs['end_time'] = self.end_time
        ev_concat_data.attrs['buffer_time'] = self.buffer_time
        return TimeSeriesX(ev_concat_data)
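
A minimal usage sketch for DataChopper, assuming `events` is a recarray with 'eegfile' and 'eegoffset' fields and `session_eeg` is a session-level TimeSeriesX produced by an EEGReader-style session read (so it carries the 'offsets' and 'samplerate' coordinates and the 'dataroot' attribute that filter() relies on); the variable names are illustrative:

import numpy as np

# chop the continuous session into per-event chunks
chopper = DataChopper(events=events,
                      session_data=session_eeg,
                      start_time=0.0,
                      end_time=1.6,
                      buffer_time=1.0)
chopped = chopper.filter()       # TimeSeriesX with an 'events' axis, one entry per event
print(chopped.dims)

# alternatively, chop at explicit sample offsets instead of events
chopper = DataChopper(start_offsets=np.array([1000, 5000, 9000]),
                      session_data=session_eeg,
                      start_time=0.0,
                      end_time=1.6)
chunks = chopper.filter()        # chopping axis is named 'start_offsets' in this case
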
Example #52
0
    def test_append(self):
        """make sure we can concatenate easily time series x - test it with rec array as one of the coords"""

        p_data_1 = np.array([('John', 180), ('Stacy', 150), ('Dick',200)], dtype=[('name', '|S256'), ('height', int)])

        p_data_2 = np.array([('Bernie', 170), ('Donald', 250), ('Hillary',150)], dtype=[('name', '|S256'), ('height', int)])


        weights_data  = np.arange(50,80,1,dtype=np.float)


        weights_ts_1 = TimeSeriesX.create(weights_data.reshape(10,3),
                                          None,
                                          dims=['measurement','participant'],
                                          coords={'measurement':np.arange(10),
                                                  'participant':p_data_1,
                                                  'samplerate': 1}
                                          )

        weights_ts_2 = TimeSeriesX.create(weights_data.reshape(10,3)*2,
                                          None,
                                          dims=['measurement','participant'],
                                          coords={'measurement':np.arange(10),
                                                  'participant':p_data_2,
                                                  'samplerate': 1}
                                          )


        weights_ts_3 = TimeSeriesX.create(weights_data.reshape(3,10)*2,
                                          None,
                                          dims=['participant','measurement'],
                                          coords={'measurement':np.arange(10),
                                                  'participant':p_data_2,
                                                  'samplerate': 1}
                                          )

        weights_ts_4 = TimeSeriesX.create(np.arange(50,83,1,dtype=np.float).reshape(11,3),
                                          None,
                                          dims=['measurement','participant'],
                                          coords={'measurement':np.arange(11),
                                                  'participant':p_data_2,
                                                  'samplerate': 1}
                                          )


        weights_ts_5 = TimeSeriesX.create(weights_data.reshape(10,3)*2,
                                          None,
                                          dims=['measurement','participant'],
                                          coords={'measurement':np.arange(10)*2,
                                                  'participant':p_data_2,
                                                  'samplerate': 1}
                                          )



        with self.assertRaises(ValueError) as context:
            weights_ts_1.append(dim='measurement', ts=np.arange(1000))
            self.assertTrue(isinstance(context.exception,ValueError))

        with self.assertRaises(ValueError) as context:
            weights_ts_1.append(dim='measurement', ts=weights_ts_3)
            self.assertTrue(isinstance(context.exception,ValueError))
            self.assertTrue('Dimensions' in str(context.exception))

        with self.assertRaises(ValueError) as context:
            weights_ts_1.append(dim='participant', ts=weights_ts_4)
            self.assertTrue(isinstance(context.exception,ValueError))
            self.assertTrue('Dimension mismatch' in str(context.exception))

        weights_ts_1.append(dim='participant', ts=weights_ts_5)
Example #53
0
    def read(self, channels):
        evs = self.events

        raw_bin_wrappers, original_eeg_files = self.__create_bin_readers()

        # we need to create rawbinwrappers first to figure out sample rate before calling __compute_time_series_length()
        time_series_length = self.__compute_time_series_length()

        time_series_data = np.empty(
            (len(channels), len(evs), time_series_length),
            dtype=np.float) * np.nan

        ordered_indices = np.arange(len(evs))
        event_indices_list = []
        events = []
        newdat_list = []
        eventdata = None

        for s, (src,
                eegfile) in enumerate(zip(raw_bin_wrappers,
                                          original_eeg_files)):
            ind = np.atleast_1d(evs.eegfile == eegfile)

            event_indices_list.append(ordered_indices[ind])

            if len(ind) == 1:
                event_offsets = evs['eegoffset']
                events.append(evs)
            else:
                event_offsets = evs[ind]['eegoffset']
                events.append(evs[ind])

            # get the timeseries for those events
            newdat = src.get_event_data_xray(channels,
                                             event_offsets,
                                             self.start_time,
                                             self.end_time,
                                             self.buffer_time,
                                             resampled_rate=None,
                                             filt_freq=None,
                                             filt_type=None,
                                             filt_order=None,
                                             keep_buffer=self.keep_buffer,
                                             loop_axis=None,
                                             num_mp_procs=0,
                                             eoffset='eegoffset',
                                             eoffset_in_time=False)
            newdat_list.append(newdat)

        event_indices_array = np.hstack(event_indices_list)
        event_indices_restore_sort_order_array = event_indices_array.argsort()

        start_extend_time = time.time()

        #new code
        eventdata = xr.concat(newdat_list, dim='events')
        end_extend_time = time.time()

        # concatenate (must eventually check that dims match)
        # ORIGINAL CODE
        tdim = eventdata['time']
        cdim = eventdata['channels']
        # srate = eventdata.samplerate
        srate = eventdata.attrs['samplerate']
        events = np.concatenate(events).view(Events)

        eventdata_xray = xr.DataArray(eventdata.values,
                                      coords=[cdim, events, tdim],
                                      dims=['channels', 'events', 'time'])
        eventdata_xray.attrs['samplerate'] = eventdata.attrs['samplerate']
        eventdata_xray = eventdata_xray[:,
                                        event_indices_restore_sort_order_array, :]  #### RESTORE THIS

        if not self.keep_buffer:
            # trimming buffer data samples
            number_of_buffer_samples = self.get_number_of_samples_for_interval(
                self.buffer_time)
            if number_of_buffer_samples > 0:
                eventdata_xray = eventdata_xray[:, :, number_of_buffer_samples:
                                                -number_of_buffer_samples]

        return TimeSeriesX(eventdata_xray)
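
The buffer handling at the end of read() (every event is read with buffer_time of padding on each side, then the padded samples are sliced off symmetrically when keep_buffer is false) can be illustrated on a toy array. A minimal sketch, assuming the sample count is simply int(buffer_time * samplerate) — the actual get_number_of_samples_for_interval helper may round differently:

import numpy as np

samplerate = 10.0
buffer_time = 0.5                          # seconds of padding on each side
n_buffer = int(buffer_time * samplerate)   # assumed rounding; see note above

# toy (channels x events x time) block read with the buffer included
padded = np.arange(2 * 3 * 20).reshape(2, 3, 20)

# trim the same number of samples from both ends of the time axis
trimmed = padded[:, :, n_buffer:-n_buffer] if n_buffer > 0 else padded
print(trimmed.shape)   # (2, 3, 10)
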
Example #54
0
    def filter(self):

        time_axis = self.time_series['time']

        time_axis_size = time_axis.shape[0]
        samplerate = float(self.time_series['samplerate'])

        wavelet_dims = self.time_series.shape[:-1] + (self.freqs.shape[0],)
        print(wavelet_dims)

        powers_reshaped = np.array([[]], dtype=np.float)
        phases_reshaped = np.array([[]], dtype=np.float)
        wavelets_complex_reshaped = np.array([[]], dtype=np.complex)

        if self.output == 'power':
            powers_reshaped = np.empty(shape=(np.prod(wavelet_dims), self.time_series.shape[-1]), dtype=np.float)
        if self.output == 'phase':
            phases_reshaped = np.empty(shape=(np.prod(wavelet_dims), self.time_series.shape[-1]), dtype=np.float)
        if self.output == 'both':
            powers_reshaped = np.empty(shape=(np.prod(wavelet_dims), self.time_series.shape[-1]), dtype=np.float)
            phases_reshaped = np.empty(shape=(np.prod(wavelet_dims), self.time_series.shape[-1]), dtype=np.float)
        if self.output == 'complex':
            wavelets_complex_reshaped = np.empty(shape=(np.prod(wavelet_dims), self.time_series.shape[-1]),
                                                 dtype=np.complex)

        # mt = morlet.MorletWaveletTransformMP(self.cpus)
        mt = MorletWaveletTransformMP(self.cpus)


        time_series_reshaped = self.time_series.data.reshape(np.prod(self.time_series.shape[:-1]),
                                                             self.time_series.shape[-1])
        if self.output == 'power':
            mt.set_output_type(morlet.POWER)
        if self.output == 'phase':
            mt.set_output_type(morlet.PHASE)
        if self.output == 'both':
            mt.set_output_type(morlet.BOTH)
        if self.output == 'complex':
            mt.set_output_type(morlet.COMPLEX)

        mt.set_signal_array(time_series_reshaped)
        mt.set_wavelet_pow_array(powers_reshaped)
        mt.set_wavelet_phase_array(phases_reshaped)
        mt.set_wavelet_complex_array(wavelets_complex_reshaped)

        # mt.initialize_arrays(time_series_reshaped, wavelets_reshaped)

        mt.initialize_signal_props(float(self.time_series['samplerate']))
        mt.initialize_wavelet_props(self.width, self.freqs)
        mt.prepare_run()

        s = time.time()
        mt.compute_wavelets_threads()

        powers_final = None
        phases_final = None
        wavelet_complex_final = None

        if self.output == 'power':
            powers_final = powers_reshaped.reshape(wavelet_dims + (self.time_series.shape[-1],))
        if self.output == 'phase':
            phases_final = phases_reshaped.reshape(wavelet_dims + (self.time_series.shape[-1],))
        if self.output == 'both':
            powers_final = powers_reshaped.reshape(wavelet_dims + (self.time_series.shape[-1],))
            phases_final = phases_reshaped.reshape(wavelet_dims + (self.time_series.shape[-1],))
        if self.output == 'complex':
            wavelet_complex_final = wavelets_complex_reshaped.reshape(wavelet_dims + (self.time_series.shape[-1],))

        # wavelets_final = powers_reshaped.reshape( wavelet_dims+(self.time_series.shape[-1],) )


        coords = {k: v for k, v in self.time_series.coords.items()}
        coords['frequency'] = self.freqs

        powers_ts = None
        phases_ts = None
        wavelet_complex_ts = None

        if powers_final is not None:
            powers_ts = TimeSeriesX(powers_final,
                                    dims=self.time_series.dims[:-1] + ('frequency', self.time_series.dims[-1],),
                                    coords=coords
                                    )
            final_dims = (powers_ts.dims[-2],) + powers_ts.dims[:-2] + (powers_ts.dims[-1],)

            powers_ts = powers_ts.transpose(*final_dims)

        if phases_final is not None:
            phases_ts = TimeSeriesX(phases_final,
                                    dims=self.time_series.dims[:-1] + ('frequency', self.time_series.dims[-1],),
                                    coords=coords
                                    )

            final_dims = (phases_ts.dims[-2],) + phases_ts.dims[:-2] + (phases_ts.dims[-1],)

            phases_ts = phases_ts.transpose(*final_dims)

        if wavelet_complex_final is not None:
            wavelet_complex_ts = TimeSeriesX(wavelet_complex_final,
                                             dims=self.time_series.dims[:-1] + (
                                             'frequency', self.time_series.dims[-1],),
                                             coords=coords
                                             )

            final_dims = (wavelet_complex_ts.dims[-2],) + wavelet_complex_ts.dims[:-2] + (wavelet_complex_ts.dims[-1],)

            wavelet_complex_ts = wavelet_complex_ts.transpose(*final_dims)

        if wavelet_complex_ts is not None:
            return wavelet_complex_ts, None
        else:
            return powers_ts, phases_ts
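
The reshape/transpose bookkeeping in this filter (flatten every leading dimension into rows, run the wavelet transform row by row, reshape back to the original dims plus a frequency axis, then move 'frequency' to the front) can be checked in isolation. A minimal numpy sketch with a dummy per-row computation standing in for the Morlet transform:

import numpy as np

n_events, n_channels, n_time, n_freqs = 4, 3, 50, 6
signal = np.random.random((n_events, n_channels, n_time))
freqs = np.linspace(3, 8, n_freqs)

# flatten every leading dim into rows of shape (n_time,)
rows = signal.reshape(-1, n_time)                  # (n_events * n_channels, n_time)

# dummy stand-in for the transform: one output row per (row, frequency) pair,
# ordered the way the filter's final reshape expects its flat output buffer
out = np.empty((rows.shape[0] * n_freqs, n_time))
for i, row in enumerate(rows):
    for j, f in enumerate(freqs):
        out[i * n_freqs + j] = row * f             # placeholder computation

# reshape back: original leading dims + frequency + time ...
powers = out.reshape(n_events, n_channels, n_freqs, n_time)

# ... then bring frequency to the front, mirroring transpose(*final_dims)
powers = np.moveaxis(powers, -2, 0)
print(powers.shape)    # (6, 4, 3, 50): frequency x events x channels x time
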
Example #55
0
def test_append_recarray():
    """Test appending along a dimension with a recarray."""
    p1 = np.array([('John', 180), ('Stacy', 150), ('Dick', 200)],
                  dtype=[('name', '|S256'), ('height', int)])
    p2 = np.array([('Bernie', 170), ('Donald', 250), ('Hillary', 150)],
                  dtype=[('name', '|S256'), ('height', int)])

    data = np.arange(50, 80, 1, dtype=np.float)
    dims = ['measurement', 'participant']

    ts1 = TimeSeriesX.create(data.reshape(10, 3),
                             None,
                             dims=dims,
                             coords={
                                 'measurement': np.arange(10),
                                 'participant': p1,
                                 'samplerate': 1
                             })

    ts2 = TimeSeriesX.create(data.reshape(10, 3) * 2,
                             None,
                             dims=dims,
                             coords={
                                 'measurement': np.arange(10),
                                 'participant': p2,
                                 'samplerate': 1
                             })

    ts3 = TimeSeriesX.create(data.reshape(10, 3) * 2,
                             None,
                             dims=dims,
                             coords={
                                 'measurement': np.arange(10),
                                 'participant': p2,
                                 'samplerate': 2
                             })

    ts4 = TimeSeriesX.create(data.reshape(10, 3) * 2,
                             None,
                             dims=dims,
                             coords={
                                 'measurement': np.linspace(0, 1, 10),
                                 'participant': p2,
                                 'samplerate': 2
                             })

    combined = ts1.append(ts2, dim='participant')

    assert isinstance(combined, TimeSeriesX)
    assert (combined.participant.data['height'] == np.array(
        [180, 150, 200, 170, 250, 150])).all()
    names = np.array(
        [b'John', b'Stacy', b'Dick', b'Bernie', b'Donald', b'Hillary'])
    assert (combined.participant.data['name'] == names).all()

    # incompatible sample rates
    with pytest.raises(ConcatenationError):
        ts1.append(ts3)

    # incompatible other dimensions (measurement)
    with pytest.raises(ConcatenationError):
        ts1.append(ts4)