Example #1
    def load_data(self):
        for i in range(self.prop['Nf']):
            if i == 0:
                data = tdms.read(self.prop['fns'][i])
            else:
                data = np.c_[data, tdms.read(self.prop['fns'][i])]
        return data
Example #2
def _import_file(path=None, show=False, include_path=False):
    """
    Import a TDMS file as a Data_File object.

    Parameters
    ----------
    path : str, optional
        File path to import from. A file dialog will open if no path is
        provided.
    show : bool, optional
        If True, print information about the imported TDMS file.
    include_path : bool, optional
        If True, also return the path that the file was imported from.

    Notes
    -----
    This is deprecated and will be removed. Please use `import_tdms_file()`.
    """
    if path is None:
        path = _open_file()
    tdms_file = TdmsFile.read(path)
    channels = tdms_file.groups()[0].channels()
    tdms_f = remove_nans(channels[0][:])
    tdms_x = remove_nans(channels[1][:])
    tdms_y = remove_nans(channels[2][:])
    [tdms_f, tdms_x, tdms_y] = match_lengths([tdms_f, tdms_x, tdms_y])
    tdms_file = Data_File(tdms_x, tdms_y, tdms_f)
    if len(channels) >= 5:
        tdms_probe_temp = remove_nans(channels[3][:])
        tdms_cryo_temp = remove_nans(channels[4][:])
        tdms_file._import_probe_temp(tdms_probe_temp)
        tdms_file._import_cryo_temp(tdms_cryo_temp)
    if show:
        print('Imported file from ' + str(path))
    if include_path:
        return tdms_file, path
    return tdms_file
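
A minimal usage sketch of the function above; the file path and flag values are illustrative placeholders, not taken from the original project:

# Hypothetical call of _import_file from the example above.
data_file, source_path = _import_file(
    'measurement.tdms', show=True, include_path=True)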
Example #3
def extract_from_tdms(file, var_lst):
    """
    Extract data from a .tdms file according to the requested channels and
    return it as a dict.
    """
    data = {}
    channels = var2channel(var_lst)
    # Read the file once and reuse its first group for every channel.
    group = TdmsFile.read(file).groups()[0]
    for var, channel in zip(var_lst, channels):
        data[var] = group[channel][:]

    if is_waveform(group[channels[0]]):
        data['time_PXI2_HF'] = group[channels[0]].time_track()
        data['time_abs_PXI2_HF'] = group[channels[0]].time_track(
            absolute_time=True, accuracy='us')

    return data
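
A hypothetical call; the variable names below are placeholders that the project's var2channel() helper is assumed to map to TDMS channel names:

# Hypothetical usage of extract_from_tdms from the example above.
data = extract_from_tdms('acquisition.tdms', ['pressure', 'velocity'])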
Example #4
def load_data(filename):
    """
            Used to load data from a tdms file.

        Parameters
        ----------
        filename : String
            The path for the "tdms" file we want to load.

        Returns
        -------
        list
            A list which contains data from Sensors [1, 4].

        """
    tdms_file = TdmsFile.read(filename)
    group = tdms_file['Untitled']
    if len(group) == 4:
        channel1 = group['Untitled']
        channel2 = group['Untitled 1']
        channel3 = group['Untitled 2']
        channel4 = group['Untitled 3']
    else:
        channel1 = group['Untitled 1']
        channel2 = group['Untitled 2']
        channel3 = group['Untitled 3']
        channel4 = group['Untitled 4']
    # -- end if
    dataC1 = channel1[:]
    dataC2 = channel2[:]
    dataC3 = channel3[:]
    dataC4 = channel4[:]
    return [dataC1, dataC2, dataC3, dataC4]
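
A hypothetical call of load_data; the filename is a placeholder:

# Hypothetical usage of load_data from the example above.
sensor1, sensor2, sensor3, sensor4 = load_data('recording.tdms')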
Example #5
    def find_all_channel_units(self):
        all_units = []
        if self.format == 'tdms':
            for channel in TdmsFile.read(self.file).groups()[0].channels():
                all_units.append(channel.properties['Unit'])
        elif self.format == 'MAT':
            data = sio.loadmat(self.file, squeeze_me=True)
            for key in data.keys():
                if 'Channel' in key and 'Header' in key:
                    all_units.append(data[key].tolist()[1])
        elif self.format == 'mat':
            all_tags = self.find_all_channel_tags()
            for tag in all_tags:
                if tag.endswith('_X'):
                    unit = 's'
                else:
                    if tag.startswith('P'):
                        unit = 'Pa'
                    elif tag.startswith('V'):
                        unit = 'm/s^2'
                    elif tag.startswith('SYNC'):
                        unit = 'V'
                    else:
                        unit = None
                        print(tag, 'does not have a unit implemented.')
                all_units.append(unit)
        elif self.format == 'txt':
            with open(self.file, 'r') as txt:
                for line in txt:
                    if 'x' in line and 'y' in line:
                        cols = line.split()
                        # Append one unit per header column (e.g. "x[s]")
                        # instead of indexing into the empty list.
                        for col in cols:
                            all_units.append(col.split('[')[1].split(']')[0])
        return all_units
Example #6
    def read_tdms_to_df(self):

        tdms_file = TdmsFile.read(self.file_path)

        # get groups
        for group in tdms_file.groups():
            self.group_names.append(group.name)

        # get channels (note: this uses the last group from the loop above)
        for channel in group.channels():
            self.channel_names.append(channel.name)

        # set current group and channel
        self.selected_group = self.group_names[0]
        self.selected_channel = self.channel_names[0]

        signal_data_df = tdms_file[self.selected_group].as_dataframe()

        # get time arrays
        date_time_dt64 = channel.time_track(
            absolute_time=True)  #, accuracy='ns'
        test_time_dt64 = channel.time_track(absolute_time=False)

        # convert time = numpy array to pandas df
        date_time_df = pd.DataFrame(date_time_dt64, columns=['date_time'])
        test_time_df = pd.DataFrame(test_time_dt64, columns=['test_time'])

        # merge date_time, test_time and data
        self.dataframe = pd.concat(
            [date_time_df, test_time_df, signal_data_df],
            axis=1).reindex(date_time_df.index)
Example #7
def test_read_raw_timestamp_data():
    """ Test reading timestamp data as a raw TDMS timestamps
    """
    test_file = GeneratedFile()
    seconds = 3672033330
    second_fractions = 1234567890 * 10**10
    test_file.add_segment(
        ("kTocMetaData", "kTocRawData", "kTocNewObjList"),
        segment_objects_metadata(
            channel_metadata("/'group'/'channel1'", 0x44, 4), ),
        hexlify_value("<Q", 0) + hexlify_value("<q", seconds) +
        hexlify_value("<Q", second_fractions) + hexlify_value("<q", seconds) +
        hexlify_value("<Q", 0) + hexlify_value("<q", seconds + 1) +
        hexlify_value("<Q", second_fractions) +
        hexlify_value("<q", seconds + 1))

    expected_seconds = np.array([seconds, seconds, seconds + 1, seconds + 1],
                                np.dtype('int64'))
    expected_second_fractions = np.array(
        [0, second_fractions, 0, second_fractions], np.dtype('uint64'))

    with test_file.get_tempfile() as temp_file:
        tdms_data = TdmsFile.read(temp_file.file, raw_timestamps=True)
        data = tdms_data['group']['channel1'][:]
        assert isinstance(data, TimestampArray)
        np.testing.assert_equal(data.seconds, expected_seconds)
        np.testing.assert_equal(data.second_fractions,
                                expected_second_fractions)
Example #8
def read_tdms(path, groups=None):
    """
    Read data from a TDMS file.

    Parameters
    ----------
    path : str or pathlib.Path
        Path to the TDMS file to be read.

    groups : list of str, optional
        Names of the groups inside the TDMS file to load. By default all
        groups are loaded; note that the current implementation still reads
        the whole file and only restricts which groups end up in the
        returned dataframe (see the TODO below).

    Returns
    -------
    pandas.DataFrame
        A dataframe containing the data from the TDMS file.

    """
    with TdmsFile.read(path, memmap_dir=gettempdir()) as tdms_file:
        if groups is None:
            df = tdms_file.as_dataframe()
        else:
            # TODO: Use TdmsFile.open instead of read, and only load desired groups
            data = []
            for group in groups:
                channel = tdms_file[group].channels()[0]
                group_data = tdms_file[group].as_dataframe()
                group_data = group_data.rename(
                    columns={channel.name: channel.path})
                data.append(group_data)
            df = pd.concat(data, axis=1)
    return df
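
A hypothetical call of read_tdms; the file path and group names are placeholders:

# Hypothetical usage of read_tdms from the example above.
df = read_tdms('log.tdms', groups=['Temperature', 'Pressure'])
print(df.head())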
Example #9
def test_read_with_index_file(test_file, expected_data):
    """ Test reading a file with an associated tdms_index file
    """
    with test_file.get_tempfile_with_index() as tdms_file_path:
        tdms_file = TdmsFile.read(tdms_file_path)

    for ((group, channel), expected_channel_data) in expected_data.items():
        channel_obj = tdms_file[group][channel]
        compare_arrays(channel_obj.data, expected_channel_data)
Example #10
    def __init__(self, path, derivative_offset=-0.1):
        self.derivative_offset = derivative_offset

        try:
            self.failed = False
            self.status = "UNKNOWN"
            self.cut_off_position = 0

            self.filename = os.path.basename(path)
            tdms_file = TdmsFile.read(path)

            group = tdms_file['Untitled']

            if 'Load' in group:
                load = group['Load'][:]
            else:
                load = group['Untitled 1'][:]

            if 'Position' in group:
                position = group['Position'][:]
            else:
                position = group['Untitled'][:]
            load, position = smooth_convolve(load, 700), smooth_convolve(position, 700)

            peek_load_index = np.argmax(load)  # for discharge
            self.original = Curve(load=load[:peek_load_index],
                                  position=position[:peek_load_index],
                                  load_discharge=load[peek_load_index:],
                                  position_discharge=position[peek_load_index:])
            #self.original.plot()

            load, position = smooth_convolve(load, 70), smooth_convolve(position, 70)

            load, position = smooth_fun(load, 70), smooth_fun(position, 70)
            load, position = smooth_convolve(load, 50), smooth_convolve(position, 50)
            # load, position = smooth_convolve(load, 20), smooth_convolve(position, 20)


            peek_load_index = np.argmax(load)  # for discharge

            self.original_smooth = Curve(load=load[:peek_load_index],
                                         position=position[:peek_load_index],
                                         load_discharge=load[peek_load_index:],
                                         position_discharge=position[peek_load_index:])
            #self.original.plot()
            self.stiffness_curve = None
            self.offset_load = None
            self.cut_off_curve = None
            self.mirror_offset_curve = None

            self.process()
            self.validity_check()
        except Exception:
            print(self.filename)
            traceback.print_exc()
            print('-----------------------------------')
            self.failed = True
Example #11
    def loadTMDS(self, file_paths):
        samp_v = []
        for file_path in file_paths:
            print(str(file_path), 'loading...')
            tdms_file = TdmsFile.read(file_path)
            channel_object = tdms_file.groups()[0].channels()[0][:]
            samp_v.extend(channel_object)
        samp_v = np.array(samp_v)
        return samp_v
Example #12
def test_read_with_mismatching_index_file():
    """ Test that reading data when the index file doesn't match the data file raises an error
    """

    test_file = GeneratedFile()
    test_file.add_segment(
        ("kTocMetaData", "kTocRawData", "kTocNewObjList"),
        segment_objects_metadata(
            channel_metadata("/'group'/'channel1'", 3, 2),
            channel_metadata("/'group'/'channel2'", 3, 2),
        ),
        "01 00 00 00" "02 00 00 00"
        "03 00 00 00" "04 00 00 00"
    )
    test_file.add_segment(
        ("kTocMetaData", "kTocRawData", "kTocNewObjList"),
        segment_objects_metadata(
            channel_metadata("/'group'/'channel1'", 3, 2),
            channel_metadata("/'group'/'channel2'", 3, 2),
        ),
        "01 00 00 00" "02 00 00 00"
        "03 00 00 00" "04 00 00 00"
    )

    test_file_with_index = GeneratedFile()
    test_file_with_index.add_segment(
        ("kTocMetaData", "kTocRawData", "kTocNewObjList"),
        segment_objects_metadata(
            channel_metadata("/'group'/'channel1'", 3, 3),
            channel_metadata("/'group'/'channel2'", 3, 3),
        ),
        "01 00 00 00" "02 00 00 00" "03 00 00 00"
        "04 00 00 00" "05 00 00 00" "06 00 00 00"
    )
    test_file_with_index.add_segment(
        ("kTocMetaData", "kTocRawData", "kTocNewObjList"),
        segment_objects_metadata(
            channel_metadata("/'group'/'channel1'", 3, 3),
            channel_metadata("/'group'/'channel2'", 3, 3),
        ),
        "01 00 00 00" "02 00 00 00" "03 00 00 00"
        "04 00 00 00" "05 00 00 00" "06 00 00 00"
    )

    with test_file.get_tempfile(delete=False) as tdms_file:
        with test_file_with_index.get_tempfile_with_index() as tdms_file_with_index_path:
            # Move index file from second file to match the name of the first file
            new_index_file = tdms_file.name + '_index'
            copyfile(tdms_file_with_index_path + '_index', new_index_file)
            try:
                tdms_file.file.close()
                with pytest.raises(ValueError) as exc_info:
                    _ = TdmsFile.read(tdms_file.name)
                assert 'Check that the tdms_index file matches the tdms data file' in str(exc_info.value)
            finally:
                os.remove(new_index_file)
                os.remove(tdms_file.name)
Example #13
def test_close_after_read():
    test_file, _ = scenarios.single_segment_with_one_channel().values
    temp_file = test_file.get_tempfile(delete=False)
    try:
        temp_file.file.close()
        tdms_data = TdmsFile.read(temp_file.name)
        tdms_data.close()
    finally:
        os.remove(temp_file.name)
Example #14
def read(filename: str, **kwargs) -> xr.Dataset:
    tdms_file = TdmsFile.read(filename)
    for group in tdms_file.groups():
        # Requires rewriting if the tdms file has multiple groups.
        df = group.as_dataframe()
        for channel in group.channels():
            # Remove '\n' and spaces from channel names, renaming the
            # dataframe columns in place.
            df.rename({channel.name: channel.name.strip()}, axis=1,
                      inplace=True)
    return df.to_xarray()
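
A hypothetical call, assuming the read function above is available at module level; the filename is a placeholder:

# Hypothetical usage of read from the example above.
ds = read('scan.tdms')
print(ds)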
Example #15
def test_read_raw_channel_data_slice():
    """Test reading a slice of raw channel data"""

    test_file, expected_data = scenarios.single_segment_with_one_channel().values
    with test_file.get_tempfile() as temp_file:
        tdms_file = TdmsFile.read(temp_file.file)
        for ((group, channel), expected_data) in expected_data.items():
            actual_data = tdms_file[group][channel].read_data(offset=1, length=2, scaled=False)
            assert actual_data.dtype == expected_data.dtype
            compare_arrays(actual_data, expected_data[1:3])
Example #16
def test_indexing_channel_after_read_data():
    """ Test indexing into a channel after reading all data
    """
    test_file, expected_data = scenarios.chunked_segment().values
    with test_file.get_tempfile() as temp_file:
        tdms_file = TdmsFile.read(temp_file.file)
    for ((group, channel), expected_channel_data) in expected_data.items():
        channel_object = tdms_file[group][channel]
        assert channel_object[0] == expected_channel_data[0]
        compare_arrays(channel_object[:], expected_channel_data)
Example #17
    def load_sdata(self, del_data=True, skip_cal=False):
        if (del_data == True):
            self.delete_data()

        self.prop['fns'] = filedialog.askopenfilenames(
            filetypes=[("TDMS", "*.tdms")],
            title='open s-polarized excitation data')
        self.prop['Nf'] = len(self.prop['fns'])

        self.prop['PxCols'] = list(
            tdms(self.prop['fns'][0]).properties.items())[11][1]
        self.prop['PxRows'] = list(
            tdms(self.prop['fns'][0]).properties.items())[12][1]

        self.sIs = np.zeros(
            (self.prop['PxCols'], self.prop['PxRows'], self.prop['Nf']))
        self.sIp = np.zeros(
            (self.prop['PxCols'], self.prop['PxRows'], self.prop['Nf']))

        for i in range(self.prop['Nf']):
            # channel 2 is the PBS reflection path -> s-polarized light
            sDat = tdms.read(self.prop['fns'][i]).groups()[0].channels()[1].data
            # channel 3 is the PBS transmission path -> p-polarized light
            pDat = tdms.read(self.prop['fns'][i]).groups()[0].channels()[2].data

            rows = self.prop['PxRows']
            for j in range(self.prop['PxCols']):
                self.sIs[j, :, i] = sDat[j * rows:(j + 1) * rows]
                self.sIp[j, :, i] = pDat[j * rows:(j + 1) * rows]

        print(np.mean(self.sIp))
        print(np.mean(self.sIs))

        # beware of double processing when using the calibrate function
        if (skip_cal == False):
            self.axelrod()
            self.s_sensitivity(missing=False)
Example #18
def f_open_tdms(filename, channel):
    if filename == 'Input':
        filename = filedialog.askopenfilename()

    file = TdmsFile.read(filename)
    all_groups = file.groups()
    group = all_groups[0]
    data_channel = group[channel]
    data = data_channel[:]

    return data
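
A hypothetical call of f_open_tdms; passing 'Input' opens a file dialog, and the channel name below is a placeholder:

# Hypothetical usage of f_open_tdms from the example above.
voltage = f_open_tdms('Input', 'Voltage')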
Example #19
    def write_array_data(self,
                         filepath,
                         groupName,
                         rawData,
                         tags={},
                         sampleInfo={
                             "batchId": None,
                             "batchName": '',
                             "sampleId": None
                         }):
        nextIndexOfChannel = 0
        hasGroup = False
        root_object = RootObject(properties=sampleInfo)
        group_object = GroupObject(groupName,
                                   properties={'data_form': 'array'})
        channel_object = ChannelObject(groupName,
                                       'm0',
                                       rawData,
                                       properties=tags)
        if os.path.exists(filepath):
            tdms_file = TdmsFile.read(filepath)
            original_groups = tdms_file.groups()
            original_channels = [
                chan for group in original_groups for chan in group.channels()
            ]

            try:
                hasGroup = tdms_file[groupName] is not None
            except KeyError:
                hasGroup = False
            print(f'has group? {hasGroup}')
            if hasGroup:
                channels = tdms_file[groupName].channels()
                print(channels)
                nextIndexOfChannel = len(channels)
            channelName = f'm{nextIndexOfChannel}'
            channel_object = ChannelObject(groupName,
                                           channelName,
                                           rawData,
                                           properties=tags)
            with TdmsWriter(filepath, mode='a') as tdms_writer:
                # root_object = RootObject(tdms_file.properties)
                # channels_to_copy = [chan for chan in original_channels]
                # channels_to_copy.append(channel_object)
                # tdms_writer.write_segment([root_object] + original_groups + channels_to_copy)
                # Write first segment
                tdms_writer.write_segment(
                    [root_object, group_object, channel_object])
        else:
            with TdmsWriter(filepath) as tdms_writer:
                # Write first segment
                tdms_writer.write_segment(
                    [root_object, group_object, channel_object])
Example #20
def test_read_data_after_open_in_read_mode_throws():
    """ Trying to read channel data after reading all data initially should throw
    """
    test_file, expected_data = scenarios.single_segment_with_one_channel(
    ).values
    group, channel = list(expected_data.keys())[0]
    with test_file.get_tempfile() as temp_file:
        tdms_file = TdmsFile.read(temp_file.file)
        with pytest.raises(RuntimeError) as exc_info:
            tdms_file[group][channel].read_data()
        assert "Cannot read data after the underlying TDMS reader is closed" in str(
            exc_info.value)
Example #21
def test_read_channel_data(test_file, expected_data):
    """Test reading data"""

    with test_file.get_tempfile() as temp_file:
        tdms_data = TdmsFile.read(temp_file.file)

    for ((group, channel), expected_data) in expected_data.items():
        channel_obj = tdms_data[group][channel]
        actual_data = channel_obj.data
        assert actual_data.dtype == expected_data.dtype
        assert channel_obj.dtype == expected_data.dtype
        compare_arrays(actual_data, expected_data)
Example #22
def test_iterate_channel_data_in_read_mode():
    """Test iterating over channel data after reading all data
    """
    test_file, expected_data = scenarios.chunked_segment().values

    with test_file.get_tempfile() as temp_file:
        tdms_file = TdmsFile.read(temp_file.file)
        for ((group, channel), expected_channel_data) in expected_data.items():
            actual_data = []
            for value in tdms_file[group][channel]:
                actual_data.append(value)
            compare_arrays(actual_data, expected_channel_data)
Example #23
def f_open_tdms(filename, channel):
    if filename == 'Input':
        filename = filedialog.askopenfilename()
    file = TdmsFile.read(filename)
    all_groups = file.groups()
    group = all_groups[0]
    try:
        data_channel = group[channel]
        data = data_channel[:]
    except KeyError:
        # Avoid a NameError on the return below when the channel is missing.
        print('***error channel, try: ')
        print(group.channels())
        return None
    return data
Example #24
def test_iterate_file_and_groups():
    """ Test iterating over TdmsFile and TdmsGroup uses key values
    """
    test_file, expected_data = scenarios.chunked_segment().values

    with test_file.get_tempfile() as temp_file:
        tdms_file = TdmsFile.read(temp_file.file)
        for group_name in tdms_file:
            group = tdms_file[group_name]
            for channel_name in group:
                channel = group[channel_name]
                expected_channel_data = expected_data[(group_name, channel_name)]
                compare_arrays(channel.data, expected_channel_data)
Example #25
def read_tdms(pathname, keys, groupname):
    """
    Read data from TDMS file

    Args:
        pathname: name of the tdms file, which will be read
        keys: list of channel names for each column
        groupname: name of group, under which the channels are placed
    Returns:
        list of numpy arrays, one per requested channel
    """
    tdms_file = TdmsFile.read(pathname)
    return [tdms_file[groupname][key].data for key in keys]
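
A hypothetical call of read_tdms; the file, channel, and group names are placeholders:

# Hypothetical usage of read_tdms from the example above.
force, torque = read_tdms('run_01.tdms', ['Force', 'Torque'], 'Measurements')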
Example #26
def test_read_file_passed_as_pathlib_path():
    """ Test reading a file when using a pathlib Path object
    """
    import pathlib

    test_file, expected_data = scenarios.single_segment_with_one_channel().values

    with test_file.get_tempfile_with_index() as tdms_file_path_str:
        tdms_file_path = pathlib.Path(tdms_file_path_str)
        tdms_file = TdmsFile.read(tdms_file_path)

    for ((group, channel), expected_channel_data) in expected_data.items():
        channel_obj = tdms_file[group][channel]
        compare_arrays(channel_obj.data, expected_channel_data)
Example #27
def tdmsTransform(directory, filePath):
    # Find .tdms files inside the target directory.
    reqFiletdms = glob.glob(directory + '*.tdms')
    tdms_file = td.read(reqFiletdms[0])
    num_groups = len(tdms_file.groups())
    if num_groups == 1:
        group = tdms_file['FTA_CTA_Train']

        time = tdms_file['FTA_CTA_Train']["TriggerSys1"].properties[
            "wf_start_time"]
        t = time.astype(datetime.datetime)
        df = group.as_dataframe(time_index=False, absolute_time=True)
        with open(filePath + t.strftime("%Y%m%d%H%M%S") + '.txt', 'a+') as f:
            f.write(df.to_string(header=True, index=False))
    else:
        return
Example #28
    def extract_all_channels(self):
        all_tags = self.find_all_channel_tags()
        all_units = self.find_all_channel_units()
        if self.format == 'tdms':
            raw_data = TdmsFile.read(self.file).groups()[0]
            for tag, unit in zip(all_tags, all_units):
                self.add_channel(DataChannel(tag, unit, raw_data[tag][:]))
            if raw_data.channels()[0].is_waveform():
                self.add_channel(
                    DataChannel.time_channel(
                        'relative',
                        raw_data.channels()[0].time_track()))
                self.add_channel(
                    DataChannel.time_channel(
                        'absolute',
                        raw_data.channels()[0].time_track(absolute_time=True,
                                                          accuracy='us')))
        elif self.format in ['mat', 'MAT']:
            raw_data = sio.loadmat(self.file, squeeze_me=True)
            for key, tag, unit in zip(raw_data.keys(), all_tags, all_units):
                if 'Channel' in key and 'Data' in key:
                    self.add_channel(DataChannel(tag, unit, raw_data[key]))
        elif self.format == 'txt':
            # One list of raw values per channel tag.
            raw_data = [[] for _ in all_tags]
            with open(self.file, 'r') as txt:
                for line in txt:
                    if 'ch' not in line and line != '\n':
                        for i in range(len(all_tags)):
                            raw_data[i].append(line.split()[i])
            for tag, unit, data in zip(all_tags, all_units, raw_data):
                self.add_channel(
                    DataChannel(tag, unit, np.array(list(map(float, data)))))

        for channel in self.channels:
            if channel.tag in [
                    'System Time',
                    'Time__1_-_default_sample_rate',
                    'x',
            ] or channel.tag.endswith('_X'):
                self.replace_channel(
                    channel, DataChannel.time_channel('relative',
                                                      channel.data))
            elif channel.tag == 'Absolute Time':
                self.replace_channel(
                    channel, DataChannel.time_channel('absolute',
                                                      channel.data))
        return print('All data extracted from', self.name)
Example #29
    def calculate_sample_rate(self, file_paths, file_n):
        # Compare the date in this file's name with the date in the next
        # file's name to get the elapsed time, then count the samples to
        # work out the sampling rate.
        with TdmsFile.read(file_paths[file_n]) as tdms_file:
            df = tdms_file.as_dataframe(scaled_data=False)  # pandas DataFrame
            datas = df.to_numpy()  # convert so the values can be used as an array
            datas = np.transpose(datas)  # without transposing, the FFT runs along the wrong axis

        time1 = datetime.datetime.strptime(self.filename_list[file_n],
                                           '%Y%m%d_%H%M%S')
        time2 = datetime.datetime.strptime(self.filename_list[file_n + 1],
                                           '%Y%m%d_%H%M%S')

        time_diff = time2 - time1

        sample_rate = time_diff.seconds / np.shape(datas)[1]

        return sample_rate