Example #1
def get_period(files):
    """
    A function to get the stimulation onset and offset times
    (for the whole stimulation block), in ms, relative to the start
    of the data file.
    Args:
        -files: list of tdms files that contain the stim waveform data
    Returns:
        -start, stop: times, in ms
    """
    global stim_chan
    ##make sure that the files are in the correct order
    files = order_files(files)
    start = None
    stop = None
    files = iter(files)
    ##this will churn through the files until a start value is found
    while start is None:
        path = next(files)
        print("Loading {}".format(path))
        tdms_file = nptdms.TdmsFile(path)
        ##here are a couple different possibilities for how things could be named
        try:
            channel_object = tdms_file.object('Group Name', stim_chan)
        except KeyError:
            try:
                channel_object = tdms_file.object('ephys', stim_chan)
            except KeyError:
                channel_object = tdms_file.object('Untitled', stim_chan)
        start, end = find_stim(channel_object)
        if start is not None:
            print("Found the start")
            stop = end
    print("Moving on to the end detection")
    ##now we'll keep going until there aren't any stim pulses detected;
    ##it's possible that all of the stim data is in the first file
    while end is not None:
        ##if end is not None, we must have found a continuation of the stim block in the previous file,
        ##so update 'stop' to reflect this
        stop = end
        path = next(files)
        print("Loading {}".format(path))
        tdms_file = nptdms.TdmsFile(path)
        ##here are a couple different possibilities for how things could be named
        try:
            channel_object = tdms_file.object('Group Name', stim_chan)
        except KeyError:
            try:
                channel_object = tdms_file.object('ephys', stim_chan)
            except KeyError:
                channel_object = tdms_file.object('Untitled', stim_chan)
        ##here we want to ignore any new values of start, because the start value we found in
        ##an earlier file should be the true start
        ignore, end = find_stim(channel_object)
    return start, stop
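A note on the API used throughout these examples: tdms_file.object(group, channel) belongs to the legacy npTDMS interface, which was dropped in npTDMS 1.0. A minimal sketch of the same group-name fallback against the 1.x interface, where files are indexed like dictionaries and unknown names raise KeyError (get_stim_channel is a hypothetical helper name, not part of the original code):

import nptdms

def get_stim_channel(path, stim_chan):
    #TdmsFile.read() is the nptdms >= 1.0 entry point
    tdms_file = nptdms.TdmsFile.read(path)
    #try the same candidate group names the example above guesses at
    for group_name in ('Group Name', 'ephys', 'Untitled'):
        try:
            return tdms_file[group_name][stim_chan]
        except KeyError:
            continue
    raise KeyError("no group with channel {!r} in {}".format(stim_chan, path))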
Example #2
def load_ephys(path, resample=False, load_time=True, index=0):
    """
    A function to load data from ephys files, and
    resample them to a lower rate if necessary
    Args:
        -path: full path to the datafile
        -resample: if False, loads the full dataset. If a number is given,
            it will resample the data to roughly that sample rate (in Hz)
        -load_time: if True, loads the duration of the recording in seconds
        -index: useful for ordering after asynchronous multiprocessing
    Returns:
        -data: dictionary with labeled data arrays
        -index: the pass-through ordering index
    """
    ##load the file
    tdms_file = nptdms.TdmsFile(path)
    ##figure out which channels here are ephys channels
    ephys_chans = get_ephys_chans(tdms_file)
    data = {}
    for chan in ephys_chans:
        try:
            channel_object = tdms_file.object('Group Name', chan)
        except KeyError:
            ##case where the group name is different
            channel_object = tdms_file.object("ephys", chan)
        if resample:
            chan_data = downsample(channel_object, resample)
        else:
            chan_data = channel_object.data
        data[chan] = chan_data
    if load_time:
        data['time'] = get_duration_seconds(channel_object)
    return data, index
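Several of these loaders rely on a project-specific downsample(channel_object, resample) helper that is never shown. A plausible sketch, assuming plain stride-based decimation is acceptable (the real helper may well apply an anti-aliasing filter first, e.g. scipy.signal.decimate):

import numpy as np

def downsample(channel_object, target_fs):
    #the native sample rate comes from the waveform increment property
    fs = 1.0 / channel_object.properties['wf_increment']
    #keep every step-th sample to land roughly at target_fs Hz
    step = max(int(round(fs / float(target_fs))), 1)
    return np.asarray(channel_object.data)[::step]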
Example #3
def load_signal_from_tdms(file_path, param):
    """Load the channel described by param ('group_name'/'channel_name') from
    a TDMS file, optionally plot it when param['plot']['do'] is set, and
    return the tuple (data, time, data_param)."""
    tdms_file = nptdms.TdmsFile(file_path)

    channel = tdms_file.object(param['group_name'], param['channel_name'])

    data = channel.data
    time = channel.time_track()

    file_name = file_path[::-1][:file_path[::-1].find('/')][::-1]  # basename via double reversal (os.path.basename would also work)
    t_n = time[-1]
    N = data.size
    dt = t_n / N
    fs = int(round(np.reciprocal(dt), 0))
    dt = np.reciprocal(float(fs))
    data_param = {'t_n': t_n, 'N': N, 'dt': dt, 'fs': fs, 'f_name': file_name}

    if param["plot"]["do"]:
        plt.plot(time, data)
        plt.xlabel(param["plot"]["x_label"], fontsize=16)
        plt.ylabel(param["plot"]["y_label"], fontsize=16)
        plt.title(param["plot"]["title"], fontsize=16)
        plt.show()

    return data, time, data_param
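One caveat on the dt = t_n / N computation above: time_track() returns N timestamps, so for a track starting at zero the last value is (N-1)*dt, and dividing by N underestimates the true increment by one sample. For LabVIEW waveform channels the increment is stored directly on the channel, so a safer sketch is:

#sampling interval straight from the waveform properties, no off-by-one
dt = channel.properties['wf_increment']
fs = int(round(1.0 / dt))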
Example #4
File: load_rc.py Project: ryanneely/rc
def order_stim_arrays(file_dict):
    """
    Takes in a dictionary of stimulation files, and returns lists of stim
    monitor data arrays, ordered according to which one happened first
    (according to the datetime stamp).
    Args:
        -file_dict: dictionary of stim amplitude:file path pairs
    Returns:
        -timestamps: list of datetime.datetime timestamps
        -amps: list of stim amplitudes, in mA
        -stim_data: list of raw stim data arrays
    """
    ##first thing to do is to split out the pieces we need into separate lists
    stim_amps = list(file_dict.keys())
    files = list(file_dict.values())
    ##convert the amps to float values in mA
    stim_amps = [utils.standardize_amps(x) for x in stim_amps]
    ##now we want to load the data, and get the timestamps
    timestamps = []
    stim_data = []
    for f in files:
        tdms_file = nptdms.TdmsFile(f)
        ##now find the address of the stim array data
        group, channel = utils.search_stim(tdms_file)
        data = tdms_file.object(group, channel)
        ##grab the timestamp in datetime.datetime format
        timestamps.append(get_tstart(data))
        ##now extract the stim data
        stim_data.append(data.data)
    ##now order things according to the datetime stamp
    idx = list(np.argsort(timestamps))
    return ([timestamps[i] for i in idx], [stim_amps[i] for i in idx],
            [stim_data[i] for i in idx])
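The get_tstart() helper is not included in these examples. A plausible sketch, assuming the start time lives in the channel's standard 'wf_start_time' waveform property (npTDMS exposes it as a numpy datetime64):

import datetime

def get_tstart(channel_object):
    #'wf_start_time' is the LabVIEW waveform start timestamp
    ts = channel_object.properties['wf_start_time']
    #convert numpy datetime64 -> datetime.datetime so the timestamps
    #can be compared and sorted as regular datetimes
    return ts.astype('datetime64[us]').astype(datetime.datetime)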
Example #5
def test_processes_example_file_correct(tmpdir):
    meta = source.MetaData(
        chunk_size=6,
        recurrence_size=2,
        recurrence_distance=3,
        consistency_sample_size=10,
    )
    output_filename = pathlib.Path(tmpdir) / "output.tdms"

    fix.export_correct_data(
        tdms_path="tests/assets/example_file.tdms",
        meta=meta,
        export_path=output_filename,
    )
    tdms_operator = nptdms.TdmsFile(output_filename)
    df_result = tdms_operator.as_dataframe()
    assert len(df_result) == 15
    assert np.array_equal(
        df_result.columns.values,
        np.array([
            "/'Untitled'/'D'",
            "/'Untitled'/'C'",
            "/'Untitled'/'B'",
            "/'Untitled'/'A'",
        ]),
    )
    assert np.array_equal(df_result["/'Untitled'/'A'"].values,
                          np.arange(1, 16))
Example #6
def load_stim(path, offset=0):
    """
    A function to load a stim channel from a TDMS file, and extract the times when stimulation is "on"
    Args: 
        -path: full path to the datafile
        -offset: the number of samples to offset the start,stop sample values by (in case we are concatenating multiple files)
    Returns:
        -start: start times of stim wf
        -stop: end times of stim wf
        -z: binary stim on/off array for full sample rate
        -fs: sample rate for this dataset
    """
    global stim_chan
    ##load the file
    tdms_file = nptdms.TdmsFile(path)
    try:
        channel_object = tdms_file.object('Group Name', stim_chan)
    except KeyError:
        try:
            channel_object = tdms_file.object('ephys', stim_chan)
        except KeyError:
            channel_object = tdms_file.object('Untitled', stim_chan)
    raw = channel_object.data
    fs = 1 / channel_object.properties['wf_increment']
    ##process the stim output (guessing on parameters here)
    start, stop, z = get_stim_times(raw, 0.1, -0.1, 25)
    start = start + offset
    stop = stop + offset
    return start, stop, z, fs
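get_stim_times() is another helper that never appears in these snippets. A minimal sketch of a threshold-crossing detector matching the (raw, 0.1, -0.1, 25) call above; the parameter names hi, lo, and min_gap are guesses:

import numpy as np

def get_stim_times(raw, hi, lo, min_gap):
    #binary stim on/off trace at the full sample rate
    z = ((raw > hi) | (raw < lo)).astype(np.int8)
    #rising/falling edges; zero-padding both ends closes any boundary pulse
    edges = np.diff(z, prepend=0, append=0)
    start = np.flatnonzero(edges == 1)
    stop = np.flatnonzero(edges == -1)
    #merge pulses separated by fewer than min_gap samples
    keep = np.ones(start.size, dtype=bool)
    last = 0
    for i in range(1, start.size):
        if start[i] - stop[last] < min_gap:
            keep[i] = False
            stop[last] = stop[i]
        else:
            last = i
    return start[keep], stop[keep], z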
Example #7
def load_bp(path, resample=False, load_time=True, index=0):
    """
    A function to load data from blood pressure monitors, and
    resample them to a lower rate if necessary
    Args:
        -path: full path to the datafile
        -resample: if False, loads the full dataset. If a number is given,
            it will resample the data to roughly that sample rate (in Hz)
        -load_time: if True, loads the duration of the recording in seconds
        -index: useful for ordering after asynchronous multiprocessing
    Returns:
        -data: dictionary with labeled data arrays
        -index: the pass-through ordering index
    """
    global bp_chans
    ##load the file
    tdms_file = nptdms.TdmsFile(path)
    data = {}
    for chan in bp_chans:
        channel_object = tdms_file.object('Group Name', chan)
        if resample:
            chan_data = downsample(channel_object, resample)
        else:
            chan_data = channel_object.data
        data[chan] = chan_data
    if load_time:
        data['time'] = get_duration_seconds(channel_object)
    return data, index
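get_duration_seconds() is assumed by several loaders here; given the waveform properties used elsewhere in these examples, a one-line sketch:

def get_duration_seconds(channel_object):
    #sample count times the waveform increment gives the length in seconds
    return len(channel_object.data) * channel_object.properties['wf_increment']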
Example #8
File: common.py Project: msb002/mhdpy
def tdms2df(filepath):
    filename = os.path.split(filepath)[1]
    ext = os.path.splitext(filename)[1]
    if ext != ".tdms":
        print("File was not a tdms file")
        return None

    tdmsfile = nptdms.TdmsFile(filepath)
    df = tdmsfile.as_dataframe()

    #test if this is a waveform channel
    channel1 = tdmsfile.group_channels(tdmsfile.groups()[0])[0]
    waveform = True
    try:
        channel1.time_track()
    except KeyError:
        waveform = False
    #find the longest waveform
    if waveform:
        longestchannel = None
        length = 0
        for group in tdmsfile.groups():
            for channel in tdmsfile.group_channels(group):
                newlength = len(channel.data)
                if newlength > length:
                    length = newlength
                    longestchannel = channel
        timedata = longestchannel.time_track(absolute_time=True)
        df = df.set_index(timedata)

    return df
Example #9
def load_physio(path, resample=False, load_time=True):
    """
    A function to load a TDMS dataset acquired from the SomnoSuite
    serial pipe.
    Args:
        -path: full path to the datafile
        -resample: if a number, resamples the data to 'resample' Hz
        -load_time: if True, includes the duration of the recording in seconds
    Returns:
        -data: dictionary of data arrays arranged by channel names
    """
    global serial_chans
    ##load our file
    tdms_file = nptdms.TdmsFile(path)
    ##load the data into arrays and put into a dictionary
    data = {}
    for chan in list(serial_chans.keys()):
        channel_object = tdms_file.object('Untitled', chan)
        if resample:
            chan_data = downsample(channel_object, resample)
        else:
            chan_data = channel_object.data
        data[serial_chans[chan]] = chan_data
    if load_time:
        ##not sure why I need to use this scale factor here, but LabView seems
        ##to be saving the wf increment at the wrong value (1 instead of 10?)
        data['time'] = get_duration_seconds(channel_object) / 10.0
    return data
Example #10
    def read_data(self):
        tdms_file = nptdms.TdmsFile(self.name)  # Reads a tdms file.
        root_object = tdms_file.object()  # tdms file information

        for name, value in root_object.properties.items():
            print("{0}: {1}".format(name, value))

        group_name = "Trap"
        channels = tdms_file.group_channels(group_name)
        self.ch_num = len(channels)
        self.ch_name = [str(channels[i].channel) for i in range(len(channels))]
        self.dt = channels[0].properties[u'wf_increment']  # Sampling time
        self.fs = int(1.0 / self.dt)  # Sampling frequency
        self.N = len(channels[0].time_track())  # Number of time points

        print("Channel number: %d" % self.ch_num)
        print("Channel name: %s" % self.ch_name)
        print("Sampling rate: %d Hz" % self.fs)
        print("Data size: %d sec \n" % int(self.N * self.dt))

        PZT_nm2V = [5000, 5000, 3000]  # PZT Volt to nm conversion factor
        self.QPD_nm2V = QPD_nm2V  # QPD sensitivity (nm/V) at V_sum = 8 V.
        self.stiffness_pN2nm = stiffness_pN2nm  # Stiffness [pN/nm]

        # Read data
        self.t = channels[0].time_track()
        self.QPDy = (channels[1].data - np.median(
            reject_outliers(channels[1].data))) * self.QPD_nm2V[1]
        self.PZTy = -(channels[4].data -
                      np.median(channels[4].data)) * PZT_nm2V[1]
        self.QPDs = (channels[2].data)
        self.QPDy = self.QPDy / self.QPDs

        self.T = int(self.fs / f_drive)  # Oscillation period in number
Example #11
def read_tdms_file(file_name, channel_list):
    """
    return as Pandas DataFrame data of channels specified in <channel_list> from <file_name>
    :param file_name: string, the full name of a .tdms file
    :param channel_list: list of strings, list of channel names, must be contained in the tdms file
    :return: pandas DataFrame, each column corresponding to one element of <channel_list>
    """
    tdms_file = nptdms.TdmsFile(file_name)
    df = tdms_file.as_dataframe(time_index=True, absolute_time=True)
    # rename columns: original name "/'未命名'/'NLHQ-X-03-S05'", new name "NLHQ-X-03-S05"
    df.rename(columns=lambda name: name.split('/')[-1].strip('\''),
              inplace=True)

    return df[channel_list]
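Hypothetical usage (the file name and the second channel name are invented for illustration):

df = read_tdms_file('bridge_strain.tdms', ['NLHQ-X-03-S05', 'NLHQ-X-03-S06'])
print(df.head())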
Example #12
def copy_tdms(nwb, in_path, out_path, nrois):
    num_all_rois = nwb['/processing/Acquired_ROIs/roi_spec'].shape[0]
    print('Copying {} of {} ROIs from {} to {}'.format(
        nrois, num_all_rois, in_path, out_path))
    in_tdms = nptdms.TdmsFile(in_path)
    group_name = 'Functional Imaging Data'
    with nptdms.TdmsWriter(out_path) as out_tdms:
        root, group = in_tdms.object(), in_tdms.object(group_name)
        out_tdms.write_segment([root, group])
        for ch, channel in {'0': 'Red', '1': 'Green'}.items():
            ch_name = 'Channel {} Data'.format(ch)
            ch_obj = in_tdms.object(group_name, ch_name)
            shape = (cycles_per_trial(nwb), num_all_rois, -1)
            ch_data = ch_obj.data.reshape(shape)
            subset = ch_data[:, :nrois, :].reshape(-1)
            new_obj = nptdms.ChannelObject(group_name, ch_name, subset, properties={})
            out_tdms.write_segment([new_obj])
Example #13
def _convert_file(tdms_file_path: Path, hdf_dir: Path) -> None:
    """
    Converts the TDMS file at tdms_file_path to an HDF file using the nptdms converter.
    :param tdms_file_path: file path of the tdms file
    :param hdf_dir: directory for the hdf file (a dir instead of a file name is given because it is easier to handle with multiprocessing)
    """
    t_0 = time()
    with nptdms.TdmsFile(tdms_file_path) as tdms_file:
        logger.debug("reading tdms file  %40s     took: %10.10s sec",
                     tdms_file_path.stem,
                     time() - t_0)
        hdf_file_path = hdf_dir / tdms_file_path.with_suffix(".hdf").name
        t_0 = time()
        tdms_file.as_hdf(hdf_file_path, mode="w", group="/")
        logger.debug("tdms2hdf + writing %40s     took: %10.10s sec",
                     tdms_file_path.stem,
                     time() - t_0)
Example #14
def _load_raw_stim(path, group_name='Group Name'):
    """
    A function to load a stim channel from a TDMS file, and extract the
    raw values of stim and the sampling rate (usually 25kHz).
    Args:
        - path: full path to the datafile
        - group_name: Name of the group storing the stim data in the tdms file
    Returns:
        -raw: raw stim.
        -fs: sample rate for this dataset.
    """
    global stim_chan
    ##load the file
    tdms_file = nptdms.TdmsFile(path)
    # channel_object = tdms_file.object('Group Name', stim_chan)
    channel_object = tdms_file.object(group_name, stim_chan)
    raw = channel_object.data
    fs = 1/channel_object.properties['wf_increment']
    return raw, fs
Example #15
    def loadAndCalibrateSignals(self, path_to_signals):
        '''load the signals, calibrate, and correct them.

        Args: 
            path_to_signals (string): path to the signals.tdms file.
        '''

        tdms_file = nptdms.TdmsFile("signals.tdms")
        self.xchannel = tdms_file.object('position_data', 'xwaveFSD')
        self.ychannel = tdms_file.object('position_data', 'ywaveFSD')
        self.zchannel = tdms_file.object('position_data', 'zwaveFSD')
        power = tdms_file.object('position_data', 'powerFSD')

        self.powerArray = xr.DataArray(np.array(power.data),
                                       dims='time',
                                       coords={'time': power.time_track()},
                                       name='power')

        self.data_calibrated = xr.Dataset({
            'x': self._calibrateAndCorrect_Channel(self.xchannel, 0),
            'y': self._calibrateAndCorrect_Channel(self.ychannel, 1),
            'z': self._calibrateAndCorrect_Channel(self.zchannel, 2),
        })
Example #16
def check_correct_tdms_file(tmp_dir: pathlib.Path) -> None:
    """Checks if the tdms_file exists and if it contains the correct data.
    """
    # New TdmsFile exists with the correct naming
    path_to_corrected_tdms = tmp_dir / FILENAME_CORRECTED
    assert path_to_corrected_tdms.exists()

    # The corrected TdmsFile contains the desired data
    tdms_operator = nptdms.TdmsFile(path_to_corrected_tdms)
    df_result = tdms_operator.as_dataframe()
    assert len(df_result) == 15
    assert np.array_equal(
        df_result.columns.values,
        np.array([
            "/'Untitled'/'D'",
            "/'Untitled'/'C'",
            "/'Untitled'/'B'",
            "/'Untitled'/'A'",
        ]),
    )
    assert np.array_equal(df_result["/'Untitled'/'A'"].values,
                          np.arange(1, 16))
Example #17
def get_event_count_cache(fname):
    """Get the number of events from a tdms or avi file

    Parameters
    ----------
    fname: str
        Path to an experimental data file (tdms or avi)

    Returns
    -------
    event_count: int
        The number of events in the data set

    Notes
    -----
    The values for a file name are cached on disk using
    the file name and the first 100kB of the file as a
    key.
    """
    fname = pathlib.Path(fname).resolve()
    ext = fname.suffix
    # Generate key
    with fname.open(mode="rb") as fd:
        data = fd.read(100 * 1024)
    fhash = hashlib.md5(data + fname.as_uri().encode("utf-8")).hexdigest()
    cfgec = settings.SettingsFileCache(name="shapeout_tdms_event_counts.txt")
    try:
        event_count = cfgec.get_int(fhash)
    except KeyError:
        if ext == ".avi":
            with imageio.get_reader(str(fname)) as video:
                event_count = len(video)
        elif ext == ".tdms":
            tdmsfd = nptdms.TdmsFile(str(fname))
            event_count = len(tdmsfd.object("Cell Track", "time").data)
        else:
            raise ValueError("unsupported file extension: {}".format(ext))
        cfgec.set_int(fhash, event_count)
    return event_count
Example #18
def load_signal_from_tdms(file_path: str, group_name: str,
                          channel_name: str):
    '''
    Open a TDMS file and extract a single signal from it.
    Parameters
    ----------
    file_path : str
        Path of the TDMS file from which to extract the signal.
    group_name : str
        Name of the group that contains the signal of interest.
    channel_name : str
        Name of the signal of interest.

    Returns
    -------
    data : np.ndarray
        The signal itself.
    time : np.ndarray
        The time series of the signal.
    data_param : dict
        Basic parameters of the signal (t_n, N, dt, fs, f_name).

    '''
    tdms_file = nptdms.TdmsFile(file_path)

    channel = tdms_file.object(group_name, channel_name)

    data = channel.data
    time = channel.time_track()

    file_name = file_path[::-1][:file_path[::-1].find('/')][::-1]  # basename via double reversal (os.path.basename would also work)
    t_n = time[-1]
    N = data.size
    dt = t_n / N
    fs = int(round(np.reciprocal(dt), 0))
    dt = np.reciprocal(float(fs))
    data_param = {'t_n': t_n, 'N': N, 'dt': dt, 'fs': fs, 'f_name': file_name}

    return data, time, data_param
Example #19
    def _parse(self):
        for file_name in self.file_list:
            tdms_file = nptdms.TdmsFile(file_name)
            for channel_name in self.channel_list:
                channel_object = tdms_file.object(u'未命名', channel_name)

                # acquire this channel's 'wf_start_time' property
                # and get its timestamp value for JSON serialize
                start_time = channel_object.property('wf_start_time')
                timestamp = time.mktime(start_time.timetuple())
                tup = [timestamp]

                # acquire this channel's other properties
                others = [
                    v for k, v in channel_object.properties.items()
                    if k != 'wf_start_time'
                ]
                tup.extend(others)

                # acquire channel data
                data = channel_object.data.tolist()
                tup.append(data)

                yield tup
Example #20
    def read_data(self):
        tdms_file = nptdms.TdmsFile(self.name)  # Reads a tdms file.
        root_object = tdms_file.object()  # tdms file information

        for name, value in root_object.properties.items():
            print("{0}: {1}".format(name, value))

        group_name = "Trap"
        channels = tdms_file.group_channels(group_name)
        self.dt = channels[0].properties[u'wf_increment']  # Sampling time
        self.fs = int(1.0 / self.dt)  # Sampling frequency
        self.N = len(channels[0].time_track())  # Number of time points
        print("Sampling rate: %d Hz" % self.fs)
        print("Data size: %d sec \n" % int(self.N * self.dt))

        PZT_nm2V = [5000, 5000, 3000]  # PZT Volt to nm conversion factor

        # Read data
        t0 = channels[0].time_track()
        QPDy = (channels[1].data - np.median(reject_outliers(
            channels[1].data))) * self.QPD_nm2V[1]
        PZT0 = -(channels[4].data - np.median(channels[4].data)) * PZT_nm2V[1]
        QPDs = (channels[2].data)
        QPD0 = QPDy / QPDs

        # Fit PZT
        p0 = [f_drive, A_drive, 0, 0]
        lb = (f_drive - 0.01, A_drive * 0.9, -np.pi, -100)
        ub = (f_drive + 0.01, A_drive * 1.1, np.pi, 100)
        p, cov = curve_fit(triangle, t0, PZT0, p0, bounds=(lb, ub))
        PZT_fit0 = triangle(t0, p[0], p[1], p[2], p[3])
        #        print("PZT fit = ", p)

        # Subtract low-frequency noise
        QPD_cut = np.array(QPD0)
        self.QPD_max = 2 * np.median(np.abs(QPD_cut))
        QPD_cut[QPD_cut > self.QPD_max] = self.QPD_max
        QPD_cut[QPD_cut < -self.QPD_max] = -self.QPD_max

        N_lp = int(f_sample / f_drive)
        if N_lp % 2 == 0:
            N_lp += 1

        self.t_lp = running_mean(t0, N_lp)
        self.QPD_lp = running_mean(QPD_cut, N_lp)

        t1 = t0
        QPD1 = QPD0 - self.QPD_lp
        PZT1 = PZT0
        PZT_fit1 = PZT_fit0

        # Fit Sine
        p0 = [f_drive, 50, 0, 0]
        lb = (f_drive - 0.01, 0, -np.pi, -10)
        ub = (f_drive + 0.01, 100, np.pi, 10)
        p, cov = curve_fit(sine,
                           t1[np.abs(QPD1) < self.QPD_max],
                           QPD1[np.abs(QPD1) < self.QPD_max],
                           p0,
                           bounds=(lb, ub))

        # Fit and subtract trapezoid
        p0 = [f_drive, 20 * p[1], p[2], p[3], 1e-4]
        lb = (f_drive - 0.01, 4 * p[1], p[2] - 0.2, p[3] - 1, 1e-8)
        ub = (f_drive + 0.01, 100 * p[1], p[2] + 0.2, p[3] + 1, 1e-2)
        p, cov = curve_fit(trapzoid,
                           t1[np.abs(QPD1) < self.QPD_max],
                           QPD1[np.abs(QPD1) < self.QPD_max],
                           p0,
                           bounds=(lb, ub))
        QPD_fit1 = trapzoid(t1, p[0], p[1], p[2], p[3], p[4])
        #        print("QPD_fit = ", p)

        dQPD1 = QPD1 - QPD_fit1

        # Subtract low-frequency noise
        dQPD_cut = np.array(dQPD1)
        self.dQPD_max = 2 * np.median(np.abs(dQPD1))
        dQPD_cut[dQPD_cut > self.dQPD_max] = self.dQPD_max
        dQPD_cut[dQPD_cut < -self.dQPD_max] = -self.dQPD_max

        N_lp = int(f_sample / f_drive / 4)
        if N_lp % 2 == 0:
            N_lp += 1

        dQPD_lp = running_mean(dQPD_cut, N_lp)
        dQPD2 = dQPD1
        ddQPD = dQPD2 - dQPD_lp

        self.t = running_mean(t1, N_lp)
        self.QPD = QPD1
        self.QPD_fit = QPD_fit1
        self.PZT = PZT1
        self.PZT_fit = PZT_fit1
        self.Force = ddQPD * self.stiffness_pN2nm[1]
Example #21
def parseRaw(fname):
    dd = {}
    fn = fname.split("/")[-1]
    fd = nptdms.TdmsFile(fname)

    try:
        fd["meta"]
    except KeyError:
        log.error("No meta object, skipping " + fn)
        return -1

    ch0 = fd["radar"]["ch0"]  # .channel_data("radar","ch0")
    lat = fd["meta"]["lat"]
    lon = fd["meta"]["lon"]
    elev = fd["meta"]["elev"]
    time = fd["meta"]["time"]
    root = fd.properties

    startTime = root["start_time"]
    startTime = datetime.strptime(startTime, "%Y-%m-%dT%H:%M:%S.%f")

    dd["sig"] = "chirp"  # All TDMS files are chirped

    dd["txCF"] = root["chirp_cf"]
    dd["txBW"] = root["chirp_bw"] / 100
    dd["txlen"] = root["chirp_len"]
    # dd["chirpAmp"] = root["chirp_amp"]
    dd["txPRF"] = root["prf"]
    dd["fs"] = 1.0 / root["dt"]
    dd["stack"] = root["stacking"]
    dd["spt"] = root["record_len"]
    dd["trlen"] = root["dt"] * dd["spt"]

    # Some files have "pulse" and not "bark"
    try:
        bark = root["bark"]
    except KeyError:
        bark = root["pulse"]

    try:
        bark_len = root["bark_len"]
    except KeyError:
        bark_len = root["pulse_len"]

    try:
        bark_delay = root["bark_delay"]
    except KeyError:
        bark_delay = root["pulse_delay"]

    spb = int(np.ceil((bark_len + bark_delay) * dd["fs"]))

    # Extract ch0
    dd["rx0"] = tdmsSlice(ch0, dd["spt"], bark, spb)

    ntrace = dd["rx0"].shape[1]
    if (ntrace < 10):
        log.warning("Skipping " + fn + " because less than 10 traces (" +
                    str(ntrace) + ")")
        return -1

    # Correct double length metadata error, or trim data
    if len(lat) > 2 * dd["rx0"].shape[1]:
        lat = lat[0:len(lat) - 1:2]
        lon = lon[0:len(lon) - 1:2]
        elev = elev[0:len(elev) - 1:2]
        time = time[0:len(time) - 1:2]
    else:
        lat = lat[0:len(lat) - 1]
        lon = lon[0:len(lon) - 1]
        elev = elev[0:len(elev) - 1]
        time = time[0:len(time) - 1]

    # Fill in every other time value
    for i in range(len(time) - 1):
        if time[i] == 0:
            time[i] = (time[i - 1] + time[i + 1]) / 2

    # Fill in last time value
    diff = time[-2] - time[-3]
    time[-1] = time[-2] + diff

    # Time -> seconds since unix epoch
    time = time.astype(np.float64)
    epoch = datetime.utcfromtimestamp(0)
    initT = timedelta(0, time[0])
    for i in range(len(time)):
        delta = timedelta(0, time[i]) - initT
        time[i] = ((startTime + delta) - epoch).total_seconds()

    # Crop metadata if rx0 shorter
    nt = dd["rx0"].shape[1]
    lat = lat[:nt]
    lon = lon[:nt]
    elev = elev[:nt]
    time = time[:nt]

    # Get rid of traces with non-unique time
    time, ai = np.unique(time, return_index=True)
    lat = lat[ai]
    lon = lon[ai]
    elev = elev[ai]
    dd["rx0"] = dd["rx0"][:, ai]

    dd["ntrace"] = dd["rx0"].shape[1]
    dd["rx0"] = dd["rx0"][:, 0:dd["ntrace"]].astype(np.float32)

    dd["lat"] = np.zeros(dd["ntrace"]).astype("float")
    dd["lon"] = np.zeros(dd["ntrace"]).astype("float")
    dd["alt"] = np.zeros(dd["ntrace"]).astype("float")
    dd["tfull"] = np.zeros(dd["ntrace"]).astype("int64")
    dd["tfrac"] = np.zeros(dd["ntrace"]).astype("double")

    for i in range(dd["ntrace"]):
        # print(time[i], lat[i], lon[i], elev[i])
        dd["tfull"][i] = int(time[i]) - 37  # GPS to UTC
        dd["tfrac"][i] = time[i] - int(time[i])
        dd["lat"][i] = lat[i]
        dd["lon"][i] = lon[i]
        dd["alt"][i] = elev[i]

    # Rotate out HW delay
    date = datetime.utcfromtimestamp(dd["tfull"][i] + dd["tfrac"][i])

    if date.year != 2018 and date.month not in (5, 8):
        log.error("Unknown TDMS data source")
        return -1

    # Handle offset changes over campaign
    # May is constant, but a split in Aug
    if date.month == 5:
        dd["rx0"] = np.roll(dd["rx0"], -14, axis=0)
    elif date.month == 8 and date.day in (17, 18, 19, 20):
        dd["rx0"] = np.roll(dd["rx0"], 158, axis=0)
    elif date.month == 8:
        dd["rx0"] = np.roll(dd["rx0"], 14, axis=0)
    else:
        log.warning("No offset correction found for " + fn)

    # add extra offset for high pass malaspina track (mystery)
    if (fn == "20180819-215243.tdms"):
        log.warning("Adding malaspina high pass offset to " + fn)
        dd["rx0"] = np.roll(dd["rx0"], -40, axis=0)

    return dd
Example #22
def parse_tdms_zip(database, model, input, files, aids, **kwargs):
    """
    uploads a set of .tdms .zip file and parses it for the database
    :param database: slycat.web.server.database.couchdb.connect()
    :param model: database.get("model", self._mid)
    :param input: boolean
    :param files: files to be parsed
    :param aids: artifact id
    :param kwargs:
    """

    # import error handling from source
    dac_error = imp.load_source(
        'dac_error_handling',
        os.path.join(os.path.dirname(__file__), 'py/dac_error_handling.py'))

    dac_error.log_dac_msg("TDMS zip parser started.")

    # get user parameters
    MIN_TIME_STEPS = int(aids[0])
    MIN_CHANNELS = int(aids[1])
    SHOT_TYPE = aids[2]
    TIME_STEP_TYPE = aids[3]
    INFER_CHANNEL_UNITS = aids[4]
    INFER_SECONDS = aids[5]
    SUFFIX_LIST = aids[6]

    # keep a parsing error log to help user correct input data
    # (each array entry is a string)
    parse_error_log = dac_error.update_parse_log(database, model, [],
                                                 "Progress", "Notes:")

    # push progress for wizard polling to database
    slycat.web.server.put_model_parameter(database, model,
                                          "dac-polling-progress",
                                          ["Extracting ...", 10.0])

    # treat uploaded file as bitstream
    try:

        file_like_object = io.BytesIO(files[0])
        zip_ref = zipfile.ZipFile(file_like_object)
        zip_files = zip_ref.namelist()

    except Exception as e:

        dac_error.quit_raise_exception(
            database, model, parse_error_log,
            "Couldn't read .zip file (too large or corrupted).")

    # loop through zip files and look for tdms files matching suffix list
    file_list = []
    file_object = []
    tdms_ref = []

    for zip_file in zip_files:

        # get file name and extension
        head, tail = os.path.split(zip_file)
        ext = tail.split(".")[-1].lower()

        # is it a tdms file?
        if ext == 'tdms' or ext == 'tdm':

            # get suffix
            suffix = tail.split("_")[-1].split(".")[0]

            # should we read this file?
            if suffix in SUFFIX_LIST:

                try:

                    file_object.append(io.BytesIO(zip_ref.read(zip_file)))
                    tdms_ref.append(nptdms.TdmsFile(file_object[-1]))

                    file_list.append(zip_file)

                except Exception as e:

                    dac_error.quit_raise_exception(
                        database, model, parse_error_log,
                        "Couldn't read .tdms file.")

    # log files to be parsed
    for file_to_parse in file_list:
        parse_error_log = dac_error.update_parse_log(
            database, model, parse_error_log, "Progress",
            'Found file to parse: "' + file_to_parse + '".')

    # launch thread to read actual tdms files
    stop_event = threading.Event()
    thread = threading.Thread(target=parse_tdms_thread,
                              args=(database, model, tdms_ref, MIN_TIME_STEPS,
                                    MIN_CHANNELS, SHOT_TYPE, TIME_STEP_TYPE,
                                    INFER_CHANNEL_UNITS, INFER_SECONDS,
                                    dac_error, parse_error_log, stop_event))
    thread.start()
Example #23
def parse_tdms(database, model, input, files, aids, **kwargs):
    """
    uploads a set of .tdms files and parses them for the database
    :param database: slycat.web.server.database.couchdb.connect()
    :param model: database.get("model", self._mid)
    :param input: boolean
    :param files: files to be parsed
    :param aids: normally artifact ID, but we are using it to pass parameters from the UI
    :param kwargs:
    """

    # import error handling from source
    dac_error = imp.load_source(
        'dac_error_handling',
        os.path.join(os.path.dirname(__file__), 'py/dac_error_handling.py'))

    dac_error.log_dac_msg("TDMS parser started.")

    # get user parameters
    MIN_TIME_STEPS = int(aids[0])
    MIN_CHANNELS = int(aids[1])
    SHOT_TYPE = aids[2]
    TIME_STEP_TYPE = aids[3]
    INFER_CHANNEL_UNITS = aids[4]
    INFER_SECONDS = aids[5]

    # keep a parsing error log to help user correct input data
    # (each array entry is a string)
    parse_error_log = dac_error.update_parse_log(database, model, [],
                                                 "Progress", "Notes:")

    # count number of tdms files
    num_files = len(files)
    parse_error_log = dac_error.update_parse_log(
        database, model, parse_error_log, "Progress",
        "Uploaded " + str(num_files) + " file(s).")

    # push progress for wizard polling to database
    slycat.web.server.put_model_parameter(database, model,
                                          "dac-polling-progress",
                                          ["Extracting ...", 10.0])

    # treat each uploaded file as bitstream
    file_object = []
    tdms_ref = []

    for i in range(0, num_files):

        try:

            file_object.append(io.BytesIO(files[i]))
            tdms_ref.append(nptdms.TdmsFile(file_object[i]))

        except Exception as e:

            dac_error.quit_raise_exception(database, model, parse_error_log,
                                           "Couldn't read TDMS file.")

    # start actual parsing as a thread
    stop_event = threading.Event()
    thread = threading.Thread(target=parse_tdms_thread,
                              args=(database, model, tdms_ref, MIN_TIME_STEPS,
                                    MIN_CHANNELS, SHOT_TYPE, TIME_STEP_TYPE,
                                    INFER_CHANNEL_UNITS, INFER_SECONDS,
                                    dac_error, parse_error_log, stop_event))
    thread.start()
Example #24
    def _init_data_with_tdms(self, tdms_filename):
        """Initializes the current RT-DC dataset with a tdms file.
        """
        tdms_file = nptdms.TdmsFile(str(tdms_filename))
        # time is always there
        table = "Cell Track"
        # Edit naming.dclab2tdms to add features
        for arg in naming.tdms2dclab:
            try:
                data = tdms_file[table][arg].data
            except KeyError:
                pass
            else:
                if data is None or len(data) == 0:
                    # Ignore empty features. npTDMS treats empty
                    # features in the following way:
                    # - in nptdms 0.8.2, `data` is `None`
                    # - in nptdms 0.9.0, `data` is an array of length 0
                    continue
                self._events[naming.tdms2dclab[arg]] = data
        if len(self._events) == 0:
            raise IncompleteTDMSFileFormatError(
                "No usable feature data found in '{}'!".format(tdms_filename))
        # Set up configuration
        config_paths = [self.path.with_name(self._mid + "_para.ini"),
                        self.path.with_name(self._mid + "_camera.ini")]
        for cp in config_paths:
            if not cp.exists():
                raise IncompleteTDMSFileFormatError(
                    "Missing file: {}".format(cp))
        shpin_set = self.path.with_name(self._mid + "_SoftwareSettings.ini")
        if shpin_set.exists():
            config_paths.append(shpin_set)

        tdms_config = Configuration(files=config_paths, disable_checks=True)

        dclab_config = Configuration()

        for cfgii in [naming.configmap, naming.config_map_set]:
            for section in cfgii:
                for pname in cfgii[section]:
                    meta = cfgii[section][pname]
                    convfunc = dfn.get_config_value_func(section, pname)
                    if isinstance(meta, tuple):
                        osec, opar = meta
                        if osec in tdms_config and opar in tdms_config[osec]:
                            val = tdms_config[osec].pop(opar)
                            dclab_config[section][pname] = convfunc(val)
                    else:
                        dclab_config[section][pname] = convfunc(meta)

        # Additional information from log file
        rtfdc_log = self.path.with_name(self._mid + "_log.ini")
        if rtfdc_log.exists():
            with rtfdc_log.open("r", errors="replace") as fd:
                loglines = fd.readlines()
            for line in loglines:
                if line.startswith("[EVENT LOG]"):
                    sv = line.split("]")[1].strip()
                    if sv:
                        dclab_config["setup"]["software version"] = sv

        rtfdc_parm = self.path.with_name("parameters.txt")
        if rtfdc_parm.exists():
            with rtfdc_parm.open("r", errors="replace") as fd:
                parlines = fd.readlines()
            p1 = None
            p2 = None
            p3 = None
            for line in parlines:
                if line.startswith("pulse_led"):
                    fdur = float(line.split()[1])
                    dclab_config["imaging"]["flash duration"] = fdur
                elif line.startswith("numberofchannels"):
                    nc = int(line.split()[1])
                    dclab_config["fluorescence"]["channel count"] = nc
                elif line.startswith("laser488"):
                    p1 = float(line.split()[1])
                    dclab_config["fluorescence"]["laser 1 lambda"] = 488
                    dclab_config["fluorescence"]["laser 1 power"] = p1
                elif line.startswith("laser561"):
                    p2 = float(line.split()[1])
                    dclab_config["fluorescence"]["laser 2 lambda"] = 561
                    dclab_config["fluorescence"]["laser 2 power"] = p2
                elif line.startswith("laser640"):
                    p3 = float(line.split()[1])
                    dclab_config["fluorescence"]["laser 3 lambda"] = 640
                    dclab_config["fluorescence"]["laser 3 power"] = p3
                elif line.startswith("samplerate"):
                    sr = int(float(line.split()[1]))
                    dclab_config["fluorescence"]["sample rate"] = sr
                elif line.startswith("samplesperframe"):
                    spe = int(line.split()[1])
                    dclab_config["fluorescence"]["samples per event"] = spe
                elif line.startswith("Vmin"):
                    vmin = float(line.split()[1])
                    dclab_config["fluorescence"]["signal min"] = vmin
                elif line.startswith("Vmax"):
                    vmax = float(line.split()[1])
                    dclab_config["fluorescence"]["signal max"] = vmax
                elif line.startswith("median_pmt"):
                    mfs = int(line.split()[1])
                    dclab_config["fluorescence"]["trace median"] = mfs
            # Add generic channel names (independent of lasers)
            for ii in range(1, 4):
                chn = "channel {} name".format(ii)
                fln = "fl{}_max".format(ii)
                if fln in self and chn not in dclab_config["fluorescence"]:
                    dclab_config["fluorescence"][chn] = "FL{}".format(ii)
            lc = bool(p1) + bool(p2) + bool(p3)
            dclab_config["fluorescence"]["laser count"] = lc
            li = (p1 is not None) + (p2 is not None) + (p3 is not None)
            dclab_config["fluorescence"]["lasers installed"] = li
            dclab_config["fluorescence"]["channels installed"] = 3

        # Additional information from commented-out log-file (manual)
        with config_paths[0].open("r", errors="replace") as fd:
            lns = [s[1:].strip() for s in fd.readlines() if s.startswith("#")]
            if lns and lns[0] == "[FLUOR]":
                if ("software version" not in dclab_config["setup"]
                        and lns[1].startswith("fRTDC")):
                    dclab_config["setup"]["software version"] = lns[1]
                for ll in lns[2:]:
                    if ("sample rate" not in dclab_config["fluorescence"]
                            and ll.startswith("Samplerate")):
                        val = int(float(ll.split("=")[1]))
                        dclab_config["fluorescence"]["sample rate"] = val
                    elif ("signal min" not in dclab_config["fluorescence"]
                            and ll.startswith("ADCmin")):
                        val = float(ll.split("=")[1])
                        dclab_config["fluorescence"]["signal min"] = val
                    elif ("signal max" not in dclab_config["fluorescence"]
                            and ll.startswith("ADCmax")):
                        val = float(ll.split("=")[1])
                        dclab_config["fluorescence"]["signal max"] = val

        self.config = dclab_config
        self._complete_config_tdms(tdms_config)

        self._init_filters()

        # Load log files
        log_files = config_paths
        for name in [self._mid + "_events.txt",
                     self._mid + "_log.ini",
                     self._mid + "_SoftwareSettings.ini",
                     "FG_Config.mcf",
                     "parameters.txt"]:
            pl = self.path.with_name(name)
            if pl.exists():
                log_files.append(pl)
        for pp in log_files:
            with pp.open("r", errors="replace") as f:
                cfg = [s.strip() for s in f.readlines()]
            self.logs[pp.name] = cfg
Example #25
    @classmethod
    def read_from_path(cls, tdms_path: pathlib.Path, meta: MetaData):
        tdms_operator = nptdms.TdmsFile(tdms_path,
                                        memmap_dir=tempfile.gettempdir())
        return cls(tdms_operator, meta)
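The memmap_dir keyword survived the npTDMS 1.0 API change, so a rough sketch of the equivalent with the 1.x entry point would be:

import tempfile
import nptdms

# the constructor moved to TdmsFile.read(), but memmap_dir is unchanged
tdms_operator = nptdms.TdmsFile.read(tdms_path,
                                     memmap_dir=tempfile.gettempdir())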
Example #26
def tdms_to_hdf5(tdms_file,
                 h5_file,
                 load_data=True,
                 chan_map='',
                 memmap=True,
                 compression_level=0):
    """
    Converts TDMS data output from the LabView DAQ software used in the Viventi Lab to
    record from multiplexing neural implants. This method will most likely not interpret
    other TDMS files: see npTDMS for general file handling.

    Parameters
    ----------
    tdms_file : path (string)
    h5_file : path (string)
    chan_map : path (string)
        Optional table specifying a channel permutation. The first p rows
        of the outgoing H5 file will be the contents of these channels in
        sequence. The next (N-p) rows will be any channels not specified,
        in the order they are found.
    memmap : bool
    compression_level : int
        Optionally compress the outgoing H5 rows with zlib compression.
        This can reduce the time cost caused by disk access.

    """

    map_dir = tempfile.gettempdir() if memmap else None

    with tables.open_file(h5_file, mode='w') as h5_file:

        tdms_file = nptdms.TdmsFile(tdms_file, memmap_dir=map_dir)
        # assume for now there is only a single group -- see more files later

        # Catch an API change (older version first)
        try:
            t_group = tdms_file.groups()[0]
            group = tdms_file.object(t_group)
            chans = tdms_file.group_channels(t_group)
        except AttributeError:
            group = tdms_file.groups()[0]
            # Headstage channels and BNC channels are presently lumped into the "data" HDF5 array.. it might make sense
            # to separate them
            chans = group.channels()

        n_col = len(chans)
        n_row = len(chans[0])

        # The H5 file will be constructed as follows:
        #  * create a Group for the info section
        #  * create a CArray with zlib(3) compression for the data channels
        #  * create separate Arrays for special values
        #    (SampRate[SamplingRate], numRow[nrRows], numCol[nrColumns],
        #     OSR[OverSampling], numChan[nrColumns+nrBNCs])
        special_conversion = dict(SamplingRate='sampRate',
                                  nrRows='numRow',
                                  nrColumns='numCol',
                                  OverSampling='OSR')
        h5_info = h5_file.create_group(h5_file.root, 'info')
        for (key, val) in group.properties.items():
            if isinstance(val, str):
                # pytables doesn't support strings as arrays
                arr = h5_file.create_vlarray(h5_info,
                                             key,
                                             atom=tables.ObjectAtom())
                arr.append(val)
            elif isinstance(val, np.datetime64):
                h5_file.create_array(h5_info,
                                     key,
                                     obj=val.astype('f8'),
                                     atom=tables.Time64Atom())
            else:
                h5_file.create_array(h5_info, key, obj=val)
                if key in special_conversion:
                    print('caught', key)
                    # Put this array at the top level with new name
                    h5_file.create_array('/', special_conversion[key], obj=val)

        # do a few extra conversions
        try:
            num_chan = group.properties['nrColumns'] + group.properties[
                'nrBNCs']
            h5_file.create_array(h5_file.root, 'numChan', num_chan)
        except KeyError:
            pass
        try:
            mux_ratio = group.properties['OverSampling'] * group.properties[
                'nrRows']
            Fs = float(group.properties['SamplingRate']) / mux_ratio
            h5_file.create_array(h5_file.root, 'Fs', Fs)
        except KeyError:
            print('Could not determine sampling rate')

        h5_file.flush()

        if not load_data:
            return h5_file

        # now get down to the data
        atom = tables.Float64Atom()
        if compression_level > 0:
            filters = tables.Filters(complevel=compression_level,
                                     complib='zlib')
        else:
            filters = None

        d_array = h5_file.create_earray(h5_file.root,
                                        'data',
                                        atom=atom,
                                        shape=(0, n_row),
                                        filters=filters,
                                        expectedrows=n_col)

        # create a reverse lookup to index channels by number
        col_mapping = dict([(ch.properties['NI_ArrayColumn'], ch)
                            for ch in chans])
        # If a channel permutation is requested, lay down channels
        # in that order. Otherwise go in sequential order.
        if chan_map:
            chan_map = np.loadtxt(chan_map).astype('i')
            if chan_map.ndim > 1:
                print(chan_map.shape)
                # the actual channel permutation is in the 1st column
                # the array matrix coordinates are in the next columns
                chan_ij = chan_map[:, 1:3]
                chan_map = chan_map[:, 0]
            else:
                chan_ij = None
            # do any channels not specified at the end
            if len(chan_map) < n_col:
                left_out = set(range(n_col)).difference(chan_map.tolist())
                left_out = sorted(left_out)
                chan_map = np.r_[chan_map, left_out]
        else:
            chan_map = list(range(n_col))
            chan_ij = None

        for n in chan_map:
            # get TDMS column
            ch = col_mapping[n]
            # make a temp array here.. if all data in memory, then this is
            # slightly wasteful, but if it is mmap'd then this is more flexible
            d = ch.data[:]
            d_array.append(d[None, :])
            print('copied channel', ch.path, d_array.shape)

        if chan_ij is not None:
            h5_file.create_array(h5_file.root, 'channel_ij', obj=chan_ij)

    return h5_file
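Hypothetical invocation (paths invented for illustration):

tdms_to_hdf5('implant_recording.tdms', 'implant_recording.h5',
             memmap=True, compression_level=3)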
Example #27
def prepare_afe_primary_file(tdms_file, write_path=None):

    tdms_path, tdms_name = os.path.split(tdms_file)
    tdms_name = os.path.splitext(tdms_name)[0]
    if write_path is None:
        write_path = tdms_path
    if not os.path.exists(write_path):
        mkdir_p(write_path)
    new_primary_file = os.path.join(write_path, tdms_name + '.h5')

    with tables.open_file(new_primary_file, 'w') as hdf:
        tdms = nptdms.TdmsFile(tdms_file)
        # Translate metadata
        t_group = tdms.groups()[0]
        group = tdms.object(t_group)
        rename_arrays = dict(SamplingRate='sampRate',
                             nrRows='numRow',
                             nrColumns='numCol',
                             OverSampling='OSR')
        h5_info = hdf.create_group('/', 'info')
        for (key, val) in group.properties.items():
            if isinstance(val, str):
                # pytables doesn't support strings as arrays
                arr = hdf.create_vlarray(h5_info,
                                         key,
                                         atom=tables.ObjectAtom())
                arr.append(val)
            else:
                hdf.create_array(h5_info, key, obj=val)
                if key in rename_arrays:
                    hdf.create_array('/', rename_arrays[key], obj=val)

        # Parse channels
        chans = tdms.group_channels(t_group)
        elec_chans = [c for c in chans if 'CH_' in c.channel]
        bnc_chans = [c for c in chans if 'BNC_' in c.channel]
        # For now, files only have 1 row and all channels relate to that row
        num_per_electrode_row = len(elec_chans)

        # do a few extra conversions
        fs = float(group.properties['SamplingRate']) / (
            num_per_electrode_row * group.properties['OverSampling'])
        hdf.create_array(hdf.root, 'Fs', fs)

        # ensure that channels are ordered correctly
        elec_mapping = dict([(ch.properties['NI_ArrayColumn'], ch)
                             for ch in elec_chans])
        # This value is currently always 1 -- but leave this factor in for future flexibility
        num_electrode_rows = len(elec_chans) // num_per_electrode_row
        if num_per_electrode_row * num_electrode_rows < len(elec_chans):
            print('There were excess TDMS channels: {}'.format(
                len(elec_chans)))
        channels_per_row = 32
        sampling_offset = 6
        hdf_array = hdf.create_carray('/',
                                      'data',
                                      atom=tables.Float64Atom(),
                                      shape=(channels_per_row *
                                             num_electrode_rows,
                                             len(elec_chans[0].data)))
        for elec_group in range(num_electrode_rows):
            for c in range(channels_per_row):
                chan_a = elec_mapping[2 * c + sampling_offset]
                chan_b = elec_mapping[2 * c + 1 + sampling_offset]
                hdf_array[elec_group * channels_per_row +
                          c, :] = 0.5 * (chan_a.data + chan_b.data)
            sampling_offset += num_per_electrode_row

        bnc_array = hdf.create_carray('/',
                                      'bnc',
                                      atom=tables.Float64Atom(),
                                      shape=(len(bnc_chans),
                                             len(bnc_chans[0].data)))
        bnc_mapping = dict([(ch.properties['NI_ArrayColumn'] - len(elec_chans),
                             ch) for ch in bnc_chans])
        for n in range(len(bnc_mapping)):
            bnc_array[n, :] = bnc_mapping[n].data
    return
Example #28
def readtdmsfile(filepath):
    """ read tdms file at filepath """
    tdms_file = nptdms.TdmsFile(filepath)
    return tdms_file
Example #29
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 31 15:05:42 2018

@author: SWC
"""
import numpy as np
import nptdms as td
import cv2

#read tdms file of short video
file_path = ''

tdms_file = td.TdmsFile(file_path)
group = tdms_file.groups()
channel = tdms_file.group_channels(group[0])
channel_object = tdms_file.object(group[0],'data')
data = channel_object.data

video = np.reshape(data, (len(data) // (1024 * 1280), 1024, 1280))  # frames are 1024 x 1280, so the frame count is len(data)/(1024*1280); integer division keeps the shape valid
cv2.imshow('frame', video[0, :, :])
cv2.waitKey(0)  # give the HighGUI window a chance to actually draw
Example #30
    "\t██╔══╝  ██╔═══╝ ██╔══██║" + "\n" +
    "\t███████╗██║     ██║  ██║" + "\n" +
    "\t╚══════╝╚═╝     ╚═╝  ╚═╝")
print("\tEngine Performance Analysis (v1.1)")
print("\tJonathan Palafoutas")
print("")
print("\t"+pre_lc+str(n)+", "+pre_pt+str(n)+", "+pre_tc+str(n))
print("")
print("Setting up ...")
print("\tLibraries enabled")

# ------------------------------------------------------------------------------
# Import LabView data (second step of setting up)

# Load cells
lc = nptdms.TdmsFile(TDMS_path + pre_lc + str(n) + ".TDMS")
lc_time = lc["Untitled"][time_name][:]
lc_LNG  = lc["Untitled"][lc_LNG_name][:]
lc_LOX  = lc["Untitled"][lc_LOX_name][:]
lc_engine = lc["Untitled"][lc_engine_name][:]

print("\tLoad cell data imported")

# Pressure transducers
pt = nptdms.TdmsFile(TDMS_path + pre_pt + str(n) + ".TDMS")
pt_time = pt["Untitled"][time_name][:]
pt_LNG1 = pt["Untitled"][pt_LNG1_name][:]
pt_LNG2 = pt["Untitled"][pt_LNG2_name][:]
pt_LOX1 = pt["Untitled"][pt_LOX1_name][:]
pt_LOX2 = pt["Untitled"][pt_LOX2_name][:]
pt_LOX3 = pt["Untitled"][pt_LOX3_name][:]