def test_can_append_to_file_using_path():
    """Writing a second segment with mode 'a' should append to the channel.

    Creates a temp file path, writes two 10-sample segments to the same
    group/channel, and checks the read-back data is their concatenation.
    """
    input_1 = np.linspace(0.0, 1.0, 10)
    input_2 = np.linspace(1.0, 2.0, 10)
    segment_1 = ChannelObject("group", "a", input_1)
    segment_2 = ChannelObject("group", "a", input_2)
    tempdir = tempfile.mkdtemp()
    # os.path.join instead of "%s/..." so the test is portable across OSes.
    temppath = os.path.join(tempdir, "test_file.tdms")
    try:
        with TdmsWriter(temppath) as tdms_writer:
            tdms_writer.write_segment([segment_1])
        # 'a' opens the existing file for appending rather than truncating.
        with TdmsWriter(temppath, 'a') as tdms_writer:
            tdms_writer.write_segment([segment_2])
        tdms_file = TdmsFile(temppath)
        output = tdms_file["group"]["a"].data
        assert len(output) == 20
        np.testing.assert_almost_equal(
            output, np.concatenate([input_1, input_2]))
    finally:
        # Clean up both the file and the directory we created.
        if os.path.exists(temppath):
            os.remove(temppath)
        os.rmdir(tempdir)
def read_csd_hydro(root_path):
    """
    Stolen from Antonio for reading CSD pump data (LabView).

    Reads every ``*.tdms`` file directly under *root_path* and concatenates
    them into one time-sorted DataFrame.

    :param root_path: Directory searched (non-recursively) for ``*.tdms`` files
    :return: DataFrame indexed by naive-UTC time with columns including
        'Pressure' (MPa) and 'Flow'
    """
    frames = []
    for f in glob('{}/*.tdms'.format(root_path)):
        tdms_file = TdmsFile(f)
        pump_df = tdms_file['Channels'].as_dataframe()
        pump_df = pump_df.set_index('Time')
        pump_df.rename(columns={
            'Interval pressure': 'Pressure',
            'Pump flow': 'Flow'
        }, inplace=True)
        pump_df = pump_df.sort_index()
        # Source timestamps are local; 'Etc/GMT+1' is UTC-01:00 under the
        # POSIX sign convention. Localize, convert to UTC, then drop the tz
        # to leave a naive UTC index.
        pump_df.index = pump_df.index.tz_localize('Etc/GMT+1')
        pump_df.index = pump_df.index.tz_convert('UTC')
        pump_df.index = pump_df.index.tz_convert(None)
        pump_df['Pressure'] /= 1000.  # To MPa from kPa
        frames.append(pump_df)
    all_df = pd.concat(frames)
    all_df = all_df.sort_index()
    return all_df
def readFiles():
    """Extract features from every .tdms file under the module-level `path`.

    For each file, channel "Untitled"/"Canale 4" is read and passed to
    `evaluateFeature`; results are appended to the module-level result lists
    (`resFiles`, `resFFTSpectrum`, `resPeakSpectrum`, `resMean`,
    `resStdDeviation`, `resSlope`) and finally pickled via `saveResults`.

    NOTE(review): relies entirely on module-level mutable state; calling it
    twice appends duplicates to the result lists.
    """
    files = os.listdir(path)
    # Sort so features are produced in a deterministic (filename) order.
    files.sort()
    for filename in files:
        if filename.endswith(".tdms"):
            tdms_file = TdmsFile(path + "/" + filename)
            channel = tdms_file.object("Untitled", "Canale 4")
            data = channel.data
            fftSpectrum, peakSpectrum, mean, stdDeviation, slope = evaluateFeature(
                data)
            resFiles.append(cleanFilename(filename))
            resFFTSpectrum.append(fftSpectrum)
            resPeakSpectrum.append(peakSpectrum)
            resMean.append(mean)
            resStdDeviation.append(stdDeviation)
            resSlope.append(slope)
    # Persist each accumulated feature list for later use.
    saveResults(resFiles, "featureObtained/Date.p")
    saveResults(resFFTSpectrum, "featureObtained/FFTSpectrum.p")
    saveResults(resPeakSpectrum, "featureObtained/PeakSpectrum.p")
    saveResults(resMean, "featureObtained/Mean.p")
    saveResults(resStdDeviation, "featureObtained/StdDeviation.p")
    saveResults(resSlope, "featureObtained/Slope.p")
def readTDMS(self):
    """Read RTD temperature and time channels from ``self.filepath`` and trim
    them to start where the commanded torque first reaches a reference level.

    Side effects: sets ``self.RTD1_list``, ``self.RTD2_list``, ``self.j``
    (trim index) and ``self.Relative_time`` (matplotlib date numbers).
    """
    tdms = TdmsFile(self.filepath)
    self.RTD1_list = tdms.object("Meta Data", "MA-RTD 1").data
    self.RTD2_list = tdms.object("Meta Data", "MA-RTD 2").data
    Time = tdms.object("Meta Data", "Time").data
    Torque_CMD = tdms.object("Meta Data", "MA_Command.Torque").data
    # Reference torque taken at sample 80 — presumably past the startup
    # transient; TODO confirm this magic index.
    t0 = Torque_CMD[80]
    j = 0
    # Find the first sample where the command reaches the reference level.
    for t in Torque_CMD:
        if t >= t0:
            break
        else:
            j += 1
    else:
        # for/else: the loop completed WITHOUT break (no sample >= t0),
        # so keep the whole record untrimmed.
        j = 0
    self.j = j
    self.RTD1_list = self.RTD1_list[j:]
    self.RTD2_list = self.RTD2_list[j:]
    Time = Time[j:]
    # The time channel lacks a date, so anchor it to 2019-01-01
    # (translated from the original Chinese comment).
    zero = datetime(2019, 1, 1)
    zero = mdates.date2num(zero)
    T0 = mdates.date2num(Time[0])
    Time = mdates.date2num(Time)
    # Shift so the first sample lands exactly on the anchor date.
    self.Relative_time = [t - T0 + zero for t in Time]
def test_can_write_timestamp_data_with_datetimes():
    """Datetime channel data should round-trip through a write/read cycle."""
    minutes = (35, 36, 37)
    input_data = [datetime(2017, 7, 9, 12, m, 0) for m in minutes]
    expected_data = np.array(
        ["2017-07-09T12:%02d:00" % m for m in minutes],
        dtype='datetime64')
    segment = ChannelObject("group", "timedata", input_data)
    buffer = BytesIO()
    with TdmsWriter(buffer) as writer:
        writer.write_segment([segment])
    buffer.seek(0)
    output_data = TdmsFile(buffer)["group"]["timedata"].data
    assert len(output_data) == 3
    # Compare each round-tripped value against its expected datetime64.
    for actual, expected in zip(output_data, expected_data):
        assert actual == expected
def main(args):
    """Upload the TDMS file (or every TDMS file in the folder) at args.path.

    Exits with status 2 when args.path is neither a folder nor a .tdms file.
    """
    logging.basicConfig(level=logging.INFO)
    # Resolve args.path into a list of candidate files.
    if os.path.isdir(args.path):
        files = glob.glob(os.path.join(args.path, "*.tdms"))
    elif os.path.exists(args.path) and os.path.splitext(
            args.path)[1] == ".tdms":
        files = [args.path]
    else:
        logger.fatal(
            "--path must point to either folder or tdms file: {}".format(
                args.path))
        sys.exit(2)

    # CLI key wins over the environment variable.
    api_key = args.apikey if args.apikey else os.environ.get("COGNITE_API_KEY")
    client = CogniteClient(api_key=api_key)

    for path in files:
        with open(path, "rb") as handle:
            try:
                tdms = TdmsFile(handle)
            except Exception as exc:
                # A broken file is logged and skipped, not fatal for the batch.
                logger.error("Fatal: failed to parse TDMS file {}: {}".format(
                    path, exc))
                continue
            else:
                process_tdms_file(client, tdms, path, args.only_static)
def getDataFrame(filename):
    """Load a TDMS file into a pandas DataFrame.

    :param filename: path of the .tdms file to load
    :return: the file's contents as a DataFrame
    :raises TypeError: if *filename* is not a string
    """
    # isinstance instead of `type(...) == str` so str subclasses also work.
    if isinstance(filename, str):
        tdms_file = TdmsFile(filename)  # ,memmap_dir=cwd)
        tddf = tdms_file.as_dataframe()
    else:
        raise TypeError('I need a single filename')
    return tddf
def load_VNASeparateFieldsMeasurement(self):
    """
    Load a VNA measurement from a TDMS file with two magnetic field
    channels (field_before, field_after). Apply certain standard
    operations and plot data. Allow to select used channels.
    """
    fnames = QFileDialog.getOpenFileNames(
        caption="Open Tdms file(s)",
        filter=u"TDMS (*.tdms);;All files (*.*)")
    for fname in fnames:
        tdms_file = TdmsFile(fname)
        labels = [
            "Field_before", "Field_after", "Frequency", "Re(signal)",
            "Im(signal)"
        ]
        # Let the user map each label to a concrete group/channel path.
        paths, accepted = TdmsChannelSelectDialog.get_group_channels(
            tdms_file=tdms_file, channel_labels=labels)
        measurement = VNASeparateFieldsMeasurement(
            fname=fname,
            tdms_file=tdms_file,
            path_field_before=paths[0],
            path_field_after=paths[1],
            path_frequency=paths[2],
            path_real_signal=paths[3],
            path_imag_signal=paths[4])
        # Standard post-processing: derivative-divide then magnitude.
        measurement.add_operation(bp.derivative_divide, modulation_amp=4)
        measurement.add_operation(bp.mag)
        self.add_measurement(measurement, cmap="RdYlGn")
    return fnames
def load_VNAMeasurement_select_channels(self):
    """
    Load a VNA measurement from a TDMS file. Apply certain standard
    operations and plot data. Allow to select used channels.
    """
    fnames = QFileDialog.getOpenFileNames(
        caption="Open Tdms file(s)",
        filter=u"TDMS (*.tdms);;All files (*.*)")
    for fname in fnames:
        tdms_file = TdmsFile(fname)
        labels = [
            "Field channel", "Frequency channel", "Re(signal) channel",
            "Im(signal) channel"
        ]
        paths, accepted = TdmsChannelSelectDialog.get_group_channels(
            tdms_file=tdms_file, channel_labels=labels)
        # paths[i] is a (group, channel) pair; the group is taken from the
        # real-signal selection.
        field_path, freq_path, real_path, imag_path = (
            paths[0], paths[1], paths[2], paths[3])
        measurement = VNAMeasurement(
            fname=fname,
            tdms_file=tdms_file,
            group=real_path[0],
            field_channel=field_path[1],
            freq_channel=freq_path[1],
            signal_channel=real_path[1],
            imag_channel=imag_path[1])
        # Standard post-processing: derivative-divide then magnitude.
        measurement.add_operation(bp.derivative_divide, modulation_amp=4)
        measurement.add_operation(bp.mag)
        self.add_measurement(measurement, cmap="RdYlGn")
    return fnames
def f_open_tdms_2(filename):
    """Open a TDMS file and return the raw data of its first channel.

    When *filename* is the literal string 'Input', a file-open dialog is
    shown instead. Only the first group's first channel is read.
    """
    if filename == 'Input':
        filename = filedialog.askopenfilename()
    tdms_file = TdmsFile(filename)
    group_names = tdms_file.groups()
    first_group = group_names[0]
    # All channel objects of the first group; we only need the first one.
    channel_object = tdms_file.group_channels(first_group)
    channel_name = channel_object[0].channel
    data = tdms_file.channel_data(first_group, channel_name)
    return data
def tdmsfuncapr14(filename):
    """Extract the four filtered channels of an April-14 test TDMS file.

    Returns (mean1, mean2, mean3, mean4, c1, c2, c3, c4) where each cN is
    the channel data reshaped into a column vector and meanN is its mean.
    """
    tdms_file = TdmsFile(filename)
    channel_names = ('1khz (Filtered)', '10khz (Filtered)',
                     '40khz (Filtered)', '100khz (Filtered)')
    # Load each channel from the "Untitled" group and reshape to (n, 1).
    columns = []
    for name in channel_names:
        raw = tdms_file.object('Untitled', name).data
        columns.append(np.reshape(raw, (len(raw), 1)))
    c1, c2, c3, c4 = columns
    mean1, mean2, mean3, mean4 = (np.mean(col) for col in columns)
    return mean1, mean2, mean3, mean4, c1, c2, c3, c4
def buildDirectories(sourceTDMSDirectory, tdmsFiles, taskName, mode="csv"):
    """Create one output directory per TDMS group ("part") under a new
    task folder, and return a mapping of part name -> destination path.

    :param sourceTDMSDirectory: Path to the folder holding the TDMS files
    :param tdmsFiles: list of TDMS filenames; the first file defines the groups
    :param taskName: name of the new folder created next to the source folder
    :param mode: "csv" (default) maps parts to directories; "HDF5" also
        creates an empty .hdf5 file per part and maps to that file instead
    :return: dict of str(part name) -> str(directory or .hdf5 path)
    """
    directoryBuildTime = time.time()
    # TdmsFile only takes a string; Path's "/" operator keeps the
    # concatenation portable between Linux and Windows.
    firstObject = TdmsFile(str(Path(str(sourceTDMSDirectory)) / Path(str(tdmsFiles[0]))))
    parentDir = sourceTDMSDirectory.parent
    newFolder = parentDir / str(taskName)
    newFolder.mkdir(exist_ok=True, parents=True)
    # One subfolder per group found in the first TDMS file.
    folderDictionary = dict()
    groups = firstObject.groups()
    for part in groups:
        print(f'part: {part}')
        partName = part.name
        print(f'partName: {part.name}')
        newDir = newFolder / partName
        newDir.mkdir(exist_ok=True, parents=True)
        folderDictionary[str(part.name)] = str(newDir)
        # HDF5 mode: the (empty) file is created now and populated later,
        # one slice at a time across all parts.
        # BUG FIX: was `mode is "HDF5"` — identity comparison against a
        # string literal, which is not guaranteed to be True even when the
        # strings are equal. Use ==.
        if mode == "HDF5":
            destination = newDir / str(str(partName) + ".hdf5")
            print(str(destination))
            # Create the file at its full destination path directly; the old
            # code worked around misplaced files by chdir-ing into newDir.
            hf = h5py.File(str(destination), 'w')
            hf.close()
            folderDictionary[str(part.name)] = str(destination)
    directoryBuildTime = time.time() - directoryBuildTime
    print("Time to build directories was " + str(directoryBuildTime) + " seconds.\n")
    return folderDictionary
def import_tdmsfile_to_tempodb(file_path, series_key_base=None):
    """Walk a TDMS file and import every channel into TempoDB.

    :param file_path: path of the .tdms file to import
    :param series_key_base: optional base for generated series keys; when
        given, keys become "<base>-<counter>", otherwise the channel path
        itself is used as the key.
    """
    # Parse the TDMS file and get a handle to the object
    tdmsfile = TdmsFile(file_path)

    # Logging options
    show_properties = True
    show_data = False
    show_time = False
    import_data = True

    count = 0
    level = 0
    root = tdmsfile.object()
    display('/', level)
    if show_properties:
        display_properties(root, level)
    for group in tdmsfile.groups():
        level = 1
        group_obj = tdmsfile.object(group)
        display("%s" % group_obj.path, level)
        if show_properties:
            display_properties(group_obj, level)
        for channel in tdmsfile.group_channels(group):
            level = 2
            display("%s" % channel.path, level)
            if show_properties:
                level = 3
                display("data type: %s" % channel.data_type.name, level)
                display_properties(channel, level)
            if show_data:
                level = 3
                data = channel.data
                display("data: %s" % data, level)
            if show_time:
                level = 3
                time = channel.time_track()
                display("time: %s" % time, level)
            if import_data:
                level = 3
                try:
                    if series_key_base:
                        series_key = "%s-%i" % (series_key_base, count)
                        count += 1  # e.g. "Paul-Python-TDMS-1"
                    else:
                        series_key = channel.path
                    import_channel_to_tempodb(channel, series_key)
                except KeyError as ke:
                    # Channels without embedded time data cannot be imported.
                    display("There is no embedded time data in this channel.", level)
                    # BUG FIX: Python 2 print statements were a SyntaxError
                    # under Python 3; converted to function calls.
                    print(ke)
                    print()
                    print()
def load_tdms(self):
    """Look for .tdms files in ``self.folder`` and load the first one found.

    Sets ``self.tdms`` to the loaded TdmsFile, or to None (and prints a
    message) when no .tdms file exists in the folder.
    """
    # BUG FIX: self.tdms is now always assigned, so the check below can no
    # longer raise AttributeError when the folder holds no .tdms file.
    self.tdms = None
    for fname in os.listdir(self.folder):
        ext = os.path.splitext(fname)[1]
        if ext == '.tdms':
            # os.path.join instead of string concatenation, so a folder
            # without a trailing separator also works.
            self.tdms = TdmsFile(os.path.join(self.folder, fname))
            # BUG FIX: stop at the first match, as the docstring promises
            # (the old loop silently kept the LAST file).
            break
    if not self.tdms:
        # BUG FIX: Python 2 print statement converted to a function call.
        print('Tdms file not found')
def _connect(self, path):
    """Open the TDMS file at *path* and cache its settings, data and
    root properties on the instance. Prints a message when the file
    does not exist instead of raising.
    """
    try:
        tdms = TdmsFile(path)
        self._tdms_file = tdms
        # 'Settings' and 'Data' groups are exposed as DataFrames.
        self._settings = tdms.object('Settings').as_dataframe()
        self._data = tdms.object('Data').as_dataframe()
        self._properties = tdms.object().properties
    except FileNotFoundError as err:
        print(f'File not found: `{err}`')
def _lade_zeile(y):
    """Load row *y*: reads '<verzeichnis><sep><typ><y>.tdms' and returns the
    configured group/channel as a 1-D array via `_lade_tdms`.

    :type y: int
    :rtype: numpy.multiarray.ndarray
    """
    dateiname = _par.konf.verzeichnis + sep + _typ + str(y) + '.tdms'
    kanal = TdmsFile(dateiname).object(_par.konf.gruppe, _par.konf.kanal)
    return _lade_tdms(kanal, dim=1)
def convert_to_df(self, debug=False):
    '''Convert the content of the TDMS files under self.path into a DataFrame.

    Each non-'Reference' channel contributes one row pairing its data with
    the file's 'Reference/Ramp_Output' ramp.

    Input parameters:
    debug: a boolean. If False no debug text is printed. If True then
    debug informations are printed.

    Return Value:
    1. A pandas dataframe with columns 'data' (a two-column x/y DataFrame),
       'groupName', 'channelName' and 'filename'.
    2. the number of channels as integer.'''
    # Collect per-channel rows and concatenate once at the end.
    # BUG FIX: DataFrame.append was deprecated and removed in pandas 2.x.
    rows = []
    data_files = [x for x in os.listdir(self.path) if x.endswith(".tdms")]
    for filename in data_files:
        tdms_file = TdmsFile(self.path + '/' + filename)
        if (debug):
            print("The following Groups and Channels are available:")
            for group in tdms_file.groups():
                print(group)
                for channel in tdms_file.group_channels(group):
                    print(channel)
        # The reference ramp shared by every channel of this file.
        s1 = pd.Series(tdms_file.object('Reference', 'Ramp_Output').data)
        for group in tdms_file.groups():
            if (str(group) != 'Reference'):
                for channel in tdms_file.group_channels(group):
                    channelName = TDMSConverter.get_channel_name(
                        self, channel)
                    if (debug):
                        print(">>>", str(group), '--', channelName)
                    s2 = pd.Series(
                        tdms_file.object(str(group), channelName).data)
                    df_data = pd.concat([s1, s2], axis=1)
                    df_data.columns = ['x', 'y']
                    rows.append(pd.DataFrame({
                        "data": [df_data],
                        "groupName": [str(group)],
                        "channelName": [channelName],
                        # BUG FIX: the separator was missing here, producing
                        # paths like "dirfile.tdms".
                        "filename": [self.path + '/' + filename]
                    }))
    df = pd.concat(rows) if rows else pd.DataFrame()
    return df, df.shape[0]
def plot(self):
    '''Plot the first seven channels of "t1.tdms", one per canvas.

    Channel data is collected group by group (channels reversed within each
    group), then channel i is drawn on self.canvas<i+1>.
    '''
    tdms_file = TdmsFile("t1.tdms")
    tdms_groups = tdms_file.groups()
    data_array = []
    for grp in tdms_groups:
        for ch in reversed(tdms_file.group_channels(grp)):
            # Recover group/channel names from the object's repr, e.g.
            # <TdmsObject with path /'group'/'channel'>.
            temp = str(ch).split('\'')
            temp_obj = tdms_file.object(temp[1], temp[3])
            data_array.append(temp_obj.data)
    data_array = np.asarray(data_array)
    # One subplot per canvas; previously seven copy-pasted blocks.
    for idx in range(7):
        ax = self.figure.add_subplot()
        ax.clear()
        ax.plot(data_array[idx])
        getattr(self, 'canvas%d' % (idx + 1)).draw()
def main():
    # this script is used for testing and give feedback to training set
    """Test pipeline: read one TDMS force/stroke recording, extract punches,
    segment them into trends, classify each trend against stored clusters,
    and append the newly labelled data to the training files.

    NOTE(review): input path, sample window [80800:99000] and cluster CSV
    names are hard-coded for a specific machine/dataset.
    """
    # =========Step 1 read in data============
    # load in data, take the TDMS data type as example
    tdms_file = TdmsFile(
        "/media/sherry/新加卷/ubuntu/WZL-2018/Feintool Daten/FW-1-1/new material/AKF_SS-FW2-H04521-H05000.tdms"
    )
    # tdms_file.groups()
    df_all = tdms_file.object('Untitled').as_dataframe()
    df_force = df_all['Stempel_1 (Formula Result)']
    df_stroke = df_all['Position_Ma']
    # sample time series data
    df_f = df_force[80800:99000].reset_index(drop=True)
    df_s = df_stroke[80800:99000].reset_index(drop=True)
    # the training data read in
    segmentations = read_from_file()
    # =========step 2: extract the hub ===========
    # Extract all punches of the dataset
    SEH = npF.SegHub()
    df_punches_t = SEH.extract_hub(df_f, df_s)
    df_punches_t = df_punches_t.reset_index(drop=True)
    # =========Step 3: segmentation into trends=========
    # First split with an l2 cost model, then refine segment 4 with an
    # rbf model and splice the four sub-segments back in as trends 4-7.
    punch_seg = SEH.segment_and_plot(df_punches_t[0].dropna(), 'l2')
    sub_punch_seg = SEH.segment_and_plot(punch_seg[4].dropna(), 'rbf', 0, 4)
    punch_seg[4] = sub_punch_seg[0]
    punch_seg[5] = sub_punch_seg[1]
    punch_seg[6] = sub_punch_seg[2]
    punch_seg[7] = sub_punch_seg[3]
    # =========Step 4: classification=========
    for i in range(0, 8):
        print("Trend:" + str(i + 1))
        s = SEH.Uniformation(punch_seg[i])
        clusters = pd.read_csv("cluster_" + str(i) + ".csv")
        data_train = SEH.Uniformation(segmentations[i])
        row, col = data_train.shape
        # Truncate both the new sample and the training set to a common length.
        col = min(len(s), col)
        print("Result:.........")
        s = s[0:col]
        test = pd.DataFrame([s, s])
        data_train = data_train.iloc[:, 0:col]
        # generate new clusters and save into the file
        # you cannot direct use xxx = yyyy for tables
        new_dataset = data_train.copy()
        new_dataset.loc[row] = s.values
        # Re-cluster (Ward linkage) including the new sample, persist, classify.
        z = hac.linkage(new_dataset, 'ward')
        result = SEH.print_clusters(data_train, z, 3, plot=False)
        pd.DataFrame(result).to_csv("cluster_" + str(i) + ".csv", index=False)
        SEH.classifier(data_train, clusters, test, 3)
    #==========Step 5: save the newly added file==========
    save_newdata(punch_seg)
def tdmsfuncjun(filename):
    """Read a June-2017 test TDMS file (with motion-tracker data) and return
    one matrix of aligned columns [time, x, y, c1, c2, c3, c4].

    The four cN columns are the filtered photodiode channels; x/y are the
    resampled tracker coordinates. All columns are truncated to the shortest
    tracker channel when their lengths differ ("jagged edge").
    """
    tdms_file = TdmsFile(filename)  # Load the file
    # Load the individual data channels
    c1 = tdms_file.object('Untitled', '1khz (Filtered)').data
    c2 = tdms_file.object('Untitled', '10khz (Filtered)').data
    c3 = tdms_file.object('Untitled', '40khz (Filtered)').data
    c4 = tdms_file.object('Untitled', '100khz (Filtered)').data
    # Load time data and coord data.
    x = tdms_file.object('Untitled', 'Resampled').data
    y = tdms_file.object('Untitled', 'Resampled 1').data
    t = tdms_file.object('Untitled', '1khz (Filtered)').time_track()
    # Reshape everything into column vectors.
    c1 = np.reshape(c1, (len(c1), 1))
    c2 = np.reshape(c2, (len(c2), 1))
    c3 = np.reshape(c3, (len(c3), 1))
    c4 = np.reshape(c4, (len(c4), 1))
    x = np.reshape(x, (len(x), 1))
    y = np.reshape(y, (len(y), 1))
    t = np.reshape(t, (len(t), 1))
    # BUG FIX: Python 2 print statements were a SyntaxError under Python 3.
    print('length compare', len(x), len(c1))
    # Fix jagged edge issue, where size of x,y column is often shorter than
    # the photodiode data.
    if len(x) < len(c1) or len(y) < len(c1):
        # Sometimes even the x and y columns differ in length.
        print('jagged edge found')
        jagged = min(len(x), len(y))
        c1 = c1[:jagged]
        c2 = c2[:jagged]
        c3 = c3[:jagged]
        c4 = c4[:jagged]
        t = t[:jagged]
        x = x[:jagged]
        y = y[:jagged]
    # Format as a new array
    f = np.concatenate((t, x, y, c1, c2, c3, c4), axis=1)
    return f
def loadTDMSImages(self, file):
    """Load an image stack from a TDMS file.

    Stores dimx/dimy/binning/frames/exposure on the instance and returns
    the image data reshaped to (frames, dimx, dimy).
    """
    tdms_file = TdmsFile(file)
    props = tdms_file.object().properties
    self.dimx = int(props['dimx'])
    self.dimy = int(props['dimy'])
    self.binning = int(props['binning'])
    self.frames = int(props['dimz'])
    # The exposure property uses a decimal comma, so normalize it first.
    self.exposure = float(props['exposure'].replace(',', '.'))
    raw = tdms_file.channel_data('Image', 'Image')
    return raw.reshape(self.frames, self.dimx, self.dimy)
def __init__(self, fname):
    """Open the 'main' group of a TDMS file and prepare record timing."""
    tdms_file = TdmsFile(fname)["main"]
    # Normalize t0: offset to the first record, then divide by 1e6
    # (raw values are presumably microseconds — TODO confirm).
    t0 = tdms_file["t0"][:]
    t0 -= t0[0]
    t0 /= 1e6
    self.tdms_file = tdms_file
    self.t0 = t0
    # Number of records in the file.
    self.Nrecords = len(self.t0)
    # NOTE(review): attribute name has a typo ("colletion") — kept as-is
    # because external callers may reference it.
    self.colletion_name = "Collection channel is not set!"
    self.collection = []
def read_tdms(file_name):
    """Read the three voltage and current channels from the 'data' group.

    Voltage channels U1..U3 are returned under keys V1..V3; current
    channels keep their I1..I3 names.
    """
    tdms_file = TdmsFile(file_name)
    readings = {}
    for phase in (1, 2, 3):
        readings['V{}'.format(phase)] = tdms_file.object(
            'data', 'U{}'.format(phase)).data
        readings['I{}'.format(phase)] = tdms_file.object(
            'data', 'I{}'.format(phase)).data
    return readings
def read2DFMR2magFields(filepattern, path_field_after, path_field_before,
                        path_frequency, path_real_signal, path_imag_signal,
                        field_points=None, freq_points=None, tdms_file=None):
    """
    Read a VNA- or Lock-In measurement with a field-before and a
    field-after channel.

    As an input this routine requires a field_before- and a field_after-channel
    and the groups respectively. For the frequency-channel and the
    signal-channels it assumes that these are in the same group.

    Parameters
    ----------
    filepattern : string
        filename (relative) of tdms file. may include wilcards (see glob);
        ignored when *tdms_file* is given
    path_* : tuple of strings
        path i.e. [group, channel] to the corresponding data
    field_points : integer, optional
        number of expected points in x direction, gets calculated if not
        specified
    freq_points : integer, optional
        number of expected points in y direction, gets calculated if not
        specified
    tdms_file : TdmsFile, optional
        already-loaded file; skips the glob/load step

    Returns
    -------
    x : dim. (nXPoints x 1) numpy array
        averaged field values from field_before and field_after
    y : dim. (nYPoints x 1) numpy array
        frequency values
    complexSignal : (nXPoints x nYPoints) complex numpy array
        signal as matrix
    tdms_file : (TdmsFile)
        loaded tdms file object

    Usage example
    -------------
    >>> x, y, aSignal, _ = read2DFMR2magFields(
    ...     fname, path_field_after, path_field_before, path_frequency,
    ...     path_real_signal, path_imag_signal, tdms_file=tdms_file)
    """
    if tdms_file is None:
        files = glob(filepattern)
        # BUG FIX: Logger.warn is a deprecated alias — use warning().
        if len(files) > 1:
            l.warning("Provided file pattern %s did return multiple results."%filepattern)
        if len(files) < 1:
            l.warning("Provided file pattern %s did not match any files."%filepattern)
        l.info("Reading file %s"%files[0])
        tdms_file = TdmsFile(files[0])
    # Average the before/after field channels to get one field axis.
    x = average_channels(tdms_file, path_field_before, path_field_after, True)
    y = tdms_file.channel_data(*path_frequency)
    signal = tdms_file.channel_data(*path_real_signal) + 1j*tdms_file.channel_data(*path_imag_signal)
    return list(reshape(x, y, signal, field_points, freq_points)) + [tdms_file]
def load_data(path):
    """Load CO2 time series from the sample directories under *path*.

    Directory names encode the sample, e.g.
    '2020.07.30_14.23.02_2E2M_Control 3' -> sample '2E2M', type 'Control',
    rep '3'. Each loaded trace is baseline-corrected, stored in a nested
    dict sample -> type -> rep -> DataFrame, and also written to CSV.
    """
    # Handles nested dictionaries for us
    CSV_PATH, PLOT_PATH, RAW_CSV_DATA = output_directories(path)
    sample_dict = vivdict()
    directory_listing = [
        os.path.join(path, directory) for directory in os.listdir(path)
    ]
    # Exclude the output folders themselves.
    directory_listing = list(
        filter(
            lambda x: not re.findall(r"(CSV DATA)|(PLOT DATA)|(RAW CSV DATA)", x),
            directory_listing,
        )
    )
    for directory in directory_listing:
        """
        Example:
            directory == '2020.07.30_14.23.02_2E2M_Control 3'
            sample == 2E2M
            sample_type == Control
            sample_rep == 3
        """
        log.info(f"Loading data for -> {directory}")
        sample = re.split(r"[_\-]", directory)[-2]
        sample_type = " ".join(re.split(r"[_\-]", directory)[-1].split(" ")[0:-1])
        sample_rep = " ".join(re.split(r"[_\-]", directory)[-1].split(" ")[-1])
        # Get rid of Index file.
        # BUG FIX: the old pattern f"{directory}\*" used a literal backslash,
        # which only worked on Windows; build the pattern portably instead.
        file = list(
            filter(lambda x: not re.findall(r"index", x),
                   glob.glob(os.path.join(directory, "*")))
        )
        if len(file) == 1:
            file = file[0]
            try:
                tdms_file = TdmsFile(file)
                df = tdms_file.as_dataframe()
                df = df[
                    list(filter(lambda x: re.findall(r"Timestampe|CO2", x), df.columns))
                ]
                df.columns = ["CO2", "Time"]
                # Re-zero the time axis (with a 40 s offset) and subtract the
                # pre-start baseline from the CO2 signal.
                df["Time"] = df["Time"].sub(df["Time"].min()).sub(40)
                df.set_index("Time", inplace=True)
                initial_condition = df[:-30].mean()
                df = df.sub(initial_condition)
                sample_dict[sample][sample_type][sample_rep] = df
                df.to_csv(
                    os.path.join(RAW_CSV_DATA, f"{sample}_{sample_type}_{sample_rep}.csv"),
                    sep=",",
                    index=True,
                )
            # BUG FIX: `except [FileNotFoundError, OSError]` used a list,
            # which raises TypeError when an exception occurs — a tuple is
            # required. Also `log(...)` called the Logger object itself
            # (a TypeError); use log.error.
            except (FileNotFoundError, OSError) as e:
                log.error(f"{e}\t File: {file} \t Directory: {directory}")
        else:
            log.error(f"Have some files here I don't like {directory} // \n{file} \n-- Skipping this")
    return sample_dict
def __init__(self, filename):
    """Open *filename*, record its timestamps, parse it, and set epoch time.

    :param filename: path of the TDMS file to read
    """
    self.filename = filename
    # File creation/modification timestamps, kept for later bookkeeping.
    self.ctime = os.path.getctime(self.filename)
    self.mtime = os.path.getmtime(self.filename)
    self.file = TdmsFile(filename)
    # BUG FIX: readfile() was called twice — once unguarded and once inside
    # the try block, so a failure still crashed on the first call. A single
    # guarded call keeps the intended best-effort behavior.
    try:
        self.readfile()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        print('error')
    self.set_epoch_time()
def getData(filePath):
    """Collect the raw data array of every channel in every group.

    Returns a list of channel-data arrays in file order.
    """
    print("Start getData")
    tdms_file = TdmsFile(filePath)
    SPM = []
    for group in tdms_file.groups():
        SPM.extend(channel.data for channel in tdms_file.group_channels(group))
    return SPM
def tdms_info(tdmsName):
    """Return the group names of a TDMS file and, per group, its channels.

    :param tdmsName: path of the .tdms file
    :return: (group list, list of per-group channel lists)
    """
    tdms_file = TdmsFile(tdmsName)
    groupName = tdms_file.groups()
    # One channel list per group, in the same order as groupName.
    channelName = [tdms_file.group_channels(g) for g in groupName]
    return groupName, channelName
def velocity_calc(PDname, method='max'):
    """Compute projectile velocities from four photodiode traces.

    :param PDname: path of the TDMS file holding the photodiode data; every
        fourth column belongs to one of the four photodiodes
    :param method: 'max' uses the raw signal peak time, 'diff' uses the peak
        of the first difference
    :return: DataFrame with velocities V1-V3 (from diode pairs 1-2, 2-3, 3-4)
        and their measurement errors R1-R3
    """
    PDfile = TdmsFile(PDname)
    PDdata = PDfile.as_dataframe(time_index=True, absolute_time=False)
    # gets data for each photodiode (columns are interleaved by diode)
    PD1 = PDdata[PDdata.columns[0::4]]
    PD2 = PDdata[PDdata.columns[1::4]]
    PD3 = PDdata[PDdata.columns[2::4]]
    PD4 = PDdata[PDdata.columns[3::4]]
    del PDdata
    # Choose the method for the determination of the velocity
    if method == 'diff':
        D1 = PD1.diff()
        D2 = PD2.diff()
        D3 = PD3.diff()
        D4 = PD4.diff()
    elif method == 'max':
        D1 = PD1
        D2 = PD2
        D3 = PD3
        D4 = PD4
    else:
        # BUG FIX: typos in the user-facing message ("velicty", "reconized").
        sys.exit('The method you have chosen for the velocity calculation is'
                 + ' not recognized. Please select a different method and retry.')
    # finds the time point at which D# is at a max
    del PD1, PD2, PD3, PD4
    t1 = D1.idxmax()
    t2 = D2.idxmax()
    t3 = D3.idxmax()
    t4 = D4.idxmax()
    del D1, D2, D3, D4
    # lengths between photodiodes (metres)
    L1 = 0.127762
    L2 = 0.129337
    L3 = 0.130175
    # takes the difference in time values to get values for each velocity
    T1 = pd.Series(t2.values - t1.values)
    T2 = pd.Series(t3.values - t2.values)
    T3 = pd.Series(t4.values - t3.values)
    V1 = L1/T1.values
    V2 = L2/T2.values
    V3 = L3/T3.values
    # measurement error calculation (timing and spacing uncertainty terms)
    R1 = np.sqrt((-.5*(L1/T1.values**2)*1e-6)**2+(1/T1.values*0.003175)**2)
    R2 = np.sqrt((-.5*(L2/T2.values**2)*1e-6)**2+(1/T2.values*0.003175)**2)
    R3 = np.sqrt((-.5*(L3/T3.values**2)*1e-6)**2+(1/T3.values*0.003175)**2)
    vel_data = pd.DataFrame(np.transpose(
        np.vstack((V1, V2, V3, R1, R2, R3))))
    vel_data.columns = ['V1', 'V2', 'V3', 'R1', 'R2', 'R3']
    return vel_data
def copy_tdms(self, filePath, targetPath):
    """Copy every group and channel of *filePath* into a new TDMS file
    at *targetPath*, preserving the root properties.
    """
    source = TdmsFile(filePath)
    groups = source.groups()
    # Flatten all channels of all groups into one list.
    channels = [
        channel for group in groups for channel in group.channels()
    ]
    with TdmsWriter(targetPath) as writer:
        root = RootObject(source.properties)
        # A single segment holding the root object, the groups and
        # every channel reproduces the original file's contents.
        writer.write_segment([root] + groups + channels)