def convert_to_df(self, debug=False):
    '''Convert every .tdms file under ``self.path`` into a DataFrame of
    per-channel traces.

    Each row describes one channel: the 'data' column holds a two-column
    (x, y) DataFrame where x is the 'Reference'/'Ramp_Output' channel of
    the same file and y is the channel's own data.

    Input parameters:
    debug: a boolean. If True, the groups and channels found in each file
        are printed; if False no debug text is printed.

    Return Value:
    1. A pandas DataFrame with columns 'data', 'groupName',
       'channelName' and 'filename'.
    2. The number of channels (rows) as an integer.
    '''
    rows = []
    data_files = [x for x in os.listdir(self.path) if x.endswith(".tdms")]
    for filename in data_files:
        tdms_file = TdmsFile(os.path.join(self.path, filename))
        if debug:
            print("The following Groups and Channels are available:")
            for group in tdms_file.groups():
                print(group)
                for channel in tdms_file.group_channels(group):
                    print(channel)
        # Common x axis shared by every channel of this file.
        s1 = pd.Series(tdms_file.object('Reference', 'Ramp_Output').data)
        for group in tdms_file.groups():
            if str(group) == 'Reference':
                continue
            for channel in tdms_file.group_channels(group):
                channelName = TDMSConverter.get_channel_name(self, channel)
                if debug:
                    print(">>>", str(group), '--', channelName)
                s2 = pd.Series(tdms_file.object(str(group), channelName).data)
                df_data = pd.concat([s1, s2], axis=1)
                df_data.columns = ['x', 'y']
                rows.append(pd.DataFrame({
                    "data": [df_data],
                    "groupName": [str(group)],
                    "channelName": [channelName],
                    # os.path.join fixes the missing separator that the old
                    # 'self.path + filename' concatenation produced here.
                    "filename": [os.path.join(self.path, filename)]
                }))
    # DataFrame.append was removed in pandas 2.0; build the frame once.
    df = pd.concat(rows) if rows else pd.DataFrame()
    return df, df.shape[0]
def get_maximum_array_size(tdms_operator: "nptdms.TdmsFile") -> int:
    """Return the maximal array (channel) length saved in the TDMS file.

    Arguments:
        tdms_operator: Operator of the tdms file.

    Returns:
        Length of the longest channel across all groups; 0 when the file
        has no groups, or when no group contains any channel.
    """
    groups = tdms_operator.groups()
    if not groups:
        return 0
    # default=0 guards the case where every group is channel-less, which
    # previously raised ValueError (max() over an empty sequence).
    return max(
        (
            max(len(channel) for channel in group.channels())
            for group in groups
            if len(group.channels()) > 0
        ),
        default=0,
    )
def ocop2df(filepath, ):
    """Load the normalized-data group of an OCOP TDMS file as a DataFrame.

    The group whose name ends in '_Norm' becomes the frame body; the
    'Global' group supplies the wavelength index and the (MP Pos, Time)
    column MultiIndex. Returns an empty DataFrame when no '_Norm' group
    exists.
    """
    handle = TF(filepath)
    # Locate the group holding the normalized data.
    normdata_groupname = None
    for group in handle.groups():
        match = re.search("(.+_Norm)", group)
        if match is not None:
            normdata_groupname = match.groups()[0]
            break
    if normdata_groupname is None:
        print('could not find Norm group in ' + filepath)
        return pd.DataFrame()
    frame = handle.object(normdata_groupname).as_dataframe()
    frame.index = handle.object('Global', "Wavelength").data
    positions = handle.object('Global', 'MP Pos').data
    stamps = handle.object('Global', 'Time').data
    # NOTE(review): second level holds Time data but is labelled
    # 'Wavelength' — preserved as-is; confirm with downstream consumers.
    frame.columns = pd.MultiIndex.from_tuples(list(zip(positions, stamps)),
                                              names=['MP', 'Wavelength'])
    return frame
def read_names(path):
    """
    Read time series names from Technical Data Management Streaming (TDMS)
    file.

    Parameters
    ----------
    path: str
        Path (relative or absolute) to tdms-file

    Returns
    -------
    list
        List of time series names (datasets), as 'group\\channel' strings.
        Channels named 'time' (case-insensitive) are excluded.

    References
    ----------
    1. http://www.ni.com/product-documentation/3727/en/
    """
    if not os.path.isfile(path):
        raise FileNotFoundError("file not found: %s" % path)
    handle = TdmsFile(path)
    names = []
    for group in handle.groups():
        for channel in handle[group.name].channels():
            if channel.name.lower() == 'time':
                continue
            names.append("%s\\%s" % (group.name, channel.name))
    return names
def cut_log_spectra(fileinpaths, times, fileoutpaths_list, **kwargs):
    """Cut spectral log TDMS files into one output file per time interval.

    For each input file, each (start, end) pair in *times* produces one
    output file: non-'Global' groups are written sliced to the interval,
    while in the 'Global' group the 'Wavelength' channel is copied whole
    and the other channels are cut.

    Parameters:
        fileinpaths: input .tdms paths.
        times: list of (start, end) pairs.
        fileoutpaths_list: per-input list of output paths (same length
            as *times*).
    """
    for i, fileinpath in enumerate(fileinpaths):
        fileoutpaths = fileoutpaths_list[i]
        tdmsfile = TF(fileinpath)
        for j, t in enumerate(times):
            fileoutpath = fileoutpaths[j]
            direc = os.path.split(fileoutpath)[0]
            if not os.path.exists(direc):
                os.makedirs(direc)
            root_object = RootObject(properties={})
            try:
                with TdmsWriter(fileoutpath, mode='w') as tdms_writer:
                    # Interval indices come from the 'Global'/'Time' channel.
                    # (The original referenced an undefined 'timedate'.)
                    timedata = [
                        dt64(y)
                        for y in tdmsfile.channel_data('Global', 'Time')
                    ]
                    idx1, idx2 = _get_indextime(timedata, t[0], t[1])
                    # list.remove() mutates in place and returns None, so the
                    # original 'groups().remove("Global")' iterated None.
                    for group in tdmsfile.groups():
                        if group == 'Global':
                            continue
                        # Slice (not index) the channel list to the interval.
                        for channel in tdmsfile.group_channels(group)[idx1:idx2]:
                            tdms_writer.write_segment([root_object, channel])
                    for channel in tdmsfile.group_channels('Global'):
                        if channel.channel == 'Wavelength':
                            # wavelength axis is interval-independent
                            channel_object = channel
                        else:
                            channel_object = _cut_channel(channel, t[0], t[1],
                                                          timedata=None)
                        tdms_writer.write_segment(
                            [root_object, channel_object])
            except ValueError as error:
                print(error)
                print('removing the file at: \n', fileoutpath)
                os.remove(fileoutpath)
def f_open_tdms_2(filename):
    """Return the data of the first channel of the first group of a TDMS file.

    If *filename* is the literal string 'Input', an open-file dialog is
    shown so the user can pick the file interactively.
    """
    if filename == 'Input':
        filename = filedialog.askopenfilename()
    tdms_file = TdmsFile(filename)
    first_group = tdms_file.groups()[0]
    first_channel = tdms_file.group_channels(first_group)[0].channel
    data = tdms_file.channel_data(first_group, first_channel)
    return data
def readTDMS(path, acqNum, channelName='PXI1Slot7/ai0', tdms_file=None):
    """Read one acquisition's channel data from a TDMS file.

    Parameters:
        path: path to the .tdms file (only used when *tdms_file* is None).
        acqNum: acquisition number; its string form is the TDMS group name.
        channelName: channel name within the group.
        tdms_file: optionally pass an already opened TdmsFile so the file
            is not re-parsed on every call.

    Returns:
        The channel data as a float numpy array.
    """
    if tdms_file is None:  # 'is None', not '== None'
        # Load the file
        tdms_file = TdmsFile(path)
    # Extract the channel for this acquisition
    channel = tdms_file.object(str(acqNum), channelName)
    # np.float was removed in NumPy 1.20+; builtin float is equivalent.
    data = channel.data.astype(float)
    # Read waveform metadata (kept: also validates the channel has it).
    timeStep = channel.property('wf_increment')
    numSamples = channel.property('wf_samples')
    return data
def import_tdmsfile_to_tempodb(file_path, series_key_base=None):
    """Walk a TDMS file and import every channel into TempoDB (Python 2).

    Prints the object tree (and optionally properties/data/time) via the
    display helpers, then imports each channel under a series key derived
    from *series_key_base* (plus a running counter) or, when no base is
    given, from the channel's TDMS path.
    """
    # Parse the TDMS file and get a handle to the object
    tdmsfile = TdmsFile(file_path)
    # Logging options
    show_properties = True
    show_data = False
    show_time = False
    import_data = True
    count = 0
    level = 0
    root = tdmsfile.object()
    display('/', level)
    if show_properties:
        display_properties(root, level)
    for group in tdmsfile.groups():
        level = 1
        group_obj = tdmsfile.object(group)
        display("%s" % group_obj.path, level)
        if show_properties:
            display_properties(group_obj, level)
        for channel in tdmsfile.group_channels(group):
            level = 2
            display("%s" % channel.path, level)
            if show_properties:
                level = 3
                display("data type: %s" % channel.data_type.name, level)
                display_properties(channel, level)
            if show_data:
                level = 3
                data = channel.data
                display("data: %s" % data, level)
            if show_time:
                level = 3
                time = channel.time_track()
                display("time: %s" % time, level)
            if import_data:
                level = 3
                try:
                    if series_key_base:
                        # e.g. "Paul-Python-TDMS-1"
                        series_key = "%s-%i" % (series_key_base, count)
                        count += 1
                    else:
                        # fall back to the channel's own TDMS path as the key
                        series_key = channel.path
                    import_channel_to_tempodb(channel, series_key)
                except KeyError as ke:
                    # time_track() raises KeyError without waveform metadata
                    display("There is no embedded time data in this channel.", level)
                    print ke
                    print
                    print
def buildDirectories(sourceTDMSDirectory, tdmsFiles, taskName, mode="csv"):
    """Create one output folder per TDMS group (and, in HDF5 mode, one
    pre-created .hdf5 file per group).

    Parameters:
        sourceTDMSDirectory: Path to the folder holding the TDMS files.
        tdmsFiles: list of TDMS file names; the first is used to discover
            the group (part) names.
        taskName: name of the sibling output folder to create.
        mode: "csv" (folders only) or "HDF5" (also pre-create .hdf5 files).

    Returns:
        dict mapping group name -> output folder (or .hdf5 file) path.
    """
    directoryBuildTime = time.time()
    # TdmsFile only takes a string, but Path concatenation keeps this
    # portable across Linux and Windows, so build the path then str() it.
    firstObject = TdmsFile(str(Path(str(sourceTDMSDirectory)) / Path(str(tdmsFiles[0]))))
    parentDir = sourceTDMSDirectory.parent
    newFolder = parentDir / str(taskName)
    newFolder.mkdir(exist_ok=True, parents=True)
    # One subfolder for every group found in the TDMS file.
    folderDictionary = dict()
    for part in firstObject.groups():
        print(f'part: {part}')
        partName = part.name
        print(f'partName: {part.name}')
        newDir = newFolder / partName
        newDir.mkdir(exist_ok=True, parents=True)
        folderDictionary[str(part.name)] = str(newDir)
        # In HDF5 mode the file is built now and filled with datasets later,
        # one layer at a time.
        # BUG FIX: 'mode is "HDF5"' compared identity of string objects,
        # which is interpreter-dependent; '==' compares the values.
        if mode == "HDF5":
            destination = newDir / str(str(partName) + ".hdf5")
            print(str(destination))
            # chdir so the file is created in the target folder rather than
            # the script's working directory.
            cwd = os.getcwd()
            os.chdir(newDir)
            hf = h5py.File(str(partName) + ".hdf5", 'w')
            hf.close()
            os.chdir(cwd)
            folderDictionary[str(part.name)] = str(destination)
    directoryBuildTime = time.time() - directoryBuildTime
    print("Time to build directories was " + str(directoryBuildTime) + " seconds.\n")
    return folderDictionary
def read_tdms(fn):
    """Load a TDMS recording into a plain dict.

    Maps each group name to the list of its channels' data arrays and adds
    the keys 'dt' (sampling interval), 'yunits' (from the file's 'Units'
    property) and 'holding' (the 'Meta' channels, or '' when absent).
    """
    tdms_file = TdmsFile(fn)
    try:
        # Preferred: derive dt from an explicit "time" group.
        time_groups = [
            [ch.data for ch in tdms_file.group_channels(group)
             if ch.data is not None]
            for group in tdms_file.groups() if group.lower() == "time"
        ]
        times = np.array(time_groups[0][0])
        dt = np.mean(np.diff(times))
    except IndexError:
        # No time group: fall back to the sampling-rate file properties,
        # 'Sampling Rate' taking precedence over 'Sampling Rate(AI)'.
        props = tdms_file.object().properties
        if "Sampling Rate" in props.keys():
            sr = float(props['Sampling Rate'])
            dt = 1e3 / sr if sr > 0 else 1.0 / 25.0
        elif "Sampling Rate(AI)" in props.keys():
            sr = float(props['Sampling Rate(AI)'])
            dt = 1e3 / sr if sr > 0 else 1.0 / 25.0
        else:
            dt = 1.0
    yunits = tdms_file.object().properties['Units']
    try:
        meta = tdms_file.group_channels('Meta')
    except:  # preserved: any failure here just means "no metadata"
        meta = ''
    recording = {}
    for group in tdms_file.groups():
        recording[group] = [
            ch.data for ch in tdms_file.group_channels(group)
            if ch.data is not None
        ]
    recording["dt"] = dt
    recording["yunits"] = yunits
    recording["holding"] = meta
    return recording
def plot(self):
    """Plot the first seven TDMS channels, one per widget canvas.

    Reads every channel of "t1.tdms" (channels of each group in reversed
    order), then draws channel i on self.canvas<i+1> for i in 0..6.
    """
    tdms_file = TdmsFile("t1.tdms")
    data_array = []
    for grp in tdms_file.groups():
        for ch in reversed(tdms_file.group_channels(grp)):
            # Recover group/channel names from the channel repr:
            # "<TdmsObject with path /'group'/'channel'>".
            temp = str(ch).split('\'')
            temp_obj = tdms_file.object(temp[1], temp[3])
            data_array.append(temp_obj.data)
    data_array = np.asarray(data_array)
    # The seven identical ax1..ax7 stanzas collapse into one loop;
    # canvases are attributes named canvas1..canvas7.
    for idx in range(7):
        ax = self.figure.add_subplot()
        ax.clear()
        ax.plot(data_array[idx])
        getattr(self, "canvas%d" % (idx + 1)).draw()
def getData(filePath):
    """Collect every channel's raw data array from the TDMS file at *filePath*.

    Returns a flat list of arrays, in group/channel iteration order.
    """
    print("Start getData")
    SPM = []
    tdms_file = TdmsFile(filePath)
    channel_arrays = [
        channel.data
        for group in tdms_file.groups()
        for channel in tdms_file.group_channels(group)
    ]
    SPM.extend(channel_arrays)
    return SPM
def copy_tdms(self, filePath, targetPath):
    """Copy a TDMS file wholesale: root properties, all groups, all channels."""
    source = TdmsFile(filePath)
    groups = source.groups()
    channels = []
    for group in groups:
        channels.extend(group.channels())
    with TdmsWriter(targetPath) as writer:
        root = RootObject(source.properties)
        writer.write_segment([root] + groups + channels)
def check_for_same_length(tdms_operator: "nptdms.TdmsFile") -> "either.Either":
    """Checks whether all relevant (non-empty) channels of the Tdms file
    have the same length.

    Returns:
        either.Right(tdms_operator) when all non-empty channels share one
        length; either.Left(ErrorCode.LENGTHERROR) otherwise (including
        when there are no non-empty channels at all).
    """
    # The original built a ragged list-of-lists and relied on
    # np.array(...).flatten(), which does NOT flatten ragged input (it
    # yields an object array, and raises on NumPy >= 1.24) — precisely in
    # the unequal-length case this function is meant to detect. A flat set
    # comprehension sidesteps that entirely.
    lengths = {
        len(channel)
        for group in tdms_operator.groups()
        for channel in group.channels()
        if len(channel) > 0
    }
    all_lengths_equal = len(lengths) == 1
    if not all_lengths_equal:
        return either.Left(ErrorCode.LENGTHERROR)
    return either.Right(tdms_operator)
def tdms_info(tdmsName):
    """Return all group names and, per group, the list of channel objects."""
    tdms_file = TdmsFile(tdmsName)
    # Collect every group name
    groupName = tdms_file.groups()
    # Collect the channels of each group, in the same order
    channelName = [tdms_file.group_channels(g) for g in groupName]
    return groupName, channelName
def ocop2df(filepath, ):
    """Load the third TDMS group as a DataFrame indexed by wavelength.

    Columns form a (MP Pos, Time) MultiIndex taken from the 'Global' group.
    """
    handle = TF(filepath)
    frame = handle.object(handle.groups()[2]).as_dataframe()
    frame.index = handle.object('Global', "Wavelength").data
    positions = handle.object('Global', 'MP Pos').data
    stamps = handle.object('Global', 'Time').data
    # NOTE(review): second level holds Time data but is labelled
    # 'Wavelength' — preserved as-is; confirm with downstream consumers.
    frame.columns = pd.MultiIndex.from_tuples(list(zip(positions, stamps)),
                                              names=['MP', 'Wavelength'])
    return frame
def cut_log_spectra(fileinpaths, times, fileoutpaths_list, **kwargs):
    """Cut spectral log TDMS files into one output file per time interval.

    For each input file and each (start, end) pair in *times*, writes an
    output file in which 'Global' channels (except the interval-independent
    'Wavelength') are sliced to the interval, and the other groups'
    channels are written from the sliced channel list. Output files that
    fail with ValueError are deleted again.
    """
    for i, fileinpath in enumerate(fileinpaths):
        fileoutpaths = fileoutpaths_list[i]
        tdmsfile = TF(fileinpath)
        for j, t in enumerate(times):
            fileoutpath = fileoutpaths[j]
            direc = os.path.split(fileoutpath)[0]
            if not os.path.exists(direc):
                os.makedirs(direc)
            root_object = RootObject(properties={})
            try:
                with TdmsWriter(fileoutpath, mode='w') as tdms_writer:
                    # Interval indices derive from the 'Global'/'Time' channel.
                    timedata = [
                        dt64(y) for y in tdmsfile.channel_data('Global', 'Time')
                    ]
                    idx1, idx2 = _get_indextime(timedata, t[0], t[1])
                    if idx1 == idx2:
                        pass  # empty interval: write nothing
                    else:
                        for group in tdmsfile.groups():
                            group_object = GroupObject(group, properties={})
                            if group == "Global":
                                for channel in tdmsfile.group_channels(group):
                                    if channel.channel == 'Wavelength':
                                        # wavelength axis is interval-independent
                                        channel_object = ChannelObject(
                                            channel.group, channel.channel,
                                            channel.data)
                                    else:
                                        channel_object = ChannelObject(
                                            channel.group, channel.channel,
                                            channel.data[idx1:idx2])
                                    tdms_writer.write_segment([
                                        root_object, group_object,
                                        channel_object
                                    ])
                            else:
                                # NOTE(review): this slices the *list of
                                # channels*, not each channel's data — confirm
                                # that is intended for spectra groups.
                                for channel_object in tdmsfile.group_channels(
                                        group)[idx1:idx2]:
                                    tdms_writer.write_segment([
                                        root_object, group_object,
                                        channel_object
                                    ])
            except ValueError as error:
                print(error)
                print('removing the file at: \n', fileoutpath)
                os.remove(fileoutpath)
def _parseFile(self):
    """Parse self.filename into self.time / self.data / self.channels.

    Supports two formats (Python 2 code — uses itertools.izip):
    - .tdms: one dict entry per group; channel names come from the
      'NI_ChannelName' property, values are time_track()/data arrays.
    - .txt (Mach-1 export): <INFO>/<DATA> delimited blocks; each data
      block becomes a numbered group whose first column is time.
    """
    if self.filename.lower().endswith('.tdms'):
        tdms = TdmsFile(self.filename)
        self.time = {}
        self.data = {}
        self.groups = tdms.groups()
        self.channels = {}
        for g in self.groups:
            self.time[g] = {}
            self.data[g] = {}
            self.channels[g] = tdms.group_channels(g)
            for c in self.channels[g]:
                if c.has_data:
                    props = c.properties
                    self.time[g][props["NI_ChannelName"]] = c.time_track()
                    self.data[g][props["NI_ChannelName"]] = c.data
    elif self.filename.lower().endswith('.txt'):
        fid = open(self.filename, "r")
        if "<Mach-1 File>" in fid.readline():
            contents = fid.readlines()
            fid.close()
            self.time = OrderedDict()
            self.data = OrderedDict()
            self.channels = OrderedDict()
            # Indices of the <INFO>/<END INFO> marker lines, paired up
            # (start, end) via the stride-2 islice trick.
            info_blocks = [
                i for i, j in izip(count(), contents)
                if "<INFO>" in j or "<END INFO>" in j
            ]
            info_blocks = izip(islice(info_blocks, 0, None, 2),
                               islice(info_blocks, 1, None, 2))
            # Same pairing for the <DATA>/<END DATA> markers.
            data_blocks = [
                i for i, j in izip(count(), contents)
                if "<DATA>" in j or "<END DATA>" in j
            ]
            data_blocks = izip(islice(data_blocks, 0, None, 2),
                               islice(data_blocks, 1, None, 2))
            # One numbered group per info block (consumes info_blocks).
            self.groups = range(1, len(list(info_blocks)) + 1)
            for i, ind in enumerate(data_blocks):
                g = self.groups[i]
                # Line after <DATA> is the tab-separated channel header.
                header = contents[ind[0] + 1].rstrip("\r\n").split("\t")
                self.channels[g] = header
                data = contents[ind[0] + 2:ind[1]]
                for j, d in enumerate(data):
                    data[j] = d.rstrip("\r\n").split("\t")
                data = np.array(data, float)
                self.time[g] = OrderedDict()
                self.data[g] = OrderedDict()
                # Column 0 is time; remaining columns map to channels.
                for j, c in enumerate(self.channels[g][1:]):
                    self.time[g][c] = data[:, 0]
                    self.data[g][c] = data[:, j + 1]
def read_tdms(fn):
    """Read a two-channel (current, voltage) waveform from a TDMS file.

    Returns:
        data: array stacking (time, current * 500, voltage).
        sam: the 'wf_samples' property of the current channel.
    """
    tdms_file = TdmsFile(fn)
    groups = tdms_file.groups()
    chans = tdms_file.group_channels(groups[0])
    current_chan = tdms_file.object(groups[0], chans[0].channel)
    voltage_chan = tdms_file.object(groups[0], chans[1].channel)
    sam = current_chan.property('wf_samples')
    time = current_chan.time_track()
    # factor 500 scales the measured signal to actual current
    data = np.array((time, current_chan.data * 500, voltage_chan.data))
    return data, sam
def tdms_to_nparr(tdms_name, time_req=False):
    """Load the first channel of a TDMS file as a float32 numpy array.

    Parameters:
        tdms_name: path to the .tdms file.
        time_req: when truthy, also return the channel's time track.

    Returns:
        data, or (data, time) when *time_req* is set.
    """
    tdms_file = TdmsFile(tdms_name)
    group_name = tdms_file.groups()
    channel_name = tdms_file.group_channels(group_name[0])
    channel = tdms_file.object(group_name[0], channel_name[0].channel)
    data = channel.data.astype(np.float32)
    # plain truthiness instead of the '== True' anti-idiom
    if time_req:
        time = channel.time_track()
        return data, time
    return data
def tdms_extract(entry):
    """Export every group of a .tdms file to CSV under C:/Niagara/UEF/<group>/.

    Timestamps of the 'Events' and 'Channels' groups are rebased to start
    at zero. Non-.tdms entries are ignored; failures are reported but do
    not raise (best-effort batch processing).
    """
    try:
        if entry.endswith('.tdms'):
            tdms_file = TdmsFile(entry)
            for group in tdms_file.groups():
                data = tdms_file.object(group).as_dataframe()
                if group == 'Events':
                    data["TimeStamp"] = data["TimeStamp"].astype(float).values - float(data["TimeStamp"].values[0])
                if group == 'Channels':
                    data["TimeStamp (sec)"] = data["TimeStamp (sec)"].values - data["TimeStamp (sec)"].values[0]
                if not os.path.exists('C:/Niagara/UEF/' + group):
                    os.makedirs('C:/Niagara/UEF/' + group)
                # last path component without the '.tdms' suffix
                name = entry.split('\\')[-1]
                data.to_csv('C:/Niagara/UEF/' + group + "/" + name[:-5] + ".csv")
    except Exception as e:
        # Previously 'pass' — failures vanished silently. Still best-effort,
        # but now the failing entry is at least reported.
        print('tdms_extract failed for', entry, ':', e)
def f_open_tdms_old(filename, channel):
    """Return the data of *channel* from the first group of a TDMS file.

    If *filename* is the literal string 'Input', an open-file dialog is
    shown so the user can pick the file interactively.
    """
    if filename == 'Input':
        filename = filedialog.askopenfilename()
    handle = TdmsFile(filename)
    first_group = handle.groups()[0]
    data = handle.channel_data(first_group, channel)
    return data
def convert_tdms(fileName, tempo, env):
    """Convert <env.path>/<fileName>.tdms into an HDF5 file (Python 2 code).

    Each TDMS group becomes an HDF5 group with Nchannel/Tstart/sampling
    attributes (source-dependent), each channel a gzip-compressed dataset.
    An empty 'Process' group is appended. Returns 1 on completion.

    Parameters:
        fileName: base name of the .tdms file (no extension).
        tempo: when truthy, sleep 20 s first (lets the writer finish).
        env: environment object providing the source directory in .path.
    """
    if tempo:
        time.sleep(20)
    path = env.path
    tdms_file = TdmsFile(os.path.join(path, fileName + '.tdms'))
    # NOTE(review): envG looks like a module-level global holding the HDF5
    # output directory — confirm it is defined/imported in this module.
    hdf5 = h5py.File(envG.H5path + fileName + '.h5', 'w')
    grouplist = tdms_file.groups()
    for i in grouplist:
        group = tdms_file.object(i)
        grouph = hdf5.create_group(i)
        print group.path
        # Per-source acquisition metadata (channel count, start, sample time)
        if group.path == '/\'PXI M6251\'':
            nbchannels = group.properties['Nchannel']
            tstart = group.properties['Tstart']
            sampling = group.properties['SampleTime']
        if group.path == '/\'Tektronix\'':
            tstart = group.properties['Tstart']
            # scope sample time fixed at 1.25 GS/s
            sampling = 1 / 1.25e9
            nbchannels = group.properties['Nchannel']
        if group.path == '/\'S7\'':
            nbchannels = group.properties['Nchannel']
            tstart = 0.
            sampling = 1.
        grouph.attrs['Nchannel'] = nbchannels
        grouph.attrs['Tstart'] = tstart
        # stored as a rate (1/sample time)
        grouph.attrs['sampling'] = 1 / float(sampling)
        liste = tdms_file.group_channels(i)
        for j in liste:
            # strip quotes from the TDMS path to get a clean dataset name
            grouph.create_dataset(re.sub('[\']', '', j.path), data=j.data, compression="gzip")
    hdf5.create_group('Process')
    hdf5.close()
    return 1
def tdms_to_json(directory, target_directory, name):
    '''
    Parses the TDMS file name located in the directory to json file named
    like the tdms file: one JSON file per channel, each containing
    {sanitized_channel_name: data_list}.
    '''
    print("Parsing: " + directory + name + ".tdms")
    # load in data, take the TDMS data type as example
    tdms_file = TdmsFile(directory + name + ".tdms")
    groups = tdms_file.groups()
    df_list = []
    df_list.append(pd.DataFrame())
    for group in groups:
        print(group)
        for channel in tdms_file.group_channels(group):
            try:
                # channel repr looks like "<TdmsObject ... '/'name'>"
                chan_name = str(channel).split("'/'")[1].replace("'>", "")
                data = tdms_file.channel_data(group, chan_name)
                # sanitize the name for use in file names / JSON keys
                # NOTE(review): the first space replacement maps " " to " "
                # (a no-op) — possibly a mangled "  " -> " "; confirm intent.
                chan_name = chan_name.replace("(", " ").replace(
                    ")", " ").replace(" ", " ").replace(" ", "_")
                df_test = pd.DataFrame()
                # NOTE(review): these assign the channel NAME, not `data`,
                # into the frames — looks like leftover debugging; the JSON
                # dump below does write the real data. Confirm before
                # relying on df_list/df_test.
                df_test[chan_name] = chan_name
                df_list[-1][chan_name] = chan_name
                with open(
                        target_directory + chan_name + '_' + name + ".json",
                        'w') as fp:
                    json.dump({chan_name: data.tolist()}, fp)
            except:
                # best-effort: skip channels that fail to parse/export
                print("An Error Occured at: X !")
                continue
def readTDMS(path, Source=None):
    """Read every channel of a TDMS file into a list of Signal objects.

    The measurement start timestamp is parsed (as UTC) from the last 15
    meaningful characters of the file name ("%Y%m%d_%H%M%S"). Zero-length
    signals are dropped by the final quality check.

    Parameters:
        path: path to the .tdms file, ending in <...>YYYYmmdd_HHMMSS.tdms.
        Source: optional source label; derived via getSource() when None.

    Returns:
        List of Signal objects (empty when the file cannot be opened).
    """
    from nptdms import TdmsFile
    from datetime import datetime
    from os import sep
    import pytz
    try:
        tdms_file = TdmsFile(path)
    except Exception:  # narrowed from a bare 'except:'
        logger.warning("Failed to open {}".format(path))
        Signals = []
        return Signals
    if Source is None:  # 'is None', not '== None'
        Source = getSource(tdms_file, path)
    Groups = tdms_file.groups()
    Signals = []
    for i in range(0, len(Groups)):
        Channel_lst = tdms_file.group_channels(Groups[i])
        for j in range(0, len(Channel_lst)):
            # channel repr ends in .../'name'> — strip the quoting
            Name = str(Channel_lst[j]).split("/")[2][1:-2]
            if "unit_string" in Channel_lst[j].properties:
                Unit = Channel_lst[j].properties["unit_string"]
            else:
                Unit = ""
            Data = tdms_file.object(Groups[i], Name)
            Fs = 1 / Channel_lst[j].properties["wf_increment"]
            timestampstr = path[-20:-5]  # timestamp is start of measurement
            timestamp = datetime.strptime(timestampstr, "%Y%m%d_%H%M%S")
            timestamp = pytz.utc.localize(timestamp)
            Signals.append(
                Signal(Source, Groups[i], Fs, Data.data, Name, Unit, timestamp)
            )
    # %% Quality check of Signals: drop zero-length ones.
    # BUG FIX: the original removed items from Signals while iterating it,
    # which skips the element after every removal.
    Signals = [signal for signal in Signals if signal.data.shape[0] != 0]
    return Signals
def convert_tdms(fileName,tempo,env):
    """Convert <env.path>/<fileName>.tdms into an HDF5 file alongside the
    source (Python 2 code).

    Variant of convert_tdms that writes the .h5 next to the .tdms and
    registers the file via env.process.addFile() instead of returning 1.
    Each TDMS group becomes an HDF5 group with Nchannel/Tstart/sampling
    attributes; each channel becomes a gzip-compressed dataset; an empty
    'Process' group is appended.
    """
    if tempo:
        time.sleep(20)
    path=env.path
    tdms_file=TdmsFile(os.path.join(path,fileName+'.tdms'))
    hdf5=h5py.File(path+os.sep+fileName+'.h5','w')
    grouplist=tdms_file.groups()
    for i in grouplist:
        group=tdms_file.object(i)
        grouph=hdf5.create_group(i)
        print group.path
        # Per-source acquisition metadata (channel count, start, sample time)
        if group.path=='/\'PXI M6251\'':
            nbchannels=group.properties['Nchannel']
            tstart=group.properties['Tstart']
            sampling=group.properties['SampleTime']
        if group.path=='/\'Tektronix\'':
            tstart=group.properties['Tstart']
            # scope sample time fixed at 1.25 GS/s
            sampling=1/1.25e9
            nbchannels=group.properties['Nchannel']
        if group.path=='/\'S7\'':
            nbchannels=group.properties['Nchannel']
            tstart=0.
            sampling=1.
        grouph.attrs['Nchannel']=nbchannels
        grouph.attrs['Tstart']=tstart
        # stored as a rate (1/sample time)
        grouph.attrs['sampling']=1/float(sampling)
        liste=tdms_file.group_channels(i)
        for j in liste:
            # strip quotes from the TDMS path to get a clean dataset name
            grouph.create_dataset(re.sub('[\']','',j.path),data=j.data,compression="gzip")
    hdf5.create_group('Process')
    hdf5.close()
    env.process.addFile(fileName)
def cut_powermeter(fileinpaths, times, fileoutpaths_list, **kwargs):
    """Cut up a power meter tdms file based on input times.

    For each input file and each (start, end) numpy-datetime64 pair in
    *times*, writes an output file whose numeric channels are cut to the
    interval using each group's 'Time_LV' channel; the 'Time_LV' channel
    itself is cut separately. Output files failing with ValueError are
    deleted again.
    """
    localtz = tzlocal.get_localzone()  # NOTE(review): unused below — confirm
    for i in range(len(fileinpaths)):
        fileinpath = fileinpaths[i]
        fileoutpaths = fileoutpaths_list[i]
        tdmsfile = TF(fileinpath)
        for j in range(len(times)):
            # numpy datetime64 -> aware python datetime in UTC
            time1 = times[j][0].astype('O')
            time1 = time1.replace(tzinfo=pytz.utc)  #convert to datetime
            time2 = times[j][1].astype('O')
            time2 = time2.replace(tzinfo=pytz.utc)
            fileoutpath = fileoutpaths[j]
            direc = os.path.split(fileoutpath)[0]
            if not os.path.exists(direc):
                os.makedirs(direc)
            root_object = RootObject(properties={
                #TODO root properties
            })
            try:
                with TdmsWriter(fileoutpath, mode='w') as tdms_writer:
                    for group in tdmsfile.groups():
                        timedata = tdmsfile.channel_data(group, 'Time_LV')
                        for channel in tdmsfile.group_channels(group):
                            if type(channel.data_type.size) == type(None):
                                break  #skips over non numeric channels
                            channel_object = _cut_channel(channel, time1,
                                                          time2,
                                                          timedata=timedata)
                            tdms_writer.write_segment(
                                [root_object, channel_object])
                        # the time channel needs datetime-aware cutting
                        timechannel = tdmsfile.object(group, 'Time_LV')
                        timechannel_cut = _cut_datetime_channel(
                            timechannel, time1, time2)
                        tdms_writer.write_segment(
                            [root_object, timechannel_cut])
            except ValueError as error:
                print(error)
                print('removing the file at: \n', fileoutpath)
                os.remove(fileoutpath)
def _parseFile(self):
    """Parse self.filename into self.time / self.data / self.channels.

    Duplicate of the earlier _parseFile (Python 2 — uses itertools.izip):
    - .tdms: one dict entry per group; channel names come from the
      'NI_ChannelName' property, values are time_track()/data arrays.
    - .txt (Mach-1 export): <INFO>/<DATA> delimited blocks; each data
      block becomes a numbered group whose first column is time.
    """
    if self.filename.lower().endswith('.tdms'):
        tdms = TdmsFile(self.filename)
        self.time = {}
        self.data = {}
        self.groups = tdms.groups()
        self.channels = {}
        for g in self.groups:
            self.time[g] = {}
            self.data[g] = {}
            self.channels[g] = tdms.group_channels(g)
            for c in self.channels[g]:
                if c.has_data:
                    props = c.properties
                    self.time[g][props["NI_ChannelName"]] = c.time_track()
                    self.data[g][props["NI_ChannelName"]] = c.data
    elif self.filename.lower().endswith('.txt'):
        fid = open(self.filename, "r")
        if "<Mach-1 File>" in fid.readline():
            contents = fid.readlines()
            fid.close()
            self.time = OrderedDict()
            self.data = OrderedDict()
            self.channels = OrderedDict()
            # Indices of <INFO>/<END INFO> marker lines, paired (start, end)
            # via the stride-2 islice trick.
            info_blocks = [i for i, j in izip(count(), contents)
                           if "<INFO>" in j or "<END INFO>" in j]
            info_blocks = izip(islice(info_blocks, 0, None, 2),
                               islice(info_blocks, 1, None, 2))
            # Same pairing for the <DATA>/<END DATA> markers.
            data_blocks = [i for i, j in izip(count(), contents)
                           if "<DATA>" in j or "<END DATA>" in j]
            data_blocks = izip(islice(data_blocks, 0, None, 2),
                               islice(data_blocks, 1, None, 2))
            # One numbered group per info block (consumes info_blocks).
            self.groups = range(1, len(list(info_blocks))+1)
            for i, ind in enumerate(data_blocks):
                g = self.groups[i]
                # Line after <DATA> is the tab-separated channel header.
                header = contents[ind[0]+1].rstrip("\r\n").split("\t")
                self.channels[g] = header
                data = contents[ind[0]+2:ind[1]]
                for j, d in enumerate(data):
                    data[j] = d.rstrip("\r\n").split("\t")
                data = np.array(data, float)
                self.time[g] = OrderedDict()
                self.data[g] = OrderedDict()
                # Column 0 is time; remaining columns map to channels.
                for j, c in enumerate(self.channels[g][1:]):
                    self.time[g][c] = data[:, 0]
                    self.data[g][c] = data[:, j+1]
def parameterSearch(self, TDMSPath):
    """Load sweep settings from config.txt and windowed traces from every
    .tdms file in TDMSPath, then publish both via the InputData class.

    Each file contributes a NUM_POINTS-long window starting JUMP_BACK
    samples before its peak (or at sample 0 if the peak is too early).
    """
    config = ConfigParser.ConfigParser()
    config.read(u'config.txt')
    # This section reads the needed information from the config file
    startFreq = config.getint(u'Frequencies', u'Start Frequency')
    stopFreq = config.getint(u'Frequencies', u'Stop Frequency')
    stepFreq = config.getint(u'Frequencies', u'Step Frequency')
    NUM_POINTS = config.getint(u'Symbolic Constants', u'Num Points')
    JUMP_BACK = config.getint(u'Symbolic Constants', u'Jump Back')
    TDMS_Time = []
    TDMS_Data = []
    TDMSfiles = os.listdir(TDMSPath)
    TDMSfiles = [file for file in TDMSfiles if file.endswith(u'.tdms')]
    # Loop through all TDMS files in order to get the data within each
    for file in TDMSfiles:
        path = TDMSPath + u'/' + file
        TDMS = TdmsFile(path)  # reads the specific TDMS file
        group = TDMS.groups()[0]
        channel = TDMS.object(group, u'Dev1/ai0')  # returns a channel type
        data = channel.data
        time = channel.time_track()
        # Determining starting point to read file:
        # if highest point is more than JUMP_BACK points into the data,
        # start there; otherwise start at the beginning of the file.
        if numpy.argmax(data) > JUMP_BACK:
            start = numpy.argmax(data) - JUMP_BACK
        else:
            start = 0
        t = time[start:start + NUM_POINTS]  # time window from start point
        s = data[start:start + NUM_POINTS]  # data window from start point
        TDMS_Time.append(t)  # accumulate across all TDMS files
        TDMS_Data.append(s)
    # Now that the data has all been found it can be set using InputData
    InputData.Set_Start_Freq(startFreq)
    InputData.Set_Stop_Freq(stopFreq)
    InputData.Set_Step_Freq(stepFreq)
    InputData.Set_TDMS_Time(TDMS_Time)
    InputData.Set_TDMS_Data(TDMS_Data)
def testTdmsFile(): tdms = TdmsFile( "/Volumes/RAID-0/LockheedMartin/TDMS_200120_12-40_2020-01-20 ATRQ Build 2/Slice00122.tdms" ) # tdms.as_hdf('/tmp/Slice00122.h5') properties = tdms.properties for property in properties: print(f'PROPERTY: {property} = {properties[property]}') objects = tdms.objects for obj in objects: print(f'OBJECT: {obj}') groups = tdms.groups() for part in groups: print(f'GROUP: {part}') # get the data from each group's channel and make a CSV channels = tdms.group_channels(part) # make a 2D array, and populate it with the arrays in this loop. groupCSV = [] areaCol = [] xCol = [] yCol = [] paramCol = [] intensityCol = [] laserCol = [] csvCount = 0 # copy each channel's data to its respective frame for channel in channels: print(f' CHANNEL: {channel}') names = [] for i in channels: wordList = str(i).split("/") name = wordList[-1] name = name.strip(">") name = name.strip("'") names.append(name) colNames = names
def cut_log_file(fileinpaths, times, fileoutpaths_list, **kwargs):
    """
    Cuts up a log file based on the supplied times.

    This function assumes that the channels are waveforms: every channel
    of every group is cut to each (start, end) pair in *times* and written
    to the matching output path. Outputs failing with ValueError are
    deleted again.
    """
    for i, fileinpath in enumerate(fileinpaths):
        fileoutpaths = fileoutpaths_list[i]
        tdmsfile = TF(fileinpath)
        for j in range(len(times)):
            start, end = times[j][0], times[j][1]
            fileoutpath = fileoutpaths[j]
            out_dir = os.path.split(fileoutpath)[0]
            if not os.path.exists(out_dir):
                os.makedirs(out_dir)
            root_object = RootObject(properties={
                #TODO root properties
            })
            try:
                with TdmsWriter(fileoutpath, mode='w') as tdms_writer:
                    for group in tdmsfile.groups():
                        for channel in tdmsfile.group_channels(group):
                            cut = _cut_channel(channel, start, end,
                                               timedata=None)
                            tdms_writer.write_segment([root_object, cut])
            except ValueError as error:
                print(error)
                print('removing the file at: \n', fileoutpath)
                os.remove(fileoutpath)
def _getChannelInfo(self) -> "list[list[object]]":
    """Collect [name, type-number, unit, unit-description] for every
    channel of the TDMS file at self._path.

    Unit metadata comes from the channel's 'NI_UnitDescription' property,
    cross-referenced against units.json; unknown units map to type 0 and
    "unknown". Returns [[]] when the file does not exist.
    """
    # Context manager guarantees units.json is closed even on error
    # (the original leaked the handle if json.load raised).
    with open("units.json", 'r') as f:
        units = json.load(f)
    fileInfo = QFileInfo(self._path)
    if not fileInfo.exists():
        return [[]]
    tdms = TdmsFile(self._path)
    group = tdms.groups()[0]  # There is only one data group in TdmsFile
    chnObjs = tdms.group_channels(group)
    chnInfos = []
    for chnObj in chnObjs:
        properties = chnObj.properties
        chnName = chnObj.channel
        try:
            chnUnit = properties['NI_UnitDescription']
        except KeyError:
            chnUnit = "unknown"
        try:
            chnType = units[chnUnit]["number"]
            chnUnitDesc = units[chnUnit]["unit_desc"]
        except KeyError:
            chnType = 0
            chnUnitDesc = "unknown"
        chnInfos.append([chnName, chnType, chnUnit, chnUnitDesc])
    return chnInfos
class TDMS_dj(object):
    """Thin convenience wrapper around one TDMS file on disk."""

    def __init__(self, tdms_path):
        self.tdms_path = tdms_path
        self.file_name = path_leaf(self.tdms_path)
        self.file_dir = path_dir(self.tdms_path)
        self.tdms_file = TdmsFile(tdms_path)

    def get_groups(self):
        """Return (and cache on the instance) the file's group list."""
        self.group_lst = self.tdms_file.groups()
        return self.group_lst

    def get_channels(self, grp_ind):
        """Return the channel names of one group.

        Names are parsed out of each channel object's repr string, which
        looks like ``.../'<group>'/'<channel>'>`` (possibly followed by a
        tab and extra text).
        """
        self.chans_lst = self.tdms_file.group_channels(grp_ind)
        self.chans_lstout = []
        for raw in self.chans_lst:
            text = str(raw)
            # Skip past the "'/'" separator to the channel-name part.
            text = text[text.find("'/'") + 3:]
            # Cut at the tab when present, otherwise at the closing "'>".
            marker = "\t" if text.find("\t") >= 0 else "'>"
            self.chans_lstout.append(text[:text.find(marker)])
        return self.chans_lstout

    def get_all_grps_chnls(self):
        """Map every group name to its list of channel names."""
        self.grps = self.get_groups()
        self.grps_chans_dict = {grp: self.get_channels(grp)
                                for grp in self.grps}
        return self.grps_chans_dict

    def get_data(self, group, channel):
        """Return the raw data array of one channel."""
        return self.tdms_file.object(group, channel).data

    def conv_to_hdf(self):
        """Write the whole file out as HDF5 next to the source file."""
        self.hdf_path = path_join_hdf(self.file_dir, self.file_name)
        self.tdms_file.as_hdf(self.hdf_path, mode='w', group='/')
# NOTE(review): Python 2 script fragment (bare `print` statements).
# Polls for sequentially numbered Spectral/Power TDMS file pairs and
# classifies each batch of spectra with a previously built `model`.
while(1):
    # Zero-pad the running file index to three digits.
    # NOTE(review): for number >= 1000 no branch runs, so randomFile and
    # powerFile keep their previous values (or are unbound on the first
    # pass) — confirm this is intended.
    if(number<10):
        randomFile = "SpectralOut_00"+str(number)+".tdms"
        powerFile = "PowerOut_00"+str(number)+".tdms"
    elif(number<100):
        randomFile = "SpectralOut_0"+str(number)+".tdms"
        powerFile = "PowerOut_0"+str(number)+".tdms"
    elif(number<1000):
        randomFile = "SpectralOut_"+str(number)+".tdms"
        powerFile = "PowerOut_"+str(number)+".tdms"
    print randomFile
    if(os.path.exists(randomFile)):
        print "inside " + randomFile
        random_tdms = TdmsFile('Spectral/' + randomFile)
        power_tdms = TdmsFile('Power/' + powerFile)
        # The first channel is named "Untitled"; later ones "Untitled <n>".
        b = random_tdms.object(random_tdms.groups()[0], "Untitled")
        c = power_tdms.object(power_tdms.groups()[0],"Untitled")
        x_set=np.array([b.data])
        power_set = np.array([c.data])
        # print(power_set[0])
        # Stack the remaining 59 spectra into one (60, N) array.
        for x in range(1, 60):
            b = random_tdms.object(random_tdms.groups()[0],"Untitled "+str(x))
            np1=np.array([b.data])
            x_set = np.concatenate((x_set,np1),axis=0)
        predicted=model.predict(x_set)
        # probability = (model.predict_proba(x_set).max(1))
        # for i in range(1,len(probability)):
        #     if (probability[i] < 0.8):
        #         predicted[i] = 1
tdmsfile = TdmsFile(file_path) # channel = tdmsfile.object('Group', 'Channel1') # data = channel.data # time = channel.time_track() show_properties = False show_data = False show_time_track = False level = 0 root = tdmsfile.object() display('/', level) if show_properties: display_properties(root, level) for group in tdmsfile.groups(): level = 1 group_obj = tdmsfile.object(group) display("%s" % group_obj.path, level) if show_properties: display_properties(group_obj, level) for channel in tdmsfile.group_channels(group): level = 2 display("%s" % channel.path, level) if show_properties: level = 3 display("data type: %s" % channel.data_type.name, level) display_properties(channel, level) if show_time_track: level = 3 try:
# --- Tail of a rebinning helper whose `def` line lies above this chunk.
# NOTE(review): the original nesting of these lines is not recoverable
# from this view; the layout below is a best-effort reconstruction.
newx.append(startbin + binsize/2.)
newy.append(bin_acc)
newcount = newcount + 1
startbin += binsize
bin_acc = 0.
bin_acc += abs(datay[count])
return (newx,newy)

# reading TDMS file
filename = sys.argv[1]
datafile = TdmsFile(filename)
# TdmsFile is a function from the library/class npTDMS??

# get the group names
print filename
list_of_groups = datafile.groups()
# groups is a function from npTDMS, what returns the names of the groups
# print list_of_groups[0] it's only possible to print element 0, so the list has only one element
number_of_groups = len(list_of_groups)
# print number_of_groups this gives a "1", so there is only one group
for groupname in list_of_groups:
    print groupname
    # the groupname is "data"; it means to print every element of a list
    list_of_channels = datafile.group_channels(groupname)
    # group channels is a function from npTDMS, what returns a list of channel objects
    for channel in list_of_channels:
        print channel

# extracting first waveform
# getting voltages
bin_res_x = []
bin_res_y = []
#for group in ("Cube X1",):
# NOTE(review): loop body is truncated past the visible chunk.
for group in ("Cube X1", "Cube X2", "Cube Y1", "Cube Y2", "Cube Z1", "Cube Z2"):
def lade_tdms(self, datei): """ :type datei: str :return: Die gemittelten Messwerte aus der angegebenen Datei :rtype: numpy.mutliarray.ndarray """ # Beschnittene Daten (links: positiv, rechts: negativ) daten = np.zeros(self.par.messpunkte - self.par.bereich_links + self.par.bereich_rechts) try: tdat = TdmsFile(datei) tdms = tdat.object(tdat.groups()[0], 'Untitled') except (ValueError, IOError): print('Datei ' + datei + ' nicht auslesbar') return daten index_fehler = False for mittelung in range(self.par.mittelungen): try: """ Mittelung (durch Addition) UND Begrenzung des Fitbereichs (zur Eliminierung von parasitären Frequenzpeaks) nach Angabe in GUI """ start = mittelung * self.par.messpunkte links = start + self.par.bereich_links rechts = start + self.par.messpunkte + self.par.bereich_rechts daten += tdms.data[links:rechts] """if mittelung == 0: name = raw_input('$') import matplotlib.pyplot as plt plt.title(name+ ": Einzelmessung") plt.xlabel(u"Frequenz / Hz") plt.ylabel(u"Amplitude / µV") plt.plot( self.frequenzen, daten * (1000*1000/50/2.9), antialiased=True ) plt.show() elif mittelung == self.par.mittelungen-1: name = raw_input('$') import matplotlib.pyplot as plt plt.title(name+ ": 200x gemittelt") plt.xlabel(u"Frequenz / Hz") plt.ylabel(u"Amplitude / µV") plt.plot( self.frequenzen, daten / self.par.mittelungen * (1000*1000/180), antialiased=True ) plt.show()""" except (ValueError, IndexError): """ In diesem Fall ist ein Messfehler aufgetreten. Das kann (sehr selten) passieren, weshalb der Fit dennoch funktionieren muss. Hier ist dann aber ein Einbruch in der Amplitude zu verzeichnen. """ if not index_fehler: index_fehler = True print('Fehlende Messwerte in Datei ' + datei) return daten / self.par.mittelungen