def test_short_lvm():
    """Read the short sample .lvm files in several modes and check the first sample."""
    expected = 0.914018
    # Pickle-only copy of the data.
    result = read('./data/pickle_only.lvm')
    np.testing.assert_equal(result[0]['data'][0, 0], expected)
    # Plain-text parse, bypassing any pickle cache.
    result = read('./data/short.lvm', read_from_pickle=False)
    np.testing.assert_equal(result[0]['data'][0, 0], expected)
    # Same file, allowing the pickle cache.
    result = read('./data/short.lvm', read_from_pickle=True)
    np.testing.assert_equal(result[0]['data'][0, 0], expected)
    # Variant with a different line ending; do not dump a pickle afterwards.
    result = read('./data/short_new_line_end.lvm',
                  read_from_pickle=True,
                  dump_file=False)
    np.testing.assert_equal(result[0]['data'][0, 0], expected)
def import_data_lvm(fn, channels=(1, 2, 3, 4), n_freq_samples=20):
    """Read an .lvm measurement file and extract signals plus rotation frequency.

    Parameters
    ----------
    fn : str
        Path to the .lvm file.
    channels : sequence of int, optional
        Column indices of the signal channels to extract. Defaults to
        ``(1, 2, 3, 4)``, matching the original hard-coded behavior.
    n_freq_samples : int, optional
        Number of leading non-zero rotation-frequency samples to average.
        Defaults to 20, matching the original hard-coded behavior.

    Returns
    -------
    tuple
        ``(signals, freq)`` where ``signals`` is a list of per-channel
        sample lists and ``freq`` is the mean of the first
        ``n_freq_samples`` non-zero entries of column 5.
    """
    import lvm_read
    rows = lvm_read.read(fn)[0]["data"]
    signals = [[row[c] for row in rows] for c in channels]
    # Column 5 carries the rotation frequency; falsy (zero) entries are dropped.
    rot_freq = [row[5] for row in rows if row[5]]
    # NOTE(review): `mean` is expected to be in scope at module level
    # (presumably numpy.mean) — confirm against the enclosing file.
    return (signals, mean(rot_freq[0:n_freq_samples]))
def plot_real_data_incorrect():
    """Plot roundness profiles computed with erroneous vs. determined sensor angles.

    NOTE(review): the first `signals` extraction below is dead code — it is
    immediately overwritten by the MATLAB data set; presumably kept for
    comparison/debugging. Confirm before removing.
    """
    import lvm_read
    fn = "./data/teippimittaus_uusi.lvm"
    data = lvm_read.read(fn)
    channels = (data[0]["Channel names"])
    channel_data = data[0]["data"]
    channel_names = ['Laser 1', 'Laser 2', 'Laser 3', 'Laser 4']
    # Map the named laser channels to their column indices.
    channel_indexes = [channels.index(name) for name in channel_names]
    rounds = [0, 100]
    # Slice out rounds[0]..rounds[1] revolutions at 1024 samples per revolution.
    signals = [[i[a] for i in channel_data][1024 * rounds[0]:1024 * rounds[1]]
               for a in channel_indexes]
    signals = np.array(average(signals))
    # The result above is discarded: signals is re-derived from the MATLAB file.
    data = import_data_lvm("matlab/4.00Hz_300mm.lvm")
    signals = np.array(average(data[0]))
    # Same signals, two sensor-angle sets: nominal-with-errors vs. determined.
    advanced1 = advanced_roundness_f_coeff(signals, [0, 38, 67, 180])
    advanced2 = advanced_roundness_f_coeff(
        signals, [0, 37.06875, 65.88984375, 179.41640625])
    # NOTE(review): the filtered coefficients are computed but never used below.
    filtered_advanced1 = filter_fft(advanced1)
    filtered_advanced2 = filter_fft(advanced2)
    ar1 = get_roundness_profile(advanced1)
    ar2 = get_roundness_profile(advanced2)
    polar_plot(ar1, "Angles with errors", 0.03)
    polar_plot(ar2, "Determined angles", 0.03)
    plt.show()
def timing_on_long_short_lvm():
    """Benchmark repeated text parsing of the long sample file (no pickle cache)."""
    n_runs = 5
    start = time.time()
    for _ in range(n_runs):
        read('./data/long.lvm', read_from_pickle=False)
    elapsed = time.time() - start
    print(f'Average time: {elapsed/n_runs:3.1f}s')
def read_lvm(path):
    """Load an .lvm file into a DataFrame with 'Time' and 'Acceleration' columns.

    Keeps the 'X_Value' column (renamed to 'Time') and the last column whose
    name contains 'Accel' (renamed to 'Acceleration').
    """
    measurement = lvm_read.read(path)[0]
    # Blank channel names are dropped; the rest are stripped of whitespace.
    names = [c.strip() for c in measurement['Channel names'] if c]
    frame = pd.DataFrame(measurement['data'], columns=names)
    keep = [c for c in frame.columns if c == 'X_Value' or 'Accel' in c]
    frame = frame[keep]
    renames = {'X_Value': 'Time', frame.columns[-1]: 'Acceleration'}
    return frame.rename(renames, axis='columns')
# File-selection dialog: ask the user for one or more .lvm files, read the
# first data row of each and build a DataFrame of derived values.
# (User-facing dialog strings are Japanese runtime text and are left unchanged.)
fTyp = [("", "*.lvm")]
iDir = os.path.abspath(os.path.dirname(__file__))
tkinter.messagebox.showinfo('lvm形式ファイルための読み込みプログラム',
                            '処理を実施したいファイルを複数選択してください!')
file = tkinter.filedialog.askopenfilenames(filetypes=fTyp, initialdir=iDir)
##### Build the list of selected files #####
list_tmp = list(file)
tkinter.messagebox.showinfo('lvm形式ファイルための読み込みプログラム', list_tmp)
count = 0
for j in range(0, len(list_tmp)):
    filepath = list_tmp[j]
    basename = os.path.basename(filepath)
    lvm_file = read(filepath)
    if j == 0:
        tmp = lvm_file[0]['data'][0, 0:4]  # select the key from the dict to extract the data
        u_true = np.sqrt(
            2 * tmp[1] * 1000 /
            tmp[3])  # U = SQRT(2 x Pd / rho) [Pa]; the manometer conversion multiplies the output voltage by 1000
        tmp = np.append(tmp, u_true)
        ttmp = np.reshape(tmp, (5, 1)).T  # extraction yields a 1-D array, so reshape into a matrix
        df_lvm = pd.DataFrame(ttmp, index=[basename])
    else:
        tmp = lvm_file[0]['data'][0, 0:4]  # select the key from the dict to extract the data
        u_true = np.sqrt(2 * tmp[1] * 1000 / tmp[3])
        tmp = np.append(tmp, u_true)
        # NOTE(review): this branch appears truncated — `tmp` is never appended
        # to df_lvm here; the continuation presumably lies outside this chunk.
import sys, os
my_path = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, my_path + '/../')  # make the parent directory importable (FLife lives there)
import FLife
import lvm_read
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as si
from scipy.integrate import quad

# First segment of the measurement: column 0 is time, column 1 the signal.
data = lvm_read.read('./data/m1.lvm')[0]
t = data['data'][:, 0]
x = data['data'][:, 1]
rms = 100
# NOTE(review): C, k, Su look like fatigue-analysis parameters (S-N curve
# constants / ultimate strength) used by FLife elsewhere — confirm; they are
# unused in this script.
C = 1.8e+22
k = 7.3
Su = 446
x = rms * x / np.std(x)  # rescale the signal to the target RMS value
# Spectral data; dt is taken from the second time stamp, window ~0.1 s.
sd = FLife.SpectralData(input=x, dt=t[1], nperseg=int(0.1 / t[1]))
# Show the PSD (frequency in column 0, power in column 1).
plt.figure(figsize=(10, 5))
plt.title('PSD')
plt.plot(sd.psd[:, 0], sd.psd[:, 1])
plt.xlim(0, 1200)
plt.show()
from os import path

# Command-line tool: serialize an .lvm file into <name>_metadata.txt and
# <name>_serialized.txt (skipped if both outputs already exist).
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print("Filename not given")
        sys.exit(-1)
    filename = sys.argv[1]
    # Derive output names by replacing the 4-character '.lvm' extension.
    metadata_filename = filename[:-4] + '_metadata.txt'
    data_filename = filename[:-4] + '_serialized.txt'
    if path.exists(metadata_filename) and path.exists(data_filename):
        print("Files are already done")
        sys.exit(0)
    print(filename)
    np.set_printoptions(formatter={'float': '{: 0.5f}'.format})
    try:
        # Force a fresh text parse instead of a cached pickle.
        lvm = lvm_read.read(filename, read_from_pickle=False)
    except IOError as e:
        print(e)
        sys.exit(-1)
    lvm_metadata = []
    # BUG FIX: dict.has_key() was removed in Python 3 and raised
    # AttributeError here — use the `in` operator instead.
    if 'Fs(MHz)' in lvm:
        lvm_metadata.append(float(lvm['Fs(MHz)']))
    else:
        # NOTE(review): kept from the original, but this is a no-op — the
        # result of np.asarray(0) is discarded. Presumably a placeholder 0
        # was meant to be appended to lvm_metadata; confirm intent.
        np.asarray(0)
    np.savetxt(metadata_filename, np.asarray(lvm_metadata), fmt='%1.4f')
    np.savetxt(data_filename, lvm[0]['data'], fmt='%1.4f')
    sys.exit(0)
def task(lvm_file, params):
    """Detect high-energy ("train") segments in one channel of an .lvm recording.

    Parameters
    ----------
    lvm_file : str
        Path of the .lvm file to process.
    params : dict
        Must contain 'channel', 'n_fft', 'win_length', 'hop_length',
        'threshold' and 'min_duration' (given in samples); 'win_type' is
        optional and defaults to 'hann'.

    Returns
    -------
    dict
        ``{lvm_file: {'signal', 'signal_mask', 'feats', 'energy', 'mask'}}``
        with the normalized signal, a per-sample mask, the STFT magnitudes,
        the per-frame mean energy and the per-frame mask.
    """
    # read params
    channel = params['channel']
    n_fft = params['n_fft']
    win_length = params['win_length']
    hop_length = params['hop_length']
    win_type = params.get('win_type', 'hann')
    threshold = params['threshold']
    min_duration = params['min_duration'] / hop_length  # convert to frames
    print('Processing {}'.format(lvm_file))
    # read lvm file
    lvm = lvm_read.read(lvm_file)
    signal = lvm[0]['data'][:, channel]
    # normalize signal to zero mean, unit variance
    signal -= np.mean(signal)
    signal /= np.std(signal)
    # process signal
    fft = librosa.stft(signal,
                       n_fft=n_fft,
                       win_length=win_length,
                       hop_length=hop_length,
                       window=win_type)
    feats = np.abs(fft)
    energy = np.mean(feats, axis=0)
    # compute train mask for frames
    # BUG FIX: the np.int alias was removed in NumPy 1.24 — use builtin int.
    mask = np.array(energy > threshold, dtype=int)
    diff = mask[0:-1] ^ mask[1:]  # make difference
    changes = np.where(diff != 0)[0]  # search for changes
    changes = np.append(changes, len(mask) - 1)  # add the last sample
    # convert not-train runs shorter than min_duration into train frames
    start_index = 0
    for index in changes:
        end_index = index + 1
        if mask[start_index] == 0:
            duration = end_index - start_index
            if duration < min_duration:
                mask[start_index:end_index] = 1
        start_index = end_index
    # compute train mask for signal
    signal_mask = np.zeros_like(signal, dtype=int)
    diff = mask[0:-1] ^ mask[1:]  # make difference
    changes = np.where(diff != 0)[0]  # search for changes
    changes = np.append(changes, len(mask) - 1)  # add the last sample
    start_index = 0
    for index in changes:
        end_index = index + 1
        if mask[start_index] == 1:
            # expand signal for one frame left and right
            start, end = librosa.frames_to_samples(
                [start_index - 1, end_index + 1], hop_length=hop_length)
            signal_mask[start:end] = 1
        start_index = end_index
    results = {
        lvm_file: {
            'signal': signal,
            'signal_mask': signal_mask,
            'feats': feats,
            'energy': energy,
            'mask': mask
        }
    }
    return results
def test_data():
    """Regression-test FLife fatigue-life estimates on the m1.lvm sample.

    Checks that (a) each method's PDF integrates to 1, (b) each method's
    fatigue life matches the hard-coded reference values below, and (c) the
    PDF-integration code path agrees with the closed-form path to ~1%.
    """
    # Reference fatigue lives (seconds) pinned from a known-good run.
    results_ref = {
        'Rainflow': 906.217537,
        'Rainflow-Goodman': 827.866874,
        'Dirlik': 1067.423788,
        'Tovo Benasciutti 1': 735.084318,
        'Tovo Benasciutti 2': 1114.625812,
        'Zhao Baker 1': 985.886435,
        'Zhao Baker 2': 1048.549852,
        'Narrowband': 711.258072,
        'Alpha 0.75': 1086.593252,
        'Wirsching Light': 1038.1813918800456,
        'Rice': 687.739914,
        'Gao Moan': 837.392263,
        'Petrucci Zuccarello': 4.322102
    }
    # First segment: column 0 is time, column 1 the signal.
    data = lvm_read.read('./data/m1.lvm')[0]
    t = data['data'][:, 0]
    x = data['data'][:, 1]
    rms = 100
    # NOTE(review): C and k look like S-N curve fatigue parameters and Su an
    # ultimate strength (used by the Goodman correction) — confirm in FLife docs.
    C = 1.8e+22
    k = 7.3
    Su = 446
    x = rms * x / np.std(x)  # rescale the signal to the target RMS value
    # Spectral data
    sd = FLife.SpectralData(input=x, dt=t[1], nperseg=int(0.1 / t[1]))
    # Rainflow reference fatigue life
    rf = FLife.Rainflow(sd)
    # Spectral methods
    dirlik = FLife.Dirlik(sd)
    tb = FLife.TovoBenasciutti(sd)
    zb = FLife.ZhaoBaker(sd)
    nb = FLife.Narrowband(sd)
    a075 = FLife.Alpha075(sd)
    wl = FLife.WirschingLight(sd)
    rice = FLife.Rice(sd)
    gm = FLife.GaoMoan(sd)
    pz = FLife.PetrucciZuccarello(sd)
    # Test PDF's; expected result should be 1
    PDFs = {
        'Dirlik': quad(dirlik.get_PDF, a=0, b=np.Inf)[0],
        'Rice': quad(nb.get_PDF, a=0, b=np.Inf)[0],
        'Rice -inf': quad(rice.get_PDF, a=-np.Inf, b=np.Inf)[0],
        'Tovo Benasciutti 1': quad(tb.get_PDF, a=0, b=np.Inf,
                                   args=('method 1', ))[0],
        'Tovo Benasciutti 2': quad(tb.get_PDF, a=0, b=np.Inf,
                                   args=('method 2', ))[0],
        'Zhao Baker 1': quad(zb.get_PDF, a=0, b=np.Inf,
                             args=('method 1', ))[0],
        'Zhao Baker 2': quad(zb.get_PDF, a=0, b=np.Inf,
                             args=('method 2', ))[0],
    }
    for method, value in PDFs.items():
        np.testing.assert_almost_equal(value, 1., decimal=5,
                                       err_msg=f'Method: {method}')
    # Fatigue lives via the closed-form code path.
    results = {
        'Rainflow': rf.get_life(C=C, k=k),
        'Rainflow-Goodman': rf.get_life(C=C, k=k, Su=Su),
        'Dirlik': dirlik.get_life(C=C, k=k),
        'Tovo Benasciutti 1': tb.get_life(C=C, k=k, method='method 1'),
        'Tovo Benasciutti 2': tb.get_life(C=C, k=k),
        'Zhao Baker 1': zb.get_life(C=C, k=k),
        'Zhao Baker 2': zb.get_life(C=C, k=k, method='method 2'),
        'Narrowband': nb.get_life(C=C, k=k),
        'Alpha 0.75': a075.get_life(C=C, k=k),
        'Wirsching Light': wl.get_life(C=C, k=k),
        'Rice': rice.get_life(C=C, k=k),
        'Gao Moan': gm.get_life(C=C, k=k),
        'Petrucci Zuccarello': pz.get_life(C=C, k=k, Su=Su)
    }
    for method, value in results.items():
        # Petrucci Zuccarello includes the mean-stress effect, so it is
        # compared against the Goodman-corrected rainflow result.
        if method == 'Petrucci Zuccarello':
            compare_to = 'Rainflow-Goodman'
        else:
            compare_to = 'Rainflow'
        err = FLife.tools.relative_error(value, results[compare_to])
        print(
            f'{method:>19s}:{value:6.0f} s,{100*err:>4.0f} % to {compare_to}')
        np.testing.assert_almost_equal(value, results_ref[method], decimal=5,
                                       err_msg=f'Method: {method}')
    # Same methods via numerical PDF integration where supported.
    results_via_PDF = {
        'Dirlik': dirlik.get_life(C=C, k=k, integrate_pdf=True),
        'Tovo Benasciutti 1': tb.get_life(C=C, k=k, method='method 1',
                                          integrate_pdf=True),
        'Tovo Benasciutti 2': tb.get_life(C=C, k=k, integrate_pdf=True),
        'Zhao Baker 1': zb.get_life(C=C, k=k, integrate_pdf=True),
        'Zhao Baker 2': zb.get_life(C=C, k=k, method='method 2',
                                    integrate_pdf=True),
        'Narrowband': nb.get_life(C=C, k=k, integrate_pdf=True),
        'Alpha 0.75': a075.get_life(C=C, k=k),
        'Wirsching Light': wl.get_life(C=C, k=k),
        'Rice': rice.get_life(C=C, k=k),
        'Gao Moan': gm.get_life(C=C, k=k),
        'Petrucci Zuccarello': pz.get_life(C=C, k=k, Su=Su)
    }
    for method, value in results_via_PDF.items():
        # Integration path must agree with the closed-form path to ~1%.
        np.testing.assert_almost_equal(value / results[method], 1.0,
                                       decimal=2,
                                       err_msg=f'Method: {method}')
import lvm_read
import numpy as np

# Convert the first column of every segment of an .lvm file into one CSV,
# one CSV column per segment.
fname = "./data_demo/LFDI.lvm"
lvm = lvm_read.read(fname)
columns = []
for seg in range(lvm['Segments']):
    columns.append(np.array(lvm[seg]['data'])[:, 0])
mat = np.array(columns).T
# Write next to the source file, swapping the 'lvm' extension for 'csv'.
np.savetxt(fname[0:-3] + "csv", mat[:, 0:], delimiter=",")
def test_no_decimal_separator():
    """A file written without a decimal separator must still parse correctly."""
    result = read('./data/no_decimal_separator.lvm',
                  read_from_pickle=False,
                  dump_file=False)
    np.testing.assert_equal(result[0]['data'][0, 1], -0.008807)
def test_with_multi_time_column_lvm():
    """A file carrying several time columns must keep every column, in order."""
    expected = np.array([0.000000, -0.035229, 0.000000, 0.532608])
    result = read('./data/multi_time_column.lvm',
                  read_from_pickle=False,
                  dump_file=False)
    np.testing.assert_allclose(result[0]['data'][0], expected)
def test_with_empty_fields_lvm():
    """A file containing empty fields must still yield the expected sample."""
    result = read('./data/with_empty_fields.lvm',
                  read_from_pickle=False,
                  dump_file=False)
    np.testing.assert_equal(result[0]['data'][0, 7], -0.011923)
def test_several_comments():
    """A file with several comment lines must parse to the expected sample."""
    result = read('./data/with_comments.lvm',
                  read_from_pickle=False,
                  dump_file=False)
    np.testing.assert_equal(result[0]['data'][0, 1], 1.833787)