class HCFF2(tr.HasStrictTraits):
    '''High-Cycle Fatigue Filter

    Top-level UI object: a tree of filter nodes on the left and a
    matplotlib figure for 2d plots on the right.
    '''

    # Root node of the filter tree shown in the tree editor.
    hcf = tr.Instance(HCFFRoot)

    def _hcf_default(self):
        # New root with its own CSV import manager.
        return HCFFRoot(import_manager=FileImportManager())

    # Matplotlib figure embedded in the right-hand pane.
    figure = tr.Instance(Figure)

    def _figure_default(self):
        figure = Figure(facecolor='white')
        figure.set_tight_layout(True)
        return figure

    # Main window: resizable horizontal split (tree | plots).
    traits_view = ui.View(
        ui.HSplit(
            ui.Item(name='hcf',
                    editor=tree_editor,
                    show_label=False,
                    width=0.3
                    ),
            ui.UItem('figure', editor=MPLFigureEditor(),
                     resizable=True,
                     springy=True,
                     label='2d plots')
        ),
        title='HCF Filter',
        resizable=True,
        width=0.6,
        height=0.6
    )
def chooseVariables(self):
    """Ask the user to pick logging variables from the physics package.

    Opens a modal checklist dialog populated with the sorted variable
    names of ``self.physics.variables`` and returns the list of selected
    names (suitable as Y-columns input). Previously selected names in
    ``self.xmlLogVariables`` are pre-checked when still valid.

    Returns:
        list[str]: the selected column names (may be empty).
    """
    # dict.keys() is a view in Python 3 and has no .sort(); build a
    # sorted list instead.
    columns = sorted(self.physics.variables.keys())
    # CheckListEditor needs a concrete sequence, not a lazy zip object.
    values = list(zip(range(len(columns)), columns))
    checklist_group = traitsui.Group(
        '10',  # insert vertical space
        traitsui.Label('Select the additional variables you wish to log'),
        traitsui.UItem('columns', style='custom',
                       editor=traitsui.CheckListEditor(values=values,
                                                       cols=6)),
        traitsui.UItem('selectAllButton'))
    traits_view = traitsui.View(checklist_group,
                                title='CheckListEditor',
                                buttons=['OK'],
                                resizable=True,
                                kind='livemodal')
    col = ColumnEditor(numberOfColumns=len(columns))
    try:
        # Pre-select the previously logged variables, if they still exist.
        col.columns = [columns.index(varName)
                       for varName in self.xmlLogVariables]
    except Exception as e:
        logger.error(
            "couldn't selected correct variable names. Returning empty selection"
        )
        # Exception.message was removed in Python 3; log the exception itself.
        logger.error("%s ", e)
        col.columns = []
    col.edit_traits(view=traits_view)
    logger.debug("value of columns selected = %s ", col.columns)
    logger.debug("value of columns selected = %s ",
                 [columns[i] for i in col.columns])
    return [columns[i] for i in col.columns]
class SingleSelectOverlayFiles(tapi.HasPrivateTraits):
    # Available overlay file names shown in the tabular list.
    choices = tapi.List(tapi.Str)

    # The single entry currently highlighted by the user.
    selected = tapi.Str

    # Read-only, single-selection tabular list of the choices.
    view = tuiapi.View(tuiapi.HGroup(
        tuiapi.UItem('choices',
                     editor=tuiapi.TabularEditor(
                         show_titles=True,
                         selected='selected',
                         editable=False,
                         multi_select=False,
                         adapter=SingleSelectOverlayFilesAdapter()))),
        width=224, height=100)
class AboutTool(tr.HasStrictTraits): about_tool_text = tr.Str( 'High-Cycle Fatigue Tool \nVersion: 1.0.0\n\nHCFT is a tool with a graphical user interface \nfor processing CSV files obtained from fatigue \nexperiments up to the high-cycle fatigue ranges.\nAdditionally, tests with monotonic loading can be processed.\n\nDeveloped in:\nRWTH Aachen University - Institute of Structural Concrete\nBy:\nDr.-Ing. Rostislav Chudoba\nM.Sc. Homam Spartali\n\nGithub link:\nhttps://github.com/ishomam/high-cycle-fatigue-tool' ) # ========================================================================= # Configuration of the view # ========================================================================= traits_view = ui.View(ui.VGroup(ui.UItem('about_tool_text', style='readonly'), show_border=True), buttons=[ui.OKButton], title='About HCFT', resizable=True, width=0.3, height=0.25)
class SingleSelect(tapi.HasPrivateTraits):
    '''Single-selection list that switches the x-axis of a 2D plot.'''

    # Candidate variable names for the x-axis.
    choices = tapi.List(tapi.Str)

    # Currently selected variable name.
    selected = tapi.Str

    # The plot whose x-axis is changed on selection.
    plot = tapi.Instance(Plot2D)

    view = tuiapi.View(tuiapi.HGroup(
        tuiapi.UItem('choices',
                     editor=tuiapi.TabularEditor(
                         show_titles=True,
                         selected='selected',
                         editable=False,
                         multi_select=False,
                         adapter=SingleSelectAdapter()))),
        width=224, height=668, resizable=True,
        title='Change X-axis')

    @tapi.on_trait_change('selected')
    def _selected_modified(self, object, name, new):
        # Forward the positional index of the chosen name to the plot.
        self.plot.change_axis(object.choices.index(object.selected))
class MultiSelect(tapi.HasPrivateTraits):
    '''Multi-selection list controlling which curves a 2D plot shows.'''

    # All available variable names.
    choices = tapi.List(tapi.Str)

    # Subset of `choices` the user has highlighted.
    selected = tapi.List(tapi.Str)

    # Plot updated whenever the selection changes.
    plot = tapi.Instance(Plot2D)

    view = tuiapi.View(
        tuiapi.HGroup(
            tuiapi.UItem(
                'choices',
                editor=tuiapi.TabularEditor(
                    show_titles=True,
                    selected='selected',
                    editable=False,
                    multi_select=True,
                    adapter=MultiSelectAdapter()))),
        width=224, height=568, resizable=True)

    @tapi.on_trait_change('selected')
    def _selected_modified(self, object, name, new):
        # Translate the selected labels into their positional indices
        # and hand them to the plot in one call.
        indices = [object.choices.index(label)
                   for label in object.selected]
        self.plot.change_plot(indices)
class MATSBondSlipDP(MATSEval):
    '''Bond-slip material model with damage-plasticity.

    Combines plasticity with kinematic/isotropic hardening and a scalar
    damage function omega(kappa) driven by the maximum absolute slip.
    '''

    node_name = 'bond model: damage-plasticity'

    # Child nodes shown in the model tree (the active damage function).
    tree_node_list = List([])

    def _tree_node_list_default(self):
        return [self.omega_fn, ]

    @on_trait_change('omega_fn_type')
    def _update_node_list(self):
        self.tree_node_list = [self.omega_fn]

    E_m = bu.Float(30000.0,
                   tooltip='Stiffness of the matrix [MPa]',
                   symbol=r'E_\mathrm{m}',
                   unit='MPa',
                   desc='Stiffness of the matrix',
                   MAT=True,
                   auto_set=True, enter_set=True)

    E_f = bu.Float(200000.0,
                   tooltip='Stiffness of the reinforcement [MPa]',
                   symbol=r'E_\mathrm{f}',
                   unit='MPa',
                   desc='Stiffness of the reinforcement',
                   MAT=True,
                   auto_set=False, enter_set=False)

    E_b = bu.Float(12900.0,
                   symbol=r"E_\mathrm{b}",
                   unit='MPa',
                   desc="Bond stiffness",
                   MAT=True,
                   enter_set=True, auto_set=False)

    gamma = bu.Float(100.0,
                     symbol=r"\gamma",
                     unit='MPa',
                     desc="Kinematic hardening modulus",
                     MAT=True,
                     enter_set=True, auto_set=False)

    K = bu.Float(1000.0,
                 symbol="K",
                 unit='MPa',
                 desc="Isotropic hardening modulus",
                 MAT=True,
                 enter_set=True, auto_set=False)

    # BUG FIX: metadata key was misspelled 'unite' in the original;
    # every sibling trait uses 'unit'.
    tau_bar = bu.Float(5.0,
                       symbol=r'\bar{\tau}',
                       unit='MPa',
                       desc="Reversibility limit",
                       MAT=True,
                       enter_set=True, auto_set=False)

    # If True, the elastic limit s_0 is user-defined instead of being
    # derived from tau_bar / E_b.
    uncoupled_dp = Bool(False,
                        MAT=True,
                        label='Uncoupled d-p')

    s_0 = bu.Float(MAT=True,
                   desc='Elastic strain/displacement limit')

    def __init__(self, *args, **kw):
        super(MATSBondSlipDP, self).__init__(*args, **kw)
        self._omega_fn_type_changed()
        self._update_s0()

    @on_trait_change('tau_bar,E_b')
    def _update_s0(self):
        # Derive the elastic slip limit from the reversibility limit
        # unless damage and plasticity are uncoupled.
        if not self.uncoupled_dp:
            if self.E_b == 0:
                self.s_0 = 0
            else:
                self.s_0 = self.tau_bar / self.E_b
            self.omega_fn.s_0 = self.s_0

    # Mapping of selectable damage-function names to their classes.
    omega_fn_type = Trait('multilinear',
                          dict(li=LiDamageFn,
                               jirasek=JirasekDamageFn,
                               abaqus=AbaqusDamageFn,
                               FRP=FRPDamageFn,
                               multilinear=MultilinearDamageFn
                               ),
                          MAT=True,)

    def _omega_fn_type_changed(self):
        # Instantiate the selected damage-function class.
        self.omega_fn = self.omega_fn_type_(mats=self, s_0=self.s_0)

    omega_fn = Instance(IDamageFn, report=True)

    def _omega_fn_default(self):
        return MultilinearDamageFn()

    def omega(self, k):
        '''Damage value for the state variable k.'''
        return self.omega_fn(k)

    def omega_derivative(self, k):
        '''Derivative of the damage function at k.'''
        return self.omega_fn.diff(k)

    # All state variables are scalar per integration point.
    state_var_shapes = dict(s_p_n=(),
                            alpha_n=(),
                            z_n=(),
                            kappa_n=(),
                            omega_n=())

    def get_corr_pred(self, eps_n1, t_n1,
                      s_p_n, alpha_n, z_n, kappa_n, omega_n):
        '''Corrector-predictor: return stress and tangent stiffness.

        The state arrays (s_p_n, alpha_n, z_n, kappa_n, omega_n) are
        updated in place for the plastically loading points I.
        '''
        D_shape = eps_n1.shape[:-1] + (3, 3)
        D = np.zeros(D_shape, dtype=np.float_)
        D[..., 0, 0] = self.E_m
        D[..., 2, 2] = self.E_f
        s_n1 = eps_n1[..., 1]
        sig_pi_trial = self.E_b * (s_n1 - s_p_n)
        Z = self.K * z_n
        # Guard against negative values of isotropic hardening.
        h_1 = self.tau_bar + Z
        pos_iso = h_1 > 1e-6
        X = self.gamma * alpha_n
        # Guarding negative values of kinematic hardening (not yet active):
        # h_2 = h * np.sign(sig_pi_trial - X) * \
        #     np.sign(sig_pi_trial) + X * np.sign(sig_pi_trial)
        # pos_kin = h_2 > 1e-6
        f_trial = np.fabs(sig_pi_trial - X) - h_1 * pos_iso
        # Boolean mask of the plastically loading points.
        I = f_trial > 1e-6
        tau = np.einsum('...st,...t->...s', D, eps_n1)

        # Return mapping
        # NOTE(review): np.fabs(self.K) in the denominator differs from
        # the plain K used in the tangent below — confirm intended.
        delta_lamda_I = f_trial[I] / \
            (self.E_b + self.gamma + np.fabs(self.K))
        # update all the state variables
        s_p_n[I] += delta_lamda_I * np.sign(sig_pi_trial[I] - X[I])
        z_n[I] += delta_lamda_I
        alpha_n[I] += delta_lamda_I * np.sign(sig_pi_trial[I] - X[I])
        kappa_n[I] = np.max(
            np.array([kappa_n[I], np.fabs(s_n1[I])]), axis=0)
        omega_n[I] = self.omega(kappa_n[I])

        tau[..., 1] = (1 - omega_n) * self.E_b * (s_n1 - s_p_n)

        domega_ds_I = self.omega_derivative(kappa_n[I])

        # Consistent tangent operator
        D_ed_I = -self.E_b / (self.E_b + self.K + self.gamma) \
            * domega_ds_I * self.E_b * (s_n1[I] - s_p_n[I]) \
            + (1 - omega_n[I]) * self.E_b * (self.K + self.gamma) / \
            (self.E_b + self.K + self.gamma)

        D[..., 1, 1] = (1 - omega_n) * self.E_b
        D[I, 1, 1] = D_ed_I

        return tau, D

    tree_view = ui.View(
        ui.VGroup(
            ui.VGroup(
                ui.Item('E_m', full_size=True, resizable=True),
                ui.Item('E_f'),
                ui.Item('E_b'),
                ui.Item('gamma'),
                ui.Item('K'),
                ui.Item('tau_bar'),
            ),
            ui.VGroup(
                ui.Item('uncoupled_dp'),
                ui.Item('s_0'),  # , enabled_when='uncoupled_dp'),
                ui.Item('omega_fn_type'),
            ),
            ui.UItem('omega_fn@')
        )
    )
class FileImportManager(tr.HasTraits):
    '''Select a CSV file and export each of its columns as a .npy file.

    The .npy files are written into an 'NPY' sub-folder next to the
    source CSV, named '<file_name>_<column_header>.npy'.
    '''

    file_csv = tr.File
    open_file_csv = tr.Button('Input file')
    decimal = tr.Enum(',', '.')
    delimiter = tr.Str(';')
    # Number of header/metadata rows to skip when parsing data columns.
    skip_rows = tr.Int(4, auto_set=False, enter_set=True)
    columns_headers_list = tr.List([])
    parse_csv_to_npy = tr.Button

    view = ui.View(ui.VGroup(
        ui.HGroup(
            ui.UItem('open_file_csv'),
            ui.UItem('file_csv', style='readonly'),
        ),
        ui.Item('skip_rows'),
        ui.Item('decimal'),
        ui.Item('delimiter'),
        ui.Item('parse_csv_to_npy', show_label=False),
    ))

    def _open_file_csv_fired(self):
        """ Handles the user clicking the 'Open...' button. """
        extns = ['*.csv', ]  # seems to handle only one extension...
        wildcard = '|'.join(extns)
        dialog = FileDialog(title='Select text file',
                            action='open', wildcard=wildcard,
                            default_path=self.file_csv)
        # BUG FIX: abort if the user cancelled the dialog — the original
        # ignored the result and crashed on an empty path below.
        result = dialog.open()
        if result != OK:
            return
        self.file_csv = dialog.path

        """ Fill columns_headers_list """
        headers_array = np.array(
            pd.read_csv(
                self.file_csv, delimiter=self.delimiter,
                decimal=self.decimal, nrows=1, header=None
            )
        )[0]
        for i in range(len(headers_array)):
            # Sanitize headers so they are usable as file-name parts.
            headers_array[i] = self.get_valid_file_name(headers_array[i])
        self.columns_headers_list = list(headers_array)

        """ Saving file name and path and creating NPY folder """
        dir_path = os.path.dirname(self.file_csv)
        self.npy_folder_path = os.path.join(dir_path, 'NPY')
        if not os.path.exists(self.npy_folder_path):
            os.makedirs(self.npy_folder_path)
        self.file_name = os.path.splitext(os.path.basename(self.file_csv))[0]

    def get_valid_file_name(self, original_file_name):
        '''Strip all characters not safe for use in a file name.'''
        valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
        new_valid_file_name = ''.join(
            c for c in original_file_name if c in valid_chars)
        return new_valid_file_name

    def _parse_csv_to_npy_fired(self):
        '''Export each CSV column into its own .npy file.'''
        print('Parsing csv into npy files...')
        for i in range(len(self.columns_headers_list)):
            column_array = np.array(pd.read_csv(
                self.file_csv, delimiter=self.delimiter,
                decimal=self.decimal, skiprows=self.skip_rows,
                usecols=[i]))
            np.save(os.path.join(self.npy_folder_path,
                                 self.file_name + '_' +
                                 self.columns_headers_list[i] + '.npy'),
                    column_array)
        print('Finished parsing csv into npy files.')
class CSVFile(tr.HasStrictTraits):
    '''Lightweight view on a (possibly huge) CSV file.

    Provides a fast line count and previews of the first/last N lines
    without ever loading the whole file into memory.
    '''

    path = tr.Str
    count_lines = tr.Button
    _lines_number = tr.Int  # _ is the private field convention
    show_lines_number = tr.Bool
    num_of_first_lines_to_show = tr.Int(10)
    num_of_last_lines_to_show = tr.Int(10)
    first_lines = tr.Property(
        depends_on='path, num_of_first_lines_to_show, first_lines_to_skip')
    last_lines = tr.Property(
        depends_on='path, num_of_last_lines_to_show, last_lines_to_skip')
    first_lines_to_skip = tr.Range(low=0, high=10**9, mode='spinner')
    last_lines_to_skip = tr.Range(low=0, high=10**9, mode='spinner')

    def _count_lines_fired(self):
        self.show_lines_number = True
        self._lines_number = self._count_lines_in_file(self.path)

    def get_lines_number(self):
        # If it was not yet calculated, calc it first (lazy caching).
        if self._lines_number == 0:
            self._lines_number = self._count_lines_in_file(self.path)
        return self._lines_number

    def _count_lines_in_file(self, file_name):
        '''Count the lines of a huge file quickly via buffered reads.'''
        # BUG FIX: use a context manager — the original opened the file
        # and never closed it (resource leak).
        with open(file_name, 'rb') as f:
            bufgen = takewhile(lambda x: x, (f.raw.read(1024 * 1024)
                                             for i in repeat(None)))
            return sum(buf.count(b'\n') for buf in bufgen) + 1

    @tr.cached_property
    def _get_first_lines(self):
        '''First N lines of the file, numbered, minus the skipped ones.'''
        first_lines_list = []
        with open(self.path) as myfile:
            for i in range(self.num_of_first_lines_to_show):
                try:
                    # Get next line if it exists!
                    line = next(myfile)
                    # Not executed if StopIteration was thrown above.
                    first_lines_list.append(line)
                except StopIteration:
                    # If the last line ends with \n then an empty line
                    # is actually there.
                    if len(first_lines_list) != 0:
                        if first_lines_list[-1].endswith('\n'):
                            first_lines_list.append('')
                    break
        first_lines_list = self.add_line_numbers(first_lines_list)
        first_lines_list = first_lines_list[self.first_lines_to_skip:]
        first_lines_str = ''.join(first_lines_list)
        return first_lines_str

    def add_line_numbers(self, lines_list):
        '''Prefix each line with "(i)--> ", counting from 1 upwards.'''
        new_list = []
        for line_num, line in zip(range(1, len(lines_list) + 1), lines_list):
            new_list.append('(' + str(line_num) + ')--> ' + str(line))
        return new_list

    def add_reverse_line_numbers(self, lines_list):
        '''Prefix each line with "(i)--> ", counting down to 1.'''
        new_list = []
        for line_num, line in zip(range(len(lines_list), 0, -1), lines_list):
            new_list.append('(' + str(line_num) + ')--> ' + str(line))
        return new_list

    @tr.cached_property
    def _get_last_lines(self):
        '''Last N lines of the file, numbered, minus the skipped ones.'''
        last_lines_list = self.get_last_n_lines(
            self.path, self.num_of_last_lines_to_show, False)
        last_lines_list = self.add_reverse_line_numbers(last_lines_list)
        last_lines_list = last_lines_list[0:len(last_lines_list) -
                                          self.last_lines_to_skip]
        last_lines_str = ''.join(last_lines_list)
        return last_lines_str

    def get_last_n_lines(self, file_name, N, skip_empty_lines=False):
        '''Return the last N lines by reading the file backwards.

        Reads one byte at a time from the end, accumulating reversed
        characters into a buffer until a newline is hit.
        '''
        # Create an empty list to keep the track of last N lines
        list_of_lines = []
        # Open file for reading in binary mode
        with open(file_name, 'rb') as read_obj:
            # Move the cursor to the end of the file
            read_obj.seek(0, os.SEEK_END)
            # Create a buffer to keep the last read line
            buffer = bytearray()
            # Get the current position of pointer i.e eof
            pointer_location = read_obj.tell()
            # Loop till pointer reaches the top of the file
            while pointer_location >= 0:
                # Move the file pointer to the location pointed by
                # pointer_location
                read_obj.seek(pointer_location)
                # Shift pointer location by -1
                pointer_location = pointer_location - 1
                # read that byte / character
                new_byte = read_obj.read(1)
                # If the read byte is new line character then it means
                # one line is read
                if new_byte == b'\n':
                    # Save the line in list of lines (buffer holds the
                    # characters in reverse order)
                    line = buffer.decode()[::-1]
                    if (skip_empty_lines):
                        line_is_empty = line.isspace()
                        if (line_is_empty == False):
                            list_of_lines.append(line)
                    else:
                        list_of_lines.append(line)
                    # If the size of list reaches N, then return the
                    # reversed list
                    if len(list_of_lines) == N:
                        return list(reversed(list_of_lines))
                    # Reinitialize the byte array to save next line
                    buffer = bytearray()
                else:
                    # If last read character is not eol then add it in
                    # buffer
                    buffer.extend(new_byte)
            # As file is read completely, if there is still data in
            # buffer, then it's the first line.
            if len(buffer) > 0:
                list_of_lines.append(buffer.decode()[::-1])
        # return the reversed list
        return list(reversed(list_of_lines))

    traits_view = ui.View(
        ui.Item('path', style='readonly', label='File'),
        ui.HGroup(
            ui.UItem('count_lines'),
            ui.Item('_lines_number', style='readonly',
                    visible_when='show_lines_number == True')),
        ui.VSplit(
            ui.HGroup(ui.Item('first_lines', style='custom'),
                      'first_lines_to_skip',
                      label='First lines in the file'),
            ui.HGroup(ui.Item('last_lines', style='custom'),
                      'last_lines_to_skip',
                      label='Last lines in the file')))
class MATSBondSlipMultiLinear(MATSEval):
    """Multilinear bond-slip law

    The tau(s) relation is a piecewise-linear function interpolated
    from the user-supplied (s_data, tau_data) pairs.
    """
    name = "multilinear bond law"

    E_m = bu.Float(28000.0, tooltip='Stiffness of the matrix [MPa]',
                   MAT=True, unit='MPa', symbol=r'E_\mathrm{m}',
                   desc='E-modulus of the matrix',
                   auto_set=True, enter_set=True)

    E_f = bu.Float(170000.0, tooltip='Stiffness of the fiber [MPa]',
                   MAT=True, unit='MPa', symbol=r'E_\mathrm{f}',
                   desc='E-modulus of the reinforcement',
                   auto_set=False, enter_set=True)

    s_data = Str('', tooltip='Comma-separated list of strain values',
                 MAT=True, unit='mm', symbol='s',
                 desc='slip values',
                 auto_set=True, enter_set=False)

    tau_data = Str('', tooltip='Comma-separated list of stress values',
                   MAT=True, unit='MPa', symbol=r'\tau',
                   desc='shear stress values',
                   auto_set=True, enter_set=False)

    s_tau_table = Property

    def _set_s_tau_table(self, data):
        '''Set the bond law support points from an (s, tau) pair of arrays.'''
        s_data, tau_data = data
        if len(s_data) != len(tau_data):
            raise ValueError('s array and tau array must have the same size')
        self.bs_law.set(xdata=s_data, ydata=tau_data)

    # NOTE: this button and its handler were defined twice in the
    # original source; the exact duplicate has been removed.
    update_bs_law = Button(label='update bond-slip law')

    def _update_bs_law_fired(self):
        '''Re-parse the comma-separated inputs and replot the bond law.'''
        s_data = np.fromstring(self.s_data, dtype=np.float_, sep=',')
        tau_data = np.fromstring(self.tau_data, dtype=np.float_, sep=',')
        if len(s_data) != len(tau_data):
            raise ValueError('s array and tau array must have the same size')
        self.bs_law.set(xdata=s_data, ydata=tau_data)
        self.bs_law.replot()

    #=========================================================================
    # Configurational parameters
    #=========================================================================
    U_var_shape = (1,)
    '''Shape of the primary variable required by the TStepState.
    '''

    state_var_shapes = {}
    r'''Shapes of the state variables
    to be stored in the global array at the level
    of the domain.
    '''

    node_name = 'multiply linear bond'

    def get_corr_pred(self, s, t_n1):
        '''Return bond stress and tangent stiffness for the slip field s.

        The bond law is evaluated on |s| and the sign restored, making
        the law antisymmetric around the origin.
        '''
        n_e, n_ip, _ = s.shape
        D = np.zeros((n_e, n_ip, 3, 3))
        D[..., 0, 0] = self.E_m
        D[..., 2, 2] = self.E_f
        tau = np.einsum('...st,...t->...s', D, s)
        s = s[..., 1]
        shape = s.shape
        signs = np.sign(s.flatten())
        s_pos = np.fabs(s.flatten())
        tau[..., 1] = (signs * self.bs_law(s_pos)).reshape(*shape)
        D_tau = self.bs_law.diff(s_pos).reshape(*shape)
        D[..., 1, 1] = D_tau
        return tau, D

    # Piecewise-linear bond law; default is the identity segment.
    bs_law = Instance(MFnLineArray)

    def _bs_law_default(self):
        return MFnLineArray(
            xdata=[0.0, 1.0],
            ydata=[0.0, 1.0],
            plot_diff=False
        )

    def write_figure(self, f, rdir, rel_path):
        '''Write a PDF of the bond law and the LaTeX include line.'''
        fname = 'fig_' + self.node_name.replace(' ', '_') + '.pdf'
        f.write(r'''
\multicolumn{3}{r}{\includegraphics[width=5cm]{%s}}\\
''' % join(rel_path, fname))
        self.bs_law.replot()
        self.bs_law.savefig(join(rdir, fname))

    def plot(self, ax, **kw):
        '''Plot the bond law with a lightly shaded area underneath.'''
        ax.plot(self.bs_law.xdata, self.bs_law.ydata, **kw)
        ax.fill_between(self.bs_law.xdata, self.bs_law.ydata,
                        alpha=0.1, **kw)

    ipw_view = bu.View(
        bu.Item('E_m'),
        bu.Item('E_f'),
        # ui.Item('s_data'),
        # ui.Item('tau_data'),
        # ui.UItem('update_bs_law')
    )

    def update_plot(self, axes):
        self.plot(axes)

    tree_view = ui.View(
        ui.VGroup(
            ui.VGroup(
                ui.Item('E_m', full_size=True, resizable=True),
                ui.Item('E_f'),
                ui.Item('s_data'),
                ui.Item('tau_data'),
                ui.UItem('update_bs_law')
            ),
            ui.UItem('bs_law@')
        )
    )
), _traitsui.Label('Variables:'), _traitsui.Item( 'controller.sel_cons_char', editor=_traitsui.CheckListEditor( name='controller.consumer_vars'), style='custom', show_label=False, # height=100, ), show_border=True, ), _traitsui.Group( _traitsui.Label('Liking set:'), _traitsui.UItem('controller.liking_msg', style='readonly', visible_when="controller.selected_design == ''"), _traitsui.Item( 'controller.selected_consumer_liking_sets', editor=_traitsui.CheckListEditor( name='controller.available_consumer_liking_sets'), style='custom', show_label=False, # width=400, enabled_when="controller.selected_design != ''", ), show_border=True, ), orientation='horizontal', ), # _traitsui.Item('controller.model_struct', style='simple', label='Model'),
class CSVJoiner(tr.HasStrictTraits):
    '''Concatenate several CSV files into one output file.

    Each input file may have a configurable number of leading/trailing
    lines stripped; progress is reported through the `progress` trait.
    '''

    open_csv_files = tr.Button
    csv_files = tr.List(CSVFile)
    num_of_first_lines_to_show = tr.Range(low=0, high=10**9, value=10,
                                          mode='spinner')
    num_of_last_lines_to_show = tr.Range(low=0, high=10**9, value=10,
                                         mode='spinner')
    selected = tr.Instance(CSVFile)
    join_csv_files = tr.Button
    accumulate_time = tr.Bool
    files_end_with_empty_line = tr.Bool(True)
    columns_headers = tr.List
    time_column = tr.Enum(values='columns_headers')
    progress = tr.Int

    def _join_csv_files_fired(self):
        '''Write all files, minus their skipped lines, into one output.'''
        output_file_path = self.get_output_file_path()
        with open(output_file_path, 'w') as outfile:
            # enumerate instead of zip(files, range(len(files)))
            for i, csv_file in enumerate(self.csv_files):
                current_line = 1
                num_of_first_lines_to_skip = csv_file.first_lines_to_skip
                num_of_last_lines_to_skip = csv_file.last_lines_to_skip
                last_line_to_write = csv_file.get_lines_number() \
                    - num_of_last_lines_to_skip
                progress_of_a_file = 1.0 / len(self.csv_files)
                initial_progress = i / len(self.csv_files)
                with open(csv_file.path) as opened_csv_file:
                    for line in opened_csv_file:
                        # Copy only the lines inside the kept window.
                        if current_line > num_of_first_lines_to_skip and \
                                current_line <= last_line_to_write:
                            outfile.write(line)
                            self.progress = int(
                                (initial_progress + progress_of_a_file *
                                 (current_line / last_line_to_write)) * 100)
                        current_line += 1
                if not self.files_end_with_empty_line:
                    # Separate files that lack a trailing newline.
                    outfile.write('\n')
        self.progress = 100
        dialog = MessageDialog(
            title='Finished!',
            message='Files joined successfully, see "' +
                    output_file_path + '"')
        dialog.open()

    def get_output_file_path(self):
        '''Output path: first input path with "_joined" before the extension.'''
        file_path = self.csv_files[0].path
        file_path_without_ext = os.path.splitext(file_path)[0]
        file_ext = os.path.splitext(file_path)[1]
        return file_path_without_ext + '_joined' + file_ext

    def _accumulate_time_changed(self):
        # Time accumulation is not implemented yet (option disabled in
        # the view below).
        pass

    def _num_of_first_lines_to_show_changed(self):
        for csv_file in self.csv_files:
            csv_file.num_of_first_lines_to_show = \
                self.num_of_first_lines_to_show

    def _num_of_last_lines_to_show_changed(self):
        for csv_file in self.csv_files:
            csv_file.num_of_last_lines_to_show = \
                self.num_of_last_lines_to_show

    def _open_csv_files_fired(self):
        '''Let the user pick the CSV files and wrap each in a CSVFile.'''
        extensions = ['*.csv', '*.txt']  # handle only one extension...
        wildcard = ';'.join(extensions)
        dialog = pf.FileDialog(title='Select csv files',
                               action='open files',
                               wildcard=wildcard,
                               default_path=os.path.expanduser("~"))
        result = dialog.open()
        csv_files_paths = []
        # Test if the user opened a file to avoid throwing an exception
        # if he doesn't
        if result == pf.OK:
            csv_files_paths = dialog.paths
        else:
            return
        self.csv_files = []
        for file_path in csv_files_paths:
            csv_file = CSVFile(
                path=file_path,
                num_of_first_lines_to_show=self.num_of_first_lines_to_show,
                num_of_last_lines_to_show=self.num_of_last_lines_to_show,
            )
            self.csv_files.append(csv_file)

    # =========================================================================
    # Configuration of the view
    # =========================================================================
    traits_view = ui.View(
        ui.VGroup(
            ui.UItem('open_csv_files', width=150),
            ui.HGroup(ui.Item('num_of_first_lines_to_show'), ui.spring),
            ui.HGroup(ui.Item('num_of_last_lines_to_show'), ui.spring),
            ui.HGroup(
                ui.Item('files_end_with_empty_line'),
                # ui.Item('accumulate_time', enabled_when='False'),
                ui.spring),
            ui.VGroup(
                ui.Item('csv_files',
                        show_label=False,
                        style='custom',
                        editor=ui.ListEditor(use_notebook=True,
                                             deletable=False,
                                             selected='selected',
                                             export='DockWindowShell',
                                             page_name='.name'))),
            ui.HGroup(
                ui.UItem('join_csv_files', width=150),
                ui.UItem('progress',
                         editor=ProgressEditor(min=0, max=100))),
            show_border=True),
        title='CSV files joiner',
        resizable=True,
        width=0.6,
        height=0.7)
class CreasePatternViz3D(Viz3D):
    '''Visualize the crease Pattern

    Renders the crease pattern as a triangular mesh (with optional
    crease lines as tubes) or, when no facets exist, as a point cloud.
    '''

    # Optional index arrays restricting which nodes/lines/facets to show.
    N_selection = Array(int)
    L_selection = Array(int)
    F_selection = Array(int)

    # Whether to render the crease lines on top of the facets.
    lines = Bool(True)

    N_L_F = Property(Tuple)
    '''Geometry with applied selection arrays.
    '''

    def _get_N_L_F(self):
        cp = self.vis3d
        x, L, F = cp.x, cp.L, cp.F
        if len(self.N_selection):
            x = x[self.N_selection]
        if len(self.L_selection):
            L = L[self.L_selection]
        if len(self.F_selection):
            F = F[self.F_selection]
        return x, L, F

    min_max = Property
    '''Rectangular bounding box.
    '''

    def _get_min_max(self):
        vis3d = self.vis3d
        return np.min(vis3d.x, axis=0), np.max(vis3d.x, axis=0)

    facet_color = Color((0.6, 0.625, 0.683))
    # facet_color = Color((0.0, 0.425, 0.683))
    # facet_color = Color((0.4, 0.4, 0.7))
    # facet_color = Color((0.0 / 255.0, 84.0 / 255.0, 159.0 / 255.0))
    # facet_color = Color((64.0 / 255.0, 127.0 / 255.0, 183.0 / 255.0))
    # facet_color = Color((0.0 / 255.0, 97.0 / 255.0, 101.0 / 255.0))

    def plot(self):
        '''Create the mayavi pipelines for the current geometry.'''
        m = self.ftv.mlab
        N, L, F = self.N_L_F
        x, y, z = N.T
        if len(F) > 0:
            cp_pipe = m.triangular_mesh(
                x, y, z, F,
                line_width=3,
                # color=self.facet_color.toTuple()[:-1],
                color=(0.6, 0.625, 0.683),
                # color=(0.0, 0.425, 0.683),
                name='Crease pattern')
            # color=self.facet_color.toTuple()[:-1])
            if self.lines is True:
                cp_pipe.mlab_source.dataset.lines = L
                tube = m.pipeline.tube(cp_pipe,
                                       tube_radius=self.tube_radius)
                lines = m.pipeline.surface(tube, color=(0.1, 0.1, 0.1))
                self.pipes['lines'] = lines
            # BUG FIX: update() reads self.pipes['cp_pipe'] — the
            # original only stored it in the point-cloud branch, so
            # update() raised KeyError for meshes with facets.
            self.pipes['cp_pipe'] = cp_pipe
        else:
            cp_pipe = m.points3d(x, y, z, scale_factor=0.2)
            cp_pipe.mlab_source.dataset.lines = L
            self.pipes['cp_pipe'] = cp_pipe

    def update(self, vot=0.0):
        '''Push the current node coordinates into the existing pipeline.'''
        N = self.N_L_F[0]
        cp_pipe = self.pipes['cp_pipe']
        cp_pipe.mlab_source.set(points=N)

    def _get_bounding_box(self):
        N = self.N_L_F[0]
        return np.min(N, axis=0), np.max(N, axis=0)

    def _get_max_length(self):
        return np.linalg.norm(self._get_bounding_box())

    tube_radius = Float(0.03)
    line_width_factor = Float(0.0024)

    def _get_line_width(self):
        return self._get_max_length() * self.line_width_factor

    traits_view = tui.View(
        tui.VGroup(tui.Include('viz3d_view'),
                   tui.UItem('tube_radius'),
                   tui.UItem('line_width_factor')))

    selection_view = traits_view
class HCFT(tr.HasStrictTraits): '''High-Cycle Fatigue Tool ''' #========================================================================= # Traits definitions #========================================================================= decimal = tr.Enum(',', '.') delimiter = tr.Str(';') records_per_second = tr.Float(100) take_time_from_time_column = tr.Bool(True) file_csv = tr.File open_file_csv = tr.Button('Input file') skip_first_rows = tr.Range(low=1, high=10**9, mode='spinner') columns_headers_list = tr.List([]) x_axis = tr.Enum(values='columns_headers_list') y_axis = tr.Enum(values='columns_headers_list') force_column = tr.Enum(values='columns_headers_list') time_column = tr.Enum(values='columns_headers_list') x_axis_multiplier = tr.Enum(1, -1) y_axis_multiplier = tr.Enum(-1, 1) npy_folder_path = tr.Str file_name = tr.Str apply_filters = tr.Bool plot_settings_btn = tr.Button plot_settings = PlotSettings() plot_settings_active = tr.Bool normalize_cycles = tr.Bool smooth = tr.Bool plot_every_nth_point = tr.Range(low=1, high=1000000, mode='spinner') old_peak_force_before_cycles = tr.Float peak_force_before_cycles = tr.Float window_length = tr.Range(low=1, high=10**9 - 1, value=31, mode='spinner') polynomial_order = tr.Range(low=1, high=10**9, value=2, mode='spinner') activate = tr.Bool(False) add_plot = tr.Button add_creep_plot = tr.Button(desc='Creep plot of X axis array') clear_plot = tr.Button parse_csv_to_npy = tr.Button generate_filtered_and_creep_npy = tr.Button add_columns_average = tr.Button force_max = tr.Float(100) force_min = tr.Float(40) min_cycle_force_range = tr.Float(50) cutting_method = tr.Enum( 'Define min cycle range(force difference)', 'Define Max, Min') columns_to_be_averaged = tr.List figure = tr.Instance(mpl.figure.Figure) log = tr.Str('') clear_log = tr.Button def _figure_default(self): figure = mpl.figure.Figure(facecolor='white') figure.set_tight_layout(True) return figure 
#========================================================================= # File management #========================================================================= def _open_file_csv_fired(self): try: self.reset() """ Handles the user clicking the 'Open...' button. """ extns = ['*.csv', ] # seems to handle only one extension... wildcard = '|'.join(extns) dialog = FileDialog(title='Select text file', action='open', wildcard=wildcard, default_path=self.file_csv) result = dialog.open() """ Test if the user opened a file to avoid throwing an exception if he doesn't """ if result == OK: self.file_csv = dialog.path else: return """ Filling x_axis and y_axis with values """ headers_array = np.array( pd.read_csv( self.file_csv, delimiter=self.delimiter, decimal=self.decimal, nrows=1, header=None ) )[0] for i in range(len(headers_array)): headers_array[i] = self.get_valid_file_name(headers_array[i]) self.columns_headers_list = list(headers_array) """ Saving file name and path and creating NPY folder """ dir_path = os.path.dirname(self.file_csv) self.npy_folder_path = os.path.join(dir_path, 'NPY') if os.path.exists(self.npy_folder_path) == False: os.makedirs(self.npy_folder_path) self.file_name = os.path.splitext( os.path.basename(self.file_csv))[0] except Exception as e: self.deal_with_exception(e) def _parse_csv_to_npy_fired(self): # Run method on different thread so GUI doesn't freeze #thread = Thread(target = threaded_function, function_args = (10,)) thread = Thread(target=self.parse_csv_to_npy_fired) thread.start() def parse_csv_to_npy_fired(self): try: self.print_custom('Parsing csv into npy files...') for i in range(len(self.columns_headers_list) - len(self.columns_to_be_averaged)): current_column_name = self.columns_headers_list[i] column_array = np.array(pd.read_csv( self.file_csv, delimiter=self.delimiter, decimal=self.decimal, skiprows=self.skip_first_rows, usecols=[i])) if current_column_name == self.time_column and \ self.take_time_from_time_column == False: 
column_array = np.arange(start=0.0, stop=len(column_array) / self.records_per_second, step=1.0 / self.records_per_second) np.save(os.path.join(self.npy_folder_path, self.file_name + '_' + current_column_name + '.npy'), column_array) """ Exporting npy arrays of averaged columns """ for columns_names in self.columns_to_be_averaged: temp = np.zeros((1)) for column_name in columns_names: temp = temp + np.load(os.path.join(self.npy_folder_path, self.file_name + '_' + column_name + '.npy')).flatten() avg = temp / len(columns_names) avg_file_suffex = self.get_suffex_for_columns_to_be_averaged( columns_names) np.save(os.path.join(self.npy_folder_path, self.file_name + '_' + avg_file_suffex + '.npy'), avg) self.print_custom('Finsihed parsing csv into npy files.') except Exception as e: self.deal_with_exception(e) def get_suffex_for_columns_to_be_averaged(self, columns_names): suffex_for_saved_file_name = 'avg_' + '_'.join(columns_names) return suffex_for_saved_file_name def get_valid_file_name(self, original_file_name): valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits) new_valid_file_name = ''.join( c for c in original_file_name if c in valid_chars) return new_valid_file_name def _clear_plot_fired(self): self.figure.clear() self.data_changed = True def _add_columns_average_fired(self): try: columns_average = ColumnsAverage() for name in self.columns_headers_list: columns_average.columns.append(Column(column_name=name)) # kind='modal' pauses the implementation until the window is closed columns_average.configure_traits(kind='modal') columns_to_be_averaged_temp = [] for i in columns_average.columns: if i.selected: columns_to_be_averaged_temp.append(i.column_name) if columns_to_be_averaged_temp: # If it's not empty self.columns_to_be_averaged.append(columns_to_be_averaged_temp) avg_file_suffex = self.get_suffex_for_columns_to_be_averaged( columns_to_be_averaged_temp) self.columns_headers_list.append(avg_file_suffex) except Exception as e: 
self.deal_with_exception(e) def _generate_filtered_and_creep_npy_fired(self): # Run method on different thread so GUI doesn't freeze #thread = Thread(target = threaded_function, function_args = (10,)) thread = Thread(target=self.generate_filtered_and_creep_npy_fired) thread.start() def generate_filtered_and_creep_npy_fired(self): try: if self.npy_files_exist(os.path.join( self.npy_folder_path, self.file_name + '_' + self.force_column + '.npy')) == False: return self.print_custom('Generating filtered and creep files...') # 1- Export filtered force force = np.load(os.path.join(self.npy_folder_path, self.file_name + '_' + self.force_column + '.npy')).flatten() peak_force_before_cycles_index = np.where( abs((force)) > abs(self.peak_force_before_cycles))[0][0] force_ascending = force[0:peak_force_before_cycles_index] force_rest = force[peak_force_before_cycles_index:] force_max_indices, force_min_indices = self.get_array_max_and_min_indices( force_rest) force_max_min_indices = np.concatenate( (force_min_indices, force_max_indices)) force_max_min_indices.sort() force_rest_filtered = force_rest[force_max_min_indices] force_filtered = np.concatenate( (force_ascending, force_rest_filtered)) np.save(os.path.join(self.npy_folder_path, self.file_name + '_' + self.force_column + '_filtered.npy'), force_filtered) # 2- Export filtered displacements for i in range(0, len(self.columns_headers_list)): if self.columns_headers_list[i] != self.force_column and \ self.columns_headers_list[i] != self.time_column: disp = np.load(os.path.join(self.npy_folder_path, self.file_name + '_' + self.columns_headers_list[i] + '.npy')).flatten() disp_ascending = disp[0:peak_force_before_cycles_index] disp_rest = disp[peak_force_before_cycles_index:] if self.activate == True: disp_ascending = savgol_filter( disp_ascending, window_length=self.window_length, polyorder=self.polynomial_order) disp_rest_filtered = disp_rest[force_max_min_indices] filtered_disp = np.concatenate( (disp_ascending, 
disp_rest_filtered)) np.save(os.path.join(self.npy_folder_path, self.file_name + '_' + self.columns_headers_list[i] + '_filtered.npy'), filtered_disp) # 3- Export creep for displacements # Cutting unwanted max min values to get correct full cycles and remove # false min/max values caused by noise if self.cutting_method == "Define Max, Min": force_max_indices_cutted, force_min_indices_cutted = \ self.cut_indices_of_min_max_range(force_rest, force_max_indices, force_min_indices, self.force_max, self.force_min) elif self.cutting_method == "Define min cycle range(force difference)": force_max_indices_cutted, force_min_indices_cutted = \ self.cut_indices_of_defined_range(force_rest, force_max_indices, force_min_indices, self.min_cycle_force_range) self.print_custom("Cycles number= ", len(force_min_indices)) self.print_custom("Cycles number after cutting fake cycles = ", len(force_min_indices_cutted)) for i in range(0, len(self.columns_headers_list)): if self.columns_headers_list[i] != self.time_column: array = np.load(os.path.join(self.npy_folder_path, self.file_name + '_' + self.columns_headers_list[i] + '.npy')).flatten() array_rest = array[peak_force_before_cycles_index:] array_rest_maxima = array_rest[force_max_indices_cutted] array_rest_minima = array_rest[force_min_indices_cutted] np.save(os.path.join(self.npy_folder_path, self.file_name + '_' + self.columns_headers_list[i] + '_max.npy'), array_rest_maxima) np.save(os.path.join(self.npy_folder_path, self.file_name + '_' + self.columns_headers_list[i] + '_min.npy'), array_rest_minima) self.print_custom('Filtered and creep npy files are generated.') except Exception as e: self.deal_with_exception(e) def cut_indices_of_min_max_range(self, array, max_indices, min_indices, range_upper_value, range_lower_value): cutted_max_indices = [] cutted_min_indices = [] for max_index in max_indices: if abs(array[max_index]) > abs(range_upper_value): cutted_max_indices.append(max_index) for min_index in min_indices: if 
abs(array[min_index]) < abs(range_lower_value): cutted_min_indices.append(min_index) return cutted_max_indices, cutted_min_indices def cut_indices_of_defined_range(self, array, max_indices, min_indices, range_): cutted_max_indices = [] cutted_min_indices = [] for max_index, min_index in zip(max_indices, min_indices): if abs(array[max_index] - array[min_index]) > range_: cutted_max_indices.append(max_index) cutted_min_indices.append(min_index) if max_indices.size > min_indices.size: cutted_max_indices.append(max_indices[-1]) elif min_indices.size > max_indices.size: cutted_min_indices.append(min_indices[-1]) return cutted_max_indices, cutted_min_indices def get_array_max_and_min_indices(self, input_array): # Checking dominant sign positive_values_count = np.sum(np.array(input_array) >= 0) negative_values_count = input_array.size - positive_values_count # Getting max and min indices if (positive_values_count > negative_values_count): force_max_indices = self.get_max_indices(input_array) force_min_indices = self.get_min_indices(input_array) else: force_max_indices = self.get_min_indices(input_array) force_min_indices = self.get_max_indices(input_array) return force_max_indices, force_min_indices def get_max_indices(self, a): # This method doesn't qualify first and last elements as max max_indices = [] i = 1 while i < a.size - 1: previous_element = a[i - 1] # Skip repeated elements and record previous element value first_repeated_element = True while a[i] == a[i + 1] and i < a.size - 1: if first_repeated_element: previous_element = a[i - 1] first_repeated_element = False if i < a.size - 2: i += 1 else: break if a[i] > a[i + 1] and a[i] > previous_element: max_indices.append(i) i += 1 return np.array(max_indices) def get_min_indices(self, a): # This method doesn't qualify first and last elements as min min_indices = [] i = 1 while i < a.size - 1: previous_element = a[i - 1] # Skip repeated elements and record previous element value first_repeated_element = True while 
a[i] == a[i + 1]: if first_repeated_element: previous_element = a[i - 1] first_repeated_element = False if i < a.size - 2: i += 1 else: break if a[i] < a[i + 1] and a[i] < previous_element: min_indices.append(i) i += 1 return np.array(min_indices) def _activate_changed(self): if self.activate == False: self.old_peak_force_before_cycles = self.peak_force_before_cycles self.peak_force_before_cycles = 0 else: self.peak_force_before_cycles = self.old_peak_force_before_cycles def _window_length_changed(self, new): if new <= self.polynomial_order: dialog = MessageDialog( title='Attention!', message='Window length must be bigger than polynomial order.') dialog.open() if new % 2 == 0 or new <= 0: dialog = MessageDialog( title='Attention!', message='Window length must be odd positive integer.') dialog.open() def _polynomial_order_changed(self, new): if new >= self.window_length: dialog = MessageDialog( title='Attention!', message='Polynomial order must be smaller than window length.') dialog.open() #========================================================================= # Plotting #========================================================================= def _plot_settings_btn_fired(self): try: self.plot_settings.configure_traits(kind='modal') except Exception as e: self.deal_with_exception(e) def npy_files_exist(self, path): if os.path.exists(path) == True: return True else: # TODO fix this self.print_custom( 'Please parse csv file to generate npy files first.') # dialog = MessageDialog( # title='Attention!', # message='Please parse csv file to generate npy files first.') # dialog.open() return False def filtered_and_creep_npy_files_exist(self, path): if os.path.exists(path) == True: return True else: # TODO fix this self.print_custom( 'Please generate filtered and creep npy files first.') # dialog = MessageDialog( # title='Attention!', # message='Please generate filtered and creep npy files first.') # dialog.open() return False data_changed = tr.Event def 
_add_plot_fired(self): # Run method on different thread so GUI doesn't freeze #thread = Thread(target = threaded_function, function_args = (10,)) thread = Thread(target=self.add_plot_fired) thread.start() def add_plot_fired(self): try: if self.apply_filters: if self.filtered_and_creep_npy_files_exist(os.path.join( self.npy_folder_path, self.file_name + '_' + self.x_axis + '_filtered.npy')) == False: return x_axis_name = self.x_axis + '_filtered' y_axis_name = self.y_axis + '_filtered' self.print_custom('Loading npy files...') # when mmap_mode!=None, the array will be loaded as 'numpy.memmap' # object which doesn't load the array to memory until it's # indexed x_axis_array = np.load(os.path.join(self.npy_folder_path, self.file_name + '_' + self.x_axis + '_filtered.npy'), mmap_mode='r') y_axis_array = np.load(os.path.join(self.npy_folder_path, self.file_name + '_' + self.y_axis + '_filtered.npy'), mmap_mode='r') else: if self.npy_files_exist(os.path.join( self.npy_folder_path, self.file_name + '_' + self.x_axis + '.npy')) == False: return x_axis_name = self.x_axis y_axis_name = self.y_axis self.print_custom('Loading npy files...') # when mmap_mode!=None, the array will be loaded as 'numpy.memmap' # object which doesn't load the array to memory until it's # indexed x_axis_array = np.load(os.path.join(self.npy_folder_path, self.file_name + '_' + self.x_axis + '.npy'), mmap_mode='r') y_axis_array = np.load(os.path.join(self.npy_folder_path, self.file_name + '_' + self.y_axis + '.npy'), mmap_mode='r') if self.plot_settings_active: print(self.plot_settings.first_rows) print(self.plot_settings.distance) print(self.plot_settings.num_of_rows_after_each_distance) print(np.size(x_axis_array)) indices = self.get_indices_array(np.size(x_axis_array), self.plot_settings.first_rows, self.plot_settings.distance, self.plot_settings.num_of_rows_after_each_distance) x_axis_array = self.x_axis_multiplier * x_axis_array[indices] y_axis_array = self.y_axis_multiplier * 
y_axis_array[indices] else: x_axis_array = self.x_axis_multiplier * x_axis_array y_axis_array = self.y_axis_multiplier * y_axis_array self.print_custom('Adding Plot...') mpl.rcParams['agg.path.chunksize'] = 10000 ax = self.figure.add_subplot(1, 1, 1) ax.set_xlabel(x_axis_name) ax.set_ylabel(y_axis_name) ax.plot(x_axis_array, y_axis_array, 'k', linewidth=1.2, color=np.random.rand(3), label=self.file_name + ', ' + x_axis_name) ax.legend() self.data_changed = True self.print_custom('Finished adding plot.') except Exception as e: self.deal_with_exception(e) def _add_creep_plot_fired(self): # Run method on different thread so GUI doesn't freeze #thread = Thread(target = threaded_function, function_args = (10,)) thread = Thread(target=self.add_creep_plot_fired) thread.start() def add_creep_plot_fired(self): try: if self.filtered_and_creep_npy_files_exist(os.path.join( self.npy_folder_path, self.file_name + '_' + self.x_axis + '_max.npy')) == False: return self.print_custom('Loading npy files...') disp_max = self.x_axis_multiplier * \ np.load(os.path.join(self.npy_folder_path, self.file_name + '_' + self.x_axis + '_max.npy')) disp_min = self.x_axis_multiplier * \ np.load(os.path.join(self.npy_folder_path, self.file_name + '_' + self.x_axis + '_min.npy')) complete_cycles_number = disp_max.size self.print_custom('Adding creep-fatigue plot...') mpl.rcParams['agg.path.chunksize'] = 10000 ax = self.figure.add_subplot(1, 1, 1) ax.set_xlabel('Cycles number') ax.set_ylabel(self.x_axis) if self.plot_every_nth_point > 1: disp_max = disp_max[0::self.plot_every_nth_point] disp_min = disp_min[0::self.plot_every_nth_point] if self.smooth: # Keeping the first item of the array and filtering the rest disp_max = np.concatenate(( np.array([disp_max[0]]), savgol_filter(disp_max[1:], window_length=self.window_length, polyorder=self.polynomial_order) )) disp_min = np.concatenate(( np.array([disp_min[0]]), savgol_filter(disp_min[1:], window_length=self.window_length, 
polyorder=self.polynomial_order) )) if self.normalize_cycles: ax.plot(np.linspace(0, 1., disp_max.size), disp_max, 'k', linewidth=1.2, color=np.random.rand(3), label='Max' + ', ' + self.file_name + ', ' + self.x_axis) ax.plot(np.linspace(0, 1., disp_min.size), disp_min, 'k', linewidth=1.2, color=np.random.rand(3), label='Min' + ', ' + self.file_name + ', ' + self.x_axis) else: ax.plot(np.linspace(0, complete_cycles_number, disp_max.size), disp_max, 'k', linewidth=1.2, color=np.random.rand(3), label='Max' + ', ' + self.file_name + ', ' + self.x_axis) ax.plot(np.linspace(0, complete_cycles_number, disp_min.size), disp_min, 'k', linewidth=1.2, color=np.random.rand(3), label='Min' + ', ' + self.file_name + ', ' + self.x_axis) ax.legend() self.data_changed = True self.print_custom('Finished adding creep-fatigue plot.') except Exception as e: self.deal_with_exception(e) def get_indices_array(self, array_size, first_rows, distance, num_of_rows_after_each_distance): result_1 = np.arange(first_rows) result_2 = np.arange(start=first_rows, stop=array_size, step=distance + num_of_rows_after_each_distance) result_2_updated = np.array([], dtype=np.int_) for result_2_value in result_2: data_slice = np.arange(result_2_value, result_2_value + num_of_rows_after_each_distance) result_2_updated = np.concatenate((result_2_updated, data_slice)) result = np.concatenate((result_1, result_2_updated)) return result def reset(self): self.columns_to_be_averaged = [] self.log = '' def print_custom(self, *input_args): print(*input_args) if self.log == '': self.log = ''.join(str(e) for e in list(input_args)) else: self.log = self.log + '\n' + \ ''.join(str(e) for e in list(input_args)) def deal_with_exception(self, e): self.print_custom('SOMETHING WENT WRONG!') self.print_custom('--------- Error message: ---------') self.print_custom(traceback.format_exc()) self.print_custom('----------------------------------') def _clear_log_fired(self): self.log = '' 
#========================================================================= # Configuration of the view #========================================================================= traits_view = ui.View( ui.HSplit( ui.VSplit( ui.VGroup( ui.VGroup( ui.Item('decimal'), ui.Item('delimiter'), ui.HGroup( ui.UItem('open_file_csv', has_focus=True), ui.UItem('file_csv', style='readonly', width=0.1)), label='Importing csv file', show_border=True)), ui.VGroup( ui.VGroup( ui.VGroup( ui.Item('take_time_from_time_column'), ui.Item('time_column', enabled_when='take_time_from_time_column == True'), ui.Item('records_per_second', enabled_when='take_time_from_time_column == False'), label='Time calculation', show_border=True), ui.UItem('add_columns_average'), ui.Item('skip_first_rows'), ui.UItem('parse_csv_to_npy', resizable=True), label='Processing csv file', show_border=True)), ui.VGroup( ui.VGroup( ui.HGroup(ui.Item('x_axis'), ui.Item( 'x_axis_multiplier')), ui.HGroup(ui.Item('y_axis'), ui.Item( 'y_axis_multiplier')), ui.VGroup( ui.HGroup(ui.UItem('add_plot'), ui.Item('apply_filters'), ui.Item('plot_settings_btn', label='Settings', show_label=False, enabled_when='plot_settings_active == True'), ui.Item('plot_settings_active', show_label=False) ), show_border=True, label='Plotting X axis with Y axis' ), ui.VGroup( ui.HGroup(ui.UItem('add_creep_plot'), ui.VGroup( ui.Item('normalize_cycles'), ui.Item('smooth'), ui.Item('plot_every_nth_point')) ), show_border=True, label='Plotting Creep-fatigue of X axis variable' ), ui.UItem('clear_plot', resizable=True), show_border=True, label='Plotting')) ), ui.VGroup( ui.Item('force_column'), ui.VGroup(ui.VGroup( ui.Item('window_length'), ui.Item('polynomial_order'), enabled_when='activate == True or smooth == True'), show_border=True, label='Smoothing parameters (Savitzky-Golay filter):' ), ui.VGroup(ui.VGroup( ui.Item('activate'), ui.Item('peak_force_before_cycles', enabled_when='activate == True') ), show_border=True, label='Smooth ascending 
branch for all displacements:' ), ui.VGroup(ui.Item('cutting_method'), ui.VGroup(ui.Item('force_max'), ui.Item('force_min'), label='Max, Min:', show_border=True, enabled_when='cutting_method == "Define Max, Min"'), ui.VGroup(ui.Item('min_cycle_force_range'), label='Min cycle force range:', show_border=True, enabled_when='cutting_method == "Define min cycle range(force difference)"'), show_border=True, label='Cut fake cycles for creep:'), ui.VSplit( ui.UItem('generate_filtered_and_creep_npy'), ui.VGroup( ui.Item('log', width=0.1, style='custom'), ui.UItem('clear_log'))), show_border=True, label='Filters' ), ui.UItem('figure', editor=MPLFigureEditor(), resizable=True, springy=True, width=0.8, label='2d plots') ), title='High-cycle fatigue tool', resizable=True, width=0.85, height=0.7 )
class Vis2D(HasStrictTraits):
    """Base class for model objects that can offer 2d visualizations.

    Each state and operator object can be associated with several
    visualization objects (shortened class name Viz2D/Viz3D).  To keep the
    visualization subsystem independent of the class structure, objects
    supporting visualization inherit from this class, which maintains the
    registry of applicable visualization classes and the list of created
    visualization adapters.
    """

    def setup(self):
        pass

    # Root of the simulation to extract the data.
    sim = WeakRef

    # Visual object time.
    vot = Float(0.0, time_change=True)

    # Visualization classes applicable to this object.
    viz2d_classes = Dict

    # Keys of the viz2d classes.
    viz2d_class_names = Property(List(Str), depends_on='viz2d_classes')

    @cached_property
    def _get_viz2d_class_names(self):
        return list(self.viz2d_classes.keys())

    selected_viz2d_class = Str

    def _selected_viz2d_class_default(self):
        # Default to the first registered class name, or '' when none exist.
        names = self.viz2d_class_names
        return names[0] if names else ''

    add_selected_viz2d = Button(label='Add plot viz2d')

    def _add_selected_viz2d_fired(self):
        self.add_viz2d(self.selected_viz2d_class, '<unnamed>')

    def add_viz2d(self, class_name, name, **kw):
        """Instantiate the registered viz2d class *class_name* under *name*
        (falls back to *class_name* when *name* is empty) and register it
        with this object and, when present, with the ui's viz sheet."""
        if name == '':
            name = class_name
        factory = self.viz2d_classes[class_name]
        new_viz2d = factory(name=name, vis2d=self, **kw)
        self.viz2d.append(new_viz2d)
        if hasattr(self, 'ui') and self.ui:
            self.ui.viz_sheet.viz2d_list.append(new_viz2d)

    # Visualization adapters created for this object.
    viz2d = List(Viz2D)

    actions = ui.HGroup(
        ui.UItem('add_selected_viz2d'),
        ui.UItem('selected_viz2d_class',
                 springy=True,
                 editor=ui.EnumEditor(name='object.viz2d_class_names', )
                 ),
    )

    def plt(self, name, label=None):
        # Convenience factory for a plot adapter bound to this object.
        return Viz2DPlot(plot_fn=name, label=label, vis2d=self)

    view = ui.View(
        ui.Include('actions'),
        resizable=True
    )
class HCFF(tr.HasStrictTraits):
    '''High-Cycle Fatigue Filter.

    Reads a cyclic-test csv file, caches each column as an .npy file,
    reduces the cyclic part of the force record to its local extrema
    (smoothing the ascending branch with a Savitzky-Golay filter) and
    plots the filtered curves and the per-cycle max/min (creep) curves.
    '''

    #=========================================================================
    # Traits definitions
    #=========================================================================
    decimal = tr.Enum(',', '.')
    delimiter = tr.Str(';')
    file_csv = tr.File
    open_file_csv = tr.Button('Input file')
    skip_rows = tr.Int(4, auto_set=False, enter_set=True)
    columns_headers_list = tr.List([])
    x_axis = tr.Enum(values='columns_headers_list')
    y_axis = tr.Enum(values='columns_headers_list')
    x_axis_multiplier = tr.Enum(1, -1)
    y_axis_multiplier = tr.Enum(-1, 1)
    npy_folder_path = tr.Str
    file_name = tr.Str
    apply_filters = tr.Bool
    force_name = tr.Str('Kraft')
    peak_force_before_cycles = tr.Float(30)
    plots_num = tr.Enum(1, 2, 3, 4, 6, 9)
    plot_list = tr.List()
    plot = tr.Button
    add_plot = tr.Button
    add_creep_plot = tr.Button
    parse_csv_to_npy = tr.Button
    generate_filtered_npy = tr.Button
    add_columns_average = tr.Button
    force_max = tr.Float(100)
    force_min = tr.Float(40)
    figure = tr.Instance(Figure)

    #=========================================================================
    # File management
    #=========================================================================
    def _open_file_csv_fired(self):
        """ Handles the user clicking the 'Open...' button. """
        extns = ['*.csv', ]  # seems to handle only one extension...
        wildcard = '|'.join(extns)

        dialog = FileDialog(title='Select text file',
                            action='open', wildcard=wildcard,
                            default_path=self.file_csv)
        result = dialog.open()
        # Robustness fix: the original ignored the dialog result and went
        # on with an empty path when the user cancelled.  Abort instead
        # (consistent with the newer HCFF variant in this file).
        if result != OK:
            return
        self.file_csv = dialog.path

        """ Filling x_axis and y_axis with values """
        headers_array = np.array(
            pd.read_csv(
                self.file_csv, delimiter=self.delimiter, decimal=self.decimal,
                nrows=1, header=None
            )
        )[0]
        for i in range(len(headers_array)):
            headers_array[i] = self.get_valid_file_name(headers_array[i])
        self.columns_headers_list = list(headers_array)

        """ Saving file name and path and creating NPY folder """
        dir_path = os.path.dirname(self.file_csv)
        self.npy_folder_path = os.path.join(dir_path, 'NPY')
        if not os.path.exists(self.npy_folder_path):
            os.makedirs(self.npy_folder_path)

        self.file_name = os.path.splitext(os.path.basename(self.file_csv))[0]

    #=========================================================================
    # Parameters of the filter algorithm
    #=========================================================================
    def _figure_default(self):
        figure = Figure(facecolor='white')
        figure.set_tight_layout(True)
        return figure

    def _parse_csv_to_npy_fired(self):
        """Cache every csv column as its own .npy file in the NPY folder."""
        print('Parsing csv into npy files...')

        for i in range(len(self.columns_headers_list)):
            column_array = np.array(pd.read_csv(
                self.file_csv, delimiter=self.delimiter, decimal=self.decimal,
                skiprows=self.skip_rows, usecols=[i]))
            np.save(os.path.join(self.npy_folder_path, self.file_name +
                                 '_' + self.columns_headers_list[i] + '.npy'),
                    column_array)

        # typo fix: 'Finsihed' -> 'Finished'
        print('Finished parsing csv into npy files.')

    def get_valid_file_name(self, original_file_name):
        """Return *original_file_name* stripped of characters that are not
        safe in a file name."""
        valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
        new_valid_file_name = ''.join(
            c for c in original_file_name if c in valid_chars)
        return new_valid_file_name

    def _generate_filtered_npy_fired(self):
        """Generate '<col>_filtered.npy' (ascending branch + force extrema
        only) and '<col>_max.npy'/'<col>_min.npy' (creep) files."""

        # 1- Export filtered force
        force = np.load(os.path.join(self.npy_folder_path,
                                     self.file_name + '_' + self.force_name +
                                     '.npy')).flatten()
        # First index where |force| exceeds the ascending-branch peak.
        peak_force_before_cycles_index = np.where(
            abs((force)) > abs(self.peak_force_before_cycles))[0][0]
        force_ascending = force[0:peak_force_before_cycles_index]
        force_rest = force[peak_force_before_cycles_index:]

        force_max_indices, force_min_indices = \
            self.get_array_max_and_min_indices(force_rest)

        force_max_min_indices = np.concatenate(
            (force_min_indices, force_max_indices))
        force_max_min_indices.sort()

        force_rest_filtered = force_rest[force_max_min_indices]
        force_filtered = np.concatenate((force_ascending,
                                         force_rest_filtered))
        np.save(os.path.join(self.npy_folder_path, self.file_name + '_' +
                             self.force_name + '_filtered.npy'),
                force_filtered)

        # 2- Export filtered displacements
        # TODO I skipped time with presuming it's the first column
        for i in range(1, len(self.columns_headers_list)):
            if self.columns_headers_list[i] != str(self.force_name):
                disp = np.load(os.path.join(
                    self.npy_folder_path,
                    self.file_name + '_' + self.columns_headers_list[i] +
                    '.npy')).flatten()
                disp_ascending = disp[0:peak_force_before_cycles_index]
                disp_rest = disp[peak_force_before_cycles_index:]
                # Smooth the ascending branch; in the cyclic part keep only
                # the samples at the force extrema.
                disp_ascending = savgol_filter(
                    disp_ascending, window_length=51, polyorder=2)
                disp_rest = disp_rest[force_max_min_indices]
                filtered_disp = np.concatenate((disp_ascending, disp_rest))
                np.save(os.path.join(self.npy_folder_path, self.file_name +
                                     '_' + self.columns_headers_list[i] +
                                     '_filtered.npy'),
                        filtered_disp)

        # 3- Export creep for displacements
        # Cutting unwanted max min values to get correct full cycles and
        # remove false min/max values caused by noise
        force_max_indices_cutted, force_min_indices_cutted = \
            self.cut_indices_in_range(force_rest,
                                      force_max_indices,
                                      force_min_indices,
                                      self.force_max,
                                      self.force_min)

        print("Cycles number= ", len(force_min_indices))
        print("Cycles number after cutting unwanted max-min range= ",
              len(force_min_indices_cutted))

        # TODO I skipped time with presuming it's the first column
        for i in range(1, len(self.columns_headers_list)):
            if self.columns_headers_list[i] != str(self.force_name):
                # BUG FIX: the original reused ``disp_rest`` left over from
                # the *last* iteration of the loop above - and that variable
                # had already been reduced to the extrema subset - so every
                # column's creep files held wrong data.  Reload the column
                # and slice the cyclic part here (matches the corrected
                # logic of the newer HCFF variant in this file).
                array = np.load(os.path.join(
                    self.npy_folder_path,
                    self.file_name + '_' + self.columns_headers_list[i] +
                    '.npy')).flatten()
                array_rest = array[peak_force_before_cycles_index:]
                array_rest_maxima = array_rest[force_max_indices_cutted]
                array_rest_minima = array_rest[force_min_indices_cutted]
                np.save(os.path.join(self.npy_folder_path, self.file_name +
                                     '_' + self.columns_headers_list[i] +
                                     '_max.npy'), array_rest_maxima)
                np.save(os.path.join(self.npy_folder_path, self.file_name +
                                     '_' + self.columns_headers_list[i] +
                                     '_min.npy'), array_rest_minima)

        print('Filtered npy files are generated.')

    def cut_indices_in_range(self, array, max_indices, min_indices,
                             range_upper_value, range_lower_value):
        """Keep only maxima with |value| above |range_upper_value| and
        minima with |value| below |range_lower_value|."""
        cutted_max_indices = []
        cutted_min_indices = []

        for max_index in max_indices:
            if abs(array[max_index]) > abs(range_upper_value):
                cutted_max_indices.append(max_index)
        for min_index in min_indices:
            if abs(array[min_index]) < abs(range_lower_value):
                cutted_min_indices.append(min_index)
        return cutted_max_indices, cutted_min_indices

    def get_array_max_and_min_indices(self, input_array):
        """Return (max_indices, min_indices) of the local extrema of
        *input_array*; roles are swapped when the signal is predominantly
        negative so 'max' always means the larger-magnitude peak."""
        # Checking dominant sign
        positive_values_count = np.sum(np.array(input_array) >= 0)
        negative_values_count = input_array.size - positive_values_count

        # Getting max and min indices
        if (positive_values_count > negative_values_count):
            force_max_indices = argrelextrema(input_array, np.greater_equal)[0]
            force_min_indices = argrelextrema(input_array, np.less_equal)[0]
        else:
            force_max_indices = argrelextrema(input_array, np.less_equal)[0]
            force_min_indices = argrelextrema(input_array, np.greater_equal)[0]

        # Remove subsequent max/min indices (np.greater_equal will give 1,2
        # for [4, 8, 8, 1])
        force_max_indices = self.remove_subsequent_max_values(
            force_max_indices)
        force_min_indices = self.remove_subsequent_min_values(
            force_min_indices)

        # If size is not equal remove the last element from the big one
        if force_max_indices.size > force_min_indices.size:
            force_max_indices = force_max_indices[:-1]
        elif force_max_indices.size < force_min_indices.size:
            force_min_indices = force_min_indices[:-1]

        return force_max_indices, force_min_indices

    def remove_subsequent_max_values(self, force_max_indices):
        """Drop the first index of each pair of directly adjacent maxima."""
        to_delete_from_maxima = []
        for i in range(force_max_indices.size - 1):
            if force_max_indices[i + 1] - force_max_indices[i] == 1:
                to_delete_from_maxima.append(i)
        force_max_indices = np.delete(force_max_indices, to_delete_from_maxima)
        return force_max_indices

    def remove_subsequent_min_values(self, force_min_indices):
        """Drop the first index of each pair of directly adjacent minima."""
        to_delete_from_minima = []
        for i in range(force_min_indices.size - 1):
            if force_min_indices[i + 1] - force_min_indices[i] == 1:
                to_delete_from_minima.append(i)
        force_min_indices = np.delete(force_min_indices, to_delete_from_minima)
        return force_min_indices

    #=========================================================================
    # Plotting
    #=========================================================================
    plot_figure_num = tr.Int(0)

    def _plot_fired(self):
        ax = self.figure.add_subplot()

    def x_plot_fired(self):
        self.plot_figure_num += 1
        plt.draw()
        plt.show()

    data_changed = tr.Event

    def _add_plot_fired(self):
        # NOTE(review): the original wrapped a plots_num limit check in a
        # dead ``if False:`` branch; the dead code was removed - re-add the
        # MessageDialog guard here if the limit should be enforced.
        print('Loading npy files...')

        if self.apply_filters:
            x_axis_name = self.x_axis + '_filtered'
            y_axis_name = self.y_axis + '_filtered'
            x_axis_array = self.x_axis_multiplier * \
                np.load(os.path.join(self.npy_folder_path,
                                     self.file_name + '_' + self.x_axis +
                                     '_filtered.npy'))
            y_axis_array = self.y_axis_multiplier * \
                np.load(os.path.join(self.npy_folder_path,
                                     self.file_name + '_' + self.y_axis +
                                     '_filtered.npy'))
        else:
            x_axis_name = self.x_axis
            y_axis_name = self.y_axis
            x_axis_array = self.x_axis_multiplier * \
                np.load(os.path.join(self.npy_folder_path,
                                     self.file_name + '_' + self.x_axis +
                                     '.npy'))
            y_axis_array = self.y_axis_multiplier * \
                np.load(os.path.join(self.npy_folder_path,
                                     self.file_name + '_' + self.y_axis +
                                     '.npy'))

        print('Adding Plot...')
        mpl.rcParams['agg.path.chunksize'] = 50000

        ax = self.figure.add_subplot(1, 1, 1)
        ax.set_xlabel('Displacement [mm]')
        ax.set_ylabel('kN')
        ax.set_title('Original data', fontsize=20)
        ax.plot(x_axis_array, y_axis_array, 'k', linewidth=0.8)

        self.plot_list.append('{}, {}'.format(x_axis_name, y_axis_name))
        self.data_changed = True
        print('Finished adding plot!')

    def apply_new_subplot(self):
        """Add the next subplot slot implied by *plots_num* and the number
        of plots already placed, and return its Axes.

        Changed to *return* the Axes (the original returned None, which
        forced _add_creep_plot_fired to call pyplot-style methods on the
        Figure); the previous return value was unused, so this is
        backward compatible.
        """
        figure = self.figure
        if self.plots_num == 1:
            return figure.add_subplot(1, 1, 1)
        # rows/cols code per requested total number of plots
        grid_code = {2: '12', 3: '13', 4: '22', 6: '23', 9: '33'}[
            self.plots_num]
        plot_location = int(grid_code + str(len(self.plot_list) + 1))
        return figure.add_subplot(plot_location)

    def _add_creep_plot_fired(self):
        if (len(self.plot_list) >= self.plots_num):
            dialog = MessageDialog(
                title='Attention!',
                message='Max plots number is {}'.format(self.plots_num))
            dialog.open()
            return

        disp_max = self.x_axis_multiplier * \
            np.load(os.path.join(self.npy_folder_path,
                                 self.file_name + '_' + self.x_axis +
                                 '_max.npy'))
        disp_min = self.x_axis_multiplier * \
            np.load(os.path.join(self.npy_folder_path,
                                 self.file_name + '_' + self.x_axis +
                                 '_min.npy'))

        print('Adding creep plot...')
        mpl.rcParams['agg.path.chunksize'] = 50000

        # BUG FIX: the original bound ``plt = self.figure`` and then called
        # pyplot-style functions (xlabel, ylabel, title, plot) on the Figure
        # object, which has no such methods -> AttributeError at runtime.
        # Draw on the Axes returned by apply_new_subplot() instead.
        ax = self.apply_new_subplot()
        ax.set_xlabel('Cycles number')
        ax.set_ylabel('mm')
        ax.set_title('Fatigue creep curve', fontsize=20)
        # The 'k' format string was dropped: the explicit color= kwarg
        # overrides it anyway, so the rendered colors are unchanged.
        ax.plot(np.arange(0, disp_max.size), disp_max,
                linewidth=0.8, color='red')
        ax.plot(np.arange(0, disp_min.size), disp_min,
                linewidth=0.8, color='green')

        self.plot_list.append('Plot {}'.format(len(self.plot_list) + 1))
        # Fire the event so the embedded figure editor redraws (consistent
        # with _add_plot_fired; without it the new plot never appeared).
        self.data_changed = True
        print('Finished adding creep plot!')

    #=========================================================================
    # Configuration of the view
    #=========================================================================
    traits_view = ui.View(
        ui.HSplit(
            ui.VSplit(
                ui.HGroup(
                    ui.UItem('open_file_csv'),
                    ui.UItem('file_csv', style='readonly'),
                    label='Input data'
                ),
                ui.Item('add_columns_average', show_label=False),
                ui.VGroup(
                    ui.Item('skip_rows'),
                    ui.Item('decimal'),
                    ui.Item('delimiter'),
                    ui.Item('parse_csv_to_npy', show_label=False),
                    label='Filter parameters'
                ),
                ui.VGroup(
                    ui.Item('plots_num'),
                    ui.HGroup(ui.Item('x_axis'), ui.Item('x_axis_multiplier')),
                    ui.HGroup(ui.Item('y_axis'), ui.Item('y_axis_multiplier')),
                    ui.HGroup(ui.Item('add_plot', show_label=False),
                              ui.Item('apply_filters')),
                    ui.HGroup(ui.Item('add_creep_plot', show_label=False)),
                    ui.Item('plot_list'),
                    ui.Item('plot', show_label=False),
                    show_border=True,
                    label='Plotting settings'),
            ),
            ui.VGroup(
                ui.Item('force_name'),
                ui.HGroup(ui.Item('peak_force_before_cycles'),
                          show_border=True,
                          label='Skip noise of ascending branch:'),
                ui.VGroup(ui.Item('force_max'),
                          ui.Item('force_min'),
                          show_border=True,
                          label='Cut fake cycles for creep:'),
                ui.Item('generate_filtered_npy', show_label=False),
                show_border=True,
                label='Filters'
            ),
            ui.UItem('figure', editor=MPLFigureEditor(),
                     resizable=True, springy=True,
                     width=0.3, label='2d plots'),
        ),
        title='HCFF Filter',
        resizable=True,
        width=0.6,
        height=0.6
    )
class HCFF(tr.HasStrictTraits):
    '''High-Cycle Fatigue Filter

    Reads a measurement CSV, caches each column as an ``.npy`` file, filters
    the force signal down to its cycle extrema, and plots force/displacement
    and creep-fatigue curves into an embedded matplotlib figure.
    '''
    #=========================================================================
    # Traits definitions
    #=========================================================================
    decimal = tr.Enum(',', '.')
    delimiter = tr.Str(';')
    # Sampling rate used to synthesize a time axis when the first column
    # is not taken as time.
    records_per_second = tr.Float(100)
    take_time_from_first_column = tr.Bool
    file_csv = tr.File
    open_file_csv = tr.Button('Input file')
    skip_first_rows = tr.Int(3, auto_set=False, enter_set=True)
    columns_headers_list = tr.List([])
    x_axis = tr.Enum(values='columns_headers_list')
    y_axis = tr.Enum(values='columns_headers_list')
    x_axis_multiplier = tr.Enum(1, -1)
    y_axis_multiplier = tr.Enum(-1, 1)
    npy_folder_path = tr.Str
    file_name = tr.Str
    apply_filters = tr.Bool
    normalize_cycles = tr.Bool
    smooth = tr.Bool
    plot_every_nth_point = tr.Range(low=1, high=1000000, mode='spinner')
    # Name of the force column in the CSV (German: "Kraft").
    force_name = tr.Str('Kraft')
    old_peak_force_before_cycles = tr.Float
    peak_force_before_cycles = tr.Float
    # Savitzky-Golay filter parameters.
    window_length = tr.Int(31)
    polynomial_order = tr.Int(2)
    activate = tr.Bool(False)
    plots_num = tr.Enum(1, 2, 3, 4, 6, 9)
    plot_list = tr.List()
    add_plot = tr.Button
    add_creep_plot = tr.Button(desc='Creep plot of X axis array')
    clear_plot = tr.Button
    parse_csv_to_npy = tr.Button
    generate_filtered_and_creep_npy = tr.Button
    add_columns_average = tr.Button
    force_max = tr.Float(100)
    force_min = tr.Float(40)
    min_cycle_force_range = tr.Float(50)
    cutting_method = tr.Enum('Define min cycle range(force difference)',
                             'Define Max, Min')
    columns_to_be_averaged = tr.List
    figure = tr.Instance(Figure)

    def _figure_default(self):
        # Lazily create the embedded matplotlib figure.
        figure = Figure(facecolor='white')
        figure.set_tight_layout(True)
        return figure

    #=========================================================================
    # File management
    #=========================================================================
    def _open_file_csv_fired(self):
        self.reset()
        """ Handles the user clicking the 'Open...' button.
        """
        extns = ['*.csv', ]  # seems to handle only one extension...
        wildcard = '|'.join(extns)
        dialog = FileDialog(title='Select text file',
                            action='open', wildcard=wildcard,
                            default_path=self.file_csv)
        result = dialog.open()
        """ Test if the user opened a file to avoid throwing an exception
        if he doesn't """
        if result == OK:
            self.file_csv = dialog.path
        else:
            return
        """ Filling x_axis and y_axis with values """
        # Only the header row is read here; headers are sanitized so they
        # can later be embedded in npy file names.
        headers_array = np.array(
            pd.read_csv(self.file_csv, delimiter=self.delimiter,
                        decimal=self.decimal, nrows=1, header=None))[0]
        for i in range(len(headers_array)):
            headers_array[i] = self.get_valid_file_name(headers_array[i])
        self.columns_headers_list = list(headers_array)
        """ Saving file name and path and creating NPY folder """
        dir_path = os.path.dirname(self.file_csv)
        self.npy_folder_path = os.path.join(dir_path, 'NPY')
        if os.path.exists(self.npy_folder_path) == False:
            os.makedirs(self.npy_folder_path)
        self.file_name = os.path.splitext(os.path.basename(self.file_csv))[0]

    def _parse_csv_to_npy_fired(self):
        # Export each original (non-averaged) CSV column as a separate
        # '<file>_<column>.npy' file; averaged columns are appended at the
        # end of columns_headers_list and handled in the second loop.
        print('Parsing csv into npy files...')
        for i in range(len(self.columns_headers_list) -
                       len(self.columns_to_be_averaged)):
            column_array = np.array(pd.read_csv(
                self.file_csv, delimiter=self.delimiter,
                decimal=self.decimal, skiprows=self.skip_first_rows,
                usecols=[i]))
            """ TODO! Create time array supposing it's column is the first
            one in the file and that we have 100 reads in 1 second """
            if i == 0 and self.take_time_from_first_column == False:
                # Replace the first column with a synthesized time axis.
                column_array = np.arange(start=0.0,
                                         stop=len(column_array) /
                                         self.records_per_second,
                                         step=1.0 / self.records_per_second)
            np.save(os.path.join(self.npy_folder_path,
                                 self.file_name + '_' +
                                 self.columns_headers_list[i] + '.npy'),
                    column_array)
        """ Exporting npy arrays of averaged columns """
        for columns_names in self.columns_to_be_averaged:
            temp = np.zeros((1))
            for column_name in columns_names:
                temp = temp + np.load(os.path.join(
                    self.npy_folder_path,
                    self.file_name + '_' + column_name + '.npy')).flatten()
            avg = temp / len(columns_names)
            avg_file_suffex = self.get_suffex_for_columns_to_be_averaged(
                columns_names)
            np.save(os.path.join(self.npy_folder_path, self.file_name +
                                 '_' + avg_file_suffex + '.npy'), avg)
        # NOTE(review): typo "Finsihed" kept as-is (runtime string).
        print('Finsihed parsing csv into npy files.')

    def get_suffex_for_columns_to_be_averaged(self, columns_names):
        # File-name suffix for an averaged-columns npy, e.g. 'avg_a_b'.
        suffex_for_saved_file_name = 'avg_' + '_'.join(columns_names)
        return suffex_for_saved_file_name

    def get_valid_file_name(self, original_file_name):
        # Strip every character that is not safe in a file name.
        valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
        new_valid_file_name = ''.join(
            c for c in original_file_name if c in valid_chars)
        return new_valid_file_name

    def _clear_plot_fired(self):
        # Wipe the figure and the bookkeeping list, then trigger a redraw.
        self.figure.clear()
        self.plot_list = []
        self.data_changed = True

    def _add_columns_average_fired(self):
        # Let the user pick a set of columns to average in a modal dialog.
        columns_average = ColumnsAverage()
        for name in self.columns_headers_list:
            columns_average.columns.append(Column(column_name=name))
        # kind='modal' pauses the implementation until the window is closed
        columns_average.configure_traits(kind='modal')
        columns_to_be_averaged_temp = []
        for i in columns_average.columns:
            if i.selected:
                columns_to_be_averaged_temp.append(i.column_name)
        if columns_to_be_averaged_temp:  # If it's not empty
            self.columns_to_be_averaged.append(columns_to_be_averaged_temp)
            avg_file_suffex = self.get_suffex_for_columns_to_be_averaged(
                columns_to_be_averaged_temp)
            self.columns_headers_list.append(avg_file_suffex)

    def _generate_filtered_and_creep_npy_fired(self):
        # Full filtering pipeline: keep only cycle extrema of the force
        # signal past the initial ascending branch, apply the same index
        # mask to all displacement columns, then export the per-cycle
        # max/min envelopes ("creep") after discarding noise cycles.
        if self.npy_files_exist(os.path.join(
                self.npy_folder_path, self.file_name + '_' +
                self.force_name + '.npy')) == False:
            return
        # 1- Export filtered force
        force = np.load(os.path.join(
            self.npy_folder_path,
            self.file_name + '_' + self.force_name + '.npy')).flatten()
        # First index where |force| exceeds the configured peak; everything
        # before it is the monotone ascending branch.
        peak_force_before_cycles_index = np.where(
            abs((force)) > abs(self.peak_force_before_cycles))[0][0]
        force_ascending = force[0:peak_force_before_cycles_index]
        force_rest = force[peak_force_before_cycles_index:]
        force_max_indices, force_min_indices = \
            self.get_array_max_and_min_indices(force_rest)
        force_max_min_indices = np.concatenate(
            (force_min_indices, force_max_indices))
        force_max_min_indices.sort()
        force_rest_filtered = force_rest[force_max_min_indices]
        force_filtered = np.concatenate((force_ascending,
                                         force_rest_filtered))
        np.save(os.path.join(self.npy_folder_path, self.file_name + '_' +
                             self.force_name + '_filtered.npy'),
                force_filtered)
        # 2- Export filtered displacements
        # TODO I skipped time presuming it's the first column
        for i in range(1, len(self.columns_headers_list)):
            if self.columns_headers_list[i] != str(self.force_name):
                disp = np.load(os.path.join(
                    self.npy_folder_path, self.file_name + '_' +
                    self.columns_headers_list[i] + '.npy')).flatten()
                disp_ascending = disp[0:peak_force_before_cycles_index]
                disp_rest = disp[peak_force_before_cycles_index:]
                if self.activate == True:
                    # Smooth only the ascending branch.
                    disp_ascending = savgol_filter(
                        disp_ascending, window_length=self.window_length,
                        polyorder=self.polynomial_order)
                disp_rest_filtered = disp_rest[force_max_min_indices]
                filtered_disp = np.concatenate((disp_ascending,
                                                disp_rest_filtered))
                np.save(os.path.join(
                    self.npy_folder_path, self.file_name + '_' +
                    self.columns_headers_list[i] + '_filtered.npy'),
                    filtered_disp)
        # 3- Export creep for displacements
        # Cutting unwanted max min values to get correct full cycles and
        # remove false min/max values caused by noise
        if self.cutting_method == "Define Max, Min":
            force_max_indices_cutted, force_min_indices_cutted = \
                self.cut_indices_of_min_max_range(force_rest,
                                                  force_max_indices,
                                                  force_min_indices,
                                                  self.force_max,
                                                  self.force_min)
        elif self.cutting_method == "Define min cycle range(force difference)":
            force_max_indices_cutted, force_min_indices_cutted = \
                self.cut_indices_of_defined_range(force_rest,
                                                  force_max_indices,
                                                  force_min_indices,
                                                  self.min_cycle_force_range)
        print("Cycles number= ", len(force_min_indices))
        print("Cycles number after cutting fake cycles because of noise= ",
              len(force_min_indices_cutted))
        # TODO I skipped time with presuming it's the first column
        for i in range(1, len(self.columns_headers_list)):
            array = np.load(os.path.join(
                self.npy_folder_path, self.file_name + '_' +
                self.columns_headers_list[i] + '.npy')).flatten()
            array_rest = array[peak_force_before_cycles_index:]
            array_rest_maxima = array_rest[force_max_indices_cutted]
            array_rest_minima = array_rest[force_min_indices_cutted]
            np.save(os.path.join(self.npy_folder_path, self.file_name +
                                 '_' + self.columns_headers_list[i] +
                                 '_max.npy'), array_rest_maxima)
            np.save(os.path.join(self.npy_folder_path, self.file_name +
                                 '_' + self.columns_headers_list[i] +
                                 '_min.npy'), array_rest_minima)
        print('Filtered and creep npy files are generated.')

    def cut_indices_of_min_max_range(self, array, max_indices, min_indices,
                                     range_upper_value, range_lower_value):
        # Keep maxima above |range_upper_value| and minima below
        # |range_lower_value|; everything else is treated as noise.
        cutted_max_indices = []
        cutted_min_indices = []
        for max_index in max_indices:
            if abs(array[max_index]) > abs(range_upper_value):
                cutted_max_indices.append(max_index)
        for min_index in min_indices:
            if abs(array[min_index]) < abs(range_lower_value):
                cutted_min_indices.append(min_index)
        return cutted_max_indices, cutted_min_indices

    def cut_indices_of_defined_range(self, array, max_indices, min_indices,
                                     range_):
        # Keep only max/min pairs whose amplitude exceeds `range_`.
        cutted_max_indices = []
        cutted_min_indices = []
        for max_index, min_index in zip(max_indices, min_indices):
            if abs(array[max_index] - array[min_index]) > range_:
                cutted_max_indices.append(max_index)
                cutted_min_indices.append(min_index)
        return cutted_max_indices, cutted_min_indices

    def get_array_max_and_min_indices(self, input_array):
        # Returns (maxima_indices, minima_indices) of the local extrema,
        # oriented by the dominant sign of the signal, with plateau
        # duplicates removed and both arrays trimmed to equal length.
        # Checking dominant sign
        positive_values_count = np.sum(np.array(input_array) >= 0)
        negative_values_count = input_array.size - positive_values_count
        # Getting max and min indices
        if (positive_values_count > negative_values_count):
            force_max_indices = argrelextrema(input_array, np.greater_equal)[0]
            force_min_indices = argrelextrema(input_array, np.less_equal)[0]
        else:
            # Mostly-negative signal: "maxima" are the largest magnitudes.
            force_max_indices = argrelextrema(input_array, np.less_equal)[0]
            force_min_indices = argrelextrema(input_array, np.greater_equal)[0]
        # Remove subsequent max/min indices (np.greater_equal will give 1,2
        # for [4, 8, 8, 1])
        force_max_indices = self.remove_subsequent_max_values(
            force_max_indices)
        force_min_indices = self.remove_subsequent_min_values(
            force_min_indices)
        # If size is not equal remove the last element from the big one
        if force_max_indices.size > force_min_indices.size:
            force_max_indices = force_max_indices[:-1]
        elif force_max_indices.size < force_min_indices.size:
            force_min_indices = force_min_indices[:-1]
        return force_max_indices, force_min_indices

    def remove_subsequent_max_values(self, force_max_indices):
        # Drop the first index of every run of consecutive indices
        # (plateau extrema reported multiple times).
        to_delete_from_maxima = []
        for i in range(force_max_indices.size - 1):
            if force_max_indices[i + 1] - force_max_indices[i] == 1:
                to_delete_from_maxima.append(i)
        force_max_indices = np.delete(force_max_indices,
                                      to_delete_from_maxima)
        return force_max_indices

    def remove_subsequent_min_values(self, force_min_indices):
        # Same plateau de-duplication as remove_subsequent_max_values.
        to_delete_from_minima = []
        for i in range(force_min_indices.size - 1):
            if force_min_indices[i + 1] - force_min_indices[i] == 1:
                to_delete_from_minima.append(i)
        force_min_indices = np.delete(force_min_indices,
                                      to_delete_from_minima)
        return force_min_indices

    def _activate_changed(self):
        # Toggling ascending-branch smoothing off zeroes the peak threshold
        # (and remembers the old value so it can be restored).
        if self.activate == False:
            self.old_peak_force_before_cycles = self.peak_force_before_cycles
            self.peak_force_before_cycles = 0
        else:
            self.peak_force_before_cycles = self.old_peak_force_before_cycles

    def _window_length_changed(self, new):
        # Validate Savitzky-Golay constraints; warn but do not reject.
        if new <= self.polynomial_order:
            dialog = MessageDialog(
                title='Attention!',
                message='Window length must be bigger than polynomial order.')
            dialog.open()
        if new % 2 == 0 or new <= 0:
            dialog = MessageDialog(
                title='Attention!',
                message='Window length must be odd positive integer.')
            dialog.open()

    def _polynomial_order_changed(self, new):
        if new >= self.window_length:
            dialog = MessageDialog(
                title='Attention!',
                message='Polynomial order must be less than window length.')
            dialog.open()

    #=========================================================================
    # Plotting
    #=========================================================================
    plot_list_current_elements_num = tr.Int(0)

    def npy_files_exist(self, path):
        # True if `path` exists, otherwise warn the user and return False.
        if os.path.exists(path) == True:
            return True
        else:
            dialog = MessageDialog(
                title='Attention!',
                # NOTE(review): .format(self.plots_num) is a no-op here --
                # the message has no placeholder; harmless but pointless.
                message='Please parse csv file to generate npy files first.'.
                format(self.plots_num))
            dialog.open()
            return False

    def filtered_and_creep_npy_files_exist(self, path):
        if os.path.exists(path) == True:
            return True
        else:
            dialog = MessageDialog(
                title='Attention!',
                # NOTE(review): same no-op .format as in npy_files_exist.
                message='Please generate filtered and creep npy files first.'.
                format(self.plots_num))
            dialog.open()
            return False

    def max_plots_number_is_reached(self):
        # Guard against adding more subplots than the chosen grid holds.
        if len(self.plot_list) >= self.plots_num:
            dialog = MessageDialog(title='Attention!',
                                   message='Max plots number is {}'.format(
                                       self.plots_num))
            dialog.open()
            return True
        else:
            return False

    def _plot_list_changed(self):
        if len(self.plot_list) > self.plot_list_current_elements_num:
            self.plot_list_current_elements_num = len(self.plot_list)

    # Fired whenever the figure content changed and a redraw is needed.
    data_changed = tr.Event

    def _add_plot_fired(self):
        # Plot y_axis against x_axis, from either the raw or the filtered
        # npy files depending on apply_filters.
        if self.max_plots_number_is_reached() == True:
            return
        if self.apply_filters:
            if self.filtered_and_creep_npy_files_exist(os.path.join(
                    self.npy_folder_path, self.file_name + '_' +
                    self.x_axis + '_filtered.npy')) == False:
                return
            x_axis_name = self.x_axis + '_filtered'
            y_axis_name = self.y_axis + '_filtered'
            print('Loading npy files...')
            x_axis_array = self.x_axis_multiplier * \
                np.load(os.path.join(self.npy_folder_path,
                                     self.file_name + '_' +
                                     self.x_axis + '_filtered.npy'))
            y_axis_array = self.y_axis_multiplier * \
                np.load(os.path.join(self.npy_folder_path,
                                     self.file_name + '_' +
                                     self.y_axis + '_filtered.npy'))
        else:
            if self.npy_files_exist(os.path.join(
                    self.npy_folder_path, self.file_name + '_' +
                    self.x_axis + '.npy')) == False:
                return
            x_axis_name = self.x_axis
            y_axis_name = self.y_axis
            print('Loading npy files...')
            x_axis_array = self.x_axis_multiplier * \
                np.load(os.path.join(self.npy_folder_path,
                                     self.file_name + '_' +
                                     self.x_axis + '.npy'))
            y_axis_array = self.y_axis_multiplier * \
                np.load(os.path.join(self.npy_folder_path,
                                     self.file_name + '_' +
                                     self.y_axis + '.npy'))
        print('Adding Plot...')
        mpl.rcParams['agg.path.chunksize'] = 50000
        ax = self.apply_new_subplot()
        ax.set_xlabel(x_axis_name)
        ax.set_ylabel(y_axis_name)
        ax.plot(x_axis_array, y_axis_array, 'k',
                linewidth=1.2, color=np.random.rand(3, ),
                label=self.file_name + ', ' + x_axis_name)
        ax.legend()
        self.plot_list.append('{}, {}'.format(x_axis_name, y_axis_name))
        self.data_changed = True
        print('Finished adding plot!')

    def apply_new_subplot(self):
        # Create and return the next axes slot in a grid determined by
        # plots_num; e.g. for plots_num == 4 the subplot code is '22<k>'.
        plt = self.figure
        if (self.plots_num == 1):
            return plt.add_subplot(1, 1, 1)
        elif (self.plots_num == 2):
            plot_location = int('12' + str(len(self.plot_list) + 1))
            return plt.add_subplot(plot_location)
        elif (self.plots_num == 3):
            plot_location = int('13' + str(len(self.plot_list) + 1))
            return plt.add_subplot(plot_location)
        elif (self.plots_num == 4):
            plot_location = int('22' + str(len(self.plot_list) + 1))
            return plt.add_subplot(plot_location)
        elif (self.plots_num == 6):
            plot_location = int('23' + str(len(self.plot_list) + 1))
            return plt.add_subplot(plot_location)
        elif (self.plots_num == 9):
            plot_location = int('33' + str(len(self.plot_list) + 1))
            return plt.add_subplot(plot_location)

    def _add_creep_plot_fired(self):
        # Plot the per-cycle max/min envelopes of x_axis versus cycle count
        # (optionally thinned, smoothed and/or normalized to [0, 1]).
        if self.filtered_and_creep_npy_files_exist(os.path.join(
                self.npy_folder_path, self.file_name + '_' +
                self.x_axis + '_max.npy')) == False:
            return
        if self.max_plots_number_is_reached() == True:
            return
        disp_max = self.x_axis_multiplier * \
            np.load(os.path.join(self.npy_folder_path,
                                 self.file_name + '_' +
                                 self.x_axis + '_max.npy'))
        disp_min = self.x_axis_multiplier * \
            np.load(os.path.join(self.npy_folder_path,
                                 self.file_name + '_' +
                                 self.x_axis + '_min.npy'))
        complete_cycles_number = disp_max.size
        print('Adding creep-fatigue plot...')
        mpl.rcParams['agg.path.chunksize'] = 50000
        ax = self.apply_new_subplot()
        ax.set_xlabel('Cycles number')
        ax.set_ylabel(self.x_axis)
        if self.plot_every_nth_point > 1:
            disp_max = disp_max[0::self.plot_every_nth_point]
            disp_min = disp_min[0::self.plot_every_nth_point]
        if self.smooth:
            # Keeping the first item of the array and filtering the rest
            disp_max = np.concatenate((
                np.array([disp_max[0]]),
                savgol_filter(disp_max[1:],
                              window_length=self.window_length,
                              polyorder=self.polynomial_order)))
            disp_min = np.concatenate((
                np.array([disp_min[0]]),
                savgol_filter(disp_min[1:],
                              window_length=self.window_length,
                              polyorder=self.polynomial_order)))
        if self.normalize_cycles:
            ax.plot(np.linspace(0, 1., disp_max.size), disp_max,
                    'k', linewidth=1.2, color='red',
                    label='Max' + ', ' + self.file_name + ', ' + self.x_axis)
            ax.plot(np.linspace(0, 1., disp_max.size), disp_min,
                    'k', linewidth=1.2, color='green',
                    label='Min' + ', ' + self.file_name + ', ' + self.x_axis)
        else:
            ax.plot(np.linspace(0, complete_cycles_number, disp_max.size),
                    disp_max, 'k', linewidth=1.2, color='red',
                    label='Max' + ', ' + self.file_name + ', ' + self.x_axis)
            ax.plot(np.linspace(0, complete_cycles_number, disp_max.size),
                    disp_min, 'k', linewidth=1.2, color='green',
                    label='Min' + ', ' + self.file_name + ', ' + self.x_axis)
        ax.legend()
        self.plot_list.append('Creep-fatigue: {}, {}'.format(
            self.x_axis, self.y_axis))
        self.data_changed = True
        print('Finished adding creep-fatigue plot!')

    def reset(self):
        # Restore the filter state to its defaults (used on file open).
        self.delimiter = ';'
        self.skip_first_rows = 3
        self.columns_headers_list = []
        self.npy_folder_path = ''
        self.file_name = ''
        self.apply_filters = False
        self.force_name = 'Kraft'
        self.plot_list = []
        self.columns_to_be_averaged = []

    #=========================================================================
    # Configuration of the view
    #=========================================================================
    traits_view = ui.View(
        ui.HSplit(
            ui.VSplit(
                ui.HGroup(ui.UItem('open_file_csv'),
                          ui.UItem('file_csv', style='readonly', width=0.1),
                          label='Input data'),
                ui.Item('add_columns_average', show_label=False),
                ui.VGroup(
                    ui.VGroup(
                        ui.Item('records_per_second',
                                enabled_when='take_time_from_first_column'
                                             ' == False'),
                        ui.Item('take_time_from_first_column'),
                        label='Time calculation', show_border=True),
                    ui.VGroup(ui.Item('skip_first_rows'),
                              ui.Item('decimal'),
                              ui.Item('delimiter'),
                              ui.Item('parse_csv_to_npy', show_label=False),
                              label='Processing csv file', show_border=True),
                    ui.VGroup(
                        ui.HGroup(ui.Item('plots_num'),
                                  ui.Item('clear_plot')),
                        ui.HGroup(ui.Item('x_axis'),
                                  ui.Item('x_axis_multiplier')),
                        ui.HGroup(ui.Item('y_axis'),
                                  ui.Item('y_axis_multiplier')),
                        ui.VGroup(
                            ui.HGroup(ui.Item('add_plot', show_label=False),
                                      ui.Item('apply_filters')),
                            show_border=True,
                            label='Plotting X axis with Y axis'),
                        ui.VGroup(
                            ui.HGroup(
                                ui.Item('add_creep_plot', show_label=False),
                                ui.VGroup(ui.Item('normalize_cycles'),
                                          ui.Item('smooth'),
                                          ui.Item('plot_every_nth_point'))),
                            show_border=True,
                            label='Plotting Creep-fatigue of x-axis'),
                        ui.Item('plot_list'),
                        show_border=True, label='Plotting'))),
            ui.VGroup(
                ui.Item('force_name'),
                ui.VGroup(
                    ui.VGroup(
                        ui.Item('window_length'),
                        ui.Item('polynomial_order'),
                        enabled_when='activate == True or smooth == True'),
                    show_border=True,
                    label='Smoothing parameters (Savitzky-Golay filter):'),
                ui.VGroup(
                    ui.VGroup(
                        ui.Item('activate'),
                        ui.Item('peak_force_before_cycles',
                                enabled_when='activate == True')),
                    show_border=True,
                    label='Smooth ascending branch for all displacements:'),
                ui.VGroup(
                    ui.Item('cutting_method'),
                    ui.VGroup(ui.Item('force_max'),
                              ui.Item('force_min'),
                              label='Max, Min:', show_border=True,
                              enabled_when='cutting_method == '
                                           '"Define Max, Min"'),
                    ui.VGroup(
                        ui.Item('min_cycle_force_range'),
                        label='Min cycle force range:', show_border=True,
                        enabled_when='cutting_method == "Define min cycle'
                                     ' range(force difference)"'),
                    show_border=True, label='Cut fake cycles for creep:'),
                ui.Item('generate_filtered_and_creep_npy', show_label=False),
                show_border=True, label='Filters'),
            ui.UItem('figure', editor=MPLFigureEditor(),
                     resizable=True, springy=True,
                     width=0.8, label='2d plots')),
        title='HCFF Filter',
        resizable=True,
        width=0.85,
        height=0.7)
class Sike(HasTraits):
    """ Tie several profile-related widgets together.

    Sike is like Gotcha, only less mature.
    """

    # The main pstats.Stats() object providing the data.
    stats = Any()

    # The main results and the subcalls.
    main_results = Instance(ProfileResults, args=())
    caller_results = Instance(ProfileResults, args=())
    callee_results = Instance(ProfileResults, args=())

    # The records have list of callers. Invert this to give a map from function
    # to callee.
    callee_map = Dict()

    # Map from the (file, lineno, name) tuple to the record.
    record_map = Dict()

    #### GUI traits ############################################################
    basenames = Bool(True)
    percentages = Bool(True)
    filename = Str()
    line = Int(1)
    code = Str()

    traits_view = tui.View(
        tui.VGroup(
            tui.HGroup(
                tui.Item('basenames'),
                tui.Item('percentages'),
            ),
            tui.HGroup(
                tui.UItem('main_results'),
                tui.VGroup(
                    tui.Label('Callees'),
                    tui.UItem('callee_results'),
                    tui.Label('Callers'),
                    tui.UItem('caller_results'),
                    tui.UItem('filename', style='readonly'),
                    tui.UItem('code', editor=tui.CodeEditor(line='line')),
                ),
                style='custom',
            ),
        ),
        width=1024,
        height=768,
        resizable=True,
        title='Profiling results',
    )

    @classmethod
    def fromstats(cls, stats, **traits):
        """ Instantiate an Sike from a Stats object, Stats.stats dictionary,
        or Profile object, or a filename of the saved Stats data.
        """
        stats = SillyStatsWrapper.getstats(stats)
        self = cls(stats=stats, **traits)
        self._refresh_stats()
        return self

    def add_stats(self, stats):
        """ Add new statistics and refresh all views. """
        stats = SillyStatsWrapper.getstats(stats)
        self.stats.add(stats)
        self._refresh_stats()

    def records_from_stats(self, stats):
        """ Create a list of Record objects from a pstats-style stats
        dictionary mapping (file, lineno, name) to call statistics.
        """
        records = []
        for file_line_name, (ncalls, nonrec_calls, inline_time, cum_time,
                             calls) in list(stats.items()):
            newcalls = []
            for sub_file_line_name, sub_call in list(calls.items()):
                newcalls.append(Subrecord((sub_file_line_name, ) + sub_call))
            records.append(Record((file_line_name, ncalls, nonrec_calls,
                                   inline_time, cum_time, newcalls)))
        return records

    def get_callee_map(self, records):
        """ Invert the caller lists of `records` into a map from a caller's
        (file, lineno, name) key to the Subrecords it calls.
        """
        callees = defaultdict(list)
        for record in records:
            for caller in record.callers:
                callees[caller.file_line_name].append(
                    Subrecord((record.file_line_name, ) + caller[1:]))
        return callees

    @on_trait_change('percentages,basenames')
    def _adapter_traits_changed(self, changed_object, name, old, new):
        # Propagate the display options to all three result panes.
        for obj in [self.main_results, self.callee_results,
                    self.caller_results]:
            setattr(obj, name, new)

    @on_trait_change('main_results:selected_record')
    def update_sub_results(self, new):
        """ Refresh the caller/callee panes and the source view for the
        newly selected record.
        """
        if new is None:
            return

        self.caller_results.total_time = new.cum_time
        self.caller_results.records = new.callers
        # BUGFIX: resort the *caller* pane whose records just changed
        # (previously resorted callee_results here by copy-paste mistake).
        self.caller_results._resort()
        self.caller_results.selected_record = \
            self.caller_results.activated_record = None

        self.callee_results.total_time = new.cum_time
        self.callee_results.records = self.callee_map.get(
            new.file_line_name, [])
        self.callee_results._resort()
        self.callee_results.selected_record = \
            self.callee_results.activated_record = None

        filename, line, name = new.file_line_name
        if os.path.exists(filename):
            # BUGFIX: mode 'ru' is invalid on Python 3 (ValueError); the
            # universal-newlines 'U' flag is implicit with plain 'r'.
            with open(filename, 'r') as f:
                code = f.read()
            self.code = code
            self.filename = filename
            self.line = line
        else:
            self.trait_set(
                code='',
                filename='',
                line=1,
            )

    @on_trait_change('caller_results:dclicked,'
                     'callee_results:dclicked')
    def goto_record(self, new):
        """ Jump the main pane to the double-clicked sub-record. """
        if new is None:
            return
        if new.item.file_line_name in self.record_map:
            record = self.record_map[new.item.file_line_name]
            self.main_results.selected_record = record

    @on_trait_change('stats')
    def _refresh_stats(self):
        """ Refresh the records from the stored Stats object. """
        self.main_results.records = self.main_results.sort_records(
            self.records_from_stats(self.stats.stats))
        self.callee_map = self.get_callee_map(self.main_results.records)
        self.record_map = {}
        total_time = 0.0
        for record in self.main_results.records:
            self.record_map[record.file_line_name] = record
            total_time += record.inline_time
        self.main_results.total_time = total_time
class HCFF(tr.HasStrictTraits):
    '''High-Cycle Fatigue Filter

    Prototype variant: reads a CSV either fully (np.loadtxt) or in chunks
    into an HDF5 store, exports unfiltered npy arrays and plots the raw
    force-displacement curve.
    '''

    something = tr.Instance(Something)
    decimal = tr.Enum(',', '.')
    delimiter = tr.Str(';')
    path_hdf5 = tr.Str('')

    def _something_default(self):
        return Something()

    #=========================================================================
    # File management
    #=========================================================================
    file_csv = tr.File
    open_file_csv = tr.Button('Input file')

    def _open_file_csv_fired(self):
        """ Handles the user clicking the 'Open...' button.
        """
        extns = ['*.csv', ]  # seems to handle only one extension...
        wildcard = '|'.join(extns)

        dialog = FileDialog(title='Select text file',
                            action='open', wildcard=wildcard,
                            default_path=self.file_csv)
        # BUGFIX: bail out when the user cancels the dialog instead of
        # proceeding with a stale/empty path (matches the other HCFF
        # variant in this file).
        if dialog.open() != OK:
            return
        self.file_csv = dialog.path

        """ Filling x_axis and y_axis with values """
        headers_array = np.array(
            pd.read_csv(self.file_csv, delimiter=self.delimiter,
                        decimal=self.decimal, nrows=1, header=None))[0]
        for i in range(len(headers_array)):
            headers_array[i] = self.get_valid_file_name(headers_array[i])
        self.columns_headers_list = list(headers_array)

    def get_valid_file_name(self, original_file_name):
        # BUGFIX: this helper was referenced above but never defined on this
        # class (AttributeError on HasStrictTraits); same implementation as
        # the sibling HCFF class: strip characters unsafe in file names.
        valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
        new_valid_file_name = ''.join(
            c for c in original_file_name if c in valid_chars)
        return new_valid_file_name

    #=========================================================================
    # Parameters of the filter algorithm
    #=========================================================================
    chunk_size = tr.Int(10000, auto_set=False, enter_set=True)
    skip_rows = tr.Int(4, auto_set=False, enter_set=True)

    # 1) use the decorator
    @tr.on_trait_change('chunk_size, skip_rows')
    def whatever_name_size_changed(self):
        print('chunk-size changed')

    # 2) use the _changed or _fired extension
    def _chunk_size_changed(self):
        print('chunk_size changed - calling the named function')

    # NOTE(review): np.float_ was removed in NumPy 2.0; keep while the
    # project pins NumPy < 2, otherwise switch to np.float64.
    data = tr.Array(dtype=np.float_)
    read_loadtxt_button = tr.Button()

    def _read_loadtxt_button_fired(self):
        """ Load the whole CSV into memory at once. """
        self.data = np.loadtxt(self.file_csv,
                               skiprows=self.skip_rows,
                               delimiter=self.delimiter)
        print(self.data.shape)

    read_csv_button = tr.Button
    read_hdf5_button = tr.Button

    def _read_csv_button_fired(self):
        self.read_csv()

    def _read_hdf5_button_fired(self):
        self.read_hdf5_no_filter()

    def read_csv(self):
        '''Read the csv file and transform it to the hdf5 format.
        The output file has the same name as the input csv file with an
        extension hdf5
        '''
        path_csv = self.file_csv
        # Following splitext splits the path into a pair (root, extension)
        self.path_hdf5 = os.path.splitext(path_csv)[0] + '.hdf5'
        # Stream the CSV chunk-wise so arbitrarily large files fit in memory;
        # columns are renamed a..f for the HDF5 table.
        for i, chunk in enumerate(pd.read_csv(
                path_csv, delimiter=self.delimiter, decimal=self.decimal,
                skiprows=self.skip_rows, chunksize=self.chunk_size)):
            chunk_array = np.array(chunk)
            chunk_data_frame = pd.DataFrame(
                chunk_array, columns=['a', 'b', 'c', 'd', 'e', 'f'])
            if i == 0:
                chunk_data_frame.to_hdf(self.path_hdf5, 'all_data',
                                        mode='w', format='table')
            else:
                chunk_data_frame.to_hdf(self.path_hdf5, 'all_data',
                                        append=True)

    def read_hdf5_no_filter(self):
        """ Export unfiltered npy arrays from the HDF5 store and show the
        raw force-displacement curve.
        """
        # reading hdf files is really memory-expensive!
        force = np.array(pd.read_hdf(self.path_hdf5, columns=['b']))
        weg = np.array(pd.read_hdf(self.path_hdf5, columns=['c']))
        disp1 = np.array(pd.read_hdf(self.path_hdf5, columns=['d']))
        disp2 = np.array(pd.read_hdf(self.path_hdf5, columns=['e']))
        disp3 = np.array(pd.read_hdf(self.path_hdf5, columns=['f']))

        # Prepend a zero row so every signal starts at the origin.
        force = np.concatenate((np.zeros((1, 1)), force))
        weg = np.concatenate((np.zeros((1, 1)), weg))
        disp1 = np.concatenate((np.zeros((1, 1)), disp1))
        disp2 = np.concatenate((np.zeros((1, 1)), disp2))
        disp3 = np.concatenate((np.zeros((1, 1)), disp3))

        dir_path = os.path.dirname(self.file_csv)
        npy_folder_path = os.path.join(dir_path, 'NPY')
        if os.path.exists(npy_folder_path) == False:
            os.makedirs(npy_folder_path)
        file_name = os.path.splitext(os.path.basename(self.file_csv))[0]

        np.save(os.path.join(npy_folder_path,
                             file_name + '_Force_nofilter.npy'), force)
        np.save(os.path.join(npy_folder_path,
                             file_name + '_Displacement_machine_nofilter.npy'),
                weg)
        np.save(os.path.join(npy_folder_path,
                             file_name + '_Displacement_sliding1_nofilter.npy'),
                disp1)
        np.save(os.path.join(npy_folder_path,
                             file_name + '_Displacement_sliding2_nofilter.npy'),
                disp2)
        np.save(os.path.join(npy_folder_path,
                             file_name + '_Displacement_crack1_nofilter.npy'),
                disp3)

        # Defining chunk size for matplotlib points visualization
        mpl.rcParams['agg.path.chunksize'] = 50000

        plt.subplot(111)
        plt.xlabel('Displacement [mm]')
        plt.ylabel('kN')
        plt.title('original data', fontsize=20)
        plt.plot(disp2, force, 'k')
        plt.show()

    figure = tr.Instance(Figure)

    def _figure_default(self):
        figure = Figure(facecolor='white')
        figure.set_tight_layout(True)
        return figure

    columns_headers_list = tr.List([])
    x_axis = tr.Enum(values='columns_headers_list')
    y_axis = tr.Enum(values='columns_headers_list')
    npy_folder_path = tr.Str
    file_name = tr.Str
    plot = tr.Button

    def _plot_fired(self):
        # Plot the selected columns of the in-memory `data` array.
        ax = self.figure.add_subplot(111)
        print('plotting figure')
        print(type(self.x_axis), type(self.y_axis))
        print(self.data[:, 1])
        print(self.data[:, self.x_axis])
        print(self.data[:, self.y_axis])
        ax.plot(self.data[:, self.x_axis], self.data[:, self.y_axis])

    traits_view = ui.View(
        ui.HSplit(
            ui.VSplit(
                ui.HGroup(ui.UItem('open_file_csv'),
                          ui.UItem('file_csv', style='readonly'),
                          label='Input data'),
                ui.VGroup(ui.Item('chunk_size'),
                          ui.Item('skip_rows'),
                          ui.Item('decimal'),
                          ui.Item('delimiter'),
                          label='Filter parameters'),
                ui.VGroup(
                    ui.HGroup(ui.Item('read_loadtxt_button',
                                      show_label=False),
                              ui.Item('plot', show_label=False),
                              show_border=True),
                    ui.HGroup(ui.Item('read_csv_button', show_label=False),
                              ui.Item('read_hdf5_button', show_label=False),
                              show_border=True))),
            ui.UItem('figure', editor=MPLFigureEditor(),
                     resizable=True, springy=True,
                     label='2d plots'),
        ),
        resizable=True,
        width=0.8,
        height=0.6)