class SPIRRID(FunctionRandomization):
    '''Algorithmic class for the multivariate random problem.
    '''
    sampling_type = Trait('TGrid', {'TGrid': TGrid,
                                    'PGrid': PGrid,
                                    'MCS': MonteCarlo,
                                    'LHS': LatinHypercubeSampling},
                          sampling=True)

    # traits tagged with 'sampling' / 'codegen' metadata trigger the
    # reconstruction of the corresponding cached property
    sampling = Property(depends_on='+sampling')
    @cached_property
    def _get_sampling(self):
        return self.sampling_type_(randomization=self)

    codegen_type = Trait('numpy', {'numpy': CodeGenNumpyFactory(),
                                   'weave': CodeGenCFactory(),
                                   'cython': CodeGenCythonFactory()},
                         codegen=True)

    codegen = Property(depends_on='+sampling, +codegen')
    @cached_property
    def _get_codegen(self):
        return self.codegen_type_(spirrid=self)

    mu_q_arr = Property(depends_on='+sampling, +codegen')
    @cached_property
    def _get_mu_q_arr(self):
        '''getter for the mean value array property.
        '''
        e_orth = make_ogrid(self.evar_lst)
        mu_q_method = self.codegen.get_code()
        mu_q_arr, var_q_arr = mu_q_method(*e_orth)
        return mu_q_arr
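# ---------------------------------------------------------------------------
# A minimal standalone sketch (not part of the class above) of what the
# mu_q_arr property computes: the response function q is evaluated on an
# orthogonalized grid of control values and sampled random parameters, and
# the mean is a weighted sum over the random dimensions. The response
# function and the two-variable randomization below are illustrative
# assumptions, not the library's own example.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np

    def q(e, la, xi):
        # example response: linear spring 'la * e' with strength cutoff 'xi'
        return la * e * (e <= xi)

    e_arr = np.linspace(0, 1.0, 80)         # control variable
    la_arr = np.linspace(0.5, 1.5, 40)      # sampled random variable 1
    xi_arr = np.linspace(0.0, 2.0, 40)      # sampled random variable 2
    dG = 1.0 / (len(la_arr) * len(xi_arr))  # uniform weight per sample

    # orthogonalize the axes (the role of make_ogrid) and broadcast
    e, la, xi = np.ix_(e_arr, la_arr, xi_arr)
    mu_q_arr = np.sum(q(e, la, xi) * dG, axis=(1, 2))
    print mu_q_arr.shape                    # (80,)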
class YMBAutoCorrel(HasTraits):

    data = Instance(IYMBData)

    var_enum = Trait('radius', var_dict)

    input_change = Event
    @on_trait_change('var_enum, data.input_change')
    def _set_input_change(self):
        print 'YMBAutoCorrel input change'
        self.input_change = True

    corr_arr = Property(Array, depends_on='var_enum, data.input_change')
    @cached_property
    def _get_corr_arr(self):
        corr_data = getattr(self.data, self.var_enum_)
        # @kelidas: return small differences between ma and numpy corrcoef
        # print MatSpearman(corr_data)
        # return ma.corrcoef(corr_data, rowvar=False, allow_masked=True)
        return MatSpearman(corr_data)

    fit_correl = Property()
    def _get_fit_correl(self):
        x_coor = self.data.x_coord
        var_data = self.corr_arr
        x = []
        y = []
        for i in range(0, var_data.shape[1]):
            x.append(x_coor[i:] - x_coor[i])
            y.append(var_data[i, (i):])
        x = hstack(x)
        y = hstack(y)
        p0 = [1., 1., 1., 1.]
        plsq = leastsq(self.residual_ls, p0, args=(y, x))
        return plsq[0]

    def residual_ls(self, p, y, x):
        err = y - self.peval(x, p)
        return err

    def peval(self, x, p):
        return p[0] * x ** 3 + p[1] * x ** 2 + p[2] * x + p[3]

    traits_view = View(Item('var_enum', label='Variable'))
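# ---------------------------------------------------------------------------
# A minimal standalone sketch of the least-squares fit used in fit_correl
# above: scipy.optimize.leastsq minimizes the residual between the data and
# the cubic polynomial peval. The synthetic data below is an assumption for
# illustration only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np
    from scipy.optimize import leastsq

    def peval(x, p):
        return p[0] * x ** 3 + p[1] * x ** 2 + p[2] * x + p[3]

    def residual_ls(p, y, x):
        return y - peval(x, p)

    x = np.linspace(0, 10, 50)
    y = 0.2 * x ** 3 - x + 1.0 + np.random.normal(scale=0.1, size=x.shape)

    p0 = [1., 1., 1., 1.]                  # initial guess for the coefficients
    plsq = leastsq(residual_ls, p0, args=(y, x))
    print 'fitted coefficients:', plsq[0]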
class CodeGenCompiled(CodeGen):
    '''C-code is generated using the inline feature of scipy.
    '''
    # ===========================================================================
    # Inspection of the randomization - needed by CodeGenCompiled
    # ===========================================================================
    evar_names = Property(depends_on='q, recalc')
    @cached_property
    def _get_evar_names(self):
        return self.spirrid.evar_names

    var_names = Property(depends_on='q, recalc')
    @cached_property
    def _get_var_names(self):
        return self.spirrid.tvar_names

    # count the random variables
    n_rand_vars = Property(depends_on='theta_vars, recalc')
    @cached_property
    def _get_n_rand_vars(self):
        return self.spirrid.n_rand_vars

    # get the indexes of the random variables within the parameter list
    rand_var_idx_list = Property(depends_on='theta_vars, recalc')
    @cached_property
    def _get_rand_var_idx_list(self):
        return self.spirrid.rand_var_idx_list

    # get the names of the random variables
    rand_var_names = Property(depends_on='theta_vars, recalc')
    @cached_property
    def _get_rand_var_names(self):
        return self.var_names[self.rand_var_idx_list]

    # get the randomization arrays
    theta_arrs = Property(List, depends_on='theta_vars, recalc')
    @cached_property
    def _get_theta_arrs(self):
        '''Get flattened list of theta arrays.
        '''
        theta = self.spirrid.sampling.theta
        return _get_flat_arrays_from_list(self.rand_var_idx_list, theta)

    # get the weight factor arrays
    dG_arrs = Property(List, depends_on='theta_vars, recalc')
    @cached_property
    def _get_dG_arrs(self):
        '''Get flattened list of weight factor arrays.
        '''
        dG = self.spirrid.sampling.dG_ogrid
        return _get_flat_arrays_from_list(self.rand_var_idx_list, dG)

    arg_names = Property(depends_on='rf_change, rand_change, +codegen_option, recalc')
    @cached_property
    def _get_arg_names(self):
        arg_names = []
        # create argument string for inline function
        if self.compiled_eps_loop:
            # @todo: e_arr must be evar_names
            arg_names += ['mu_q_arr', 'e_arr']
        else:
            arg_names.append('e')
        arg_names += ['%s_flat' % name for name in self.rand_var_names]
        arg_names += self._get_arg_names_dG()
        return arg_names

    ld = Trait('weave', dict(weave=CodeGenLangDictC(),
                             cython=CodeGenLangDictCython()))

    # ===========================================================================
    # Configuration of the code
    # ===========================================================================
    #
    # compiled_eps_loop:
    # If set True, the loop over the control variable epsilon is compiled;
    # otherwise a Python loop is used.
    compiled_eps_loop = Bool(True, codegen_option=True)

    # ===========================================================================
    # compiled_eps_loop - dependent code
    # ===========================================================================
    compiled_eps_loop_feature = Property(depends_on='compiled_eps_loop, recalc')
    @cached_property
    def _get_compiled_eps_loop_feature(self):
        if self.compiled_eps_loop == True:
            return self.ld_.LD_BEGIN_EPS_LOOP_ACTIVE, self.ld_.LD_END_EPS_LOOP_ACTIVE
        else:
            return self.ld_.LD_ASSIGN_EPS, ''

    LD_BEGIN_EPS_LOOP = Property
    def _get_LD_BEGIN_EPS_LOOP(self):
        return self.compiled_eps_loop_feature[0]

    LD_END_EPS_LOOP = Property
    def _get_LD_END_EPS_LOOP(self):
        return self.compiled_eps_loop_feature[1]

    #
    # cached_dG:
    # If set True, the cross product between the pdf values of all random
    # variables is precalculated and stored in an n-dimensional grid;
    # otherwise the product is evaluated anew for every epsilon in the inner loop.
    cached_dG = Bool(False, codegen_option=True)

    # ===========================================================================
    # cached_dG - dependent code
    # ===========================================================================
    cached_dG_feature = Property(depends_on='cached_dG, recalc')
    @cached_property
    def _get_cached_dG_feature(self):
        if self.compiled_eps_loop:
            if self.cached_dG == True:
                return self.ld_.LD_ACCESS_EPS_IDX, self.ld_.LD_ACCESS_THETA_IDX, self.ld_.LD_ASSIGN_MU_Q_IDX
            else:
                return self.ld_.LD_ACCESS_EPS_PTR, self.ld_.LD_ACCESS_THETA_PTR, self.ld_.LD_ASSIGN_MU_Q_PTR
        else:
            if self.cached_dG == True:
                return self.ld_.LD_ACCESS_EPS_IDX, self.ld_.LD_ACCESS_THETA_IDX, self.ld_.LD_ASSIGN_MU_Q_IDX
            else:
                return self.ld_.LD_ACCESS_EPS_PTR, self.ld_.LD_ACCESS_THETA_PTR, self.ld_.LD_ASSIGN_MU_Q_PTR

    LD_ACCESS_EPS = Property
    def _get_LD_ACCESS_EPS(self):
        return self.cached_dG_feature[0]

    LD_ACCESS_THETA = Property
    def _get_LD_ACCESS_THETA(self):
        return '%s' + self.cached_dG_feature[1]

    LD_ASSIGN_MU_Q = Property
    def _get_LD_ASSIGN_MU_Q(self):
        return self.cached_dG_feature[2]

    LD_N_TAB = Property
    def _get_LD_N_TAB(self):
        if self.spirrid.sampling_type == 'LHS' or self.spirrid.sampling_type == 'MCS':
            if self.compiled_eps_loop:
                return 3
            else:
                return 2
        else:
            if self.compiled_eps_loop:
                return self.n_rand_vars + 2
            else:
                return self.n_rand_vars + 1

    # ------------------------------------------------------------------------------------
    # Configurable generation of C-code for the mean curve evaluation
    # ------------------------------------------------------------------------------------
    code = Property(depends_on='rf_change, rand_change, +codegen_option, eps_change, recalc')
    @cached_property
    def _get_code(self):
        code_str = ''
        if self.compiled_eps_loop:
            # create code string for inline function
            #
            n_eps = len(self.spirrid.evar_lst[0])
            code_str += self.LD_BEGIN_EPS_LOOP % {'i': n_eps}
            code_str += self.LD_ACCESS_EPS
        else:
            # create code string for inline function
            #
            code_str += self.ld_.LD_ASSIGN_EPS

        code_str += self.ld_.LD_INIT_MU_Q
        if self.compiled_eps_loop:
            code_str += '\t' + self.ld_.LD_INIT_Q
        else:
            code_str += self.ld_.LD_INIT_Q
        code_str += self.ld_.LD_LINE_MACRO

        # create code for constant params
        for name, distr in zip(self.var_names, self.spirrid.tvar_lst):
            if type(distr) is float:
                code_str += self.ld_.LD_INIT_THETA % (name, distr)

        code_str += self._get_code_dG_declare()

        inner_code_str = ''
        lang = self.ld + '_code'
        q_code = getattr(self.spirrid.q, lang)
        import textwrap
        q_code = textwrap.dedent(q_code)
        q_code_split = q_code.split('\n')
        for i, s in enumerate(q_code_split):
            q_code_split[i] = self.LD_N_TAB * '\t' + s
        q_code = '\n'.join(q_code_split)

        if self.n_rand_vars > 0:
            inner_code_str += self._get_code_dG_access()
            inner_code_str += q_code + '\n' + \
                (self.LD_N_TAB) * '\t' + self.ld_.LD_EVAL_MU_Q
        else:
            inner_code_str += q_code + \
                self.ld_.LD_ADD_MU_Q

        code_str += self._get_code_inner_loops(inner_code_str)

        if self.compiled_eps_loop:
            if self.cached_dG:  # blitz matrix
                code_str += self.ld_.LD_ASSIGN_MU_Q_IDX
            else:
                code_str += self.ld_.LD_ASSIGN_MU_Q_PTR
            code_str += self.LD_END_EPS_LOOP
        else:
            code_str += self.ld_.LD_RETURN_MU_Q
        return code_str

    compiler_verbose = Int(1)

    compiler = Property(Str)
    def _get_compiler(self):
        if platform.system() == 'Linux':
            return 'gcc'
        elif platform.system() == 'Windows':
            return 'mingw32'

    def get_code(self):
        if self.ld == 'weave':
            return self.get_c_code()
        elif self.ld == 'cython':
            return self.get_cython_code()

    def get_cython_code(self):
        cython_header = 'print "## spirrid_cython library reloaded!"\nimport numpy as np\ncimport numpy as np\nctypedef np.double_t DTYPE_t\ncimport cython\n\n@cython.boundscheck(False)\n@cython.wraparound(False)\n@cython.cdivision(True)\ndef mu_q(%s):\n\tcdef double mu_q\n'
        # @todo - for Cython cdef variables and generalize function def()
        arg_values = {}
        for name, theta_arr in zip(self.rand_var_names, self.theta_arrs):
            arg_values['%s_flat' % name] = theta_arr
        arg_values.update(self._get_arg_values_dG())

        DECLARE_ARRAY = 'np.ndarray[DTYPE_t, ndim=1] '
        def_dec = DECLARE_ARRAY + 'e_arr'
        def_dec += ',' + DECLARE_ARRAY
        def_dec += (',' + DECLARE_ARRAY).join(arg_values)

        cython_header = cython_header % def_dec
        cython_header += ' cdef double '
        cython_header += ', '.join(self.var_names) + ', eps, dG, q\n'
        cython_header += ' cdef int i_'
        cython_header += ', i_'.join(self.var_names) + '\n'
        if self.cached_dG:
            cython_header = cython_header.replace(r'1] dG_grid',
                                                  r'%i] dG_grid' % self.n_rand_vars)
        if self.compiled_eps_loop == False:
            cython_header = cython_header.replace(r'np.ndarray[DTYPE_t, ndim=1] e_arr',
                                                  r'double e_arr')
            cython_header = cython_header.replace(r'eps,', r'eps = e_arr,')

        cython_code = (cython_header + self.code).replace('\t', ' ')
        cython_file_name = 'spirrid_cython.pyx'

        print 'checking for previous cython code'
        regenerate_code = True
        if os.path.exists(cython_file_name):
            f_in = open(cython_file_name, 'r').read()
            if f_in == cython_code:
                regenerate_code = False

        if regenerate_code:
            infile = open(cython_file_name, 'w')
            infile.write(cython_code)
            infile.close()
            print 'pyx file updated'

        t = sysclock()

        import pyximport
        pyximport.install(reload_support=True,
                          setup_args={"script_args": ["--force"]})
        import spirrid_cython

        if regenerate_code:
            reload(spirrid_cython)
        print '>>> pyximport', sysclock() - t

        mu_q = spirrid_cython.mu_q

        def mu_q_method(eps):
            if self.compiled_eps_loop:
                args = {'e_arr': eps}
                args.update(arg_values)
                mu_q_arr = mu_q(**args)
            else:
                # Python loop over eps
                #
                mu_q_arr = np.zeros_like(eps, dtype=np.float64)
                for idx, e in enumerate(eps):
                    # C loop over random dimensions
                    #
                    arg_values['e_arr'] = e  # prepare the parameter
                    mu_q_val = mu_q(**arg_values)
                    # add the value to the return array
                    mu_q_arr[idx] = mu_q_val
            return mu_q_arr, None

        return mu_q_method

    def get_c_code(self):
        '''Return the code for the given sampling of the random domain.
        '''
        def mu_q_method(e):
            '''Template for the evaluation of the mean response.
            '''
            self._set_compiler()
            compiler_args, linker_args = self.extra_args
            print 'compiler arguments'
            print compiler_args

            # prepare the array of the control variable discretization
            #
            eps_arr = e
            mu_q_arr = np.zeros_like(eps_arr)

            # prepare the parameters for the compiled function in
            # a separate dictionary
            arg_values = {}

            if self.compiled_eps_loop:
                # for a compiled eps loop the whole input and output
                # array must be passed to c
                #
                arg_values['e_arr'] = eps_arr
                arg_values['mu_q_arr'] = mu_q_arr

            # prepare the lengths of the arrays to set the iteration bounds
            #
            for name, theta_arr in zip(self.rand_var_names, self.theta_arrs):
                arg_values['%s_flat' % name] = theta_arr
            arg_values.update(self._get_arg_values_dG())

            if self.cached_dG:
                conv = weave.converters.blitz
            else:
                conv = weave.converters.default

            if self.compiled_eps_loop:
                # C loop over eps, all inner loops must be compiled as well
                #
                weave.inline(self.code, self.arg_names,
                             local_dict=arg_values,
                             extra_compile_args=compiler_args,
                             extra_link_args=linker_args,
                             type_converters=conv,
                             compiler=self.compiler,
                             verbose=self.compiler_verbose)
            else:
                # Python loop over eps
                #
                for idx, e in enumerate(eps_arr):
                    # C loop over random dimensions
                    #
                    arg_values['e'] = e  # prepare the parameter
                    mu_q = weave.inline(self.code, self.arg_names,
                                        local_dict=arg_values,
                                        extra_compile_args=compiler_args,
                                        extra_link_args=linker_args,
                                        type_converters=conv,
                                        compiler=self.compiler,
                                        verbose=self.compiler_verbose)
                    # add the value to the return array
                    mu_q_arr[idx] = mu_q

            var_q_arr = np.zeros_like(mu_q_arr)
            return mu_q_arr, var_q_arr

        return mu_q_method

    # ===========================================================================
    # Extra compiler arguments
    # ===========================================================================
    use_extra = Bool(False, codegen_option=True)

    extra_args = Property(depends_on='use_extra, +codegen_option, recalc')
    @cached_property
    def _get_extra_args(self):
        if self.use_extra == True:
            compiler_args = ["-DNDEBUG -g -fwrapv -O3 -march=native", "-ffast-math"]
            # , "-fno-openmp", "-ftree-vectorizer-verbose=3"]
            linker_args = []  # ["-fno-openmp"]
            return compiler_args, linker_args
        elif self.use_extra == False:
            return [], []

    # ===========================================================================
    # Auxiliary methods
    # ===========================================================================
    def _set_compiler(self):
        '''Catch eventual mismatch between scipy.weave and the compiler.
        '''
        if platform.system() == 'Linux':
            # os.environ['CC'] = 'gcc-4.1'
            # os.environ['CXX'] = 'g++-4.1'
            os.environ['OPT'] = '-DNDEBUG -g -fwrapv -O3'
        elif platform.system() == 'Windows':
            # not implemented
            pass

    def _get_code_dG_declare(self):
        '''Constant dG value - for PGrid, MCS, LHS.
        '''
        return ''

    def _get_code_dG_access(self):
        '''Default access to the dG array - only needed by TGrid.
        '''
        return ''

    def _get_arg_names_dG(self):
        return []

    def _get_arg_values_dG(self):
        return {}

    def __str__(self):
        s = 'C( '
        s += 'var_eval = %s, ' % `self.implicit_var_eval`
        s += 'compiled_eps_loop = %s, ' % `self.compiled_eps_loop`
        s += 'cached_dG = %s)' % `self.cached_dG`
        return s
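# ---------------------------------------------------------------------------
# A standalone numpy sketch of the 'cached_dG' trade-off handled by the
# generated C code above: either the cross product of the per-variable
# weights is precomputed as an n-dimensional grid (memory-heavy, fast inner
# loop), or the scalar product of weights is formed anew inside the loops.
# Variable names and sizes are illustrative assumptions.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np

    dG_la = np.full(30, 1. / 30)   # weights of random variable 'la'
    dG_xi = np.full(40, 1. / 40)   # weights of random variable 'xi'

    # cached variant: n-dimensional weight grid, built once
    dG_grid = dG_la[:, None] * dG_xi[None, :]

    # on-the-fly variant: product formed per sample point
    acc = 0.0
    for g_la in dG_la:
        for g_xi in dG_xi:
            acc += g_la * g_xi

    print np.allclose(dG_grid.sum(), acc)   # both integrate the pdf to 1.0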
class ECBReinfTexUniform(ECBReinfComponent):
    '''Cross section characteristics needed for tensile specimens.
    '''
    height = DelegatesTo('matrix_cs')
    '''height of reinforced cross section
    '''

    n_layers = Int(12, auto_set=False, enter_set=True, geo_input=True)
    '''total number of reinforcement layers [-]
    '''

    n_rovings = Int(23, auto_set=False, enter_set=True, geo_input=True)
    '''number of rovings in 0-direction of one composite layer of the
    bending test [-]
    '''

    A_roving = Float(0.461, auto_set=False, enter_set=True, geo_input=True)
    '''cross section of one roving [mm**2]
    '''

    def convert_eps_tex_u_2_lo(self, eps_tex_u):
        '''Convert the strain in the lowest reinforcement layer at failure
        to the strain at the bottom of the cross section.
        '''
        eps_up = self.state.eps_up
        return eps_up + (eps_tex_u - eps_up) / self.z_ti_arr[0] * self.height

    def convert_eps_lo_2_tex_u(self, eps_lo):
        '''Convert the strain at the bottom of the cross section
        to the strain in the lowest reinforcement layer at failure.
        '''
        eps_up = self.state.eps_up
        return (eps_up + (eps_lo - eps_up) / self.height * self.z_ti_arr[0])

    '''Convert the MN to kN
    '''

    #===========================================================================
    # material properties
    #===========================================================================

    sig_tex_u = Float(1216., auto_set=False, enter_set=True, tt_input=True)
    '''Ultimate textile stress measured in the tensile test [MPa]
    '''

    #===========================================================================
    # Distribution of reinforcement
    #===========================================================================

    s_tex_z = Property(depends_on=ECB_COMPONENT_AND_EPS_CHANGE)
    '''spacing between the layers [m]
    '''
    @cached_property
    def _get_s_tex_z(self):
        return self.height / (self.n_layers + 1)

    z_ti_arr = Property(depends_on=ECB_COMPONENT_AND_EPS_CHANGE)
    '''property: distance of each reinforcement layer from the top [m]
    '''
    @cached_property
    def _get_z_ti_arr(self):
        return np.array([self.height - (i + 1) * self.s_tex_z
                         for i in range(self.n_layers)],
                        dtype=float)

    zz_ti_arr = Property
    '''property: distance of reinforcement layers from the bottom
    '''
    def _get_zz_ti_arr(self):
        return self.height - self.z_ti_arr

    #===========================================================================
    # Discretization conform to the tex layers
    #===========================================================================

    eps_i_arr = Property(depends_on=ECB_COMPONENT_AND_EPS_CHANGE)
    '''Strain at the level of the i-th reinforcement layer
    '''
    @cached_property
    def _get_eps_i_arr(self):
        # ------------------------------------------------------------------------
        # geometric params independent from the value for 'eps_t'
        # ------------------------------------------------------------------------
        height = self.height
        eps_lo = self.state.eps_lo
        eps_up = self.state.eps_up
        # strain at the height of each reinforcement layer [-]:
        #
        return eps_up + (eps_lo - eps_up) * self.z_ti_arr / height

    eps_ti_arr = Property(depends_on=ECB_COMPONENT_AND_EPS_CHANGE)
    '''Tension strain at the level of the i-th layer of the fabrics
    '''
    @cached_property
    def _get_eps_ti_arr(self):
        return (np.fabs(self.eps_i_arr) + self.eps_i_arr) / 2.0

    eps_ci_arr = Property(depends_on=ECB_COMPONENT_AND_EPS_CHANGE)
    '''Compression strain at the level of the i-th layer.
    '''
    @cached_property
    def _get_eps_ci_arr(self):
        return (-np.fabs(self.eps_i_arr) + self.eps_i_arr) / 2.0

    #===========================================================================
    # Effective crack bridge law
    #===========================================================================
    ecb_law_type = Trait('fbm', dict(fbm=ECBLFBM,
                                     cubic=ECBLCubic,
                                     linear=ECBLLinear,
                                     bilinear=ECBLBilinear),
                         tt_input=True)
    '''Selector of the effective crack bridge law type
    ['fbm', 'cubic', 'linear', 'bilinear']
    '''

    ecb_law = Property(Instance(ECBLBase), depends_on='+tt_input')
    '''Effective crack bridge law corresponding to ecb_law_type
    '''
    @cached_property
    def _get_ecb_law(self):
        return self.ecb_law_type_(sig_tex_u=self.sig_tex_u, cs=self)

    show_ecb_law = Button
    '''Button launching a separate view of the effective crack bridge law.
    '''
    def _show_ecb_law_fired(self):
        ecb_law_mw = ConstitutiveLawModelView(model=self.ecb_law)
        ecb_law_mw.edit_traits(kind='live')
        return

    tt_modified = Event

    sig_ti_arr = Property(depends_on=ECB_COMPONENT_AND_EPS_CHANGE)
    '''Stresses at the i-th fabric layer.
    '''
    @cached_property
    def _get_sig_ti_arr(self):
        return self.ecb_law.mfn_vct(self.eps_ti_arr)

    f_ti_arr = Property(depends_on=ECB_COMPONENT_AND_EPS_CHANGE)
    '''force at the height of each reinforcement layer [kN]
    '''
    @cached_property
    def _get_f_ti_arr(self):
        sig_ti_arr = self.sig_ti_arr
        n_rovings = self.n_rovings
        A_roving = self.A_roving
        return sig_ti_arr * n_rovings * A_roving / self.unit_conversion_factor

    figure = Instance(Figure)
    def _figure_default(self):
        figure = Figure(facecolor='white')
        figure.add_axes([0.08, 0.13, 0.85, 0.74])
        return figure

    data_changed = Event

    replot = Button
    def _replot_fired(self):
        self.figure.clear()
        ax = self.figure.add_subplot(2, 2, 1)
        self.plot_eps(ax)
        ax = self.figure.add_subplot(2, 2, 2)
        self.plot_sig(ax)
        ax = self.figure.add_subplot(2, 2, 3)
        self.cc_law.plot(ax)
        ax = self.figure.add_subplot(2, 2, 4)
        self.ecb_law.plot(ax)
        self.data_changed = True

    def plot_eps(self, ax):
        # ax = self.figure.gca()
        d = self.height

        # eps ti
        ax.plot([-self.eps_lo, -self.eps_up], [0, self.height], color='black')
        ax.hlines(self.zz_ti_arr, [0], -self.eps_ti_arr, lw=4, color='red')

        # eps cj
        ec = np.hstack([self.eps_cj_arr] + [0, 0])
        zz = np.hstack([self.zz_cj_arr] + [0, self.height])
        ax.fill(-ec, zz, color='blue')

        # reinforcement layers
        eps_range = np.array([max(0.0, self.eps_lo),
                              min(0.0, self.eps_up)], dtype='float')
        z_ti_arr = np.ones_like(eps_range)[:, None] * self.z_ti_arr[None, :]
        ax.plot(-eps_range, z_ti_arr, 'k--', color='black')

        # neutral axis
        ax.plot(-eps_range, [d, d], 'k--', color='green', lw=2)

        ax.spines['left'].set_position('zero')
        ax.spines['right'].set_color('none')
        ax.spines['top'].set_color('none')
        ax.spines['left'].set_smart_bounds(True)
        ax.spines['bottom'].set_smart_bounds(True)
        ax.xaxis.set_ticks_position('bottom')
        ax.yaxis.set_ticks_position('left')

    def plot_sig(self, ax):
        d = self.height

        # f ti
        ax.hlines(self.zz_ti_arr, [0], -self.f_ti_arr, lw=4, color='red')

        # f cj
        f_c = np.hstack([self.f_cj_arr] + [0, 0])
        zz = np.hstack([self.zz_cj_arr] + [0, self.height])
        ax.fill(-f_c, zz, color='blue')

        f_range = np.array([np.max(self.f_ti_arr), np.min(f_c)], dtype='float_')

        # neutral axis
        ax.plot(-f_range, [d, d], 'k--', color='green', lw=2)

        ax.spines['left'].set_position('zero')
        ax.spines['right'].set_color('none')
        ax.spines['top'].set_color('none')
        ax.spines['left'].set_smart_bounds(True)
        ax.spines['bottom'].set_smart_bounds(True)
        ax.xaxis.set_ticks_position('bottom')
        ax.yaxis.set_ticks_position('left')

    view = View(HSplit(
                Group(
                    HGroup(
                        Group(Item('height', springy=True),
                              Item('width'),
                              Item('n_layers'),
                              Item('n_rovings'),
                              Item('A_roving'),
                              label='Geometry',
                              springy=True),
                        Group(Item('eps_up', label='Upper strain', springy=True),
                              Item('eps_lo', label='Lower strain'),
                              label='Strain',
                              springy=True),
                        springy=True,
                        ),
                    HGroup(
                        Group(VGroup(Item('cc_law_type', show_label=False, springy=True),
                                     Item('cc_law', label='Edit', show_label=False, springy=True),
                                     Item('show_cc_law', label='Show', show_label=False, springy=True),
                                     springy=True),
                              Item('f_ck', label='Compressive strength'),
                              Item('n_cj', label='Discretization'),
                              label='Concrete',
                              springy=True),
                        Group(VGroup(Item('ecb_law_type', show_label=False, springy=True),
                                     Item('ecb_law', label='Edit', show_label=False, springy=True),
                                     Item('show_ecb_law', label='Show', show_label=False, springy=True),
                                     springy=True,
                                     ),
                              label='Reinforcement',
                              springy=True),
                        springy=True,
                        ),
                    Group(Item('s_tex_z', label='vertical spacing', style='readonly'),
                          label='Layout',
                          ),
                    Group(HGroup(Item('M', springy=True, style='readonly'),
                                 Item('N', springy=True, style='readonly'),
                                 ),
                          label='Stress resultants'
                          ),
                    scrollable=True,
                    ),
                Group(Item('replot', show_label=False),
                      Item('figure', editor=MPLFigureEditor(),
                           resizable=True, show_label=False),
                      id='simexdb.plot_sheet',
                      label='plot sheet',
                      dock='tab',
                      ),
                ),
                width=0.8,
                height=0.7,
                resizable=True,
                buttons=['OK', 'Cancel'])
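# ---------------------------------------------------------------------------
# A short numeric check (with assumed, illustrative values) of the linear
# strain profile used by eps_i_arr above: with the layer positions z_ti
# measured from the top, the strain interpolates linearly between eps_up at
# z = 0 and eps_lo at z = height.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np

    height, n_layers = 0.06, 12
    eps_up, eps_lo = -0.0033, 0.014

    s_tex_z = height / (n_layers + 1)
    z_ti_arr = np.array([height - (i + 1) * s_tex_z for i in range(n_layers)])
    eps_i_arr = eps_up + (eps_lo - eps_up) * z_ti_arr / height

    # tension / compression split as in eps_ti_arr and eps_ci_arr
    eps_ti_arr = (np.fabs(eps_i_arr) + eps_i_arr) / 2.0
    eps_ci_arr = (-np.fabs(eps_i_arr) + eps_i_arr) / 2.0
    print np.allclose(eps_ti_arr + eps_ci_arr, eps_i_arr)   # True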
class ECBMatrixCrossSection(ECBCrossSectionComponent):
    '''Cross section characteristics needed for tensile specimens.
    '''

    n_cj = Float(30, auto_set=False, enter_set=True, geo_input=True)
    '''Number of integration points.
    '''

    f_ck = Float(55.7, auto_set=False, enter_set=True, cc_input=True)
    '''Ultimate compression stress [MPa]
    '''

    eps_c_u = Float(0.0033, auto_set=False, enter_set=True, cc_input=True)
    '''Strain at failure of the matrix in compression [-]
    '''

    height = Float(0.4, auto_set=False, enter_set=True, geo_input=True)
    '''height of the cross section [m]
    '''

    width = Float(0.20, auto_set=False, enter_set=True, geo_input=True)
    '''width of the cross section [m]
    '''

    x = Property(depends_on=ECB_COMPONENT_AND_EPS_CHANGE)
    '''Height of the compressive zone
    '''
    @cached_property
    def _get_x(self):
        eps_lo = self.state.eps_lo
        eps_up = self.state.eps_up
        if eps_up == eps_lo:
            # @todo: explain
            return (abs(eps_up) / (abs(eps_up - eps_lo * 1e-9)) * self.height)
        else:
            return (abs(eps_up) / (abs(eps_up - eps_lo)) * self.height)

    z_ti_arr = Property(depends_on=ECB_COMPONENT_AND_EPS_CHANGE)
    '''Discretization of the compressive zone
    '''
    @cached_property
    def _get_z_ti_arr(self):
        if self.state.eps_up <= 0:  # bending
            zx = min(self.height, self.x)
            return np.linspace(0, zx, self.n_cj)
        elif self.state.eps_lo <= 0:  # bending
            return np.linspace(self.x, self.height, self.n_cj)
        else:  # no compression
            return np.array([0], dtype='f')

    eps_ti_arr = Property(depends_on=ECB_COMPONENT_AND_EPS_CHANGE)
    '''Compressive strain at each integration layer of the compressive zone [-]
    '''
    @cached_property
    def _get_eps_ti_arr(self):
        # for calibration use measured compressive strain
        # @todo: use mapped traits instead
        #
        height = self.height
        eps_up = self.state.eps_up
        eps_lo = self.state.eps_lo
        eps_j_arr = (eps_up + (eps_lo - eps_up) * self.z_ti_arr / height)
        return (-np.fabs(eps_j_arr) + eps_j_arr) / 2.0

    zz_ti_arr = Property(depends_on=ECB_COMPONENT_AND_EPS_CHANGE)
    '''Distance of reinforcement layers from the bottom
    '''
    @cached_property
    def _get_zz_ti_arr(self):
        return self.height - self.z_ti_arr

    #===========================================================================
    # Compressive concrete constitutive law
    #===========================================================================

    cc_law_type = Trait('constant', dict(constant=CCLawBlock,
                                         linear=CCLawLinear,
                                         quadratic=CCLawQuadratic,
                                         quad=CCLawQuad),
                        cc_input=True)
    '''Selector of the concrete compression law type
    ['constant', 'linear', 'quadratic', 'quad']
    '''

    cc_law = Property(Instance(CCLawBase), depends_on='+cc_input')
    '''Compressive concrete law corresponding to cc_law_type
    '''
    @cached_property
    def _get_cc_law(self):
        return self.cc_law_type_(f_ck=self.f_ck, eps_c_u=self.eps_c_u, cs=self)

    show_cc_law = Button
    '''Button launching a separate view of the compression law.
    '''
    def _show_cc_law_fired(self):
        cc_law_mw = ConstitutiveLawModelView(model=self.cc_law)
        cc_law_mw.edit_traits(kind='live')
        return

    cc_modified = Event

    #===========================================================================
    # Calculation of compressive stresses and forces
    #===========================================================================

    sig_ti_arr = Property(depends_on=ECB_COMPONENT_AND_EPS_CHANGE)
    '''Stresses at the j-th integration point.
    '''
    @cached_property
    def _get_sig_ti_arr(self):
        return -self.cc_law.mfn_vct(-self.eps_ti_arr)

    f_ti_arr = Property(depends_on=ECB_COMPONENT_AND_EPS_CHANGE)
    '''Layer force corresponding to the j-th integration point.
    '''
    @cached_property
    def _get_f_ti_arr(self):
        return self.width * self.sig_ti_arr * self.unit_conversion_factor

    def _get_N(self):
        return np.trapz(self.f_ti_arr, self.z_ti_arr)

    def _get_M(self):
        return np.trapz(self.f_ti_arr * self.z_ti_arr, self.z_ti_arr)

    modified = Event
    @on_trait_change('+geo_input')
    def set_modified(self):
        self.modified = True

    view = View(HGroup(
                Group(Item('height', springy=True),
                      Item('width'),
                      Item('n_layers'),
                      Item('n_rovings'),
                      Item('A_roving'),
                      label='Geometry',
                      springy=True),
                springy=True,
                ),
                resizable=True,
                buttons=['OK', 'Cancel'])
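# ---------------------------------------------------------------------------
# A standalone sketch (with assumed numbers) of the stress resultant
# evaluation in _get_N and _get_M above: the normal force is the trapezoidal
# integral of the line force over the compressive zone, and the moment is
# the integral of force times lever arm.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np

    z_arr = np.linspace(0, 0.1, 30)       # integration points [m]
    f_arr = -12.0 * (1.0 - z_arr / 0.1)   # assumed linear force profile [kN/m]

    N = np.trapz(f_arr, z_arr)            # resultant normal force
    M = np.trapz(f_arr * z_arr, z_arr)    # resultant moment about z = 0
    print 'N = %.3f, M = %.4f' % (N, M)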
class SPIRRID(FunctionRandomization):
    '''Set of parallel independent responses with random identical distributions.
    '''
    #===========================================================================
    # type of the sampling of the random domain
    #===========================================================================
    sampling_type = Trait('TGrid', {'TGrid': TGrid,
                                    'PGrid': PGrid,
                                    'MCS': MonteCarlo,
                                    'LHS': LatinHypercubeSampling},
                          input_change=True)

    sampling = Property(depends_on='input_change')
    @cached_property
    def _get_sampling(self):
        return self.sampling_type_(randomization=self)

    #===========================================================================
    # Code generator
    #===========================================================================
    codegen_type = Trait('numpy', {'numpy': CodeGenNumpyFactory(),
                                   'c': CodeGenCFactory(),
                                   'cython': CodeGenCythonFactory()},
                         input_change=True)

    # object representing the code generator
    codegen = Property(depends_on='codegen_type, sampling_type')
    @cached_property
    def _get_codegen(self):
        return self.codegen_type_(spirrid=self)

    #===========================================================================
    # Inspection methods
    #===========================================================================
    def get_samples(self, n=20):
        '''Return the first n randomly selected samples.
        '''
        return self.sampling.get_samples(n)

    #===========================================================================
    # Template for the integration of a response function in the time loop
    #===========================================================================
    mu_q_method = Property(Callable, depends_on='input_change, alg_option, recalc')
    @cached_property
    def _get_mu_q_method(self):
        '''Generate an integrator method for the particular data type
        of dG and variables.
        '''
        return self.codegen.get_code()

    #===========================================================================
    # Run the estimation of the mean response
    #===========================================================================
    results = Property(depends_on='input_change, codegen_option, recalc')
    @cached_property
    def _get_results(self):
        '''Estimate the mean value function given the randomization pattern.
        '''
        self.tvar_names
        self.evar_names
        time__start = sysclock()
        e_orth = make_ogrid(self.evar_lst)
        self.sampling.theta
        self.sampling.dG
        time__data_setup = sysclock()
        self.mu_q_method
        time__method_setup = sysclock()
        mu_q_arr, var_q_arr = self.mu_q_method(*e_orth)
        time__exec = sysclock()
        return mu_q_arr, var_q_arr, [time__data_setup - time__start,
                                     time__method_setup - time__data_setup,
                                     time__exec - time__method_setup]

    #===========================================================================
    # Access results
    #===========================================================================
    mu_q_arr = Property()
    def _get_mu_q_arr(self):
        '''Mean value of q'''
        return self.results[0]

    var_q_arr = Property()
    def _get_var_q_arr(self):
        '''Variance of q'''
        # switch on the implicit evaluation of the variance
        # if it has not been the case so far
        if not self.codegen.implicit_var_eval:
            self.codegen.implicit_var_eval = True
        return self.results[1]

    exec_time = Property()
    def _get_exec_time(self):
        return self.results[2]

    #===========================================================================
    # state monitors
    #===========================================================================

    # enable recalculation of the results property
    # (for time efficiency analysis)
    recalc = Event
    @on_trait_change('recalc')
    def set_recalc(self):
        self.sampling.recalc = True
        self.codegen.recalc = True

    # Changes of traits tagged with 'input_change' metadata
    # are monitored using the input_change event
    input_change = Event
    @on_trait_change('+input_change')
    def set_input_change(self):
        self.input_change = True

    # The subcomponents codegen and sampling can use
    # this event to trigger a change inducing a recalculation
    codegen_option = Event

    # Change in the sampling configuration
    # (this might be thresholds for covering the random domain)
    sampling_option = Event

    #===========================================================================
    # Introspection
    #===========================================================================

    # report the current configuration of the integrator
    def __str__(self):
        # get the name either of the method or of the class
        try:
            qname = self.q.__name__
        except AttributeError:
            qname = self.q.__class__.__name__
        s = '# function:\n'
        s += 'q = %s(%s)\n' % (qname, string.join(self.var_names, ','))
        s += '# evars:\n'
        s += self.evar_str
        s += '\n'
        s += '# tvars[n_int = %d]: \n' % self.n_int
        s += self.tvar_str
        s += '\n'
        s += '# sampling: %s\n' % self.sampling_type
        s += '# codegen: %s\n' % self.codegen_type
        s += str(self.codegen)
        return s
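# ---------------------------------------------------------------------------
# A standalone comparison (toy example, not the library API) of two of the
# sampling strategies selectable via sampling_type above: a regular grid of
# the random domain versus plain Monte Carlo sampling. Both estimate the
# same mean response; the distribution and response function are assumed.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np

    def q(e, la):
        return la * e

    e = 0.5

    # TGrid-like estimate: equidistant probabilities mapped through the
    # inverse cdf of U(1, 2)
    p = (np.arange(100) + 0.5) / 100.0
    la_grid = 1.0 + p
    mu_grid = np.mean(q(e, la_grid))

    # MCS-like estimate: random sampling of the same distribution
    la_mc = np.random.uniform(1.0, 2.0, 10000)
    mu_mc = np.mean(q(e, la_mc))

    print mu_grid, mu_mc   # both close to 0.75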
class YMBFieldVar(HasTraits):

    data = Instance(IYMBData)

    n_cols = Property()
    def _get_n_cols(self):
        return self.data.n_cuts

    var_enum = Trait('radius', var_dict, modified=True)

    scalar_arr = Property(depends_on='var_enum')
    def _get_scalar_arr(self):
        return getattr(self.data, self.var_enum_)

    sorted_on = Bool(False, modified=True)

    scalar_arr_sorted = Property(depends_on='var_enum')
    def _get_scalar_arr_sorted(self):
        '''Return scalar array sorted by the shortest distance from the edge.
        '''
        scalar_arr = zeros_like(getattr(self.data, self.var_enum_))
        scalar_mask_arr = zeros_like(getattr(self.data, self.var_enum_))
        distance_arr = self.data.edge_distance.filled()
        for i in range(0, self.n_cols):
            scalar_mask_arr[:, i] = zip(*sorted(zip(distance_arr[:, i],
                                                    getattr(self.data, self.var_enum_).mask[:, i]),
                                                reverse=True))[1]
            scalar_arr[:, i] = zip(*sorted(zip(distance_arr[:, i],
                                               getattr(self.data, self.var_enum_).filled()[:, i]),
                                           reverse=True))[1]
        return ma.array(scalar_arr, mask=array(scalar_mask_arr, dtype=bool))

    figure = Instance(Figure, ())
    def _figure_default(self):
        figure = Figure()
        figure.add_axes([0.1, 0.1, 0.8, 0.8])
        return figure

    data_changed = Event(True)
    @on_trait_change('+modified, data')
    def _redraw(self):
        self.figure.clear()
        self.figure.add_axes([0.1, 0.1, 0.8, 0.8])
        figure = self.figure
        axes = figure.axes[0]
        axes.clear()

        if self.sorted_on == True:
            scalar_arr = self.scalar_arr_sorted
        else:
            scalar_arr = self.scalar_arr

        xi = linspace(min(self.data.cut_x), max(self.data.cut_x), 100)

        x = (ones_like(scalar_arr) * self.data.cut_x).flatten()
        ny_row = scalar_arr.shape[0]
        dy = max(diff(self.data.cut_x))
        yi = linspace(0, ny_row * dy, ny_row)
        y = (ones_like(scalar_arr).T * linspace(0, ny_row * dy, ny_row)).T.flatten()
        z = scalar_arr.flatten()
        zi = griddata(x, y, z, xi, yi, interp='nn')

        # contour the gridded data, plotting dots at the nonuniform data points
        # axes.contour(xi, yi, zi, 20, linewidths=.5, colors='k')
        # plotting filled contour
        axes.contourf(xi, yi, zi, 200, cmap=my_cmap_lin)
        scat = axes.scatter(x, y, marker='o', c=z, s=20,
                            linewidths=0, cmap=my_cmap_lin)
        figure.colorbar(scat)

        self.data_changed = True

    view = View('var_enum',
                'sorted_on',
                Item('figure', style='custom',
                     editor=MPLFigureEditor(),
                     show_label=False),
                id='yarn_structure_view',
                resizable=True,
                scrollable=True,
                dock='tab',
                width=0.8,
                height=0.4)
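# ---------------------------------------------------------------------------
# A compact standalone equivalent (with illustrative random data) of the
# column-wise sort in _get_scalar_arr_sorted above: each column of a 2d
# array is reordered by a per-column key array, here via argsort and fancy
# indexing instead of the zip/sorted idiom.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np

    distance_arr = np.random.rand(5, 3)   # sort key, one column per cut
    scalar_arr = np.random.rand(5, 3)     # values to be reordered

    # indices that sort each column by descending distance
    order = np.argsort(-distance_arr, axis=0)
    cols = np.arange(scalar_arr.shape[1])[None, :]
    scalar_sorted = scalar_arr[order, cols]
    print scalar_sorted.shape             # (5, 3)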
class LCCTable(HasTraits):
    '''Loading Case Manager.
    Generates and sorts the loading case combinations
    of all specified loading cases.
    '''

    # define ls
    #
    ls = Trait('ULS', {'ULS': ULS, 'SLS': SLS})

    # lcc-instance for the view
    #
    lcc = Instance(LCC)

    #-------------------------------
    # Define loading cases:
    #-------------------------------

    # path to the directory containing the state data files
    #
    data_dir = Directory

    # list of load cases
    #
    lc_list_ = List(Instance(LC))

    lc_list = Property(List, depends_on='+filter')
    def _set_lc_list(self, value):
        self.lc_list_ = value
    def _get_lc_list(self):
        # for lc in self.lc_list_:
        #     if lc.data_filter != self.data_filter:
        #         lc.data_filter = self.data_filter
        return self.lc_list_

    lcc_table_columns = Property(depends_on='lc_list_, +filter')
    def _get_lcc_table_columns(self):
        return [ObjectColumn(label='Id', name='lcc_id')] + \
               [ObjectColumn(label=lc.name, name=lc.name)
                for idx, lc in enumerate(self.lc_list)] + \
               [ObjectColumn(label='assess_value', name='assess_value')]

    geo_columns = Property(List(Str), depends_on='lc_list_, +filter')
    def _get_geo_columns(self):
        '''derive the order of the geo columns
        from the first element in 'lc_list'. The internal
        consistency is checked separately in the
        'check_consistency' method.
        '''
        return self.lc_list[0].geo_columns

    sr_columns = Property(List(Str), depends_on='lc_list_, +filter')
    def _get_sr_columns(self):
        '''derive the order of the stress resultants
        from the first element in 'lc_list'. The internal
        consistency is checked separately in the
        'check_consistency' method.
        '''
        return self.lc_list[0].sr_columns

    #-------------------------------
    # check consistency
    #-------------------------------

    def _check_for_consistency(self):
        '''check input files for consistency.
        '''
        return True

    #-------------------------------
    # lc_arr
    #-------------------------------

    lc_arr = Property(Array)
    def _get_lc_arr(self):
        '''stack the stress resultant arrays of all loading cases together.
        This yields an array of shape ( n_lc, n_elems, n_sr ).
        '''
        sr_arr_list = [lc.sr_arr for lc in self.lc_list]
        # for x in sr_arr_list:
        #     print x.shape
        return array(sr_arr_list)

    #-------------------------------
    # Array dimensions:
    #-------------------------------

    n_sr = Property(Int)
    def _get_n_sr(self):
        return len(self.sr_columns)

    n_lc = Property(Int)
    def _get_n_lc(self):
        return len(self.lc_list)

    n_lcc = Property(Int)
    def _get_n_lcc(self):
        return self.combi_arr.shape[0]

    n_elems = Property(Int)
    def _get_n_elems(self):
        return self.lc_list[0].sr_arr.shape[0]

    #-------------------------------
    # auxiliary method for get_combi_arr
    #-------------------------------

    def _product(self, args):
        """Get all possible permutations of the security factors
        without changing the order of the loading cases.
        The method corresponds to the built-in function 'itertools.product'.
        Instead of returning a generator object, a list of all possible
        permutations is returned. As argument a list of lists needs to be
        defined. In the original version of 'itertools.product' the function
        takes a tuple as argument ("*args").
        """
        pools = map(tuple, args)  # within the original version args is defined as *args
        result = [[]]
        for pool in pools:
            result = [x + [y] for x in result for y in pool]
        return result

    # ------------------------------------------------------------
    # 'combi_arr' - array containing indices of all loading case combinations:
    # ------------------------------------------------------------

    # list of indices of the position of the imposed loads in 'lc_list'
    #
    # imposed_idx_list = Property(List, depends_on='lc_list_, lc_list_.+input')
    imposed_idx_list = Property(List, depends_on='lc_list_')
    @cached_property
    def _get_imposed_idx_list(self):
        '''list of indices for the imposed loads
        '''
        imposed_idx_list = []
        for i_lc, lc in enumerate(self.lc_list):
            cat = lc.category
            if cat == 'imposed-load':
                imposed_idx_list.append(i_lc)
        return imposed_idx_list

    # array containing the psi with name 'psi_key' for the specified
    # loading cases defined in 'lc_list'. For dead-loads no value for
    # psi exists. In this case a value of 1.0 is defined.
    # This yields an array of shape ( n_lc, )
    #
    def _get_psi_arr(self, psi_key):
        '''psi_key must be defined as: 'psi_0', 'psi_1', or 'psi_2'.
        Returns a 1d-array of shape ( n_lc, ).
        '''
        # get list of ones (used for dead-loads):
        #
        psi_list = [1] * len(self.lc_list)

        # overwrite ones with psi-values in case of imposed-loads:
        #
        for imposed_idx in self.imposed_idx_list:
            psi_value = getattr(self.lc_list[imposed_idx], psi_key)
            psi_list[imposed_idx] = psi_value

        return array(psi_list, dtype='float_')

    # list containing names of the loading cases
    #
    lc_name_list = Property(List, depends_on='lc_list_')
    @cached_property
    def _get_lc_name_list(self):
        '''list of names of all loading cases
        '''
        return [lc.name for lc in self.lc_list]

    show_lc_characteristic = Bool(True)

    # combination array:
    #
    combi_arr = Property(Array, depends_on='lc_list_, combination_SLS')
    @cached_property
    def _get_combi_arr(self):
        '''array containing the security and combination factors
        corresponding to the specified loading cases.
        This yields an array of shape ( n_lcc, n_lc ).

        Properties defined in the subclasses 'LCCTableULS', 'LCCTableSLS':
        - 'gamma_list' = list of security factors (gamma)
        - 'psi_lead' = combination factors (psi) of the leading imposed load
        - 'psi_non_lead' = combination factors (psi) of the non-leading
          imposed loads
        '''
        # printouts:
        #
        if self.ls == 'ULS':
            print '*** load case combinations for limit state ULS ***'
        else:
            print '*** load case combinations for limit state SLS ***'
            print '*** SLS combination used: % s ***' % (self.combination_SLS)

        #---------------------------------------------------------------
        # get permutations of safety factors ('gamma')
        #---------------------------------------------------------------
        #
        permutation_list = self._product(self.gamma_list)
        combi_arr = array(permutation_list)

        # check if imposed loads are defined;
        # if not, no further processing of 'combi_arr' is necessary:
        #
        if self.imposed_idx_list == []:
            # if option is set to 'True' the loading case combination table
            # is enlarged with an identity matrix in order to see the
            # characteristic values of each loading case.
            #
            if self.show_lc_characteristic:
                combi_arr = vstack([identity(self.n_lc), combi_arr])
            return combi_arr

        #---------------------------------------------------------------
        # get leading and non-leading combination factors ('psi')
        #---------------------------------------------------------------
        # go through all possible cases of leading imposed loads.
        # For the currently investigated imposed loading case the
        # psi value is taken from 'psi_lead_arr'; for all other
        # imposed loads the psi value is taken from 'psi_non_lead_arr'.
        # Properties are defined in the subclasses.
        #
        psi_lead_arr = self.psi_lead_arr
        psi_non_lead_arr = self.psi_non_lead_arr

        # for the SLS limit state case 'rare' all imposed loads are multiplied
        # with 'psi_2'. In this case no distinction between leading and
        # non-leading imposed loads needs to be performed.
        #
        if all(psi_lead_arr == psi_non_lead_arr):
            combi_arr_psi = combi_arr * psi_lead_arr

        # generate a list of arrays obtained by multiplication
        # with the psi-factors.
        # This yields a list of length = number of imposed loads.
        #
        else:
            combi_arr_psi_list = []
            for imposed_idx in self.imposed_idx_list:
                # copy in order to preserve the initial state of the array
                # and avoid in-place modification
                psi_arr = copy(psi_non_lead_arr)
                psi_arr[imposed_idx] = psi_lead_arr[imposed_idx]
                combi_arr_lead_i = combi_arr[where(combi_arr[:, imposed_idx] != 0)] * psi_arr
                combi_arr_psi_list.append(combi_arr_lead_i)

            combi_arr_psi_no_0 = vstack(combi_arr_psi_list)

            # missing cases without any dead load have to be added:
            # get combinations with all(!) imposed loads = 0
            #
            lcc_all_imposed_zero = where((combi_arr[:, self.imposed_idx_list] == 0).all(axis=1))

            # add to combinations
            #
            combi_arr_psi = vstack((combi_arr[lcc_all_imposed_zero], combi_arr_psi_no_0))

        #---------------------------------------------------------------
        # get exclusive loading cases ('exclusive_to')
        #---------------------------------------------------------------
        # get a list of lists containing the indices of the loading cases
        # that are defined exclusive to each other.
        # The list still contains duplicates, e.g. [1, 2] and [2, 1]
        #
        exclusive_list = []
        for i_lc, lc in enumerate(self.lc_list):
            # get the related load case number
            #
            for exclusive_name in lc.exclusive_to:
                if exclusive_name in self.lc_name_list:
                    exclusive_idx = self.lc_name_list.index(exclusive_name)
                    exclusive_list.append([i_lc, exclusive_idx])

        # eliminate the duplicates in 'exclusive_list'
        #
        exclusive_list_unique = []
        for exclusive_list_entry in exclusive_list:
            if sorted(exclusive_list_entry) not in exclusive_list_unique:
                exclusive_list_unique.append(sorted(exclusive_list_entry))

        # delete the rows in the combination array that contain
        # loading case combinations with imposed loads that have been
        # defined as exclusive to each other.
        #
        combi_arr_psi_exclusive = combi_arr_psi
        # print 'combi_arr_psi_exclusive', combi_arr_psi_exclusive
        for exclusive_list_entry in exclusive_list_unique:
            # check where at most one value of the exclusive load cases
            # is unequal to zero
            #
            #        LC1  LC2  LC3  (all LCs are defined as exclusive to each other)
            #
            # e.g.   1.5  0.9  0.8  (example of 'combi_arr_psi')
            #        1.5  0.0  0.0
            #        0.0  0.0  0.0  (combination with all imposed loads = 0
            #                        after multiplication with psi and gamma)
            #        ...  ...  ...
            #
            # this yields the following mask_arr (containing ones or zeros):
            # e.g.   1.0  1.0  1.0  --> sum = 3 --> false combi --> rejected combination
            #        1.0  0.0  0.0  --> sum = 1 --> true combi  --> accepted combination
            # e.g.   0.0  0.0  0.0  --> sum = 0 --> true combi  --> accepted combination
            #                                       (only body-loads)
            #        ...  ...  ...
            #
            mask_arr = where(combi_arr_psi_exclusive[:, exclusive_list_entry] != 0, 1.0, 0.0)
            # print 'mask_arr', mask_arr
            true_combi = where(sum(mask_arr, axis=1) <= 1.0)
            # print 'true_combi', true_combi
            combi_arr_psi_exclusive = combi_arr_psi_exclusive[true_combi]

        #---------------------------------------------------------------
        # create array with only unique load case combinations
        #---------------------------------------------------------------
        # If the psi values of an imposed load are defined as zero this
        # may lead to zero entries in 'combi_arr'. This would yield rows
        # in 'combi_arr' which are duplicates. Those rows are removed.

        # Add the first row of 'combi_arr_psi_exclusive' to the '_unique' array.
        # This array must have shape (1, n_lc) in order to use the 'axis'-option
        #
        combi_arr_psi_exclusive_unique = combi_arr_psi_exclusive[0][None, :]

        for row in combi_arr_psi_exclusive:
            # Check if all factors in one row are equal to the rows in the
            # 'unique' array. If this is not the case for any row, the
            # combination is added to 'unique'.
            # Broadcasting is used for the bool evaluation:
            #
            if (row == combi_arr_psi_exclusive_unique).all(axis=1).any() == False:
                combi_arr_psi_exclusive_unique = vstack((combi_arr_psi_exclusive_unique, row))

        # if option is set to 'True' the loading case combination table
        # is enlarged with an identity matrix in order to see the
        # characteristic values of each loading case.
        #
        # if self.show_lc_characteristic:
        #     combi_arr_psi_exclusive_unique = vstack([identity(self.n_lc), combi_arr_psi_exclusive_unique])

        return combi_arr_psi_exclusive_unique

    #-------------------------------
    # lcc_arr
    #-------------------------------

    lcc_arr = Property(Array, depends_on='lc_list_')
    @cached_property
    def _get_lcc_arr(self):
        '''Array of all loading case combinations following the loading cases
        defined in 'lc_list' and the combinations defined in 'combi_arr'.
        This yields an array of shape ( n_lcc, n_elems, n_sr ).
        '''
        self._check_for_consistency()

        combi_arr = self.combi_arr

        # 'combi_arr' is of shape ( n_lcc, n_lc )
        # 'lc_arr' is of shape ( n_lc, n_elems, n_sr )
        #
        lc_arr = self.lc_arr

        # Broadcasting is used to generate an array containing the
        # multiplied lc's, yielding an array of shape
        # ( n_lcc, n_lc, n_elems, n_sr )
        #
        lc_combi_arr = lc_arr[None, :, :, :] * combi_arr[:, :, None, None]

        # Then the sum over index 'n_lc' is evaluated, yielding
        # an array of all loading case combinations.
        # This yields an array of shape ( n_lcc, n_elems, n_sr )
        #
        lcc_arr = sum(lc_combi_arr, axis=1)

        return lcc_arr

    #-------------------------------
    # lcc_lists
    #-------------------------------

    lcc_list = Property(List, depends_on='lc_list_')
    @cached_property
    def _get_lcc_list(self):
        '''list of loading case combinations (instances of LCC)
        '''
        combi_arr = self.combi_arr
        lcc_arr = self.lcc_arr
        sr_columns = self.sr_columns
        geo_columns = self.geo_columns

        n_lcc = self.n_lcc

        # return a dictionary of the stress resultants;
        # this is used by LSTable to determine the stress
        # resultants of the current limit state
        #
        lcc_list = []
        for i_lcc in range(n_lcc):

            state_data_dict = {}
            for i_sr, name in enumerate(sr_columns):
                state_data_dict[name] = lcc_arr[i_lcc, :, i_sr][:, None]

            geo_data_dict = self.geo_data_dict

            lcc = LCC(  # lcc_table = self,
                      factors=combi_arr[i_lcc, :],
                      lcc_id=i_lcc,
                      ls_table=LSTable(geo_data=geo_data_dict,
                                       state_data=state_data_dict,
                                       ls=self.ls))

            for idx, lc in enumerate(self.lc_list):
                lcc.add_trait(lc.name, Int(combi_arr[i_lcc, idx]))

            lcc_list.append(lcc)

        return lcc_list

    #-------------------------------
    # geo_arr
    #-------------------------------

    geo_data_dict = Property(Dict, depends_on='lc_list_')
    @cached_property
    def _get_geo_data_dict(self):
        '''Dictionary of global coords derived from the first loading case
        defined in 'lc_list'. Coords are identical for all LC's.
        '''
        return self.lc_list[0].geo_data_dict

    #-------------------------------
    # min/max-values
    #-------------------------------

    def get_min_max_state_data(self):
        '''get the surrounding curve of all 'lcc' values
        '''
        lcc_arr = self.lcc_arr
        min_arr = ndmin(lcc_arr, axis=0)
        max_arr = ndmax(lcc_arr, axis=0)
        return min_arr, max_arr

    #--------------------------------------
    # use for case 'max N*' according to ZiE
    # (case 'maximum normal force' according to ZiE)
    #--------------------------------------
    # max_sr_grouped_dict = Property(Dict)
    # @cached_property
    # def _get_max_sr_grouped_dict(self):
    #     '''get the surrounding curve for each stress resultant
    #     shape of lcc_arr: ( n_lcc, n_elems, n_sr )
    #     '''
    #     sr_columns = self.sr_columns
    #     lcc_arr = self.lcc_arr
    #     dict = {}
    #     for i, sr in enumerate(self.sr_columns):
    #         idx_1 = argmax(abs(lcc_arr[:, :, i]), axis=0)
    #         idx_2 = arange(0, idx_1.shape[0], 1)
    #         dict[sr] = lcc_arr[idx_1, idx_2, :]
    #     return dict

    #--------------------------------------
    # use for case 'max eta' according to ZiE
    # (case 'maximum utilization ratio' according to ZiE)
    #--------------------------------------
    max_sr_grouped_dict = Property(Dict)
    @cached_property
    def _get_max_sr_grouped_dict(self):
        '''evaluate eta and prepare plot
        '''
        sr_columns = self.sr_columns
        lcc_arr = self.lcc_arr

        # N_s6cm_d results from 'V_op_d' * 1.5;
        # assume a distribution of stresses as for a simply
        # supported beam with cantilever corresponding
        # to the distance of the screws to each other and to the edge
        # of the TRC shell (33cm/17cm)
        #
        N_s6cm_d = lcc_arr[:, :, 2] * (17. + 33. + 1.) / 33.

        # V_s6cm_d results from 'N_ip_d' / 2;
        # assume an equal distribution (50% each) of the
        # normal forces to each screw
        #
        V_s6cm_d = lcc_arr[:, :, 0] * 0.56
        # V_s6cm_d = ((lcc_arr[:, :, 0] / 2) ** 2 + (lcc_arr[:, :, 1] * 1.5) ** 2) ** 0.5

        # resistance; characteristic value obtained from the
        # experiment and EN DIN 1990
        #
        N_ck = 28.3
        V_ck = 63.8

        gamma_s = 1.5

        eta_N = N_s6cm_d / (N_ck / gamma_s)
        eta_V = abs(V_s6cm_d / (V_ck / gamma_s))
        eta_inter = (eta_N) + (eta_V)

        idx_max_hinge = eta_inter.argmax(axis=0)

        dict = {}
        for i, sr in enumerate(self.sr_columns):
            idx_1 = idx_max_hinge
            idx_2 = arange(0, idx_1.shape[0], 1)
            dict[sr] = lcc_arr[idx_1, idx_2, :]
        return dict

    def export_hf_max_grouped(self, filename):
        '''exports the hinge forces as consistent pairs for the two cases
        'max_eta' and 'max_N*'
        '''
        from matplotlib import pyplot
        sr_columns = self.sr_columns
        dict = self.max_sr_grouped_dict
        length_xy_quarter = self.length_xy_quarter

        def save_bar_plot(x, y, filename='bla', title='Title',
                          xlabel='xlabel', ylabel='ylabel',
                          width=0.1, xmin=0, xmax=1000,
                          ymin=-1000, ymax=1000, figsize=[10, 5]):
            fig = pyplot.figure(facecolor="white", figsize=figsize)
            ax1 = fig.add_subplot(1, 1, 1)
            ax1.bar(x, y, width=width, align='center', color='green')
            ax1.set_xlim(xmin, xmax)
            ax1.set_ylim(ymin, ymax)
            ax1.set_xlabel(xlabel, fontsize=22)
            ax1.set_ylabel(ylabel, fontsize=22)
            if title == 'N_ip max':
                title = 'Fall max $\eta$'
                # title = 'Fall max $N^{*}$'
            if title == 'V_ip max':
                title = 'max $V_{ip}$'
            if title == 'V_op max':
                title = 'Fall max $V^{*}$'
            ax1.set_title(title)
            fig.savefig(filename, orientation='portrait', bbox_inches='tight')
            pyplot.clf()

        X = array(self.geo_data_dict['X_hf'])
        Y = array(self.geo_data_dict['Y_hf'])

        # symmetric axes
        #
        idx_sym = where(abs(Y[:, 0] - 2.0 * length_xy_quarter) <= 0.0001)
        X_sym = X[idx_sym].reshape(-1)
        idx_r0_r1 = where(abs(X[:, 0] - 2.0 * length_xy_quarter) <= 0.0001)
        X_r0_r1 = Y[idx_r0_r1].reshape(-1)

        for sr in sr_columns:
            F_int = dict[sr]  # first column N_ip, second V_ip, third V_op
            F_sym = F_int[idx_sym, :].reshape(-1, len(sr_columns))
            F_r0_r1 = F_int[idx_r0_r1, :].reshape(-1, len(sr_columns))

            save_bar_plot(X_sym, F_sym[:, 0].reshape(-1),
                          xlabel='$X$ [m]', ylabel='$N^{*}_{Ed}$ [kN]',
                          filename=filename + 'N_ip' + '_sym_' + sr + '_max',
                          title=sr + ' max',
                          xmin=0.0, xmax=3.5 * length_xy_quarter,
                          figsize=[10, 5], ymin=-30, ymax=+30)
            if self.link_type == 'inc_V_ip':
                save_bar_plot(X_sym, F_sym[:, 1].reshape(-1),
                              xlabel='$X$ [m]', ylabel='$V_{ip}$ [kN]',
                              filename=filename + 'V_ip' + '_sym_' + sr + '_max',
                              title=sr + ' max',
                              xmin=0.0, xmax=3.5 * length_xy_quarter,
                              figsize=[10, 5], ymin=-30, ymax=+30)
            save_bar_plot(X_sym, F_sym[:, 2].reshape(-1),
                          xlabel='$X$ [m]', ylabel='$V^{*}_{Ed}$ [kN]',
                          filename=filename + 'V_op' + '_sym_' + sr + '_max',
                          title=sr + ' max',
                          xmin=0.0, xmax=3.5 * length_xy_quarter,
                          figsize=[10, 5], ymin=-10, ymax=+10)

            # r0_r1
            #
            save_bar_plot(X_r0_r1, F_r0_r1[:, 0].reshape(-1),
                          xlabel='$Y$ [m]', ylabel='$N^{*}_{Ed}$ [kN]',
                          filename=filename + 'N_ip' + '_r0_r1_' + sr + '_max',
                          title=sr + ' max',
                          xmin=0.0, xmax=2.0 * length_xy_quarter,
                          figsize=[5, 5], ymin=-30, ymax=+30)
            if self.link_type == 'inc_V_ip':
                save_bar_plot(X_r0_r1, F_r0_r1[:, 1].reshape(-1),
                              xlabel='$Y$ [m]', ylabel='$V_{ip}$ [kN]',
                              filename=filename + 'V_ip' + '_r0_r1_' + sr + '_max',
                              title=sr + ' max',
                              xmin=0.0, xmax=2.0 * length_xy_quarter,
                              figsize=[5, 5], ymin=-30, ymax=+30)
            save_bar_plot(X_r0_r1, F_r0_r1[:, 2].reshape(-1),
                          xlabel='$Y$ [m]', ylabel='$V^{*}_{Ed}$ [kN]',
                          filename=filename + 'V_op' + '_r0_r1_' + sr + '_max',
                          title=sr + ' max',
                          xmin=0.0, xmax=2.0 * length_xy_quarter,
                          figsize=[5, 5], ymin=-10, ymax=+10)

    def plot_interaction_s6cm(self):
        '''get the maximum values (consistent pairs of N and V)
        and plot them in an interaction plot
        '''
        lcc_arr = self.lcc_arr

        # F_Edt results from 'V_op_d' * 1.5;
        # assume a distribution of stresses as for a simply
        # supported beam with cantilever corresponding
        # to the distance of the screws to each other and to the edge
        # of the TRC shell (33cm/17cm)
        #
        F_Edt = lcc_arr[:, :, 2] * (17. + 33. + 1.) / 33.

        # F_EdV1 results from 'N_ip_d' / 2;
        # assume an equal distribution (50% each) of the
        # normal forces to each screw
        #
        F_EdV1 = lcc_arr[:, :, 0] * 0.56
        # V_s6cm_d = ((lcc_arr[:, :, 0] / 2) ** 2 + (lcc_arr[:, :, 1] * 1.5) ** 2) ** 0.5

        # resistance; characteristic value obtained from the
        # experiment and EN DIN 1990
        #
        F_Rkt = 28.3
        F_RkV1 = 63.8

        gamma_M = 1.5

        eta_t = abs(F_Edt / (F_Rkt / gamma_M))
        eta_V1 = abs(F_EdV1 / (F_RkV1 / gamma_M))
        print 'eta_t.shape', eta_t.shape
        print 'eta_V1.shape', eta_V1.shape

        # self.interaction_plot(abs(F_Edt), abs(F_EdV1))
        self.interaction_plot(eta_t, eta_V1)

        # eta_inter = (eta_N) + (eta_V)
        # idx_max_hinge = eta_inter.argmax(axis=0)
        # idx_hinge = arange(0, len(idx_max_hinge), 1)
        # plot_eta_N = eta_N[idx_max_hinge, idx_hinge]
        # plot_eta_V = eta_V[idx_max_hinge, idx_hinge]
        # self.interaction_plot(plot_eta_N, plot_eta_V)

    def interaction_plot(self, eta_N, eta_V):
        from matplotlib import font_manager
        ticks_font = font_manager.FontProperties(family='Times', style='normal',
                                                 size=18, weight='normal',
                                                 stretch='normal')
        from matplotlib import pyplot
        fig = pyplot.figure(facecolor="white", figsize=[10, 10])
        ax1 = fig.add_subplot(1, 1, 1)
        # x = arange(0, 1.01, 0.01)
        # y15 = (1 - x ** 1.5) ** (1 / 1.5)
        # y = (1 - x)

        ax1.set_xlabel('$F_\mathrm{Ed,V1}/F_\mathrm{Rd,V1}$', fontsize=24)
        ax1.set_ylabel('$F_\mathrm{Ed,t}/F_\mathrm{Rd,t}$', fontsize=24)
        # ax1.set_xlabel('$|N_\mathrm{Ed}|$', fontsize=32)
        # ax1.set_ylabel('$|V_\mathrm{Ed}|$', fontsize=32)

        # ax1.plot(x, y, '--', color='black', linewidth=2.0)
        # ax1.plot(x, y15, '--', color='black', linewidth=2.0)

        ax1.plot(eta_V, eta_N, 'wo', markersize=3)
        # ax1.plot(eta_V, eta_N, 'o', color='green', markersize=8)
        # ax1.plot(eta_V[where(limit < 1)], eta_N[where(limit < 1)], 'o', markersize=8)
        # ax1.plot(eta_V[where(limit > 1)], eta_N[where(limit > 1)], 'o', color='red', markersize=8)

        for xlabel_i in ax1.get_xticklabels():
            xlabel_i.set_fontsize(24)
            xlabel_i.set_family('serif')
        for ylabel_i in ax1.get_yticklabels():
            ylabel_i.set_fontsize(24)
            ylabel_i.set_family('serif')

        # ax1.plot(x, 1 - x, '--', color='black', label='linear interaction')
        ax1.set_xlim(0, 1.0)
        ax1.set_ylim(0, 1.0)
        ax1.legend()
        pyplot.show()
        pyplot.clf()

    # choose linking type (in-plane shear dof blocked or not)
    #
    link_type = Enum('exc_V_ip', 'inc_V_ip')

    # length of the shell (needed to plot the hinge force plots correctly)
    #
    length_xy_quarter = 3.5  # m

    def export_hf_lc(self):
        '''exports the hinge forces for each loading case separately
        '''
        from matplotlib import pyplot

        sr_columns = self.sr_columns
        dict = self.max_sr_grouped_dict
        length_xy_quarter = self.length_xy_quarter

        def save_bar_plot(x, y, filename='bla',
                          xlabel='xlabel', ylabel='ylabel',
                          ymin=-10, ymax=10, width=0.1,
                          xmin=0, xmax=1000, figsize=[10, 5]):
            fig = pyplot.figure(facecolor="white", figsize=figsize)
            ax1 = fig.add_subplot(1, 1, 1)
            ax1.bar(x, y, width=width, align='center', color='blue')
            ax1.set_xlim(xmin, xmax)
            ax1.set_ylim(ymin, ymax)
            ax1.set_xlabel(xlabel, fontsize=22)
            ax1.set_ylabel(ylabel, fontsize=22)
            fig.savefig(filename, orientation='portrait', bbox_inches='tight')
            pyplot.clf()

        X = array(self.geo_data_dict['X_hf'])
        Y = array(self.geo_data_dict['Y_hf'])

        # symmetric axes
        #
        idx_sym = where(abs(Y[:, 0] - 2.0 * length_xy_quarter) <= 0.0001)
        X_sym = X[idx_sym].reshape(-1)
        idx_r0_r1 = where(abs(X[:, 0] - 2.0 * length_xy_quarter) <= 0.0001)
        X_r0_r1 = Y[idx_r0_r1].reshape(-1)

        F_int = self.lc_arr

        for i, lc_name in enumerate(self.lc_name_list):
            filename = self.lc_list[i].plt_export

            max_N_ip = max(int(ndmax(F_int[i, :, 0], axis=0)) + 1, 1)
            max_V_ip = max(int(ndmax(F_int[i, :, 1], axis=0)) + 1, 1)
            max_V_op = max(int(ndmax(F_int[i, :, 2], axis=0)) + 1, 1)

            F_int_lc = F_int[i, :, :]  # first column N_ip, second V_ip, third V_op
            F_sym = F_int_lc[idx_sym, :].reshape(-1, len(sr_columns))
            F_r0_r1 = F_int_lc[idx_r0_r1, :].reshape(-1, len(sr_columns))

            save_bar_plot(X_sym, F_sym[:, 0].reshape(-1),
                          # xlabel='$X$ [m]', ylabel='$N_{ip}$ [kN]',
                          xlabel='$X$ [m]', ylabel='$N^{*}$ [kN]',
                          filename=filename + 'N_ip' + '_sym',
                          xmin=0.0, xmax=3.5 * length_xy_quarter,
                          ymin=-max_N_ip, ymax=max_N_ip,
                          figsize=[10, 5])
            save_bar_plot(X_sym, F_sym[:, 1].reshape(-1),
                          xlabel='$X$ [m]', ylabel='$V_{ip}$ [kN]',
                          filename=filename + 'V_ip' + '_sym',
                          xmin=0.0, xmax=3.5 * length_xy_quarter,
                          ymin=-max_V_ip, ymax=max_V_ip,
                          figsize=[10, 5])
            save_bar_plot(X_sym, F_sym[:, 2].reshape(-1),
                          # xlabel='$X$ [m]', ylabel='$V_{op}$ [kN]',
                          xlabel='$X$ [m]', ylabel='$V^{*}$ [kN]',
                          filename=filename + 'V_op' + '_sym',
                          xmin=0.0, xmax=3.5 * length_xy_quarter,
                          ymin=-max_V_op, ymax=max_V_op,
                          figsize=[10, 5])

            # r0_r1
            #
            save_bar_plot(X_r0_r1, F_r0_r1[:, 0].reshape(-1),
                          # xlabel='$Y$ [m]', ylabel='$N_{ip}$ [kN]',
                          xlabel='$Y$ [m]', ylabel='$N^{*}$ [kN]',
                          filename=filename + 'N_ip' + '_r0_r1',
                          xmin=0.0, xmax=2.0 * length_xy_quarter,
                          ymin=-max_N_ip, ymax=max_N_ip,
                          figsize=[5, 5])
            save_bar_plot(X_r0_r1, F_r0_r1[:, 1].reshape(-1),
                          xlabel='$Y$ [m]', ylabel='$V_{ip}$ [kN]',
                          filename=filename + 'V_ip' + '_r0_r1',
                          xmin=0.0, xmax=2.0 * length_xy_quarter,
                          ymin=-max_V_ip, ymax=max_V_ip,
                          figsize=[5, 5])
            save_bar_plot(X_r0_r1, F_r0_r1[:, 2].reshape(-1),
                          # xlabel='$Y$ [m]', ylabel='$V_{op}$ [kN]',
                          xlabel='$Y$ [m]', ylabel='$V^{*}$ [kN]',
                          filename=filename + 'V_op' + '_r0_r1',
                          xmin=0.0, xmax=2.0 * length_xy_quarter,
                          ymin=-max_V_op, ymax=max_V_op,
                          figsize=[5, 5])

    # ------------------------------------------------------------
    # View
    # ------------------------------------------------------------

    traits_view = View(VGroup(
                        VSplit(
                            Item('lcc_list', editor=lcc_list_editor, show_label=False),
                            Item('lcc@', show_label=False),
                            ),
                        ),
                       resizable=True,
                       scrollable=True,
                       height=1.0,
                       width=1.0)
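# ---------------------------------------------------------------------------
# Two standalone checks (with toy numbers) of the combination machinery
# above: the _product helper behaves like a list-returning
# itertools.product, and the lcc_arr superposition is a broadcast
# multiply-and-sum over the load case axis.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import itertools
    import numpy as np

    gamma_list = [[1.0, 1.35], [0.0, 1.5]]   # assumed safety factor choices
    product_list = [list(t) for t in itertools.product(*gamma_list)]
    print product_list
    # -> [[1.0, 0.0], [1.0, 1.5], [1.35, 0.0], [1.35, 1.5]]

    combi_arr = np.array(product_list)       # ( n_lcc, n_lc )
    lc_arr = np.random.rand(2, 5, 3)         # ( n_lc, n_elems, n_sr )
    lcc_arr = (lc_arr[None, :, :, :] * combi_arr[:, :, None, None]).sum(axis=1)
    print lcc_arr.shape                      # (4, 5, 3)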
class YMBView2D(HasTraits):

    data = Instance(YMBData)

    zero = Constant(0)
    slider_max = Property()

    def _get_slider_max(self):
        return self.data.n_cuts - 1

    var_enum = Trait('radius', var_dict, modified=True)
    cut_slider = Range('zero', 'slider_max', mode='slider', auto_set=False,
                       enter_set=True, modified=True)
    circle_diameter = Float(20, enter_set=True, auto_set=False, modified=True)
    underlay = Bool(False, modified=True)

    variable = Property(Array, depends_on='var_enum')

    @cached_property
    def _get_variable(self):
        return getattr(self.data, self.var_enum_)

    figure = Instance(Figure)

    def _figure_default(self):
        figure = Figure()
        figure.add_axes([0.1, 0.1, 0.8, 0.8])
        return figure

    data_changed = Event(True)

    @on_trait_change('+modified, data.input_changed')
    def _redraw(self):
        # TODO: set correct ranges, fix axis range (axes.xlim)
        self.figure.clear()
        self.figure.add_axes([0.1, 0.1, 0.8, 0.8])
        figure = self.figure
        axes = figure.axes[0]
        axes.clear()

        y_arr, z_arr = self.data.cut_data[1:3]
        y_raw_arr, z_raw_arr = self.data.cut_raw_data[0:2]
        offset = hstack([0, self.data.cut_raw_data[5]])
        scalar_arr = self.variable

        mask = y_arr[:, self.cut_slider] > -1

        axes.scatter(
            y_raw_arr[offset[self.cut_slider]:offset[self.cut_slider + 1]],
            z_raw_arr[offset[self.cut_slider]:offset[self.cut_slider + 1]],
            s=self.circle_diameter, color='k', marker='x',
            label='identified filament in cut')
        scat = axes.scatter(y_arr[:, self.cut_slider][mask],
                            z_arr[:, self.cut_slider][mask],
                            s=self.circle_diameter,
                            c=scalar_arr[:, self.cut_slider][mask],
                            cmap=my_cmap_lin, label='connected filaments')
        axes.set_xlabel('$y\, [\mathrm{mm}]$', fontsize=16)
        axes.set_ylabel('$z\, [\mathrm{mm}]$', fontsize=16)
        axes.set_xlim([0, ceil(max(y_arr))])
        axes.set_ylim([0, ceil(max(z_arr))])
        axes.legend()
        figure.colorbar(scat)

        if self.underlay:
            axes.text(axes.get_xlim()[0], axes.get_ylim()[0],
                      'That\'s all at this moment :-)', color='red',
                      fontsize=20)
        self.data_changed = True

    traits_view = View(
        Group(
            Item('var_enum'),
            Item('cut_slider', springy=True),
            Item('circle_diameter', springy=True),
            Item('underlay', springy=True),
        ),
        Item('figure', style='custom', editor=MPLFigureEditor(),
             show_label=False),
        resizable=True,
    )
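# Minimal sketch of the masking idiom used in '_redraw' above (assumption:
# tiny hand-made demo arrays instead of the YMB database). Filaments that
# are not identified in a cut are stored as -1 and filtered out before
# plotting.
def _demo_cut_mask():
    from numpy import array
    y_arr = array([[0.5, -1.0, 1.2],
                   [0.8, 0.9, -1.0]])  # rows: filaments, columns: cuts
    cut = 1                            # selected cut index
    mask = y_arr[:, cut] > -1          # valid filaments in this cut
    return y_arr[:, cut][mask]         # -> array([ 0.9])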
class LSTable(HasTraits):
    '''Assessment tool
    '''

    is_id = Int(0)

    # geo data: coordinates and element thickness
    #
    geo_data = Dict

    elem_no = Property(Array)

    def _get_elem_no(self):
        return self.geo_data['elem_no']

    X = Property(Array)

    def _get_X(self):
        return self.geo_data['X']

    Y = Property(Array)

    def _get_Y(self):
        return self.geo_data['Y']

    Z = Property(Array)

    def _get_Z(self):
        return self.geo_data['Z']

    D_elem = Property(Array)

    def _get_D_elem(self):
        '''element thickness (units changed from [mm] to [m])'''
        return self.geo_data['thickness'] / 1000.

    # state data: stress resultants
    #
    state_data = Dict

    mx = Property(Array)

    def _get_mx(self):
        return self.state_data['mx']

    my = Property(Array)

    def _get_my(self):
        return self.state_data['my']

    mxy = Property(Array)

    def _get_mxy(self):
        return self.state_data['mxy']

    nx = Property(Array)

    def _get_nx(self):
        return self.state_data['nx']

    ny = Property(Array)

    def _get_ny(self):
        return self.state_data['ny']

    nxy = Property(Array)

    def _get_nxy(self):
        return self.state_data['nxy']

    # ------------------------------------------------------------
    # Index M: calculate principal moments with corresponding normal forces
    # ------------------------------------------------------------

    princ_values_M = Property(Dict, depends_on='data_file_stress_resultants')

    @cached_property
    def _get_princ_values_M(self):
        '''principal values and principal angle of the moments;
        mx_M, my_M, nx_M, ny_M: values transformed into the
        principal moment directions
        '''
        # stress resultants in global coordinates
        #
        mx = self.mx
        my = self.my
        mxy = self.mxy
        nx = self.nx
        ny = self.ny
        nxy = self.nxy

        # principal values
        #
        m1 = 0.5 * (mx + my) + 0.5 * sqrt((mx - my)**2 + 4 * mxy**2)
        m2 = 0.5 * (mx + my) - 0.5 * sqrt((mx - my)**2 + 4 * mxy**2)
        alpha_M = pi / 2. * ones_like(m1)
        mask = m2 != mx
        alpha_M[mask] = arctan(mxy[mask] / (m2[mask] - mx[mask]))
        alpha_M_deg = alpha_M * 180. / pi

        # transform to principal directions
        #
        mx_M = 0.5 * (my + mx) - 0.5 * (my - mx) * cos(2 * alpha_M) - mxy * sin(2 * alpha_M)
        my_M = 0.5 * (my + mx) + 0.5 * (my - mx) * cos(2 * alpha_M) + mxy * sin(2 * alpha_M)
        nx_M = 0.5 * (ny + nx) - 0.5 * (ny - nx) * cos(2 * alpha_M) - nxy * sin(2 * alpha_M)
        ny_M = 0.5 * (ny + nx) + 0.5 * (ny - nx) * cos(2 * alpha_M) + nxy * sin(2 * alpha_M)

        return {
            'm1': m1,
            'm2': m2,
            'alpha_M': alpha_M_deg,
            'mx_M': mx_M,
            'my_M': my_M,
            'nx_M': nx_M,
            'ny_M': ny_M
        }

    m1 = Property(Float)

    def _get_m1(self):
        return self.princ_values_M['m1']

    m2 = Property(Float)

    def _get_m2(self):
        return self.princ_values_M['m2']

    alpha_M = Property(Float)

    def _get_alpha_M(self):
        return self.princ_values_M['alpha_M']

    mx_M = Property(Float)

    def _get_mx_M(self):
        return self.princ_values_M['mx_M']

    my_M = Property(Float)

    def _get_my_M(self):
        return self.princ_values_M['my_M']

    nx_M = Property(Float)

    def _get_nx_M(self):
        return self.princ_values_M['nx_M']

    ny_M = Property(Float)

    def _get_ny_M(self):
        return self.princ_values_M['ny_M']

    # ------------------------------------------------------------
    # Index N: principal normal forces with corresponding moments
    # ------------------------------------------------------------

    princ_values_N = Property(Dict, depends_on='data_file_stress_resultants')

    @cached_property
    def _get_princ_values_N(self):
        '''principal values and principal angle of the normal forces;
        mx_N, my_N, nx_N, ny_N: values transformed into the
        principal normal force directions
        '''
        # stress resultants in global coordinates
        #
        mx = self.mx
        my = self.my
        mxy = self.mxy
        nx = self.nx
        ny = self.ny
        nxy = self.nxy

        # principal values
        #
        n1 = 0.5 * (nx + ny) + 0.5 * sqrt((nx - ny)**2 + 4 * nxy**2)
        n2 = 0.5 * (nx + ny) - 0.5 * sqrt((nx - ny)**2 + 4 * nxy**2)
        alpha_N = pi / 2. * ones_like(n1)
        mask = n2 != nx
        alpha_N[mask] = arctan(nxy[mask] / (n2[mask] - nx[mask]))
        alpha_N_deg = alpha_N * 180. / pi

        # transform to principal directions
        #
        mx_N = 0.5 * (my + mx) - 0.5 * (my - mx) * cos(2 * alpha_N) - mxy * sin(2 * alpha_N)
        my_N = 0.5 * (my + mx) + 0.5 * (my - mx) * cos(2 * alpha_N) + mxy * sin(2 * alpha_N)
        nx_N = 0.5 * (ny + nx) - 0.5 * (ny - nx) * cos(2 * alpha_N) - nxy * sin(2 * alpha_N)
        ny_N = 0.5 * (ny + nx) + 0.5 * (ny - nx) * cos(2 * alpha_N) + nxy * sin(2 * alpha_N)

        return {
            'n1': n1,
            'n2': n2,
            'alpha_N': alpha_N_deg,
            'mx_N': mx_N,
            'my_N': my_N,
            'nx_N': nx_N,
            'ny_N': ny_N
        }

    n1 = Property(Float)

    def _get_n1(self):
        return self.princ_values_N['n1']

    n2 = Property(Float)

    def _get_n2(self):
        return self.princ_values_N['n2']

    alpha_N = Property(Float)

    def _get_alpha_N(self):
        return self.princ_values_N['alpha_N']

    mx_N = Property(Float)

    def _get_mx_N(self):
        return self.princ_values_N['mx_N']

    my_N = Property(Float)

    def _get_my_N(self):
        return self.princ_values_N['my_N']

    nx_N = Property(Float)

    def _get_nx_N(self):
        return self.princ_values_N['nx_N']

    ny_N = Property(Float)

    def _get_ny_N(self):
        return self.princ_values_N['ny_N']

    # ------------------------------------------------------------
    # Index sig: calculate principal stresses
    # ------------------------------------------------------------

    princ_values_sig = Property(Dict, depends_on='data_file_stress_resultants')

    @cached_property
    def _get_princ_values_sig(self):
        '''principal values of the stresses for the lower ('lo')
        and upper ('up') face:
        '''
        # stress resultants in global coordinates
        #
        mx = self.mx
        my = self.my
        mxy = self.mxy
        nx = self.nx
        ny = self.ny
        nxy = self.nxy

        # geometrical properties:
        #
        A = self.D_elem * 1000.
        W = self.D_elem**2 / 6 * 1000.

        # compare the formulae with the RFEM-manual p.290
        # normal stresses upper face:
        #
        sigx_up = nx / A - mx / W
        sigy_up = ny / A - my / W
        sigxy_up = nxy / A - mxy / W

        # principal stresses upper face:
        #
        sig1_up = 0.5 * (sigx_up + sigy_up) + 0.5 * sqrt(
            (sigx_up - sigy_up)**2 + 4 * sigxy_up**2)
        sig2_up = 0.5 * (sigx_up + sigy_up) - 0.5 * sqrt(
            (sigx_up - sigy_up)**2 + 4 * sigxy_up**2)
        alpha_sig_up = pi / 2. * ones_like(sig1_up)
        mask = sig2_up != sigx_up
        # analogous to the M- and N-case: alpha = arctan( tau_xy / (sig2 - sig_x) )
        alpha_sig_up[mask] = arctan(sigxy_up[mask] /
                                    (sig2_up[mask] - sigx_up[mask]))
        alpha_sig_up_deg = alpha_sig_up * 180. / pi

        # normal stresses lower face:
        #
        sigx_lo = nx / A + mx / W
        sigy_lo = ny / A + my / W
        sigxy_lo = nxy / A + mxy / W

        # principal stresses lower face:
        #
        sig1_lo = 0.5 * (sigx_lo + sigy_lo) + 0.5 * sqrt(
            (sigx_lo - sigy_lo)**2 + 4 * sigxy_lo**2)
        sig2_lo = 0.5 * (sigx_lo + sigy_lo) - 0.5 * sqrt(
            (sigx_lo - sigy_lo)**2 + 4 * sigxy_lo**2)
        alpha_sig_lo = pi / 2. * ones_like(sig1_lo)
        mask = sig2_lo != sigx_lo
        alpha_sig_lo[mask] = arctan(sigxy_lo[mask] /
                                    (sig2_lo[mask] - sigx_lo[mask]))
        alpha_sig_lo_deg = alpha_sig_lo * 180. / pi

        return {
            'sigx_up': sigx_up,
            'sigy_up': sigy_up,
            'sigxy_up': sigxy_up,
            'sig1_up': sig1_up,
            'sig2_up': sig2_up,
            'alpha_sig_up': alpha_sig_up_deg,
            'sigx_lo': sigx_lo,
            'sigy_lo': sigy_lo,
            'sigxy_lo': sigxy_lo,
            'sig1_lo': sig1_lo,
            'sig2_lo': sig2_lo,
            'alpha_sig_lo': alpha_sig_lo_deg,
        }

    # stresses upper face:
    #
    sigx_up = Property(Float)

    def _get_sigx_up(self):
        return self.princ_values_sig['sigx_up']

    sigy_up = Property(Float)

    def _get_sigy_up(self):
        return self.princ_values_sig['sigy_up']

    sigxy_up = Property(Float)

    def _get_sigxy_up(self):
        return self.princ_values_sig['sigxy_up']

    sig1_up = Property(Float)

    def _get_sig1_up(self):
        return self.princ_values_sig['sig1_up']

    sig2_up = Property(Float)

    def _get_sig2_up(self):
        return self.princ_values_sig['sig2_up']

    alpha_sig_up = Property(Float)

    def _get_alpha_sig_up(self):
        return self.princ_values_sig['alpha_sig_up']

    # stresses lower face:
    #
    sigx_lo = Property(Float)

    def _get_sigx_lo(self):
        return self.princ_values_sig['sigx_lo']

    sigy_lo = Property(Float)

    def _get_sigy_lo(self):
        return self.princ_values_sig['sigy_lo']

    sigxy_lo = Property(Float)

    def _get_sigxy_lo(self):
        return self.princ_values_sig['sigxy_lo']

    sig1_lo = Property(Float)

    def _get_sig1_lo(self):
        return self.princ_values_sig['sig1_lo']

    sig2_lo = Property(Float)

    def _get_sig2_lo(self):
        return self.princ_values_sig['sig2_lo']

    alpha_sig_lo = Property(Float)

    def _get_alpha_sig_lo(self):
        return self.princ_values_sig['alpha_sig_lo']

    #------------------------------------------
    # combinations of limit states, stress resultants and directions
    #------------------------------------------

    ls = Trait('ULS', {'ULS': ULS, 'SLS': SLS})

    sr_tree = Dict

    def _sr_tree_default(self):
        '''collect the ls instances in a dictionary tree:
        sr_tree[ sr ][ dir ] = LSTable-object
        e.g. sr_tree[ 'M' ][ 'x' ] = ULS( dir = x, sr = M )
        '''
        dir_list = DIRLIST
        sr_list = SRLIST
        ls_class = self.ls_
        sr_dict = {}
        for sr in sr_list:
            dir_dict = {}
            for dir in dir_list:
                dir_dict[dir] = ls_class(ls_table=self, dir=dir, sr=sr)
            sr_dict[sr] = dir_dict
        return sr_dict

    ls_list = Property

    def _get_ls_list(self):
        '''collect the ls instances in a list: e.g.
        ls_list = [ sr_tree['M']['x'], sr_tree['M']['y'],
                    sr_tree['N']['x'], sr_tree['N']['y'] ]
        (NOTE: list order is undefined because it is constructed
        from a dictionary)
        '''
        ls_list = []
        for sr in self.sr_tree.values():
            for dir in sr.values():
                ls_list.append(dir)
        return ls_list

    #------------------------------------------
    # get max value and case for all cases:
    #------------------------------------------

    max_value_and_case = Dict

    def _max_value_and_case_default(self):
        dir_list = DIRLIST
        sr_list = SRLIST
        sr_tree = self.sr_tree

        # Derive the column list from the first case.
        # NOTE: all cases are structured identically, otherwise no
        # overall comparison would be possible.
        #
        columns = sr_tree['M']['x'].columns

        # run through all variables defined in the 'columns'
        # attribute of the limit state class ('LS')
        #
        var_dir = {}
        for var in columns:

            # reset auxiliary variables
            #
            max_value_all = None
            max_case_all = None

            # run through all cases (e.g. 'Nx', 'Ny', 'Mx', 'My' )
            #
            for sr in sr_list:
                for dir in dir_list:

                    # set the 'max_in_column' attribute of the LS-class
                    # and get the max value of the currently selected limit state:
                    #
                    col = getattr(self.sr_tree[sr][dir], var)[:, 0]
                    max_value_current = max(col)

                    # compare with the maximum reached so far in the
                    # investigation of all cases:
                    #
                    if max_value_all is None or max_value_current >= max_value_all:
                        max_value_all = max_value_current
                        max_case_all = sr + dir

            var_dir[var] = {
                'max_value': max_value_all,
                'max_case': max_case_all
            }

        return var_dir

    #------------------------------------------
    # get arrays for the TabularEditor:
    #------------------------------------------

    Mx = Property(Instance(LS))

    def _get_Mx(self):
        return self.sr_tree['M']['x']

    My = Property(Instance(LS))

    def _get_My(self):
        return self.sr_tree['M']['y']

    Nx = Property(Instance(LS))

    def _get_Nx(self):
        return self.sr_tree['N']['x']

    Ny = Property(Instance(LS))

    def _get_Ny(self):
        return self.sr_tree['N']['y']

    assess_value = Property

    def _get_assess_value(self):
        return max([getattr(ls, ls.assess_name) for ls in self.ls_list])

    # ------------------------------------------------------------
    # View
    # ------------------------------------------------------------

    traits_view = View(Tabbed(
        Item('Nx@', label="NX", show_label=False),
        Item('Ny@', label="NY", show_label=False),
        Item('Mx@', label="MX", show_label=False),
        Item('My@', label="MY", show_label=False),
        scrollable=False,
    ),
        resizable=True,
        scrollable=True,
        height=1000,
        width=1100)
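# Sketch of the principal-value formulas used in LSTable above (assumption:
# a single demo moment state instead of the element-wise arrays):
# m1,2 = 0.5*(mx + my) +- 0.5*sqrt((mx - my)**2 + 4*mxy**2).
# The trace mx + my is invariant under the principal transformation, which
# provides a quick sanity check.
def _demo_principal_moments(mx=10.0, my=4.0, mxy=3.0):
    from numpy import sqrt, allclose
    m1 = 0.5 * (mx + my) + 0.5 * sqrt((mx - my) ** 2 + 4 * mxy ** 2)
    m2 = 0.5 * (mx + my) - 0.5 * sqrt((mx - my) ** 2 + 4 * mxy ** 2)
    assert allclose(m1 + m2, mx + my)  # invariance of the trace
    return m1, m2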
class YMBView3D(HasTraits):

    data = Instance(IYMBData)

    var_enum = Trait('contact fraction', var_dict)
    scalar_arr = Property(depends_on='var_enum')

    @cached_property
    def _get_scalar_arr(self):
        return getattr(self.data, self.var_enum_)

    zero = Constant(0)
    slider_max = Property

    def _get_slider_max(self):
        return self.data.n_filaments - 1

    n_fiber = Property(depends_on='data')
    start_fib = Range('zero', 'slider_max', 0, mode='slider')
    end_fib = Range('zero', 'slider_max', 'slider_max', mode='slider')

    color_map = Str('blue-red')

    scene = Instance(MlabSceneModel, ())
    plot = Instance(PipelineBase)

    # When the scene is activated, or when the parameters are changed, we
    # update the plot.
    @on_trait_change('scalar_arr, start_fib, end_fib, scene.activated')
    def update_plot(self):
        x_arrr, y_arrr, z_arrr = self.data.cut_data[0:3]
        scalar_arrr = self.scalar_arr

        x_arr = x_arrr.filled()  # [self.start_fib:self.end_fib + 1, :].filled()
        y_arr = y_arrr.filled()  # [self.start_fib:self.end_fib + 1, :].filled()
        z_arr = z_arrr.filled()  # [self.start_fib:self.end_fib + 1, :].filled()
        scalar_arr = scalar_arrr.filled()  # [self.start_fib:self.end_fib + 1, :].filled()

        mask = y_arr > -1

        x = x_arr[mask]
        y = y_arr[mask]
        z = z_arr[mask]
        scalar = scalar_arr[mask]

        connections = -ones_like(x_arrr)
        mask_arr = x_arrr.filled() > -1
        connections[mask_arr] = range(0, len(connections[mask_arr]))
        connections = connections[self.start_fib:self.end_fib + 1, :].filled()
        connection = connections.astype(int).copy()
        connection = connection.tolist()

        # TODO: better
        for i in range(0, self.data.n_cols + 1):
            for item in connection:
                try:
                    item.remove(-1)
                except ValueError:
                    pass

        if self.plot is None:
            print 'plot 3d -- 1'
            # self.scene.parallel_projection = False
            pts = self.scene.mlab.pipeline.scalar_scatter(array(x), array(y),
                                                          array(z), array(scalar))
            pts.mlab_source.dataset.lines = connection
            self.plot = self.scene.mlab.pipeline.surface(
                self.scene.mlab.pipeline.tube(
                    # fig.scene.mlab.pipeline.stripper(
                    pts, figure=self.scene.mayavi_scene,
                    # ),
                    tube_sides=10, tube_radius=0.015,
                ),
            )
            self.plot.actor.mapper.interpolate_scalars_before_mapping = True
            self.plot.module_manager.scalar_lut_manager.show_scalar_bar = True
            self.plot.module_manager.scalar_lut_manager.show_legend = True
            self.plot.module_manager.scalar_lut_manager.shadow = True
            self.plot.module_manager.scalar_lut_manager.label_text_property.italic = False
            self.plot.module_manager.scalar_lut_manager.scalar_bar.orientation = 'horizontal'
            self.plot.module_manager.scalar_lut_manager.scalar_bar_representation.position2 = array([
                0.61775334, 0.17])
            self.plot.module_manager.scalar_lut_manager.scalar_bar_representation.position = array([
                0.18606834, 0.08273163])
            self.plot.module_manager.scalar_lut_manager.scalar_bar.width = 0.17
            self.plot.module_manager.scalar_lut_manager.lut_mode = self.color_map  # 'black-white'
            self.plot.module_manager.scalar_lut_manager.data_name = self.var_enum
            self.plot.module_manager.scalar_lut_manager.label_text_property.font_family = 'times'
            self.plot.module_manager.scalar_lut_manager.label_text_property.shadow = True
            self.plot.module_manager.scalar_lut_manager.title_text_property.color = (0.0, 0.0, 0.0)
            self.plot.module_manager.scalar_lut_manager.label_text_property.color = (0.0, 0.0, 0.0)
            self.plot.module_manager.scalar_lut_manager.title_text_property.font_family = 'times'
            self.plot.module_manager.scalar_lut_manager.title_text_property.shadow = True
            # fig.scene.parallel_projection = True
            self.scene.scene.background = (1.0, 1.0, 1.0)
            self.scene.scene.camera.position = [16.319534155794827,
10.477447863842627, 6.1717943847883232] self.scene.scene.camera.focal_point = [3.8980860486356859, 2.4731178194274621, 0.14856957086692035] self.scene.scene.camera.view_angle = 30.0 self.scene.scene.camera.view_up = [-0.27676100729835512, -0.26547169369097656, 0.92354107904740446] self.scene.scene.camera.clipping_range = [7.7372124315754673, 26.343575352248056] self.scene.scene.camera.compute_view_plane_normal() # fig.scene.reset_zoom() axes = Axes() self.scene.engine.add_filter(axes, self.plot) axes.label_text_property.font_family = 'times' axes.label_text_property.shadow = True axes.title_text_property.font_family = 'times' axes.title_text_property.shadow = True axes.property.color = (0.0, 0.0, 0.0) axes.title_text_property.color = (0.0, 0.0, 0.0) axes.label_text_property.color = (0.0, 0.0, 0.0) axes.axes.corner_offset = .1 axes.axes.x_label = 'x' axes.axes.y_label = 'y' axes.axes.z_label = 'z' else: print 'plot 3d -- 2' # self.plot.mlab_source.dataset.reset() # self.plot.mlab_source.set( x = x, y = y, z = z, scalars = scalar ) # self.plot.mlab_source.dataset.points = array( [x, y, z] ).T self.plot.mlab_source.scalars = scalar self.plot.mlab_source.dataset.lines = connection self.plot.module_manager.scalar_lut_manager.data_name = self.var_enum # The layout of the dialog created view = View(Item('scene', editor=SceneEditor(scene_class=MayaviScene), height=250, width=300, show_label=False), Group( '_', 'start_fib', 'end_fib', 'var_enum', ), resizable=True, )
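# Pure-numpy sketch of the connectivity construction in 'update_plot' above
# (assumption: a tiny demo mask instead of the measured filament data).
# Valid points are enumerated consecutively; -1 marks missing points and is
# stripped afterwards, so each row yields the index polyline of one filament
# that can be assigned to 'pts.mlab_source.dataset.lines'.
def _demo_filament_connectivity():
    import numpy as np
    valid = np.array([[True, True, False, True],
                      [True, False, True, True]])
    connections = -np.ones(valid.shape, dtype=int)
    connections[valid] = np.arange(valid.sum())
    return [[idx for idx in row if idx != -1]
            for row in connections]  # -> [[0, 1, 2], [3, 4, 5]]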
class SCMSpirrid(HasTraits):
    '''Stochastic Cracking Model - compares matrix strength and stress,
    inserts new CB instances at positions where the matrix strength
    is lower than the stress; evaluates the stress-strain diagram
    by integrating the strain profile along the composite'''

    cb_randomization = Instance(FunctionRandomization)

    cb_type = Trait('mean', dict(mean=CBMeanFactory, random=CBRandomFactory))
    cb_factory = Property(depends_on='cb_type')

    @cached_property
    def _get_cb_factory(self):
        return self.cb_type_(randomization=self.cb_randomization,
                             load_sigma_c_max=self.load_sigma_c_max,
                             load_n_sigma_c=self.load_n_sigma_c,
                             n_w=self.n_w,
                             n_x=self.n_x,
                             n_BC=self.n_BC)

    n_w = Int
    n_x = Int
    n_BC = Int

    length = Float(desc='composite specimen length')
    nx = Int(desc='number of discretization points')
    sigma_c_crack = List
    cracks_list = List

    load_sigma_c = Property(depends_on='+load')

    def _get_load_sigma_c(self):
        # applied external load in terms of composite stress
        return np.linspace(self.load_sigma_c_min,
                           self.load_sigma_c_max, self.load_n_sigma_c)

    load_sigma_c_min = Float(load=True)
    load_sigma_c_max = Float(load=True)
    load_n_sigma_c = Int(load=True)

    x_arr = Property(Array, depends_on='length, nx')

    @cached_property
    def _get_x_arr(self):
        # discretizes the specimen length
        return np.linspace(0., self.length, self.nx)

    random_field = Instance(RandomField)

    matrix_strength = Property(depends_on='random_field.+modified')

    @cached_property
    def _get_matrix_strength(self):
        # evaluates a random field realization
        # and creates a spline representation
        rf = self.random_field.random_field
        rf_spline = interp1d(self.random_field.xgrid, rf)
        return rf_spline(self.x_arr)

    def sort_cbs(self):
        # sorts the CBs by position and adjusts the boundary conditions
        # sort the CBs
        cb_list = self.cracks_list[-1]
        crack_position = cb_list[-1].position
        cb_list = sorted(cb_list, key=attrgetter('position'))
        # find idx of the new crack
        for i, crack in enumerate(cb_list):
            if crack.position == crack_position:
                idx = i

        # specify the boundaries
        if idx != 0:
            # there is a crack at the left hand side
            cbl = cb_list[idx - 1]
            cb = cb_list[idx]
            cbl.Lr = (cb.position - cbl.position) / 2.
            cb.Ll = cbl.Lr
        else:
            # the new crack is the first from the left hand side
            cb_list[idx].Ll = cb_list[idx].position

        if idx != len(cb_list) - 1:
            # there is a crack at the right hand side
            cb, cbr = cb_list[idx], cb_list[idx + 1]
            cbr.Ll = (cbr.position - cb.position) / 2.
            cb.Lr = cbr.Ll
        else:
            # the new crack is the first from the right hand side
            cb_list[idx].Lr = self.length - cb_list[idx].position

        # specify the x range and stress profile for
        # the new crack and its neighbors
        idxs = [idx - 1, idx, idx + 1]
        if idx == 0:
            idxs.remove(-1)
        if idx == len(cb_list) - 1:
            idxs.remove(len(cb_list))
        for idx in idxs:
            mask1 = self.x_arr >= (cb_list[idx].position - cb_list[idx].Ll)
            if idx == 0:
                mask1[0] = True
            mask2 = self.x_arr <= (cb_list[idx].position + cb_list[idx].Lr)
            cb_list[idx].x = self.x_arr[mask1 * mask2] - cb_list[idx].position

        self.cracks_list[-1] = cb_list

    def cb_list(self, load):
        if len(self.cracks_list) != 0:
            idx = np.sum(np.array(self.sigma_c_crack) < load) - 1
            return self.cracks_list[idx]
        else:
            return [None]

    def sigma_m(self, load):
        Em = self.cb_randomization.theta_vars['E_m']
        Ef = self.cb_randomization.theta_vars['E_f']
        Vf = self.cb_randomization.theta_vars['V_f']
        Ec = Ef * Vf + Em * (1. - Vf)
        sigma_m = load * Em / Ec * np.ones(len(self.x_arr))
        cb_load = self.cb_list(load)
        if cb_load[0] is not None:
            for cb in cb_load:
                crack_position_idx = np.argwhere(self.x_arr == cb.position)
                left = crack_position_idx - len(np.nonzero(cb.x < 0.)[0])
                right = crack_position_idx + len(np.nonzero(cb.x > 0.)[0]) + 1
                sigma_m[left:right] = cb.get_sigma_x_matrix(load).T
        return sigma_m

    def residuum(self, q):
        return np.min(self.matrix_strength - self.sigma_m(q))

    def evaluate(self):
        # seek for the minimum strength redundancy to find the position
        # of the next crack
        last_pos = 0.0
        q_min = 0.0
        q_max = self.load_sigma_c_max
        while np.any(self.sigma_m(q_max) > self.matrix_strength):
            q_min = brentq(self.residuum, q_min, q_max)
            crack_position = self.x_arr[np.argmin(self.matrix_strength -
                                                  self.sigma_m(q_min))]
            cbf = self.cb_factory
            new_cb = cbf.new_cb()
            new_cb.position = float(crack_position)
            new_cb.crack_load_sigma_c = q_min - self.load_sigma_c_max / 1000.
            self.sigma_c_crack.append(q_min - self.load_sigma_c_max / 1000.)
            if len(self.cracks_list) != 0:
                self.cracks_list.append(
                    copy.copy(self.cracks_list[-1]) + [new_cb])
            else:
                self.cracks_list.append([new_cb])
            self.sort_cbs()
            cb_list = self.cracks_list[-1]
            cb = [CB for CB in cb_list
                  if CB.position == float(crack_position)][0]
            mu_q = cb.get_sigma_f_x_reinf(self.load_sigma_c,
                                          np.array([0.0]),
                                          cb.Ll, cb.Lr).flatten()
            mu_q_real = mu_q[~np.isnan(mu_q)]
            new_q_max = np.max(mu_q_real) * self.cb_randomization.theta_vars['V_f']
            if new_q_max < q_max:
                q_max = new_q_max
            if float(crack_position) == last_pos:
                raise ValueError('''got stuck in loop,
                try to adapt x, w, BC ranges''')
            last_pos = float(crack_position)

    sigma_m_x = Property(depends_on='''random_field.+modified,
                            +load, nx, length, cb_type''')

    @cached_property
    def _get_sigma_m_x(self):
        sigma_m_x = np.zeros_like(self.load_sigma_c[:, np.newaxis] *
                                  self.x_arr[np.newaxis, :])
        for i, q in enumerate(self.load_sigma_c):
            sigma_m_x[i, :] = self.sigma_m(q)
        return sigma_m_x
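# Toy sketch of the load search in 'evaluate' above (assumption: a linear
# matrix stress model sigma_m(q) = 0.4 * q and a constant matrix strength,
# standing in for the random field). brentq locates the load at which the
# strength reserve min(strength - sigma_m) first vanishes, i.e. the next
# crack initiation load.
def _demo_crack_load(strength=2.0):
    from scipy.optimize import brentq

    def residuum(q):
        return strength - 0.4 * q  # the min over x collapses to a scalar here

    return brentq(residuum, 0.0, 10.0)  # -> 5.0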
class SPIRRIDLAB(HasTraits):
    '''Class used for elementary parametric studies of spirrid.
    '''

    s = Instance(SPIRRID)

    eps_vars = DelegatesTo('s')

    theta_vars = DelegatesTo('s')

    q = DelegatesTo('s')

    exact_arr = Array('float')

    dpi = Int

    plot_mode = Enum(['subplots', 'figures'])

    fig_output_dir = Directory('fig')

    @on_trait_change('fig_output_dir')
    def _check_dir(self):
        if not os.access(self.fig_output_dir, os.F_OK):
            os.mkdir(self.fig_output_dir)

    e_arr = Property

    def _get_e_arr(self):
        return self.s.evar_lst[0]

    hostname = Property

    def _get_hostname(self):
        return gethostname()

    qname = Str

    def get_qname(self):
        if self.qname == '':
            if isinstance(self.q, types.FunctionType):
                qname = self.q.__name__
            else:  # if isinstance(self.q, types.ClassType):
                qname = self.q.__class__.__name__
        else:
            qname = self.qname
        return qname

    show_output = False

    save_output = True

    plot_sampling_idx = Array(value=[0, 1], dtype=int)

    def _plot_sampling(self, i, n_col, sampling_type, p=p, ylim=None, xlim=None):
        '''Construct a spirrid object, run the calculation,
        plot the mu_q / e curve and save it in the subdirectory.
        '''
        s = self.s
        s.sampling_type = sampling_type

        plot_idx = self.plot_sampling_idx

        qname = self.get_qname()

        # get n randomly selected realizations from the sampling
        theta = s.sampling.get_samples(500)
        tvar_x = s.tvar_lst[plot_idx[0]]
        tvar_y = s.tvar_lst[plot_idx[1]]
        min_x, max_x, d_x = s.sampling.get_theta_range(tvar_x)
        min_y, max_y, d_y = s.sampling.get_theta_range(tvar_y)
        # for vectorized execution add a dimension for control variable
        theta_args = [t[:, np.newaxis] for t in theta]
        q_arr = s.q(self.e_arr[None, :], *theta_args)
        if self.plot_mode == 'figures':
            f = p.figure(figsize=(7., 6.))
            f.subplots_adjust(left=0.15, right=0.97, bottom=0.15, top=0.92)
        if self.plot_mode == 'subplots':
            if i == 0:
                f = p.figure()
            p.subplot('2%i%i' % (n_col, (i + 1)))

        p.plot(theta[plot_idx[0]], theta[plot_idx[1]], 'o', color='grey')
        p.xlabel('$\lambda$')
        p.ylabel('$\\xi$')
        p.xlim(min_x, max_x)
        p.ylim(min_y, max_y)
        p.title(s.sampling_type)

        if self.save_output:
            fname = os.path.join(self.fig_output_dir,
                                 qname + '_sampling_' + s.sampling_type + '.png')
            p.savefig(fname, dpi=self.dpi)

        if self.plot_mode == 'figures':
            f = p.figure(figsize=(7., 5))
            f.subplots_adjust(left=0.15, right=0.97, bottom=0.18, top=0.91)
        elif self.plot_mode == 'subplots':
            p.subplot('2%i%i' % (n_col, (i + 5)))

        p.plot(self.e_arr, q_arr.T, color='grey')

        if len(self.exact_arr) > 0:
            p.plot(self.e_arr, self.exact_arr,
                   label='exact solution',
                   color='black', linestyle='--', linewidth=2)

        # numerically obtained result
        p.plot(self.e_arr, s.mu_q_arr,
               label='numerical integration',
               linewidth=3, color='black')
        p.title(s.sampling_type)
        p.xlabel('$\\varepsilon$ [-]')
        p.ylabel(r'$q(\varepsilon;\, \lambda,\, \xi)$')
        if ylim:
            p.ylim(0.0, ylim)
        if xlim:
            p.xlim(0.0, xlim)
        p.xticks(position=(0, -.015))
        p.legend(loc=2)

        if self.save_output:
            fname = os.path.join(self.fig_output_dir,
                                 qname + '_' + s.sampling_type + '.png')
            p.savefig(fname, dpi=self.dpi)

    sampling_lst = List(['TGrid', 'PGrid', 'MCS', 'LHS'])

    sampling_structure_btn = Button(label='compare sampling structure')

    @on_trait_change('sampling_structure_btn')
    def sampling_structure(self, **kw):
        '''Plot the response into the file in the fig subdirectory.
''' if self.plot_mode == 'subplots': p.rcdefaults() else: fsize = 28 p.rcParams['font.size'] = fsize rc('legend', fontsize=fsize - 8) rc('axes', titlesize=fsize) rc('axes', labelsize=fsize + 6) rc('xtick', labelsize=fsize - 8) rc('ytick', labelsize=fsize - 8) rc('xtick.major', pad=8) for i, s in enumerate(self.sampling_lst): self._plot_sampling(i, len(self.sampling_lst), sampling_type=s, **kw) if self.show_output: p.show() n_int_range = Array() #=========================================================================== # Output file names for sampling efficiency #=========================================================================== fname_sampling_efficiency_time_nint = Property def _get_fname_sampling_efficiency_time_nint(self): return self.get_qname() + '_' + '%s' % self.hostname + '_time_nint' + '.png' fname_sampling_efficiency_error_nint = Property def _get_fname_sampling_efficiency_error_nint(self): return self.get_qname() + '_' + '%s' % self.hostname + '_error_nint' + '.png' fname_sampling_efficiency_error_time = Property def _get_fname_sampling_efficiency_error_time(self): return self.get_qname() + '_' + '%s' % self.hostname + '_error_time' + '.png' fnames_sampling_efficiency = Property def _get_fnames_sampling_efficiency(self): fnames = [self.fname_sampling_efficiency_time_nint] if len(self.exact_arr) > 0: fnames += [self.fname_sampling_efficiency_error_nint, self.fname_sampling_efficiency_error_time ] return fnames #=========================================================================== # Run sampling efficiency studies #=========================================================================== sampling_types = Trait('all (TGrid, PGrid, LHS and MCS)', {'all (TGrid, PGrid, LHS and MCS)' : np.array(['TGrid', 'PGrid', 'MCS', 'LHS'], dtype=str), 'TGrid' : np.array(['TGrid'], dtype=str), 'PGrid' : np.array(['PGrid'], dtype=str), 'MCS' : np.array(['MCS'], dtype=str), 'LHS' : np.array(['LHS'], dtype=str)}) exec_time_lst = Trait('total time', {'total time' : 'total time', 'data setup' : 'data setup', 'method setup' : 'method setup', 'exec time' : 'exec time'}) exec_time_dict = Property(Dict) def _get_exec_time_dict(self): return {'total time' : np.sum(self.s.exec_time), 'data setup' : self.s.exec_time[0], 'method setup' : self.s.exec_time[1], 'exec time' : self.s.exec_time[2]} regression = Bool(True) sampling_efficiency_btn = Button(label='compare sampling efficiency') @on_trait_change('sampling_efficiency_btn') def sampling_efficiency(self): ''' Run the code for all available sampling types. Plot the results. 
        '''
        def run_estimation(n_int, sampling_type):
            # instantiate spirrid with the sampling discretization method
            print 'running', sampling_type, n_int
            self.s.set(n_int=n_int, sampling_type=sampling_type)
            self.s.recalc = True
            n_sim = self.s.sampling.n_sim
            exec_time = self.exec_time_dict[self.exec_time_lst]
            return self.s.mu_q_arr, exec_time, n_sim

        # vectorize the estimation to accept arrays
        run_estimation_vct = np.vectorize(run_estimation, [object, float, int])

        #===========================================================================
        # Generate the inspected domain of input parameters using broadcasting
        #===========================================================================

        run_estimation_vct([5], ['PGrid'])

        sampling_types = self.sampling_types_
        sampling_colors = np.array(['grey', 'black', 'grey', 'black'],
                                   dtype=str)  # 'blue', 'green', 'red', 'magenta'
        sampling_linestyle = np.array(['--', '--', '-', '-'], dtype=str)

        # run the estimation on all combinations of n_int and sampling_types
        mu_q, exec_time, n_sim_range = run_estimation_vct(self.n_int_range[:, None],
                                                          sampling_types[None, :])

        p.rcdefaults()
        f = p.figure(figsize=(12, 6))
        f.subplots_adjust(left=0.06, right=0.94)

        #===========================================================================
        # Plot the results
        #===========================================================================
        p.subplot(1, 2, 1)
        p.title('response for %d $n_\mathrm{sim}$' % n_sim_range[-1, -1])
        for i, (sampling, color, linestyle) in enumerate(
                zip(sampling_types, sampling_colors, sampling_linestyle)):
            p.plot(self.e_arr, mu_q[-1, i], color=color,
                   label=sampling, linestyle=linestyle)

        if len(self.exact_arr) > 0:
            p.plot(self.e_arr, self.exact_arr, color='black',
                   label='Exact solution')

        p.legend(loc=1)
        p.xlabel('e', fontsize=18)
        p.ylabel('q', fontsize=18)

        # @todo: get n_sim - x-axis
        p.subplot(1, 2, 2)
        for i, (sampling, color, linestyle) in enumerate(
                zip(sampling_types, sampling_colors, sampling_linestyle)):
            p.plot(n_sim_range[:, i], exec_time[:, i],
                   color=color, label=sampling, linestyle=linestyle)

        p.legend(loc=2)
        p.xlabel('$n_\mathrm{sim}$', fontsize=18)
        p.ylabel('$t$ [s]', fontsize=18)

        if self.save_output:
            basename = self.fname_sampling_efficiency_time_nint
            fname = os.path.join(self.fig_output_dir, basename)
            p.savefig(fname, dpi=self.dpi)

        #===========================================================================
        # Evaluate the error
        #===========================================================================

        if len(self.exact_arr) > 0:
            er = ErrorEval(exact_arr=self.exact_arr)

            def eval_error(mu_q, error_measure):
                return error_measure(mu_q)
            eval_error_vct = np.vectorize(eval_error)

            error_measures = np.array([er.eval_error_max,
                                       er.eval_error_energy,
                                       er.eval_error_rms])
            error_table = eval_error_vct(mu_q[:, :, None],
                                         error_measures[None, None, :])

            f = p.figure(figsize=(14, 6))
            f.subplots_adjust(left=0.07, right=0.97, wspace=0.26)

            p.subplot(1, 2, 1)
            p.title('max rel. lack of fit')
            for i, (sampling, color, linestyle) in enumerate(
                    zip(sampling_types, sampling_colors, sampling_linestyle)):
                p.loglog(n_sim_range[:, i], error_table[:, i, 0],
                         color=color, label=sampling, linestyle=linestyle)

                if self.regression:
                    # Linear regression using stats.linregress
                    (a_s, b_s, r, tt, stderr) = stats.linregress(
                        np.log(n_sim_range[:, i]), np.log(error_table[:, i, 0]))
                    print('Linear regression using stats.linregress')
                    print('%s regression: a=%.2f b=%.2f, std error= %.3f => %f'
                          % (sampling, a_s, b_s, stderr, 1. / a_s))
                    err_reg = polyval([a_s, b_s], np.log(n_sim_range[:, i]))
                    x = n_sim_range[:, i]
                    y = np.exp(err_reg)
                    p.loglog(x, y, 'r-')
                    if str(a_s) != 'nan':
                        # annotate the regression line at its mid point
                        p.text(x[x.shape[0] // 2], y[y.shape[0] // 2],
                               '%.3f' % (1. / a_s), color='red')

            # p.ylim( 0, 10 )
            p.legend()
            p.xlabel('$n_\mathrm{sim}$', fontsize=18)
            p.ylabel('$\mathrm{e}_{\max}$ [-]', fontsize=18)

            p.subplot(1, 2, 2)
            p.title('rel. root mean square error')
            for i, (sampling, color, linestyle) in enumerate(
                    zip(sampling_types, sampling_colors, sampling_linestyle)):
                p.loglog(n_sim_range[:, i], error_table[:, i, 2],
                         color=color, label=sampling, linestyle=linestyle)
            p.legend()
            p.xlabel('$n_{\mathrm{sim}}$', fontsize=18)
            p.ylabel('$\mathrm{e}_{\mathrm{rms}}$ [-]', fontsize=18)

            if self.save_output:
                basename = self.fname_sampling_efficiency_error_nint
                fname = os.path.join(self.fig_output_dir, basename)
                p.savefig(fname, dpi=self.dpi)

            f = p.figure(figsize=(14, 6))
            f.subplots_adjust(left=0.07, right=0.97, wspace=0.26)

            p.subplot(1, 2, 1)
            p.title('rel. max lack of fit')
            for i, (sampling, color, linestyle) in enumerate(
                    zip(sampling_types, sampling_colors, sampling_linestyle)):
                p.loglog(exec_time[:, i], error_table[:, i, 0],
                         color=color, label=sampling, linestyle=linestyle)

            p.legend()
            p.xlabel('time [s]', fontsize=18)
            p.ylabel('$\mathrm{e}_{\max}$ [-]', fontsize=18)

            p.subplot(1, 2, 2)
            p.title('rel. root mean square error')
            for i, (sampling, color, linestyle) in enumerate(
                    zip(sampling_types, sampling_colors, sampling_linestyle)):
                p.loglog(exec_time[:, i], error_table[:, i, 2],
                         color=color, label=sampling, linestyle=linestyle)
            p.legend()
            p.xlabel('time [s]', fontsize=18)
            p.ylabel('$\mathrm{e}_{\mathrm{rms}}$ [-]', fontsize=18)

            if self.save_output:
                basename = self.fname_sampling_efficiency_error_time
                fname = os.path.join(self.fig_output_dir, basename)
                p.savefig(fname, dpi=self.dpi)

        if self.show_output:
            p.show()

    #===========================================================================
    # Efficiency of numpy versus C code
    #===========================================================================

    run_lst_detailed_config = Property(List)

    def _get_run_lst_detailed_config(self):
        run_lst = []
        if hasattr(self.q, 'weave_code'):
            run_lst += [
                # ('weave',
                #  {'cached_dG' : True,
                #   'compiled_eps_loop' : True },
                #  'go-',
                #  '$\mathsf{C}_{\\varepsilon} \{\, \mathsf{C}_{\\theta} \{\, q(\\varepsilon,\\theta) \cdot G[\\theta] \,\}\,\} $ - %4.2f sec',
                #  ),
                # ('weave',
                #  {'cached_dG' : True,
                #   'compiled_eps_loop' : False },
                #  'r-2',
                #  '$\mathsf{Python} _{\\varepsilon} \{\, \mathsf{C}_{\\theta} \{\, q(\\varepsilon,\\theta) \cdot G[\\theta] \,\}\,\} $ - %4.2f sec'
                #  ),
                # ('weave',
                #  {'cached_dG' : False,
                #   'compiled_eps_loop' : True },
                #  'r-2',
                #  '$\mathsf{C}_{\\varepsilon} \{\, \mathsf{C}_{\\theta} \{\, q(\\varepsilon,\\theta) \cdot g[\\theta_1] \cdot \ldots \cdot g[\\theta_m] \,\}\,\} $ - %4.2f sec'
                #  ),
                ('weave',
                 {'cached_dG' : False,
                  'compiled_eps_loop' : False },
                 'bx-',
                 '$\mathsf{Python} _{\\varepsilon} \{\, \mathsf{C}_{\\theta} \{\, q(\\varepsilon,\\theta) \cdot g[\\theta_1] \cdot \ldots \cdot g[\\theta_m] \,\} \,\} $ - %4.2f sec',
                 )
            ]

        if hasattr(self.q, 'cython_code'):
            run_lst += [
                # ('cython',
                #  {'cached_dG' : True,
                #   'compiled_eps_loop' : True },
                #  'go-',
                #  '$\mathsf{Cython}_{\\varepsilon} \{\, \mathsf{Cython}_{\\theta} \{\, q(\\varepsilon,\\theta) \cdot G[\\theta] \,\}\,\} $ - %4.2f sec',
                #  ),
                # ('cython',
                #  {'cached_dG' : True,
                #   'compiled_eps_loop' : False },
                #  'r-2',
                #  '$\mathsf{Python} _{\\varepsilon} \{\, \mathsf{Cython}_{\\theta} \{\, q(\\varepsilon,\\theta) \cdot G[\\theta] \,\}\,\} $ - %4.2f sec'
                #  ),
                # ('cython',
                #  {'cached_dG' : False,
                #   'compiled_eps_loop' : True },
                #  'r-2',
                #  '$\mathsf{Cython}_{\\varepsilon} \{\, \mathsf{Cython}_{\\theta} \{\, q(\\varepsilon,\\theta) \cdot g[\\theta_1] \cdot \ldots \cdot g[\\theta_m] \,\}\,\} $ - %4.2f sec'
                #  ),
                # ('cython',
                #  {'cached_dG' : False,
                #   'compiled_eps_loop' : False },
                #  'bx-',
                #  '$\mathsf{Python} _{\\varepsilon} \{\, \mathsf{Cython}_{\\theta} \{\, q(\\varepsilon,\\theta) \cdot g[\\theta_1] \cdot \ldots \cdot g[\\theta_m] \,\} \,\} $ - %4.2f sec',
                #  )
            ]

        if hasattr(self.q, '__call__'):
            run_lst += [
                # ('numpy',
                #  {},
                #  'y--',
                #  '$\mathsf{Python}_{\\varepsilon} \{\, \mathsf{Numpy}_{\\theta} \{\, q(\\varepsilon,\\theta) \cdot G[\\theta] \,\} \,\} $ - %4.2f sec'
                #  )
            ]

        return run_lst

    # number of recalculations to get new time.
    n_recalc = Int(2)

    def codegen_efficiency(self):
        # define a table with the run configurations to start in a batch
        basenames = []

        qname = self.get_qname()

        s = self.s

        legend = []
        legend_lst = []
        time_lst = []
        p.figure()

        for idx, run in enumerate(self.run_lst_detailed_config):
            code, run_options, plot_options, legend_string = run
            s.codegen_type = code
            s.codegen.set(**run_options)
            s.recalc = True
            print 'run', idx, run_options

            for i in range(self.n_recalc):
                s.recalc = True  # automatically propagated within spirrid
                print 'execution time', self.exec_time_dict['total time']

            p.plot(s.evar_lst[0], s.mu_q_arr, plot_options)

            # @todo: this is not portable!!
            # legend.append(legend_string % s.exec_time)
            # legend_lst.append(legend_string[:-12])
            time_lst.append(self.exec_time_dict['total time'])

        p.xlabel('strain [-]')
        p.ylabel('stress')
        # p.legend(legend, loc = 2)
        p.title(qname)

        if self.save_output:
            print 'saving codegen_efficiency'
            basename = qname + '_' + 'codegen_efficiency' + '.png'
            basenames.append(basename)
            fname = os.path.join(self.fig_output_dir, basename)
            p.savefig(fname, dpi=self.dpi)

        self._bar_plot(legend_lst, time_lst)
        p.title('%s' % s.sampling_type)
        if self.save_output:
            basename = qname + '_' + 'codegen_efficiency_%s' % s.sampling_type + '.png'
            basenames.append(basename)
            fname = os.path.join(self.fig_output_dir, basename)
            p.savefig(fname, dpi=self.dpi)

        if self.show_output:
            p.show()

        return basenames

    #===========================================================================
    # Efficiency of numpy versus C code
    #===========================================================================
    run_lst_language_config = Property(List)

    def _get_run_lst_language_config(self):
        run_lst = []
        if hasattr(self.q, 'weave_code'):
            run_lst += [
                ('weave',
                 {'cached_dG' : False,
                  'compiled_eps_loop' : False },
                 'bx-',
                 '$\mathsf{Python} _{\\varepsilon} \{\, \mathsf{C}_{\\theta} \{\, q(\\varepsilon,\\theta) \cdot g[\\theta_1] \cdot \ldots \cdot g[\\theta_m] \,\} \,\} $ - %4.2f sec',
                 )]
        # if hasattr(self.q, 'cython_code'):
        #     run_lst += [
        #         ('cython',
        #          {'cached_dG' : False,
        #           'compiled_eps_loop' : False },
        #          'bx-',
        #          '$\mathsf{Python} _{\\varepsilon} \{\, \mathsf{Cython}_{\\theta} \{\, q(\\varepsilon,\\theta) \cdot g[\\theta_1] \cdot \ldots \cdot g[\\theta_m] \,\} \,\} $ - %4.2f sec',
        #          )]
        if hasattr(self.q, '__call__'):
            run_lst += [
                ('numpy',
                 {},
                 'y--',
                 '$\mathsf{Python}_{\\varepsilon} \{\, \mathsf{Numpy}_{\\theta} \{\, q(\\varepsilon,\\theta) \cdot G[\\theta] \,\} \,\} $ - %4.2f sec'
                 )]
        return run_lst

    extra_compiler_args = Bool(True)
    le_sampling_lst = List(['LHS', 'PGrid'])
    le_n_int_lst = List([440, 5000])

    #===========================================================================
    # Output file names for language efficiency
    #===========================================================================

    fnames_language_efficiency = Property

    def _get_fnames_language_efficiency(self):
        return ['%s_codegen_efficiency_%s_extra_%s.png' %
                (self.get_qname(), self.hostname, extra)
                for extra in [self.extra_compiler_args]]

    language_efficiency_btn = Button(label='compare language efficiency')

    @on_trait_change('language_efficiency_btn')
    def codegen_language_efficiency(self):
        # define a table with the run configurations to start in a batch

        # pyxbld_dir = os.path.join(HOME_DIR, '.pyxbld')
        # if os.path.exists(pyxbld_dir):
        #     shutil.rmtree(pyxbld_dir)

        if os.path.exists(PYTHON_COMPILED_DIR):
            shutil.rmtree(PYTHON_COMPILED_DIR)

        for extra, fname in zip([self.extra_compiler_args],
                                self.fnames_language_efficiency):
            print 'extra compilation args:', extra
            legend_lst = []
            error_lst = []
            n_sim_lst = []
            exec_times_sampling = []

            meth_lst = zip(self.le_sampling_lst, self.le_n_int_lst)
            for item, n_int in meth_lst:
                print 'sampling method:', item
                s = self.s
                self.exec_time_dict['total time']  # eliminate first load time delay (first column)
                s.n_int = n_int
                s.sampling_type = item
                exec_times_lang = []

                for idx, run in enumerate(self.run_lst_language_config):
                    code, run_options, plot_options, legend_string = run

                    # os.system('rm -fr ~/.python27_compiled')

                    s.codegen_type = code
                    s.codegen.set(**run_options)
                    if s.codegen_type == 'weave':
                        s.codegen.set(**dict(use_extra=extra))
                    print 'run', idx, run_options

                    exec_times_run = []
                    for i in range(self.n_recalc):
                        s.recalc = True  # automatically propagated
                        exec_times = [self.exec_time_dict['data setup'],
                                      self.exec_time_dict['method setup'],
                                      self.exec_time_dict['exec time']]
                        exec_times_run.append(exec_times)
                        print 'execution time', self.exec_time_dict['total time']

                    # legend_lst.append(legend_string[:-12])
                    legend_lst = [dict(weave='weave', cython='cython',
                                       numpy='numpy')[x[0]]
                                  for x in self.run_lst_language_config]
                    if s.codegen_type == 'weave':
                        # load weave.inline time from tmp file and fix values in time_arr
                        # @todo - does not work on windows
                        import tempfile
                        tdir = tempfile.gettempdir()
                        f = open(os.path.join(tdir, 'w_time'), 'r')
                        value_t = float(f.read())
                        f.close()
                        exec_times_run[0][1] = value_t
                        exec_times_run[0][2] -= value_t
                        exec_times_lang.append(exec_times_run)
                    else:
                        exec_times_lang.append(exec_times_run)

                print 'legend_lst', legend_lst
                n_sim_lst.append(s.sampling.n_sim)
                exec_times_sampling.append(exec_times_lang)

                #===========================================================================
                # Evaluate the error
                #===========================================================================
                if len(self.exact_arr) > 0:
                    er = ErrorEval(exact_arr=self.exact_arr)
                    error_lst.append((er.eval_error_rms(s.mu_q_arr),
                                      er.eval_error_max(s.mu_q_arr)))

            times_arr = np.array(exec_times_sampling, dtype='d')
            self._multi_bar_plot(meth_lst, legend_lst, times_arr,
                                 error_lst, n_sim_lst)
            if self.save_output:
                fname_path = os.path.join(self.fig_output_dir, fname)
                p.savefig(fname_path, dpi=self.dpi)

        if self.show_output:
            p.show()

    def combination_efficiency(self, theta_vars_det, theta_vars_rand):
        '''
        Run the code for all available random parameter combinations.
        Plot the results.
        '''
        qname = self.get_qname()

        s = self.s
        s.set(sampling_type='TGrid')

        # list of all combinations of response function parameters
        rv_comb_lst = list(powerset(s.theta_vars.keys()))

        p.figure()
        exec_time_lst = []
        for i_comb, rv_comb in enumerate(rv_comb_lst[163:219]):  # [1:-1]
            s.theta_vars = theta_vars_det
            print 'Combination', rv_comb

            for rv in rv_comb:
                s.theta_vars[rv] = theta_vars_rand[rv]

            # legend = []
            # p.figure()
            time_lst = []
            for idx, run in enumerate(self.run_lst_detailed_config):
                code, run_options, plot_options, legend_string = run
                print 'run', idx, run_options
                s.codegen_type = code
                s.codegen.set(**run_options)

                # p.plot(s.evar_lst[0], s.mu_q_arr, plot_options)

                # print 'integral of the pdf theta', s.eval_i_dG_grid()
                print 'execution time', self.exec_time_dict['total time']
                time_lst.append(self.exec_time_dict['total time'])
                # legend.append(legend_string % s.exec_time)
            exec_time_lst.append(time_lst)
        p.plot(np.array((1, 2, 3, 4)), np.array(exec_time_lst).T)
        p.xlabel('method')
        p.ylabel('time')

        if self.save_output:
            print 'saving codegen_efficiency'
            fname = os.path.join(
                self.fig_output_dir,
                qname + '_' + 'combination_efficiency' + '.png')
            p.savefig(fname, dpi=self.dpi)

        if self.show_output:
            p.title(s.q.title)
            p.show()

    def _bar_plot(self, legend_lst, time_lst):
        rc('font', size=15)
        # rc('font', family = 'serif', style = 'normal', variant = 'normal',
        #    stretch = 'normal', size = 15)
        fig = p.figure(figsize=(10, 5))

        n_tests = len(time_lst)
        times = np.array(time_lst)
        x_norm = times[1]
        xmax = times.max()
        rel_xmax = xmax / x_norm
        rel_times = times / x_norm
        m = int(rel_xmax % 10)
        if m < 5:
            x_max_plt = int(rel_xmax) - m + 10
        else:
            x_max_plt = int(rel_xmax) - m + 15

        ax1 = fig.add_subplot(111)
        p.subplots_adjust(left=0.45, right=0.88)
        # fig.canvas.set_window_title('window title')
        pos = np.arange(n_tests) + 0.5
        rects = ax1.barh(pos, rel_times, align='center',
                         height=0.5, color='w', edgecolor='k')

        ax1.set_xlabel('normalized execution time [-]')
        ax1.axis([0, x_max_plt, 0, n_tests])
        ax1.set_yticks(pos)
        ax1.set_yticklabels(legend_lst)

        for rect, t in zip(rects, rel_times):
            width = rect.get_width()
            xloc = width + (0.03 * rel_xmax)
            clr = 'black'
            align = 'left'
            yloc = rect.get_y() + rect.get_height() / 2.0
            ax1.text(xloc, yloc, '%4.2f' % t,
                     horizontalalignment=align,
                     verticalalignment='center',
                     color=clr)  # , weight = 'bold')

        ax2 = ax1.twinx()
        ax1.plot([1, 1], [0, n_tests], 'k--')
        ax2.set_yticks([0] + list(pos) + [n_tests])
        ax2.set_yticklabels([''] + ['%4.2f s' % s for s in list(times)] + [''])
        ax2.set_xticks([0, 1] + range(5, x_max_plt + 1, 5))
        ax2.set_xticklabels(['%i' % s
                             for s in ([0, 1] + range(5, x_max_plt + 1, 5))])

    def _multi_bar_plot(self, title_lst, legend_lst, time_arr, error_lst,
                        n_sim_lst):
        '''Plot the results of the code efficiency study.
        '''
        p.rcdefaults()
        fsize = 14
        fig = p.figure(figsize=(15, 3))
        rc('font', size=fsize)
        rc('legend', fontsize=fsize - 2)
        # legend_lst = ['weave', 'cython', 'numpy']

        # times are stored in a 4d array with dimensions:
        n_sampling, n_lang, n_run, n_times = time_arr.shape
        print 'arr', time_arr.shape
        times_sum = np.sum(time_arr, axis=-1)  # sum the time components

        p.subplots_adjust(left=0.1, right=0.95, wspace=0.1,
                          bottom=0.15, top=0.8)

        for meth_i in range(n_sampling):

            ax1 = fig.add_subplot(1, n_sampling, meth_i + 1)
            ax1.set_xlabel('execution time [s]')
            ytick_pos = np.arange(n_lang) + 1

            # ax1.axis([0, x_max_plt, 0, n_lang])
            # todo: **2 n_vars
            if len(self.exact_arr) > 0:
                ax1.set_title('%s: $ n_\mathrm{sim} = %s, \mathrm{e}_\mathrm{rms}=%s, \mathrm{e}_\mathrm{max}=%s$' %
                              (title_lst[meth_i][0],
                               self._formatSciNotation('%.2e' % n_sim_lst[meth_i]),
                               self._formatSciNotation('%.2e' % error_lst[meth_i][0]),
                               self._formatSciNotation('%.2e' % error_lst[meth_i][1])))
            else:
                ax1.set_title('%s: $ n_\mathrm{sim} = %s$' %
                              (title_lst[meth_i][0],
                               self._formatSciNotation('%.2e' % n_sim_lst[meth_i])))
            ax1.set_yticks(ytick_pos)
            if meth_i == 0:
                ax1.set_yticklabels(legend_lst, fontsize=fsize + 2)
            else:
                ax1.set_yticklabels([])

            ax1.set_xlim(0, 1.2 * np.max(times_sum[meth_i]))

            distance = 0.2
            height = 1.0 / n_run - distance
            offset = height / 2.0

            colors = ['w', 'w', 'w', 'r', 'y', 'b', 'g', 'm']
            hatches = ['/', '\\', 'x', '-', '+', '|', 'o', 'O', '.', '*']
            label_lst = ['sampling', 'compilation', 'integration']

            for i in range(n_run):
                pos = np.arange(n_lang) + 1 - offset + i * height
                end_bar_pos = np.zeros((n_lang,), dtype='d')
                for j in range(n_times):
                    if i > 0:
                        label = label_lst[j]
                    else:
                        label = None
                    bar_lengths = time_arr[meth_i, :, i, j]
                    rects = ax1.barh(pos, bar_lengths, align='center',
                                     height=height, left=end_bar_pos,
                                     color=colors[j], edgecolor='k',
                                     hatch=hatches[j], label=label)
                    end_bar_pos += bar_lengths
                for k in range(n_lang):
                    x_val = times_sum[meth_i, k, i] + 0.01 * np.max(times_sum[meth_i])
                    ax1.text(x_val, pos[k],
                             '$%4.2f\,$s' % times_sum[meth_i, k, i],
                             horizontalalignment='left',
                             verticalalignment='center',
                             color='black')  # , weight = 'bold')
                    if meth_i == 0:
                        ax1.text(0.02 * np.max(times_sum[0]),
                                 pos[k],
                                 '$%i.$' % (i + 1),
                                 horizontalalignment='left',
                                 verticalalignment='center',
                                 color='black',
                                 bbox=dict(pad=0., ec="w", fc="w"))
        p.legend(loc=0)

    def _formatSciNotation(self, s):
        # transform 1e+004 into 1e4, for example
        tup = s.split('e')
        try:
            significand = tup[0].rstrip('0').rstrip('.')
            sign = tup[1][0].replace('+', '')
            exponent = tup[1][1:].lstrip('0')
            if significand == '1':
                # reformat 1x10^y as 10^y
                significand = ''
            if exponent:
                exponent = '10^{%s%s}' % (sign, exponent)
            if significand and exponent:
                return r'%s{\cdot}%s' % (significand, exponent)
            else:
                return r'%s%s' % (significand, exponent)
        except IndexError, msg:
            return s
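# Usage sketch for '_formatSciNotation' above (assumption: the helper uses
# no instance state, so it can be exercised on a bare SPIRRIDLAB object):
#
#     lab = SPIRRIDLAB()
#     lab._formatSciNotation('1.00e+004')   # -> '10^{4}'
#     lab._formatSciNotation('2.50e-003')   # -> '2.5{\cdot}10^{-3}'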