class CodeGen(HasStrictTraits):
    '''Abstract base for integration-code generators.

    Concrete subclasses produce the integration code either as a script
    or as compiled code; the choice is made by picking the subclass.
    '''

    # weak back-reference to the owning spirrid object
    spirrid = WeakRef

    # event fired to request a recalculation
    recalc = Event

    #===========================================================================
    # Configuration of the algorithm
    #===========================================================================
    # evaluate the implicit variable within the generated code
    implicit_var_eval = Bool(False, codegen_option=True)

    #===========================================================================
    # Propagate the change to the spirrid
    #===========================================================================
    @on_trait_change('+codegen_option')
    def set_codegen_option_changed(self):
        '''Flag the owning spirrid whenever any codegen option changes.'''
        owner = self.spirrid
        owner.codegen_option = True
class CompositeCrackBridgeGf(HasTraits):
    '''Crack-bridge model for a composite with continuous and short fibers
    including a matrix softening law parameterized by tensile strength ft
    and fracture energy Gf.

    The model delegates the fiber response to two sub-models
    (CrackBridgeContFibersGf / CrackBridgeShortFibersGf) and superposes
    their strain profiles over a merged x-coordinate array.
    '''

    # list of reinforcement components (continuous and/or short fibers)
    reinforcement_lst = List(Instance(Reinforcement))
    # crack opening [presumably m or mm -- units not visible here]
    w = Float
    # matrix E-modulus
    E_m = Float
    # left and right boundary distances of the crack bridge
    Ll = Float
    Lr = Float
    # matrix tensile strength
    ft = Float
    # matrix fracture energy
    Gf = Float(1.0)
    # largest crack opening reached so far (unloading reference)
    w_unld = Float(0.0)
    # if True the loading history (damage) evolves; if False it is frozen
    damage_switch = Bool(True)

    @on_trait_change('damage_switch')
    def switch_damage(self):
        '''freezes the loading history'''
        # propagate the switch into both fiber sub-models
        self.cont_fibers_instance.damage_switch = self.damage_switch
        self.short_fibers_instance.damage_switch = self.damage_switch

    # matrix softening strain at the crack according to an exponential
    # softening law sigma(w) = ft * exp(-ft/Gf * w)
    epsm_softening = Property(depends_on='w,ft,Gf,E_m')

    @cached_property
    def _get_epsm_softening(self):
        if self.w >= self.w_unld:
            # NOTE(review): this getter mutates self.w_unld as a side effect;
            # because the property is cached on 'w,ft,Gf,E_m', the update only
            # happens when the cache is recomputed -- confirm this is intended.
            if self.damage_switch == True:
                self.w_unld += self.w - self.w_unld
            return self.ft * np.exp(-self.ft / self.Gf * self.w) / self.E_m
        else:
            # unloading branch: linear scaling between origin and the
            # softening stress at the historic maximum opening w_unld
            # (this branch is only reachable with w_unld > 0)
            return self.w / self.w_unld * self.ft * np.exp(
                -self.ft / self.Gf * self.w_unld) / self.E_m

    # total fiber volume fraction summed over all reinforcement components
    V_f_tot = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_V_f_tot(self):
        V_f_tot = 0.0
        for reinf in self.reinforcement_lst:
            V_f_tot += reinf.V_f
        return V_f_tot

    # composite E-modulus by the rule of mixtures
    E_c = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_E_c(self):
        E_fibers = 0.0
        for reinf in self.reinforcement_lst:
            E_fibers += reinf.V_f * reinf.E_f
        E_c = self.E_m * (1. - self.V_f_tot) + E_fibers
        # NOTE(review): the (1. + 1e-15) perturbation is presumably there to
        # avoid exact-equality / caching artifacts downstream -- confirm.
        return E_c * (1. + 1e-15)

    # split of the reinforcement list into (continuous, short) fiber lists
    sorted_reinf_lst = Property(Tuple(List, List),
                                depends_on='reinforcement_lst')

    @cached_property
    def _get_sorted_reinf_lst(self):
        cont_reinf_lst = []
        short_reinf_lst = []
        for reinf in self.reinforcement_lst:
            # exact class comparison -- subclasses of ContinuousFibers or
            # ShortFibers would be silently dropped here
            if reinf.__class__ == ContinuousFibers:
                cont_reinf_lst.append(reinf)
            elif reinf.__class__ == ShortFibers:
                short_reinf_lst.append(reinf)
        return cont_reinf_lst, short_reinf_lst

    # persistent sub-model instance for continuous fibers (kept across
    # recomputation so that its damage history survives)
    cont_fibers_instance = Instance(CrackBridgeContFibersGf)

    def _cont_fibers_instance_default(self):
        return CrackBridgeContFibersGf()

    # continuous-fiber sub-model, synchronized with the current state
    cont_fibers = Property(Instance(CrackBridgeContFibersGf),
                           depends_on='reinforcement_lst+,Ll,Lr,E_m,w')

    @cached_property
    def _get_cont_fibers(self):
        # push the current state into the shared sub-model instance
        cbcf = self.cont_fibers_instance
        cbcf.w = self.w
        cbcf.Ll = self.Ll
        cbcf.Lr = self.Lr
        cbcf.E_m = self.E_m
        cbcf.E_c = self.E_c
        cbcf.w_unld = self.w_unld
        cbcf.cont_reinf_lst = self.sorted_reinf_lst[0]
        cbcf.epsm_softening = self.epsm_softening
        #print self.w_unld, self.w, self.epsm_softening, self.ft, self.Gf
        return cbcf

    # persistent sub-model instance for short fibers
    short_fibers_instance = Instance(CrackBridgeShortFibersGf)

    def _short_fibers_instance_default(self):
        return CrackBridgeShortFibersGf()

    # short-fiber sub-model, synchronized with the current state
    short_fibers = Property(Instance(CrackBridgeShortFibersGf),
                            depends_on='reinforcement_lst+,E_m,w')

    @cached_property
    def _get_short_fibers(self):
        cbsf = self.short_fibers_instance
        cbsf.w = self.w
        cbsf.E_m = self.E_m
        cbsf.E_c = self.E_c
        cbsf.short_reinf_lst = self.sorted_reinf_lst[1]
        cbsf.epsm_softening = self.epsm_softening
        return cbsf

    # merged, sorted x-coordinate array of both sub-models
    _x_arr = Property(Array, depends_on='w,E_m,Ll,Lr,reinforcement_lst+')

    @cached_property
    def _get__x_arr(self):
        if len(self.sorted_reinf_lst[0]) != 0 and len(
                self.sorted_reinf_lst[1]) != 0:
            # merge and deduplicate the x grids of both fiber families;
            # np.unique also sorts the result
            added_x = np.hstack(
                (self.cont_fibers.x_arr, self.short_fibers.x_arr))
            sorted_unique_x = np.unique(added_x)
            return sorted_unique_x
        elif len(self.sorted_reinf_lst[0]) != 0:
            return self.cont_fibers.x_arr
        elif len(self.sorted_reinf_lst[1]) != 0:
            return self.short_fibers.x_arr
        # NOTE(review): with an empty reinforcement list this returns None

    # matrix strain profile over _x_arr
    _epsm_arr = Property(Array, depends_on='w,E_m,Ll,Lr,reinforcement_lst+')

    @cached_property
    def _get__epsm_arr(self):
        if len(self.sorted_reinf_lst[0]) != 0 and len(
                self.sorted_reinf_lst[1]) != 0:
            # superpose the matrix strain contributions of both families by
            # interpolating each family's profile onto the other's grid
            epsm_cont_interp = MFnLineArray(xdata=self.cont_fibers.x_arr,
                                            ydata=self.cont_fibers.epsm_arr)
            epsm_short_interp = MFnLineArray(xdata=self.short_fibers.x_arr,
                                             ydata=self.short_fibers.epsm_arr)
            added_epsm_cont = self.cont_fibers.epsm_arr + \
                epsm_short_interp.get_values(self.cont_fibers.x_arr)
            added_epsm_short = self.short_fibers.epsm_arr + \
                epsm_cont_interp.get_values(self.short_fibers.x_arr)
            # indices of the first occurrence of each unique x -- matches the
            # ordering used by _get__x_arr
            sorted_unique_idx = np.unique(np.hstack(
                (self.cont_fibers.x_arr, self.short_fibers.x_arr)),
                return_index=True)[1]
            return np.hstack(
                (added_epsm_cont, added_epsm_short))[sorted_unique_idx]
        elif len(self.sorted_reinf_lst[0]) != 0:
            return self.cont_fibers.epsm_arr
        elif len(self.sorted_reinf_lst[1]) != 0:
            self.short_fibers.w = self.w
            return self.short_fibers.epsm_arr

    # fiber strain profile (continuous fibers only)
    _epsf_arr = Property(Array, depends_on='w,E_m,Ll,Lr,reinforcement_lst+')

    @cached_property
    def _get__epsf_arr(self):
        ''' only for continuous reinforcement '''
        if len(self.sorted_reinf_lst[0]) != 0 and len(
                self.sorted_reinf_lst[1]) == 0:
            self.cont_fibers.w = self.w
            return self.cont_fibers.epsf_arr
        else:
            raise ValueError('epsf can only be computed for continuous fibers')

    # fiber strains at the crack plane as a (continuous, short) tuple
    _epsf0_arr = Property(Array, depends_on='w,E_m,Ll,Lr,reinforcement_lst+')

    @cached_property
    def _get__epsf0_arr(self):
        # NOTE(review): if the reinforcement list is empty neither branch
        # runs and the return raises UnboundLocalError
        if len(self.sorted_reinf_lst[0]) != 0 and len(
                self.sorted_reinf_lst[1]) != 0:
            epsf0_cont = self.cont_fibers.epsf0_arr
            epsf0_short = self.short_fibers.epsf0_arr
        elif len(self.sorted_reinf_lst[0]) != 0:
            epsf0_cont = self.cont_fibers.epsf0_arr
            epsf0_short = np.array([])
        elif len(self.sorted_reinf_lst[1]) != 0:
            epsf0_cont = np.array([])
            epsf0_short = self.short_fibers.epsf0_arr
        return epsf0_cont, epsf0_short

    # crack-plane fiber strains of the continuous fibers
    _epsf0_arr_cont = Property(Array,
                               depends_on='w,E_m,Ll,Lr,reinforcement_lst+')

    @cached_property
    def _get__epsf0_arr_cont(self):
        return self._epsf0_arr[0]

    # crack-plane fiber strains of the short fibers
    _epsf0_arr_short = Property(Array,
                                depends_on='w,E_m,Ll,Lr,reinforcement_lst+')

    @cached_property
    def _get__epsf0_arr_short(self):
        return self._epsf0_arr[1]

    # composite stress at the crack plane for the current opening w
    sigma_c = Property(depends_on='w,E_m,Ll,Lr,reinforcement_lst+')

    @cached_property
    def _get_sigma_c(self):
        if len(self.sorted_reinf_lst[0]) != 0 and len(
                self.sorted_reinf_lst[1]) != 0:
            # weighted sum over the sampled continuous-fiber strains,
            # reduced by the current damage fraction
            sigma_c_cont = np.sum(
                self._epsf0_arr_cont * self.cont_fibers.sorted_stats_weights *
                self.cont_fibers.sorted_V_f * self.cont_fibers.sorted_nu_r *
                self.cont_fibers.sorted_E_f * (1. - self.cont_fibers.damage))
            sigma_c_short = np.sum(self._epsf0_arr_short *
                                   self.short_fibers.sorted_V_f *
                                   self.short_fibers.sorted_E_f)
        elif len(self.sorted_reinf_lst[0]) != 0:
            sigma_c_cont = np.sum(
                self._epsf0_arr_cont * self.cont_fibers.sorted_stats_weights *
                self.cont_fibers.sorted_V_f * self.cont_fibers.sorted_nu_r *
                self.cont_fibers.sorted_E_f * (1. - self.cont_fibers.damage))
            sigma_c_short = 0.0
        elif len(self.sorted_reinf_lst[1]) != 0:
            sigma_c_cont = 0.0
            sigma_c_short = np.sum(self._epsf0_arr_short *
                                   self.short_fibers.sorted_V_f *
                                   self.short_fibers.sorted_E_f)
        # fiber contributions plus the softening matrix stress
        return sigma_c_cont + sigma_c_short + self.epsm_softening * self.E_m * (
            1. - self.V_f_tot)

    secant_K = Property(depends_on='w,E_m,Ll,Lr,reinforcement_lst+')

    @cached_property
    def _get_secant_K(self):
        ''' secant stiffness at given w '''
        # only implemented for purely continuous reinforcement
        if len(self.sorted_reinf_lst[0]) != 0 and len(
                self.sorted_reinf_lst[1]) == 0:
            self.cont_fibers.w = self.w
            ef0, a_short, a_long, em, a = self.cont_fibers.profile(
                self.cont_fibers.damage)
            K_cont = np.sum(
                self.cont_fibers.sorted_stats_weights *
                self.cont_fibers.sorted_V_f * self.cont_fibers.sorted_nu_r *
                self.cont_fibers.sorted_E_f * (1. - self.cont_fibers.damage) /
                (a_short + a_long))
            return K_cont
        else:
            raise ValueError(
                'secant stiffness not yet implemented for short fibers')
class SimBT4PT(IBVModel):
    '''Simulation: four point bending test.

    Only one quarter of the beam is discretized (two symmetry planes).
    The model is composed of three specimen zones (mid, load, outer) and,
    optionally, an elastomer pad through which the load is introduced.
    '''

    # event fired whenever any trait tagged input=True changes
    input_change = Event

    @on_trait_change('+input,ccs_unit_cell.input_change')
    def _set_input_change(self):
        self.input_change = True

    implements(ISimModel)

    #-----------------
    # discretization:
    #-----------------

    # specify whether the elastomer is to be modeled or if the load is
    # introduced as line load
    #
    elstmr_flag = Bool(True)

    # discretization in x-direction (longitudinal):
    #
    outer_zone_shape_x = Int(6, input=True, ps_levels=(4, 12, 3))

    # discretization in x-direction (longitudinal):
    #
    load_zone_shape_x = Int(2, input=True, ps_levels=(1, 4, 1))

    # middle part discretization in x-direction (longitudinal):
    #
    mid_zone_shape_x = Int(3, input=True, ps_levels=(1, 4, 1))

    # discretization in y-direction (width):
    #
    shape_y = Int(2, input=True, ps_levels=(1, 4, 2))

    # discretization in z-direction:
    #
    shape_z = Int(2, input=True, ps_levels=(1, 3, 3))

    #-----------------
    # geometry:
    #-----------------
    #
    # edge length of the bending specimen (beam)
    # (entire length without symmetry)
    #
    length = Float(1.50, input=True)
    elstmr_length = Float(0.05, input=True)
    mid_zone_length = Float(0.50, input=True)
    elstmr_thickness = Float(0.005, input=True)
    width = Float(0.20, input=True)
    thickness = Float(0.06, input=True)

    #-----------------
    # derived geometric parameters
    #-----------------
    #
    # half the specimen length (symmetry included)
    #
    sym_specmn_length = Property

    def _get_sym_specmn_length(self):
        return self.length / 2.

    # half the mid-zone length (symmetry included)
    #
    sym_mid_zone_specmn_length = Property

    def _get_sym_mid_zone_specmn_length(self):
        return self.mid_zone_length / 2.

    # half the length of the elastomer (load introduction
    # with included symmetry)
    #
    sym_elstmr_length = Property

    def _get_sym_elstmr_length(self):
        return self.elstmr_length / 2.

    # half the specimen width
    #
    sym_width = Property

    def _get_sym_width(self):
        return self.width / 2.

    #----------------------------------------------------------------------------------
    # mats_eval
    #----------------------------------------------------------------------------------

    # age of the plate at the time of testing
    # NOTE: that the same phi-function is used independent of age. This
    # assumes an affine/proportional damage evolution for different ages.
    #
    age = Int(28, input=True)

    # time stepping params
    #
    tstep = Float(0.05, auto_set=False, enter_set=True, input=True)
    tmax = Float(1.0, auto_set=False, enter_set=True, input=True)
    tolerance = Float(0.001, auto_set=False, enter_set=True, input=True)

    # specify type of 'linalg.norm'
    # default value 'None' sets norm to 2-norm,
    # i.e "norm = sqrt(sum(x_i**2))
    #
    # set 'ord=np.inf' to switch norm to
    # "norm = max(abs(x_i))"
    #
    ord = Enum(np.inf, None)

    # number of microplanes of the damage model
    n_mp = Int(30, input=True)

    # @todo: for mats_eval the information of the unit cell should be used
    # in order to use the same number of microplanes and model version etc...
    #
    # NOTE(review): self.E_c, self.nu and self.phi_fn are not defined in the
    # visible part of this class -- presumably supplied by IBVModel or a
    # subclass; confirm.
    specmn_mats = Property(Instance(MATS2D5MicroplaneDamage),
                           depends_on='input_change')

    @cached_property
    def _get_specmn_mats(self):
        return MATS2D5MicroplaneDamage(
            E=self.E_c,  # E=self.E_m,
            # corresponding to settings in "MatsCalib"
            nu=self.nu,
            n_mp=self.n_mp,
            symmetrization='sum-type',
            model_version='compliance',
            phi_fn=self.phi_fn)

    # NOTE(review): this class-level 'if' tests the trait-definition object
    # 'elstmr_flag' (always truthy), not an instance value -- the property
    # below is therefore always defined; confirm intended.
    if elstmr_flag:
        elstmr_mats = Property(Instance(MATS3DElastic),
                               depends_on='input_change')

        @cached_property
        def _get_elstmr_mats(self):
            # specify a small elastomer stiffness (approximation)
            E_elast = self.E_c / 10.
            print 'effective elastomer E_modulus', E_elast
            return MATS3DElastic(E=E_elast, nu=0.4)

    #-----------------
    # fets:
    #-----------------

    # specify element shrink factor in plot of fe-model
    #
    vtk_r = Float(0.95)

    # use quadratic serendipity elements
    #
    specmn_fets = Property(Instance(FETSEval), depends_on='input_change')

    @cached_property
    def _get_specmn_fets(self):
        fets = FETS2D58H20U(mats_eval=self.specmn_mats)
        fets.vtk_r *= self.vtk_r
        return fets

    # use quadratic serendipity elements
    #
    elstmr_fets = Property(Instance(FETSEval), depends_on='input_change')

    @cached_property
    def _get_elstmr_fets(self):
        fets = FETS2D58H20U(mats_eval=self.elstmr_mats)
        fets.vtk_r *= self.vtk_r
        return fets

    fe_domain = Property(depends_on='+ps_levels, +input')

    @cached_property
    def _get_fe_domain(self):
        return FEDomain()

    #===========================================================================
    # fe level
    #===========================================================================

    outer_zone_specmn_fe_level = Property(depends_on='+ps_levels, +input')

    def _get_outer_zone_specmn_fe_level(self):
        return FERefinementGrid(name='outer zone specimen patch',
                                fets_eval=self.specmn_fets,
                                domain=self.fe_domain)

    load_zone_specmn_fe_level = Property(depends_on='+ps_levels, +input')

    def _get_load_zone_specmn_fe_level(self):
        return FERefinementGrid(name='load zone specimen patch',
                                fets_eval=self.specmn_fets,
                                domain=self.fe_domain)

    mid_zone_specmn_fe_level = Property(depends_on='+ps_levels, +input')

    def _get_mid_zone_specmn_fe_level(self):
        return FERefinementGrid(name='mid zone specimen patch',
                                fets_eval=self.specmn_fets,
                                domain=self.fe_domain)

    elstmr_fe_level = Property(depends_on='+ps_levels, +input')

    def _get_elstmr_fe_level(self):
        return FERefinementGrid(name='elastomer patch',
                                fets_eval=self.elstmr_fets,
                                domain=self.fe_domain)

    #===========================================================================
    # Grid definition
    #===========================================================================

    mid_zone_specmn_fe_grid = Property(Instance(FEGrid),
                                       depends_on='+ps_levels, +input')

    @cached_property
    def _get_mid_zone_specmn_fe_grid(self):
        # only a quarter of the beam is simulated due to symmetry:
        fe_grid = FEGrid(coord_min=(0., 0., 0.),
                         coord_max=(self.sym_mid_zone_specmn_length -
                                    self.sym_elstmr_length,
                                    self.sym_width,
                                    self.thickness),
                         shape=(self.mid_zone_shape_x,
                                self.shape_y,
                                self.shape_z),
                         level=self.mid_zone_specmn_fe_level,
                         fets_eval=self.specmn_fets)
        return fe_grid

    load_zone_specmn_fe_grid = Property(Instance(FEGrid),
                                        depends_on='+ps_levels, +input')

    @cached_property
    def _get_load_zone_specmn_fe_grid(self):
        # only a quarter of the beam is simulated due to symmetry:
        fe_grid = FEGrid(coord_min=(self.sym_mid_zone_specmn_length -
                                    self.sym_elstmr_length, 0., 0.),
                         coord_max=(self.sym_mid_zone_specmn_length +
                                    self.sym_elstmr_length,
                                    self.sym_width,
                                    self.thickness),
                         shape=(self.load_zone_shape_x,
                                self.shape_y,
                                self.shape_z),
                         level=self.load_zone_specmn_fe_level,
                         fets_eval=self.specmn_fets)
        return fe_grid

    # if elstmr_flag:
    elstmr_fe_grid = Property(Instance(FEGrid),
                              depends_on='+ps_levels, +input')

    @cached_property
    def _get_elstmr_fe_grid(self):
        # elastomer pad sits on top of the load zone (z from thickness up)
        fe_grid = FEGrid(coord_min=(self.sym_mid_zone_specmn_length -
                                    self.sym_elstmr_length, 0.,
                                    self.thickness),
                         coord_max=(self.sym_mid_zone_specmn_length +
                                    self.sym_elstmr_length,
                                    self.sym_width,
                                    self.thickness + self.elstmr_thickness),
                         level=self.elstmr_fe_level,
                         # single element over the elastomer thickness
                         shape=(self.load_zone_shape_x, self.shape_y, 1),
                         fets_eval=self.elstmr_fets)
        return fe_grid

    outer_zone_specmn_fe_grid = Property(Instance(FEGrid),
                                         depends_on='+ps_levels, +input')

    @cached_property
    def _get_outer_zone_specmn_fe_grid(self):
        # only a quarter of the plate is simulated due to symmetry:
        fe_grid = FEGrid(coord_min=(self.sym_mid_zone_specmn_length +
                                    self.sym_elstmr_length, 0., 0.),
                         coord_max=(self.sym_specmn_length,
                                    self.sym_width,
                                    self.thickness),
                         shape=(self.outer_zone_shape_x,
                                self.shape_y,
                                self.shape_z),
                         level=self.outer_zone_specmn_fe_level,
                         fets_eval=self.specmn_fets)
        return fe_grid

    #===========================================================================
    # Boundary conditions
    #===========================================================================
    # maximum center deflection (negative = downward) [m]
    w_max = Float(-0.030, input=True)  # [m]

    # NOTE(review): duplicate trait declaration -- the second definition
    # overrides the first with identical values; one of them can be removed.
    w_max = Float(-0.030, input=True)  # [m]

    bc_list = Property(depends_on='+ps_levels, +input')

    @cached_property
    def _get_bc_list(self):
        mid_zone_specimen = self.mid_zone_specmn_fe_grid
        load_zone_specimen = self.load_zone_specmn_fe_grid
        outer_zone_specimen = self.outer_zone_specmn_fe_grid

        if self.elstmr_flag:
            elastomer = self.elstmr_fe_grid

        #--------------------------------------------------------------
        # boundary conditions for the symmetry
        #--------------------------------------------------------------
        # symmetry in the xz-plane
        # (Note: the x-axis corresponds to the axis of symmetry along the
        # longitudinal axis of the beam)
        #
        bc_outer_zone_symplane_xz = BCSlice(
            var='u', value=0., dims=[1],
            slice=outer_zone_specimen[:, 0, :, :, 0, :])
        bc_load_zone_symplane_xz = BCSlice(
            var='u', value=0., dims=[1],
            slice=load_zone_specimen[:, 0, :, :, 0, :])
        bc_mid_zone_symplane_xz = BCSlice(
            var='u', value=0., dims=[1],
            slice=mid_zone_specimen[:, 0, :, :, 0, :])

        if self.elstmr_flag:
            bc_el_symplane_xz = BCSlice(var='u', value=0., dims=[1],
                                        slice=elastomer[:, 0, :, :, 0, :])

        # symmetry in the yz-plane
        #
        bc_mid_zone_symplane_yz = BCSlice(
            var='u', value=0., dims=[0],
            slice=mid_zone_specimen[0, :, :, 0, :, :])

        #--------------------------------------------------------------
        # boundary conditions for the support
        #--------------------------------------------------------------
        bc_support_0y0 = BCSlice(var='u', value=0., dims=[2],
                                 slice=outer_zone_specimen[-1, :, 0, -1, :, 0])

        #--------------------------------------------------------------
        # connect all grids
        #--------------------------------------------------------------
        link_loadzn_outerzn = BCDofGroup(
            var='u', value=0., dims=[0, 1, 2],
            get_dof_method=load_zone_specimen.get_right_dofs,
            get_link_dof_method=outer_zone_specimen.get_left_dofs,
            link_coeffs=[1.])
        link_midzn_loadzn = BCDofGroup(
            var='u', value=0., dims=[0, 1, 2],
            get_dof_method=mid_zone_specimen.get_right_dofs,
            get_link_dof_method=load_zone_specimen.get_left_dofs,
            link_coeffs=[1.])

        if self.elstmr_flag:
            link_elstmr_loadzn_z = BCDofGroup(
                var='u', value=0., dims=[2],
                get_dof_method=elastomer.get_back_dofs,
                get_link_dof_method=load_zone_specimen.get_front_dofs,
                link_coeffs=[1.])

            # hold elastomer in a single point in order to avoid kinematic
            # movement yielding singular K_mtx
            #
            bc_elstmr_fix = BCSlice(var='u', value=0., dims=[0],
                                    slice=elastomer[0, 0, 0, 0, 0, 0])

        #--------------------------------------------------------------
        # loading
        #--------------------------------------------------------------
        # w_max = center displacement:
        w_max = self.w_max

        if self.elstmr_flag:
            # apply displacement at all top nodes of the elastomer
            # (surface load)
            #
            bc_w = BCSlice(var='u', value=w_max, dims=[2],
                           slice=elastomer[:, :, -1, :, :, -1])
        else:
            bc_w = BCSlice(
                var='u', value=w_max, dims=[2],
                # slice is only exactly in the center of the loading zone
                # for 'load_zone_shape_x' = 2
                # center line of the load zone
                slice=load_zone_specimen[0, :, -1, -1, :, -1])

        # f_max = 0.010 / 4. / self.sym_width
        # bc_line_f = BCSlice(var = 'f', value = f_max, dims = [2],
        #                     # slice is only valid for 'load_zone_shape_x' = 2
        #                     # center line of the load zone
        #                     slice = load_zone_specimen[0, :, -1, -1, :, -1])

        bc_list = [
            bc_outer_zone_symplane_xz,
            bc_load_zone_symplane_xz,
            bc_mid_zone_symplane_xz,
            bc_mid_zone_symplane_yz,
            #
            link_midzn_loadzn,
            link_loadzn_outerzn,
            bc_support_0y0,
            #
            bc_w,
        ]

        if self.elstmr_flag:
            bc_list += [bc_el_symplane_xz, link_elstmr_loadzn_z, bc_elstmr_fix]

        return bc_list

    #----------------------
    # tloop
    #----------------------
    tloop = Property(depends_on='input_change')

    @cached_property
    def _get_tloop(self):

        #--------------------------------------------------------------
        # ts
        #--------------------------------------------------------------
        mid_zone_spec = self.mid_zone_specmn_fe_grid
        load_zone_spec = self.load_zone_specmn_fe_grid
        outer_zone_spec = self.outer_zone_specmn_fe_grid

        if self.elstmr_flag:
            # ELSTRMR TOP SURFACE
            # dofs at elastomer top surface (used to integrate the force)
            #
            elastomer = self.elstmr_fe_grid
            elstmr_top_dofs_z = elastomer[:, :, -1, :, :,
                                          -1].dofs[:, :, 2].flatten()
            load_dofs_z = np.unique(elstmr_top_dofs_z)
            print 'load_dofs_z', load_dofs_z
        else:
            # LINE LOAD TOP OF LOAD ZONE
            # dofs at center line of the specmn load zone (used to integrate
            # the force)
            # note slice index in x-direction is only valid for
            # load_zone_shape_x = 2 !
            #
            load_zone_spec_topline_dofs_z = load_zone_spec[
                0, :, -1, -1, :, -1].dofs[:, :, 2].flatten()
            load_dofs_z = np.unique(load_zone_spec_topline_dofs_z)
            print 'load_dofs_z', load_dofs_z

        # SUPPRT LINE
        # dofs at support line of the specmn (used to integrate the force)
        #
        outer_zone_spec_supprtline_dofs_z = outer_zone_spec[
            -1, :, 0, -1, :, 0].dofs[:, :, 2].flatten()
        supprt_dofs_z = np.unique(outer_zone_spec_supprtline_dofs_z)
        print 'supprt_dofs_z', supprt_dofs_z

        # CENTER DOF (used for tracing of the displacement)
        #
        center_bottom_dof = mid_zone_spec[0, 0, 0, 0, 0, 0].dofs[0, 0, 2]
        print 'center_bottom_dof', center_bottom_dof

        # THIRDPOINT DOF (used for tracing of the displacement)
        # dofs at center middle of the load zone at the bottom side
        #
        # NOTE: slice index in x-direction is only valid for
        # load_zone_shape_x = 2 !
        thirdpoint_bottom_dof = load_zone_spec[0, 0, 0, -1, 0, 0].dofs[0, 0, 2]
        print 'thirdpoint_bottom_dof', thirdpoint_bottom_dof

        # force-displacement-diagram (CENTER)
        #
        self.f_w_diagram_center = RTraceGraph(
            name='displacement_elasttop (center) - force',
            var_x='U_k',
            idx_x=center_bottom_dof,
            var_y='F_int',
            idx_y_arr=load_dofs_z,
            record_on='update',
            transform_x='-x * 1000',  # %g * x' % ( fabs( w_max ),),
            # due to symmetry the total force sums up from four parts of the
            # beam (2 symmetry axis):
            #
            transform_y='-4000. * y')

        # force-displacement-diagram_supprt (SUPPRT)
        #
        self.f_w_diagram_supprt = RTraceGraph(
            name='displacement_supprtline (center) - force',
            var_x='U_k',
            idx_x=center_bottom_dof,
            var_y='F_int',
            idx_y_arr=supprt_dofs_z,
            record_on='update',
            transform_x='-x * 1000',  # %g * x' % ( fabs( w_max ),),
            # due to symmetry the total force sums up from four parts of the
            # beam (2 symmetry axis):
            # NOTE(review): sign differs from the other two diagrams
            # (reaction force) -- confirm intended.
            #
            transform_y='4000. * y')

        # force-displacement-diagram (THIRDPOINT)
        #
        self.f_w_diagram_thirdpoint = RTraceGraph(
            name='displacement_elasttop (thirdpoint) - force',
            var_x='U_k',
            idx_x=thirdpoint_bottom_dof,
            var_y='F_int',
            idx_y_arr=load_dofs_z,
            record_on='update',
            transform_x='-x * 1000',  # %g * x' % ( fabs( w_max ),),
            # due to symmetry the total force sums up from four parts of the
            # beam (2 symmetry axis):
            #
            transform_y='-4000. * y')

        ts = TS(
            sdomain=self.fe_domain,
            bcond_list=self.bc_list,
            rtrace_list=[
                self.f_w_diagram_center,
                self.f_w_diagram_thirdpoint,
                self.f_w_diagram_supprt,
                RTraceDomainListField(name='Displacement',
                                      var='u', idx=0, warp=True),
                # RTraceDomainListField(name = 'Stress' ,
                #                       var = 'sig_app', idx = 0, warp = True,
                #                       record_on = 'update'),
                # RTraceDomainListField(name = 'Strain' ,
                #                       var = 'eps_app', idx = 0, warp = True,
                #                       record_on = 'update'),
                # RTraceDomainListField(name = 'Damage' ,
                #                       var = 'omega_mtx', idx = 0, warp = True,
                #                       record_on = 'update'),
                RTraceDomainListField(name='max_omega_i', warp=True,
                                      var='max_omega_i', idx=0,
                                      record_on='update'),
                # RTraceDomainListField(name = 'IStress' ,
                #                       position = 'int_pnts',
                #                       var = 'sig_app', idx = 0,
                #                       record_on = 'update'),
                # RTraceDomainListField(name = 'IStrain' ,
                #                       position = 'int_pnts',
                #                       var = 'eps_app', idx = 0,
                #                       record_on = 'update'),
            ])

        # Add the time-loop control
        tloop = TLoop(tstepper=ts,
                      KMAX=50,
                      tolerance=self.tolerance,
                      RESETMAX=0,
                      tline=TLine(min=0.0, step=self.tstep, max=self.tmax),
                      ord=self.ord)

        return tloop

    def peval(self):
        '''
        Evaluate the model and return the array of results specified
        in the method get_sim_outputs.
        '''
        U = self.tloop.eval()

        self.f_w_diagram_center.refresh()
        F_max = max(self.f_w_diagram_center.trace.ydata)

        # NOTE(review): self.center_top_dofs is not defined in the visible
        # part of this class -- presumably provided elsewhere; confirm.
        u_center_top_z = U[self.center_top_dofs][0, 0, 2]
        return array([u_center_top_z, F_max], dtype='float_')

    def get_sim_outputs(self):
        '''
        Specifies the results and their order returned by the model
        evaluation.
        '''
        return [
            SimOut(name='u_center_top_z', unit='m'),
            SimOut(name='F_max', unit='kN')
        ]
class SPIRRIDLAB(HasTraits): '''Class used for elementary parametric studies of spirrid. ''' s = Instance(SPIRRID) evars = DelegatesTo('s') tvars = DelegatesTo('s') q = DelegatesTo('s') exact_arr = Array('float') dpi = Int plot_mode = Enum(['subplots', 'figures']) fig_output_dir = Directory('fig') @on_trait_change('fig_output_dir') def _check_dir(self): if os.access(self.fig_output_dir, os.F_OK) == False: os.mkdir(self.fig_output_dir) e_arr = Property def _get_e_arr(self): return self.s.evar_lst[0] hostname = Property def _get_hostname(self): return gethostname() qname = Str def get_qname(self): if self.qname == '': if isinstance(self.q, types.FunctionType): qname = self.q.__name__ else: # if isinstance(self.q, types.ClassType): qname = self.q.__class__.__name__ else: qname = self.qname return qname show_output = False save_output = True plot_sampling_idx = Array(value=[0, 1], dtype=int) def _plot_sampling(self, i, n_col, sampling_type, p=p, ylim=None, xlim=None): '''Construct a spirrid object, run the calculation plot the mu_q / e curve and save it in the subdirectory. 
''' s = self.s s.sampling_type = sampling_type plot_idx = self.plot_sampling_idx qname = self.get_qname() # get n randomly selected realizations from the sampling theta = s.sampling.get_samples(500) tvar_x = s.tvar_lst[plot_idx[0]] tvar_y = s.tvar_lst[plot_idx[1]] min_x, max_x, d_x = s.sampling.get_theta_range(tvar_x) min_y, max_y, d_y = s.sampling.get_theta_range(tvar_y) # for vectorized execution add a dimension for control variable theta_args = [t[:, np.newaxis] for t in theta] q_arr = s.q(self.e_arr[None, :], *theta_args) if self.plot_mode == 'figures': f = p.figure(figsize=(7., 6.)) f.subplots_adjust(left=0.15, right=0.97, bottom=0.15, top=0.92) if self.plot_mode == 'subplots': if i == 0: f = p.figure() p.subplot('2%i%i' % (n_col, (i + 1))) p.plot(theta[plot_idx[0]], theta[plot_idx[1]], 'o', color='grey') p.xlabel('$\lambda$') p.ylabel('$\\xi$') p.xlim(min_x, max_x) p.ylim(min_y, max_y) p.title(s.sampling_type) if self.save_output: fname = os.path.join( self.fig_output_dir, qname + '_sampling_' + s.sampling_type + '.png') p.savefig(fname, dpi=self.dpi) if self.plot_mode == 'figures': f = p.figure(figsize=(7., 5)) f.subplots_adjust(left=0.15, right=0.97, bottom=0.18, top=0.91) elif self.plot_mode == 'subplots': p.subplot('2%i%i' % (n_col, (i + 5))) p.plot(self.e_arr, q_arr.T, color='grey') if len(self.exact_arr) > 0: p.plot(self.e_arr, self.exact_arr, label='exact solution', color='black', linestyle='--', linewidth=2) # numerically obtained result p.plot(self.e_arr, s.mu_q_arr, label='numerical integration', linewidth=3, color='black') p.title(s.sampling_type) p.xlabel('$\\varepsilon$ [-]') p.ylabel(r'$q(\varepsilon;\, \lambda,\, \xi)$') if ylim: p.ylim(0.0, ylim) if xlim: p.xlim(0.0, xlim) p.xticks(position=(0, -.015)) p.legend(loc=2) if self.save_output: fname = os.path.join(self.fig_output_dir, qname + '_' + s.sampling_type + '.png') p.savefig(fname, dpi=self.dpi) sampling_structure_btn = Button(label='compare sampling structure') 
@on_trait_change('sampling_structure_btn') def sampling_structure(self, **kw): '''Plot the response into the file in the fig subdirectory. ''' if self.plot_mode == 'subplots': p.rcdefaults() else: fsize = 28 p.rcParams['font.size'] = fsize rc('legend', fontsize=fsize - 8) rc('axes', titlesize=fsize) rc('axes', labelsize=fsize + 6) rc('xtick', labelsize=fsize - 8) rc('ytick', labelsize=fsize - 8) rc('xtick.major', pad=8) s_lst = ['TGrid', 'PGrid', 'MCS', 'LHS'] for i, s in enumerate(s_lst): self._plot_sampling(i, len(s_lst), sampling_type=s, **kw) if self.show_output: p.show() n_int_range = Array() #=========================================================================== # Output file names for sampling efficiency #=========================================================================== fname_sampling_efficiency_time_nint = Property def _get_fname_sampling_efficiency_time_nint(self): return self.get_qname( ) + '_' + '%s' % self.hostname + '_time_nint' + '.png' fname_sampling_efficiency_error_nint = Property def _get_fname_sampling_efficiency_error_nint(self): return self.get_qname( ) + '_' + '%s' % self.hostname + '_error_nint' + '.png' fname_sampling_efficiency_error_time = Property def _get_fname_sampling_efficiency_error_time(self): return self.get_qname( ) + '_' + '%s' % self.hostname + '_error_time' + '.png' fnames_sampling_efficiency = Property def _get_fnames_sampling_efficiency(self): fnames = [self.fname_sampling_efficiency_time_nint] if len(self.exact_arr) > 0: fnames += [ self.fname_sampling_efficiency_error_nint, self.fname_sampling_efficiency_error_time ] return fnames #=========================================================================== # Run sampling efficiency studies #=========================================================================== sampling_types = Array(value=['TGrid', 'PGrid', 'MCS', 'LHS'], dtype=str) sampling_efficiency_btn = Button(label='compare sampling efficiency') @on_trait_change('sampling_efficiency_btn') def 
sampling_efficiency(self):
        '''Run the sampling study for all available sampling types and plot
        the mean response, execution times and (if an exact solution is
        supplied) the error measures over the number of simulations.
        '''
        def run_estimation(n_int, sampling_type):
            # instantiate spirrid with the given discretization method and
            # record the result curve, execution time and sample count
            print 'running', sampling_type, n_int
            self.s.set(n_int=n_int, sampling_type=sampling_type)
            n_sim = self.s.sampling.n_sim
            exec_time = np.sum(self.s.exec_time)
            return self.s.mu_q_arr, exec_time, n_sim

        # vectorize the estimation to accept arrays of n_int / sampling type
        run_estimation_vct = np.vectorize(run_estimation, [object, float, int])

        #===========================================================================
        # Generate the inspected domain of input parameters using broadcasting
        #===========================================================================

        # warm-up call (compilation / caching) so it does not bias the timings
        # NOTE(review): presumably a warm-up; verify against caller intent
        run_estimation_vct([5], ['PGrid'])

        sampling_types = self.sampling_types
        sampling_colors = np.array(
            ['grey', 'black', 'grey', 'black'],
            dtype=str)  # 'blue', 'green', 'red', 'magenta'
        sampling_linestyle = np.array(['--', '--', '-', '-'], dtype=str)

        # run the estimation on all combinations of n_int and sampling_types
        mu_q, exec_time, n_sim_range = run_estimation_vct(
            self.n_int_range[:, None], sampling_types[None, :])

        p.rcdefaults()
        f = p.figure(figsize=(12, 6))
        f.subplots_adjust(left=0.06, right=0.94)

        #===========================================================================
        # Plot the results
        #===========================================================================
        p.subplot(1, 2, 1)
        p.title('response for %d $n_\mathrm{sim}$' % n_sim_range[-1, -1])
        for i, (sampling, color, linestyle) in enumerate(
                zip(sampling_types, sampling_colors, sampling_linestyle)):
            p.plot(self.e_arr, mu_q[-1, i], color=color,
                   label=sampling, linestyle=linestyle)

        if len(self.exact_arr) > 0:
            p.plot(self.e_arr, self.exact_arr, color='black',
                   label='Exact solution')

        p.legend(loc=1)
        p.xlabel('e', fontsize=18)
        p.ylabel('q', fontsize=18)

        # @todo: get n_sim - x-axis
        p.subplot(1, 2, 2)
        for i, (sampling, color, linestyle) in enumerate(
                zip(sampling_types, sampling_colors, sampling_linestyle)):
            p.loglog(n_sim_range[:, i], exec_time[:, i], color=color,
                     label=sampling, linestyle=linestyle)
        p.legend(loc=2)
        p.xlabel('$n_\mathrm{sim}$', fontsize=18)
        p.ylabel('$t$ [s]', fontsize=18)

        if self.save_output:
            basename = self.fname_sampling_efficiency_time_nint
            fname = os.path.join(self.fig_output_dir, basename)
            p.savefig(fname, dpi=self.dpi)

        #===========================================================================
        # Evaluate the error
        #===========================================================================
        if len(self.exact_arr) > 0:
            er = ErrorEval(exact_arr=self.exact_arr)

            def eval_error(mu_q, error_measure):
                return error_measure(mu_q)
            eval_error_vct = np.vectorize(eval_error)

            error_measures = np.array(
                [er.eval_error_max, er.eval_error_energy, er.eval_error_rms])
            error_table = eval_error_vct(mu_q[:, :, None],
                                         error_measures[None, None, :])

            f = p.figure(figsize=(14, 6))
            f.subplots_adjust(left=0.07, right=0.97, wspace=0.26)

            p.subplot(1, 2, 1)
            p.title('max rel. lack of fit')
            for i, (sampling, color, linestyle) in enumerate(
                    zip(sampling_types, sampling_colors, sampling_linestyle)):
                p.loglog(n_sim_range[:, i], error_table[:, i, 0], color=color,
                         label=sampling, linestyle=linestyle)
            #p.ylim( 0, 10 )
            p.legend()
            p.xlabel('$n_\mathrm{sim}$', fontsize=18)
            p.ylabel('$\mathrm{e}_{\max}$ [-]', fontsize=18)

            p.subplot(1, 2, 2)
            p.title('rel. root mean square error')
            for i, (sampling, color, linestyle) in enumerate(
                    zip(sampling_types, sampling_colors, sampling_linestyle)):
                p.loglog(n_sim_range[:, i], error_table[:, i, 2], color=color,
                         label=sampling, linestyle=linestyle)
            p.legend()
            p.xlabel('$n_{\mathrm{sim}}$', fontsize=18)
            p.ylabel('$\mathrm{e}_{\mathrm{rms}}$ [-]', fontsize=18)

            if self.save_output:
                basename = self.fname_sampling_efficiency_error_nint
                fname = os.path.join(self.fig_output_dir, basename)
                p.savefig(fname, dpi=self.dpi)

            f = p.figure(figsize=(14, 6))
            f.subplots_adjust(left=0.07, right=0.97, wspace=0.26)

            # same error measures, but plotted against execution time
            p.subplot(1, 2, 1)
            p.title('rel. max lack of fit')
            for i, (sampling, color, linestyle) in enumerate(
                    zip(sampling_types, sampling_colors, sampling_linestyle)):
                p.loglog(exec_time[:, i], error_table[:, i, 0], color=color,
                         label=sampling, linestyle=linestyle)
            p.legend()
            p.xlabel('time [s]', fontsize=18)
            p.ylabel('$\mathrm{e}_{\max}$ [-]', fontsize=18)

            p.subplot(1, 2, 2)
            p.title('rel. root mean square error')
            for i, (sampling, color, linestyle) in enumerate(
                    zip(sampling_types, sampling_colors, sampling_linestyle)):
                p.loglog(exec_time[:, i], error_table[:, i, 2], color=color,
                         label=sampling, linestyle=linestyle)
            p.legend()
            p.xlabel('time [s]', fontsize=18)
            p.ylabel('$\mathrm{e}_{\mathrm{rms}}$ [-]', fontsize=18)

            if self.save_output:
                basename = self.fname_sampling_efficiency_error_time
                fname = os.path.join(self.fig_output_dir, basename)
                p.savefig(fname, dpi=self.dpi)

        if self.show_output:
            p.show()

    #===========================================================================
    # Efficiency of numpy versus C code
    #===========================================================================
    # run configurations for the detailed codegen study:
    # (codegen_type, codegen options, matplotlib plot style, legend template)
    run_lst_detailed_config = Property(List)

    def _get_run_lst_detailed_config(self):
        run_lst = []
        if hasattr(self.q, 'c_code'):
            run_lst += [
                # NOTE: the cached_dG / compiled_eps_loop variants of the
                # weave configuration are disabled; only the pure Python
                # eps-loop with uncached dG is benchmarked.
                (
                    'c',
                    {
                        'cached_dG': False,
                        'compiled_eps_loop': False
                    },
                    'bx-',
                    '$\mathsf{Python} _{\\varepsilon} \{\, \mathsf{C}_{\\theta} \{\, q(\\varepsilon,\\theta) \cdot g[\\theta_1] \cdot \ldots \cdot g[\\theta_m] \,\} \,\} $ - %4.2f sec',
                )
            ]
        if hasattr(self.q, 'cython_code'):
            run_lst += [
                # NOTE: all cython run configurations are currently disabled.
            ]
        if hasattr(self.q, '__call__'):
            run_lst += [
                # NOTE: the pure numpy run configuration is currently disabled.
            ]
        return run_lst

    # number of recalculations to get new time.
    n_recalc = Int(2)

    def codegen_efficiency(self):
        '''Benchmark every configuration in run_lst_detailed_config and
        produce a response plot plus a bar chart of execution times.
        Returns the list of saved figure basenames.
        '''
        # define a tables with the run configurations to start in a batch
        basenames = []
        qname = self.get_qname()
        s = self.s
        legend = []
        legend_lst = []
        time_lst = []
        p.figure()
        for idx, run in enumerate(self.run_lst_detailed_config):
            code, run_options, plot_options, legend_string = run
            s.codegen_type = code
            s.codegen.set(**run_options)
            print 'run', idx, run_options
            for i in range(self.n_recalc):
                s.recalc = True  # automatically proagated within spirrid
                print 'execution time', s.exec_time
            p.plot(s.evar_lst[0], s.mu_q_arr, plot_options)
            # @todo: this is not portable!!
            #legend.append(legend_string % s.exec_time)
            #legend_lst.append(legend_string[:-12])
            time_lst.append(s.exec_time)
        p.xlabel('strain [-]')
        p.ylabel('stress')
        #p.legend(legend, loc = 2)
        p.title(qname)

        if self.save_output:
            print 'saving codegen_efficiency'
            basename = qname + '_' + 'codegen_efficiency' + '.png'
            basenames.append(basename)
            fname = os.path.join(self.fig_output_dir, basename)
            p.savefig(fname, dpi=self.dpi)

        self._bar_plot(legend_lst, time_lst)
        p.title('%s' % s.sampling_type)
        if self.save_output:
            basename = qname + '_' + 'codegen_efficiency_%s' % s.sampling_type + '.png'
            basenames.append(basename)
            fname = os.path.join(self.fig_output_dir, basename)
            p.savefig(fname, dpi=self.dpi)

        if self.show_output:
            p.show()

        return basenames

    #===========================================================================
    # Efficiency of numpy versus C code
    #===========================================================================
    # one run configuration per available code generation language
    run_lst_language_config = Property(List)

    def _get_run_lst_language_config(self):
        run_lst = []
        if hasattr(self.q, 'c_code'):
            run_lst += [(
                'c',
                {
                    'cached_dG': False,
                    'compiled_eps_loop': False
                },
                'bx-',
                '$\mathsf{Python} _{\\varepsilon} \{\, \mathsf{C}_{\\theta} \{\, q(\\varepsilon,\\theta) \cdot g[\\theta_1] \cdot \ldots \cdot g[\\theta_m] \,\} \,\} $ - %4.2f sec',
            )]
        if hasattr(self.q, 'cython_code'):
            run_lst += [(
                'cython',
                {
                    'cached_dG': False,
                    'compiled_eps_loop': False
                },
                'bx-',
                '$\mathsf{Python} _{\\varepsilon} \{\, \mathsf{Cython}_{\\theta} \{\, q(\\varepsilon,\\theta) \cdot g[\\theta_1] \cdot \ldots \cdot g[\\theta_m] \,\} \,\} $ - %4.2f sec',
            )]
        if hasattr(self.q, '__call__'):
            run_lst += [(
                'numpy',
                {},
                'y--',
                '$\mathsf{Python}_{\\varepsilon} \{\, \mathsf{Numpy}_{\\theta} \{\, q(\\varepsilon,\\theta) \cdot G[\\theta] \,\} \,\} $ - %4.2f sec'
            )]
        return run_lst

    # whether extra (optimizing) compiler flags are passed to the C build
    extra_compiler_args = Bool(True)
    # sampling types and corresponding n_int values for the language study
    le_sampling_lst = List(['LHS', 'PGrid'])
    le_n_int_lst = List([440, 5000])

    #===========================================================================
    # Output file names for language efficiency
    #===========================================================================
    fnames_language_efficiency = Property

    def _get_fnames_language_efficiency(self):
        # NOTE(review): uses self.qname / self.hostname attributes directly,
        # while other methods call self.get_qname() - confirm both exist.
        return [
            '%s_codegen_efficiency_%s_extra_%s.png' %
            (self.qname, self.hostname, extra)
            for extra in [self.extra_compiler_args]
        ]

    language_efficiency_btn = Button(label='compare language efficiency')

    @on_trait_change('language_efficiency_btn')
    def codegen_language_efficiency(self):
        '''Compare execution times of the weave / cython / numpy code paths
        for each sampling method in le_sampling_lst.
        '''
        # define a tables with the run configurations to start in a batch
        home_dir = expanduser("~")
        # pyxbld_dir = os.path.join(home_dir, '.pyxbld')
        # if os.path.exists(pyxbld_dir):
        #     shutil.rmtree(pyxbld_dir)
        # remove previously compiled inline code so compilation time is
        # measured from a cold cache
        python_compiled_dir = os.path.join(home_dir, '.python27_compiled')
        if os.path.exists(python_compiled_dir):
            shutil.rmtree(python_compiled_dir)
        for extra, fname in zip([self.extra_compiler_args],
                                self.fnames_language_efficiency):
            print 'extra compilation args:', extra
            legend_lst = []
            error_lst = []
            n_sim_lst = []
            exec_times_sampling = []

            meth_lst = zip(self.le_sampling_lst, self.le_n_int_lst)
            for item, n_int in meth_lst:
                print 'sampling method:', item
                s = self.s
                s.exec_time  # eliminate first load time delay (first column)
                s.n_int = n_int
                s.sampling_type = item
                exec_times_lang = []

                for idx, run in enumerate(self.run_lst_language_config):
                    code, run_options, plot_options, legend_string = run

                    #os.system('rm -fr ~/.python27_compiled')

                    s.codegen_type = code
                    s.codegen.set(**run_options)
                    if s.codegen_type == 'c':
                        s.codegen.set(**dict(use_extra=extra))
                    print 'run', idx, run_options

                    exec_times_run = []
                    for i in range(self.n_recalc):
                        s.recalc = True  # automatically propagated
                        exec_times_run.append(s.exec_time)
                        print 'execution time', s.exec_time

                    legend_lst.append(legend_string[:-12])
                    if s.codegen_type == 'c':
                        # load weave.inline time from tmp file and fix
                        # values in time_arr
                        #@todo - does not work on windows
                        import tempfile
                        tdir = tempfile.gettempdir()
                        f = open(os.path.join(tdir, 'w_time'), 'r')
                        value_t = float(f.read())
                        f.close()
                        exec_times_run[0][1] = value_t
                        exec_times_run[0][2] -= value_t
                        exec_times_lang.append(exec_times_run)
                    else:
                        exec_times_lang.append(exec_times_run)

                print 'legend_lst', legend_lst
                n_sim_lst.append(s.sampling.n_sim)
                exec_times_sampling.append(exec_times_lang)
                #===========================================================================
                # Evaluate the error
                #===========================================================================
                if len(self.exact_arr) > 0:
                    er = ErrorEval(exact_arr=self.exact_arr)
                    error_lst.append((er.eval_error_rms(s.mu_q_arr),
                                      er.eval_error_max(s.mu_q_arr)))

            times_arr = np.array(exec_times_sampling, dtype='d')
            self._multi_bar_plot(meth_lst, legend_lst, times_arr,
                                 error_lst, n_sim_lst)
            if self.save_output:
                fname_path = os.path.join(self.fig_output_dir, fname)
                p.savefig(fname_path, dpi=self.dpi)

        if self.show_output:
            p.show()

    def combination_efficiency(self, tvars_det, tvars_rand):
        '''
        Run the code for all available random parameter combinations.
        Plot the results.
        '''
        qname = self.get_qname()
        s = self.s
        s.set(sampling_type='TGrid')

        # list of all combinations of response function parameters
        rv_comb_lst = list(powerset(s.tvars.keys()))

        p.figure()
        exec_time_lst = []
        # NOTE(review): only the slice [163:219] of all combinations is
        # evaluated - presumably a manually chosen subset; confirm.
        for id, rv_comb in enumerate(rv_comb_lst[163:219]):  # [1:-1]
            s.tvars = tvars_det
            print 'Combination', rv_comb

            for rv in rv_comb:
                s.tvars[rv] = tvars_rand[rv]

            #legend = []
            #p.figure()
            time_lst = []
            for idx, run in enumerate(self.run_lst):
                code, run_options, plot_options, legend_string = run
                print 'run', idx, run_options
                s.codegen_type = code
                s.codegen.set(**run_options)
                #p.plot(s.evar_lst[0], s.mu_q_arr, plot_options)
                #print 'integral of the pdf theta', s.eval_i_dG_grid()
                print 'execution time', s.exec_time
                time_lst.append(s.exec_time)
                #legend.append(legend_string % s.exec_time)
            exec_time_lst.append(time_lst)
        # NOTE(review): x-axis hard-codes four methods - keep in sync with
        # the number of entries in run_lst.
        p.plot(np.array((1, 2, 3, 4)), np.array(exec_time_lst).T)
        p.xlabel('method')
        p.ylabel('time')

        if self.save_output:
            print 'saving codegen_efficiency'
            fname = os.path.join(
                self.fig_output_dir,
                qname + '_' + 'combination_efficiency' + '.png')
            p.savefig(fname, dpi=self.dpi)

        if self.show_output:
            p.title(s.q.title)
            p.show()

    def _bar_plot(self, legend_lst, time_lst):
        '''Horizontal bar chart of execution times, normalized by the
        second entry of time_lst (assumes at least two runs).
        '''
        rc('font', size=15)
        #rc('font', family = 'serif', style = 'normal', variant = 'normal', stretch = 'normal', size = 15)
        fig = p.figure(figsize=(10, 5))
        n_tests = len(time_lst)
        times = np.array(time_lst)
        x_norm = times[1]
        xmax = times.max()
        rel_xmax = xmax / x_norm
        rel_times = times / x_norm
        # round the axis limit up to the next "nice" value
        m = int(rel_xmax % 10)
        if m < 5:
            x_max_plt = int(rel_xmax) - m + 10
        else:
            x_max_plt = int(rel_xmax) - m + 15

        ax1 = fig.add_subplot(111)
        p.subplots_adjust(left=0.45, right=0.88)
        #fig.canvas.set_window_title('window title')
        pos = np.arange(n_tests) + 0.5
        rects = ax1.barh(pos, rel_times, align='center',
                         height=0.5, color='w', edgecolor='k')

        ax1.set_xlabel('normalized execution time [-]')
        ax1.axis([0, x_max_plt, 0, n_tests])
        ax1.set_yticks(pos)
        ax1.set_yticklabels(legend_lst)

        # annotate each bar with its normalized time
        for rect, t in zip(rects, rel_times):
            width = rect.get_width()
            xloc = width + (0.03 * rel_xmax)
            clr = 'black'
            align = 'left'
            yloc = rect.get_y() + rect.get_height() / 2.0
            ax1.text(xloc, yloc, '%4.2f' % t,
                     horizontalalignment=align,
                     verticalalignment='center', color=clr)  #, weight = 'bold')

        # secondary axis showing the absolute times in seconds
        ax2 = ax1.twinx()
        ax1.plot([1, 1], [0, n_tests], 'k--')
        ax2.set_yticks([0] + list(pos) + [n_tests])
        ax2.set_yticklabels([''] + ['%4.2f s' % s for s in list(times)] + [''])
        ax2.set_xticks([0, 1] + range(5, x_max_plt + 1, 5))
        ax2.set_xticklabels(
            ['%i' % s for s in ([0, 1] + range(5, x_max_plt + 1, 5))])

    def _multi_bar_plot(self, title_lst, legend_lst, time_arr,
                        error_lst, n_sim_lst):
        '''Plot the results if the code efficiency.
        One subplot per sampling method with stacked horizontal bars
        (sampling / compilation / integration time) per language and run.
        '''
        p.rcdefaults()
        fsize = 14
        fig = p.figure(figsize=(15, 3))
        rc('font', size=fsize)
        rc('legend', fontsize=fsize - 2)
        legend_lst = ['weave', 'cython', 'numpy']

        # times are stored in 3d array - dimensions are:
        n_sampling, n_lang, n_run, n_times = time_arr.shape
        print 'arr', time_arr.shape
        # NOTE(review): axis=n_times only sums over the last axis when
        # n_times == 3; axis=-1 would express the intent directly - confirm.
        times_sum = np.sum(time_arr, axis=n_times)

        p.subplots_adjust(
            left=0.1, right=0.95, wspace=0.1, bottom=0.15, top=0.8)

        for meth_i in range(n_sampling):

            ax1 = fig.add_subplot(1, n_sampling, meth_i + 1)
            ax1.set_xlabel('execution time [s]')
            ytick_pos = np.arange(n_lang) + 1

            # ax1.axis([0, x_max_plt, 0, n_lang])
            # todo: **2 n_vars
            if len(self.exact_arr) > 0:
                ax1.set_title(
                    '%s: $ n_\mathrm{sim} = %s, \mathrm{e}_\mathrm{rms}=%s, \mathrm{e}_\mathrm{max}=%s$' %
                    (title_lst[meth_i][0],
                     self._formatSciNotation('%.2e' % n_sim_lst[meth_i]),
                     self._formatSciNotation('%.2e' % error_lst[meth_i][0]),
                     self._formatSciNotation('%.2e' % error_lst[meth_i][1])))
            else:
                ax1.set_title(
                    '%s: $ n_\mathrm{sim} = %s$' %
                    (title_lst[meth_i][0],
                     self._formatSciNotation('%.2e' % n_sim_lst[meth_i])))
            ax1.set_yticks(ytick_pos)
            if meth_i == 0:
                ax1.set_yticklabels(legend_lst, fontsize=fsize + 2)
            else:
                ax1.set_yticklabels([])
            ax1.set_xlim(0, 1.2 * np.max(times_sum[meth_i]))

            distance = 0.2
            height = 1.0 / n_run - distance
            offset = height / 2.0
            colors = ['w', 'w', 'w', 'r', 'y', 'b', 'g', 'm']
            hatches = ['/', '\\', 'x', '-', '+', '|', 'o', 'O', '.', '*']
            label_lst = ['sampling', 'compilation', 'integration']

            for i in range(n_run):
                pos = np.arange(n_lang) + 1 - offset + i * height
                end_bar_pos = np.zeros((n_lang, ), dtype='d')
                for j in range(n_times):
                    if i > 0:
                        label = label_lst[j]
                    else:
                        label = None
                    bar_lengths = time_arr[meth_i, :, i, j]
                    rects = ax1.barh(pos, bar_lengths, align='center',
                                     height=height, left=end_bar_pos,
                                     color=colors[j], edgecolor='k',
                                     hatch=hatches[j], label=label)
                    end_bar_pos += bar_lengths
                for k in range(n_lang):
                    x_val = times_sum[meth_i, k, i] + \
                        0.01 * np.max(times_sum[meth_i])
                    ax1.text(x_val, pos[k], '$%4.2f\,$s' % x_val,
                             horizontalalignment='left',
                             verticalalignment='center',
                             color='black')  #, weight = 'bold')
                    if meth_i == 0:
                        ax1.text(0.02 * np.max(times_sum[0]), pos[k],
                                 '$%i.$' % (i + 1),
                                 horizontalalignment='left',
                                 verticalalignment='center',
                                 color='black',
                                 bbox=dict(pad=0., ec="w", fc="w"))
        p.legend(loc=0)

    def _formatSciNotation(self, s):
        '''Convert a %e-formatted string into compact LaTeX scientific
        notation, e.g. '1e+004' -> '10^{4}'. Returns the input unchanged
        if it does not contain an exponent part.
        '''
        # transform 1e+004 into 1e4, for example
        tup = s.split('e')
        try:
            significand = tup[0].rstrip('0').rstrip('.')
            sign = tup[1][0].replace('+', '')
            exponent = tup[1][1:].lstrip('0')
            if significand == '1':
                # reformat 1x10^y as 10^y
                significand = ''
            if exponent:
                exponent = '10^{%s%s}' % (sign, exponent)
            if significand and exponent:
                return r'%s{\cdot}%s' % (significand, exponent)
            else:
                return r'%s%s' % (significand, exponent)
        except IndexError, msg:
            return s
class CodeGenCompiled(CodeGen):
    '''
        C-code is generated using the inline feature of scipy.

    The class assembles the integration loop source text from language
    dictionaries (weave / cython) and returns a callable evaluating the
    mean response mu_q over the control variable.
    '''
    # ===========================================================================
    # Inspection of the randomization - needed by CodeGenCompiled
    # ===========================================================================
    # names of the control (evaluation) variables, taken from spirrid
    evar_names = Property(depends_on='q, recalc')

    @cached_property
    def _get_evar_names(self):
        return self.spirrid.evar_names

    # names of all theta (parameter) variables, taken from spirrid
    var_names = Property(depends_on='q, recalc')

    @cached_property
    def _get_var_names(self):
        return self.spirrid.tvar_names

    # count the random variables
    n_rand_vars = Property(depends_on='theta_vars, recalc')

    @cached_property
    def _get_n_rand_vars(self):
        return self.spirrid.n_rand_vars

    # get the indexes of the random variables within the parameter list
    rand_var_idx_list = Property(depends_on='theta_vars, recalc')

    @cached_property
    def _get_rand_var_idx_list(self):
        return self.spirrid.rand_var_idx_list

    # get the names of the random variables
    rand_var_names = Property(depends_on='theta_vars, recalc')

    @cached_property
    def _get_rand_var_names(self):
        return self.var_names[self.rand_var_idx_list]

    # get the randomization arrays
    theta_arrs = Property(List, depends_on='theta_vars, recalc')

    @cached_property
    def _get_theta_arrs(self):
        '''Get flattened list of theta arrays.
        '''
        theta = self.spirrid.sampling.theta
        return _get_flat_arrays_from_list(self.rand_var_idx_list, theta)

    # get the randomization arrays
    dG_arrs = Property(List, depends_on='theta_vars, recalc')

    @cached_property
    def _get_dG_arrs(self):
        '''Get flattened list of weight factor arrays.
        '''
        dG = self.spirrid.sampling.dG_ogrid
        return _get_flat_arrays_from_list(self.rand_var_idx_list, dG)

    # names of the arguments passed to the compiled function
    arg_names = Property(
        depends_on='rf_change, rand_change, +codegen_option, recalc')

    @cached_property
    def _get_arg_names(self):

        arg_names = []
        # create argument string for inline function
        if self.compiled_eps_loop:
            # @todo: e_arr must be evar_names
            arg_names += ['mu_q_arr', 'e_arr']
        else:
            arg_names.append('e')

        arg_names += ['%s_flat' % name for name in self.rand_var_names]

        arg_names += self._get_arg_names_dG()

        return arg_names

    # selects the target language dictionary used for code assembly
    ld = Trait('weave', dict(weave=CodeGenLangDictC(),
                             cython=CodeGenLangDictCython()))

    # ===========================================================================
    # Configuration of the code
    # ===========================================================================
    #
    # compiled_eps_loop:
    # If set True, the loop over the control variable epsilon is compiled
    # otherwise, python loop is used.
    compiled_eps_loop = Bool(True, codegen_option=True)

    # ===========================================================================
    # compiled_eps_loop - dependent code
    # ===========================================================================
    compiled_eps_loop_feature = Property(
        depends_on='compiled_eps_loop, recalc')

    @cached_property
    def _get_compiled_eps_loop_feature(self):
        # returns (loop begin snippet, loop end snippet)
        if self.compiled_eps_loop == True:
            return self.ld_.LD_BEGIN_EPS_LOOP_ACTIVE, self.ld_.LD_END_EPS_LOOP_ACTIVE
        else:
            return self.ld_.LD_ASSIGN_EPS, ''

    LD_BEGIN_EPS_LOOP = Property

    def _get_LD_BEGIN_EPS_LOOP(self):
        return self.compiled_eps_loop_feature[0]

    LD_END_EPS_LOOP = Property

    def _get_LD_END_EPS_LOOP(self):
        return self.compiled_eps_loop_feature[1]

    #
    # cached_dG:
    # If set to True, the cross product between the pdf values of all random variables
    # will be precalculated and stored in an n-dimensional grid
    # otherwise the product is performed for every epsilon in the inner loop anew
    #
    cached_dG = Bool(False, codegen_option=True)

    # ===========================================================================
    # cached_dG - dependent code
    # ===========================================================================
    cached_dG_feature = Property(depends_on='cached_dG, recalc')

    @cached_property
    def _get_cached_dG_feature(self):
        # returns (eps access, theta access, mu_q assignment) snippets;
        # IDX variants use grid indexing, PTR variants flat pointers.
        # NOTE(review): both branches of the outer if return the same
        # values, so the result depends on cached_dG only - confirm intent.
        if self.compiled_eps_loop:
            if self.cached_dG == True:
                return self.ld_.LD_ACCESS_EPS_IDX, self.ld_.LD_ACCESS_THETA_IDX, self.ld_.LD_ASSIGN_MU_Q_IDX
            else:
                return self.ld_.LD_ACCESS_EPS_PTR, self.ld_.LD_ACCESS_THETA_PTR, self.ld_.LD_ASSIGN_MU_Q_PTR
        else:
            if self.cached_dG == True:
                return self.ld_.LD_ACCESS_EPS_IDX, self.ld_.LD_ACCESS_THETA_IDX, self.ld_.LD_ASSIGN_MU_Q_IDX
            else:
                return self.ld_.LD_ACCESS_EPS_PTR, self.ld_.LD_ACCESS_THETA_PTR, self.ld_.LD_ASSIGN_MU_Q_PTR

    LD_ACCESS_EPS = Property

    def _get_LD_ACCESS_EPS(self):
        return self.cached_dG_feature[0]

    LD_ACCESS_THETA = Property

    def _get_LD_ACCESS_THETA(self):
        return '%s' + self.cached_dG_feature[1]

    LD_ASSIGN_MU_Q = Property

    def _get_LD_ASSIGN_MU_Q(self):
        return self.cached_dG_feature[2]

    # indentation depth (in tabs) of the innermost generated loop body
    LD_N_TAB = Property

    def _get_LD_N_TAB(self):
        if self.spirrid.sampling_type == 'LHS' or self.spirrid.sampling_type == 'MCS':
            if self.compiled_eps_loop:
                return 3
            else:
                return 2
        else:
            if self.compiled_eps_loop:
                return self.n_rand_vars + 2
            else:
                return self.n_rand_vars + 1

    # ------------------------------------------------------------------------------------
    # Configurable generation of C-code for the mean curve evaluation
    # ------------------------------------------------------------------------------------
    code = Property(
        depends_on='rf_change, rand_change, +codegen_option, eps_change, recalc')

    @cached_property
    def _get_code(self):
        '''Assemble the integration loop source text from the language
        dictionary snippets and the response function code.
        '''
        code_str = ''
        if self.compiled_eps_loop:

            # create code string for inline function
            #
            n_eps = len(self.spirrid.evar_lst[0])
            code_str += self.LD_BEGIN_EPS_LOOP % {'i': n_eps}
            code_str += self.LD_ACCESS_EPS

        else:

            # create code string for inline function
            #
            code_str += self.ld_.LD_ASSIGN_EPS

        code_str += self.ld_.LD_INIT_MU_Q
        if self.compiled_eps_loop:
            code_str += '\t' + self.ld_.LD_INIT_Q
        else:
            code_str += self.ld_.LD_INIT_Q
        code_str += self.ld_.LD_LINE_MACRO
        # create code for constant params
        for name, distr in zip(self.var_names, self.spirrid.tvar_lst):
            if type(distr) is float:
                code_str += self.ld_.LD_INIT_THETA % (name, distr)

        code_str += self._get_code_dG_declare()

        inner_code_str = ''
        lang = self.ld + '_code'
        q_code = getattr(self.spirrid.q, lang)
        import textwrap
        # indent the response function body to the innermost loop level
        q_code = textwrap.dedent(q_code)
        q_code_split = q_code.split('\n')
        for i, s in enumerate(q_code_split):
            q_code_split[i] = self.LD_N_TAB * '\t' + s
        q_code = '\n'.join(q_code_split)

        if self.n_rand_vars > 0:
            inner_code_str += self._get_code_dG_access()
            inner_code_str += q_code + '\n' + \
                (self.LD_N_TAB) * '\t' + self.ld_.LD_EVAL_MU_Q
        else:
            inner_code_str += q_code + \
                self.ld_.LD_ADD_MU_Q

        code_str += self._get_code_inner_loops(inner_code_str)

        if self.compiled_eps_loop:
            if self.cached_dG:  # blitz matrix
                code_str += self.ld_.LD_ASSIGN_MU_Q_IDX
            else:
                code_str += self.ld_.LD_ASSIGN_MU_Q_PTR
            code_str += self.LD_END_EPS_LOOP
        else:
            code_str += self.ld_.LD_RETURN_MU_Q
        return code_str

    # verbosity level passed through to weave.inline
    compiler_verbose = Int(1)

    compiler = Property(Str)

    def _get_compiler(self):
        # platform dependent compiler choice; returns None on other systems
        if platform.system() == 'Linux':
            return 'gcc'
        elif platform.system() == 'Windows':
            return 'mingw32'

    def get_code(self):
        '''Return the evaluation method for the configured language.'''
        if self.ld == 'weave':
            return self.get_c_code()
        elif self.ld == 'cython':
            return self.get_cython_code()

    def get_cython_code(self):
        '''Write/refresh spirrid_cython.pyx, compile it via pyximport and
        return a method evaluating mu_q over the control variable.
        '''
        # NOTE(review): the decorator names inside this header string were
        # garbled in the extracted source ("[email protected]"); reconstructed
        # as the standard cython directives - confirm against upstream.
        cython_header = 'print "## spirrid_cython library reloaded!"\nimport numpy as np\ncimport numpy as np\nctypedef np.double_t DTYPE_t\ncimport cython\n\[email protected](False)\[email protected](False)\[email protected](True)\ndef mu_q(%s):\n\tcdef double mu_q\n'
        # @todo - for Cython cdef variables and generalize function def()
        arg_values = {}
        for name, theta_arr in zip(self.rand_var_names, self.theta_arrs):
            arg_values['%s_flat' % name] = theta_arr
        arg_values.update(self._get_arg_values_dG())

        DECLARE_ARRAY = 'np.ndarray[DTYPE_t, ndim=1] '
        def_dec = DECLARE_ARRAY + 'e_arr'
        def_dec += ',' + DECLARE_ARRAY
        def_dec += (',' + DECLARE_ARRAY).join(arg_values)

        cython_header = cython_header % def_dec
        cython_header += ' cdef double '
        cython_header += ', '.join(self.var_names) + ', eps, dG, q\n'
        cython_header += ' cdef int i_'
        cython_header += ', i_'.join(self.var_names) + '\n'
        if self.cached_dG:
            cython_header = cython_header.replace(
                r'1] dG_grid', r'%i] dG_grid' % self.n_rand_vars)
        if self.compiled_eps_loop == False:
            cython_header = cython_header.replace(
                r'np.ndarray[DTYPE_t, ndim=1] e_arr', r'double e_arr')
            cython_header = cython_header.replace(r'eps,', r'eps = e_arr,')
        cython_code = (cython_header + self.code).replace('\t', ' ')
        cython_file_name = 'spirrid_cython.pyx'

        # only rewrite the .pyx file if its content changed, so that
        # pyximport does not recompile unnecessarily
        print 'checking for previous cython code'
        regenerate_code = True
        if os.path.exists(cython_file_name):
            f_in = open(cython_file_name, 'r').read()
            if f_in == cython_code:
                regenerate_code = False

        if regenerate_code:
            infile = open(cython_file_name, 'w')
            infile.write(cython_code)
            infile.close()
            print 'pyx file updated'

        t = sysclock()
        import pyximport
        pyximport.install(reload_support=True,
                          setup_args={"script_args": ["--force"]})
        import spirrid_cython
        if regenerate_code:
            reload(spirrid_cython)
        print '>>> pyximport', sysclock() - t

        mu_q = spirrid_cython.mu_q

        def mu_q_method(eps):
            '''Evaluate mu_q for the control variable values eps.'''
            if self.compiled_eps_loop:
                args = {'e_arr': eps}
                args.update(arg_values)
                mu_q_arr = mu_q(**args)
            else:
                # Python loop over eps
                #
                mu_q_arr = np.zeros_like(eps, dtype=np.float64)
                for idx, e in enumerate(eps):
                    # C loop over random dimensions
                    #
                    arg_values['e_arr'] = e  # prepare the parameter
                    mu_q_val = mu_q(**arg_values)
                    # add the value to the return array
                    mu_q_arr[idx] = mu_q_val
            # second element (variance) is not computed by the cython path
            return mu_q_arr, None
        return mu_q_method

    def get_c_code(self):
        '''
            Return the code for the given sampling of the rand domain.
        '''
        def mu_q_method(e):
            '''Template for the evaluation of the mean response.
            '''
            self._set_compiler()

            compiler_args, linker_args = self.extra_args
            print 'compiler arguments'
            print compiler_args

            # prepare the array of the control variable discretization
            #
            eps_arr = e
            mu_q_arr = np.zeros_like(eps_arr)

            # prepare the parameters for the compiled function in
            # a separate dictionary
            arg_values = {}

            if self.compiled_eps_loop:

                # for compiled eps_loop the whole input and output array must be passed to c
                #
                arg_values['e_arr'] = eps_arr
                arg_values['mu_q_arr'] = mu_q_arr

            # prepare the lengths of the arrays to set the iteration bounds
            #
            for name, theta_arr in zip(self.rand_var_names, self.theta_arrs):
                arg_values['%s_flat' % name] = theta_arr

            arg_values.update(self._get_arg_values_dG())

            if self.cached_dG:
                conv = weave.converters.blitz
            else:
                conv = weave.converters.default

            if self.compiled_eps_loop:

                # C loop over eps, all inner loops must be compiled as well
                #
                weave.inline(self.code, self.arg_names,
                             local_dict=arg_values,
                             extra_compile_args=compiler_args,
                             extra_link_args=linker_args,
                             type_converters=conv,
                             compiler=self.compiler,
                             verbose=self.compiler_verbose)

            else:

                # Python loop over eps
                #
                for idx, e in enumerate(eps_arr):

                    # C loop over random dimensions
                    #
                    arg_values['e'] = e  # prepare the parameter
                    mu_q = weave.inline(self.code, self.arg_names,
                                        local_dict=arg_values,
                                        extra_compile_args=compiler_args,
                                        extra_link_args=linker_args,
                                        type_converters=conv,
                                        compiler=self.compiler,
                                        verbose=self.compiler_verbose)

                    # add the value to the return array
                    mu_q_arr[idx] = mu_q

            # variance evaluation is not implemented in this path; an
            # all-zero array of matching shape is returned instead
            var_q_arr = np.zeros_like(mu_q_arr)

            return mu_q_arr, var_q_arr
        return mu_q_method

    # ===========================================================================
    # Extra compiler arguments
    # ===========================================================================
    use_extra = Bool(False, codegen_option=True)

    extra_args = Property(depends_on='use_extra, +codegen_option, recalc')

    @cached_property
    def _get_extra_args(self):
        # returns (extra_compile_args, extra_link_args) for weave.inline
        if self.use_extra == True:
            compiler_args = ["-DNDEBUG -g -fwrapv -O3 -march=native",
                             "-ffast-math"]
            # , "-fno-openmp", "-ftree-vectorizer-verbose=3"]
            linker_args = []  # ["-fno-openmp"]
            return compiler_args, linker_args
        elif self.use_extra == False:
            return [], []

    # ===========================================================================
    # Auxiliary methods
    # ===========================================================================
    def _set_compiler(self):
        '''Catch eventual mismatch between scipy.weave and compiler
        '''
        if platform.system() == 'Linux':
            # os.environ['CC'] = 'gcc-4.1'
            # os.environ['CXX'] = 'g++-4.1'
            os.environ['OPT'] = '-DNDEBUG -g -fwrapv -O3'
        elif platform.system() == 'Windows':
            # not implemented
            pass

    def _get_code_dG_declare(self):
        '''Constant dG value - for PGrid, MCS, LHS
        '''
        return ''

    def _get_code_dG_access(self):
        '''Default access to dG array - only needed by TGrid'''
        return ''

    def _get_arg_names_dG(self):
        # no extra dG arguments in the base class; overridden by subclasses
        return []

    def _get_arg_values_dG(self):
        # no extra dG values in the base class; overridden by subclasses
        return {}

    def __str__(self):
        # summary of the codegen configuration options
        s = 'C( '
        s += 'var_eval = %s, ' % ` self.implicit_var_eval `
        s += 'compiled_eps_loop = %s, ' % ` self.compiled_eps_loop `
        s += 'cached_dG = %s)' % ` self.cached_dG `
        return s
class HPShell(HasTraits): '''Geometry definition. ''' # dimensions of the shell structure [m] # (only one quart of the shell structure) # # NOTE: lenth_z = 1.0 m + 0.062 m = 1.062 # NOTE: lenth_z = 0.865 m + 0.062 m = 0.927 length_x = Float(3.50) length_y = Float(3.50) length_z = Float(0.927) # corresponds to the delta in the geometry obj file '4x4m' delta_h = Float(0.865) # [m] # factor to scale height of lower surface # thickness remains unchanged as 0.06 m # delta_h_scalefactor = Float(1.00) # [-] # cut of the z-coordinates of the lowerface if set to True # cut_off_lowerface = Bool(True) geo_input_name = Enum('350x350cm') geo_filter = Dict({'4x4m': delete_second_rows}) def _read_arr(self, side='lowerface_'): file_name = side + self.geo_input_name + '.robj' file_path = join('geometry_files', file_name) v_arr = read_rsurface(file_path) filter = self.geo_filter.get(self.geo_input_name, None) if filter != None: v_arr = filter(v_arr) return v_arr # array of the vertex positions in global # x,y,z-coordinates defining the lower surface of the shell vl_arr = Property(Array(float)) @cached_property def _get_vl_arr(self): vl_arr = self._read_arr('lowerface_') if self.cut_off_lowerface == True: print '--- lower face z-coords cut off ---' # z-values of the coords from the lower face are cut off. # From the highest z-coordinate of the lower face the vertical # distance is 1 m (=delta h). At this limit the lower face is # cut off. Global z coordinate is assumed to point up. # vl_z_max = max(vl_arr[:, 2]) if self.geo_input_name == '4x4m': # NOTE: the global z-coordinates are given in the geo data file in [m] # no conversion of unites necessary (self.delta_h is given in [m]) delta_h = self.delta_h elif self.geo_input_name == '350x350cm': # NOTE: the global z-coordinates are given in the geo data file in [cm] # convert delta_h from [m] to [cm] # delta_h = self.delta_h * 100. 
vl_z_min = vl_z_max - self.delta_h vl_arr_z = where(vl_arr[:, 2] < vl_z_min, vl_z_min, vl_arr[:, 2]) vl_arr = c_[vl_arr[:, 0:2], vl_arr_z] return vl_arr # array of the vertex positions in global # x,y,z-coordinates defining the upper surface of the shell vu_arr = Property(Array(float)) @cached_property def _get_vu_arr(self): return self._read_arr('upperface_') def get_mid_surface_and_thickness(self, points, perpendicular_t=True): '''Return the global coordinates of the supplied local points. ''' print '*** get mid surface and thickness ***' #----------------------------------------------- # get the global coordinates as defined in the # input file and transform them to the coordinate # system of the master quarter #----------------------------------------------- # if self.geo_input_name == '350x350cm': X0 = [3.50, 3.50, 0.] else: X0 = [0., 0., 0.] # number of global grid points for each coordinate direction # xi, yi = points[:, 0] - X0[0], points[:, 1] - X0[1] # NOTE: # -- The available rbf-function is only defined for a quarter of one shell. # in order to get z and t values for an entire shell the abs-function # is used. The coordinate system of the quarter must be defined in the # lower left corner; the coordinate systemn of the entire one shell must # be defined in the center of the shell so that the coordinate system # for the master quarter remains unchanged. # -- The transformation is performed based on the defined class attributes # of hp_shell_stb: length_x, length_y, length_z, delta_h, delta_h_scalefactor # characterizing the properties of the master quarter # number of local grid points for each coordinate direction # values must range between 0 and 1 # points_tilde_list = [] for i_row in range(points.shape[0]): # get the x, y coordinate pair defined in the input # file in global coordinates # x = xi[i_row] y = yi[i_row] # transform values to local coordinate system, # i.e. 
move point to the 'master roof' containing the # global coordinate system: # if x <= self.length_x and y <= self.length_y: # point lays in first (master) roof # x_tilde = x y_tilde = y elif x >= self.length_x and y <= self.length_y: # point lays in second roof: # # roof length = 2* length of the master quarter # (e.g. 2*4,0m = 8,00m for obj-file "4x4m") x_tilde = x - 2 * self.length_x y_tilde = y elif x <= self.length_x and y >= self.length_y: # point lays in third roof: # x_tilde = x y_tilde = y - 2 * self.length_y elif x >= self.length_x and y >= self.length_y: # point lays in fourth roof: # x_tilde = x - 2 * self.length_x y_tilde = y - 2 * self.length_y points_tilde_list.append([x_tilde, y_tilde]) points_tilde_arr = array(points_tilde_list, dtype='float_') xi_tilde = points_tilde_arr[:, 0] yi_tilde = points_tilde_arr[:, 1] # print 'points_tilde_arr', points_tilde_arr xi_ = abs(xi_tilde) / self.length_x yi_ = abs(yi_tilde) / self.length_y #----------------------------------------------- # get the normalized rbf-function for the upper # and lower face of the master quarter #----------------------------------------------- # NOTE: the underline character indicates a normalized value # normalized coordinates of the vertices for lower- and upper-face # vl_arr_, vu_arr_ = normalize_rsurfaces(self.vl_arr, self.vu_arr) # use a radial basis function approximation (rbf) (i.e. interpolation of # scattered data) based on the normalized vertex points of the lower face # x_ = vl_arr_[:, 0] y_ = vl_arr_[:, 1] if self.geo_input_name == '350x350cm': x_ = 1 - vl_arr_[:, 0] z_l_ = vl_arr_[:, 2] rbf_l = Rbf(x_, y_, z_l_, function='cubic') # get the z-value at the supplied local grid points # of the lower face # zi_lower_ = rbf_l(xi_, yi_) # use a radial basis function approximation (rbf) (i.e. 
interpolation of # scattered data) based on the normalized vertex points of the upper face # x_ = vu_arr_[:, 0] y_ = vu_arr_[:, 1] if self.geo_input_name == '350x350cm': x_ = 1 - vu_arr_[:, 0] z_u_ = vu_arr_[:, 2] rbf_u = Rbf(x_, y_, z_u_, function='cubic') # get the z-value at the supplied local grid points # of the upper face # zi_upper_ = rbf_u(xi_, yi_) # approach of the slope to get thickness perpendicular to slope # # thickness is multiplied by the supplied zi coordinate # and z value of mid plane # t_ = zi_upper_ - zi_lower_ z_middle_ = (zi_lower_ + (zi_upper_ - zi_lower_) * 0.5 / self.delta_h_scalefactor) * self.delta_h_scalefactor if perpendicular_t == True: # delta shift of x and y for estimation of slope will be done in 4 direction # 0, 45, 90 and 135 degrees print "--- perpendicular ---" delta = 0.000001 # shift in x dz_x_p_ = (rbf_u(xi_ + delta, yi_) + rbf_l(xi_ + delta, yi_)) / 2.0 dz_x_m_ = (rbf_u(xi_ - delta, yi_) + rbf_l(xi_ - delta, yi_)) / 2.0 slope_x_ = (dz_x_p_ - dz_x_m_) / (2.0 * delta) angle_x = arctan(slope_x_ * self.length_z / self.length_x) f_1 = cos(angle_x) # shift in y dz_y_p_ = (rbf_u(xi_, yi_ + delta) + rbf_l(xi_, yi_ + delta)) / 2.0 dz_y_m_ = (rbf_u(xi_, yi_ - delta) + rbf_l(xi_, yi_ - delta)) / 2.0 slope_y_ = (dz_y_p_ - dz_y_m_) / (2.0 * delta) angle_y = arctan(slope_y_ * self.length_z / self.length_x) f_2 = cos(angle_y) #shift +x +y; -x -y dz_x_p_y_p_ = (rbf_u(xi_ + delta, yi_ + delta) + rbf_l(xi_ + delta, yi_ + delta)) / 2.0 dz_x_m_y_m_ = (rbf_u(xi_ - delta, yi_ - delta) + rbf_l(xi_ - delta, yi_ - delta)) / 2.0 slope_x_p_y_p_ = (dz_x_p_y_p_ - dz_x_m_y_m_) / (2.0 * sqrt(2) * delta) angle_x_p_y_p = arctan(slope_x_p_y_p_ * self.length_z / (self.length_x**2 + self.length_y**2)**0.5) f_3 = cos(angle_x_p_y_p) # shift in +x,-y ; -x and +y dz_x_p_y_m_ = (rbf_u(xi_ + delta, yi_ - delta) + rbf_l(xi_ + delta, yi_ - delta)) / 2.0 dz_x_m_y_p_ = (rbf_u(xi_ - delta, yi_ + delta) + rbf_l(xi_ - delta, yi_ + delta)) / 2.0 slope_x_p_y_m_ = 
(dz_x_p_y_m_ - dz_x_m_y_p_) / (sqrt(2) * 2.0 * delta) angle_x_p_y_m = arctan(slope_x_p_y_m_ * self.length_z / (self.length_x**2 + self.length_y**2)**0.5) f_4 = cos(angle_x_p_y_m) # obtain minimum factor for good estimate of maximum slope factor = min([f_1, f_2, f_3, f_4], axis=0) t_ = t_ * factor return xi, yi, z_middle_ * self.length_z, t_ * self.length_z def _read_thickness_data(self, file_name): '''to read the stb - X and Y coordinates ( m ) save the xls - worksheet to a csv - file using ';' as filed delimiter and ' ' ( blank ) as text delimiter. Stb Data needs to have same range of values in X and Y direction and same unit [m], as defined as length_x and length_y ''' print '*** reading thickness data from file: ', file_name, ' ***' # get the column headings defined in the second row # of the csv thickness input file # "Nr.;X;Y;Z;[mm]" # file = open(file_name, 'r') lines = file.readlines() column_headings = array(lines[1].split(';')) elem_no_idx = where('Nr.' == column_headings)[0] X_idx = where('X' == column_headings)[0] Y_idx = where('Y' == column_headings)[0] Z_idx = where('Z' == column_headings)[0] thickness_idx = where('[mm]\n' == column_headings)[0] input_arr = loadtxt(file_name, delimiter=';', skiprows=2) # elem number: # elem_no = input_arr[:, elem_no_idx] # coordinates [m]: # X = input_arr[:, X_idx][:, 0] Y = input_arr[:, Y_idx][:, 0] # print 'thickness_idx', thickness_idx if thickness_idx != []: thickness_stb = input_arr[:, thickness_idx][:, 0] / 1000. return elem_no, X, Y, thickness_stb else: thickness_stb = ones_like(elem_no) return elem_no, X, Y, thickness_stb def _read_elem_coords(self, file_name): '''x,y -coordinates must be read from old file ''' input_arr = loadtxt(file_name, delimiter=';', skiprows=2) elem_no = input_arr[:, 0] X = input_arr[:, 2] Y = input_arr[:, 3] return elem_no, X, Y def _read_nodal_coords(self, file_name): '''read the nodal coordinates of the mid - surface defined in a csv - file. 
To export the excel sheet to csv use ";" as a field delimiter and "" ( none ) as a text delimiter. Note that some lines do not contain values ! ''' print '*** reading nodal coordinates from file: ', file_name, ' ***' file = open(file_name, 'r') # read the column headings (first two lines) # first_line = file.readline() second_line = file.readline() column_headings = second_line.split(';') # remove '\n' from last string element in list column_headings[-1] = column_headings[-1][:-1] column_headings_arr = array(column_headings) # check in which column the node number and the # carthesian coordinates can be found # elem_no_idx = where('Nr.' == column_headings_arr)[0] X_idx = where('X [m]' == column_headings_arr)[0] Y_idx = where('Y [m]' == column_headings_arr)[0] Z_idx = where('Z [m]' == column_headings_arr)[0] lines = file.readlines() lines_list = [line.split(';') for line in lines] empty_lines_idx = [] ll = [] for i_line, line in enumerate(lines_list): # check if line contains values or only a node number! # if line[1] == 'Standard': ll.append( [line[elem_no_idx], line[X_idx], line[Y_idx], line[Z_idx]]) else: # NOTE: current number in file starts with 1, index in loop starts with 0 # therefore add 1 in the index list # empty_lines_idx.append(i_line + 1) input_arr = array(ll, dtype='float_') node_no = input_arr[:, 0] X = input_arr[:, 1] Y = input_arr[:, 2] Z = input_arr[:, 2] return node_no, X, Y, Z, empty_lines_idx def compare_thickness_values(self, thickness, thickness_stb): '''get relative difference between the calucated thickness read in from the obj file, cut of and projected with respect to the approximated data given from stb. 
''' thickness = thickness.reshape(shape(thickness_stb)) error = abs(1 - thickness / thickness_stb) * 100 return error def export_midsurface_data(self, node_no, x, y, z_middle, file_name, empty_lines_idx): '''exports data to csv - worksheet ''' print '*** writing middle surface data to file,', file_name, ' ***' data = c_[node_no, x, y, z_middle] file = open(file_name, 'w') writer = csv.writer(file, delimiter=";", lineterminator="\n") writer.writerow(['node_number', 'x[m]', 'y[m]', 'z[m]']) writer.writerows(data) file = file.close() # if file contains empty lines add them at the positions # defined in 'empty_lines_idx' # if len(empty_lines_idx) != 0: print '--- file contains ', len( empty_lines_idx), ' empty_lines ---' # file without empty lines # file = open(file_name, 'r') lines = file.readlines() # overwrite file including empty lines # file = open(file_name, 'w') # index 'n' runs in the array without empty lines # index 'i' runs in the array with empty lines # n = 0 for i in range(data.shape[0] + len(empty_lines_idx)): if i in empty_lines_idx: file.writelines(str(i) + ";;;;\n") else: file.writelines(lines[n]) n += 1 # add last line: # file.writelines(lines[-1]) file.close() print '--- empty lines added to file ---' return def export_thickness_data(self, elem_no, x, y, t, file_name): '''exports data to csv - worksheet ''' print '*** writing thickness data to file,', file_name, ' ***' data = c_[elem_no, x, y, t * 1000] print shape(data) writer = csv.writer(open(file_name, 'w'), delimiter=";", lineterminator="\n") writer.writerow(['element_number', 'x[m]', 'y[m]', 't[mm]']) writer.writerows(data) return @show def show(self, x, y, z_middle, displayed_value): """Test contour_surf on regularly spaced co-ordinates like MayaVi. 
""" print '*** plotting data***' s = points3d(X, Y, z_middle, displayed_value, colormap="gist_rainbow", mode="cube", scale_factor=0.3) sb = colorbar(s) # Recorded script from Mayavi2 #try: # engine = mayavi.engine #except NameError: # from etsproxy.mayavi.api import Engine # engine = Engine() # engine.start() #if len(engine.scenes) == 0: # engine.new_scene() # ------------------------------------------- glyph = s #.pipeline.scenes[0].children[0].children[0].children[0] glyph.glyph.glyph_source.glyph_source.center = array([0., 0., 0.]) glyph.glyph.glyph_source.glyph_source.progress = 1.0 glyph.glyph.glyph_source.glyph_source.x_length = 0.6 glyph.glyph.glyph_source.glyph_source.y_length = 0.6 sb.scalar_bar.title = 'thickness [m]' #print s.pipeline #s.scene.background = (1.0, 1.0, 1.0) return s
class SPIRRIDModelView(ModelView):
    '''Execution-control view for a SPIRRID model.

    Wraps a SPIRRID instance, triggers the evaluation of its mean/variance
    curves and estimates the number of crack-bridging fibers from the
    specimen geometry entered in the NoOfFibers helper.
    '''

    # window title
    title = Str('spirrid exec ctrl')

    # the SPIRRID model under control
    model = Instance(SPIRRID)

    # helper object holding specimen and fiber geometry inputs
    ins = Instance(NoOfFibers)

    def _ins_default(self):
        # lazily construct the fiber-count input helper
        return NoOfFibers()

    # button: estimate the number of fibers bridging a crack
    eval = Button

    def _eval_fired(self):
        '''Estimate fiber counts from specimen volume and fiber geometry.

        The diameter is divided by 20 and the length by 10 — presumably
        unit conversions ([mm] -> [cm] plus radius) — TODO confirm against
        the units used in NoOfFibers.
        '''
        Specimen_Volume = self.ins.Lx * self.ins.Ly * self.ins.Lz
        self.no_of_fibers_in_specimen = (
            Specimen_Volume * self.ins.Fiber_volume_fraction / 100) / (
                pi * (self.ins.Fiber_diameter / 20)**2 * self.ins.Fiber_Length
                / 10)
        # probability that a fiber crosses the crack plane
        prob_crackbridging_fiber = (self.ins.Fiber_Length /
                                    (10 * 2)) / self.ins.Lx
        # binomial mean and standard deviation of the bridging-fiber count
        self.mean_parallel_links = prob_crackbridging_fiber * self.no_of_fibers_in_specimen
        self.stdev_parallel_links = (prob_crackbridging_fiber *
                                     self.no_of_fibers_in_specimen *
                                     (1 - prob_crackbridging_fiber))**0.5

    # button: run the computation
    run = Button(desc='Run the computation')

    def _run_fired(self):
        self.evaluate()

    run_legend = Str('mean response',
                     desc='Legend to be added to the plot of the results')

    # control-variable range and resolution
    min_eps = Float(0.0, desc='minimum value of the control variable')
    max_eps = Float(1.0, desc='maximum value of the control variable')
    n_eps = Int(100, desc='resolution of the control variable')

    # plot annotation
    plot_title = Str('response', desc='diagram title')
    label_x = Str('epsilon', desc='label of the horizontal axis')
    label_y = Str('sigma', desc='label of the vertical axis')

    # whether to evaluate/plot the standard deviation as well
    stdev = Bool(True)

    mean_parallel_links = Float(1.,
                                desc='mean number of parallel links (fibers)')
    stdev_parallel_links = Float(
        0., desc='stdev of number of parallel links (fibers)')
    no_of_fibers_in_specimen = Float(
        0.,
        desc='Number of Fibers in the specimen',
    )

    # fired when results change, so dependent views can redraw
    data_changed = Event(True)

    def evaluate(self):
        '''Configure the model range and trigger curve evaluation.'''
        # NOTE(review): min_eps is hardcoded to 0.00 here although a
        # 'min_eps' trait exists above — confirm whether self.min_eps
        # was intended.
        self.model.set(
            min_eps=0.00,
            max_eps=self.max_eps,
            n_eps=self.n_eps,
        )
        # evaluate the mean curve
        self.model.mean_curve
        # evaluate the variance if the stdev bool is True
        if self.stdev:
            self.model.var_curve
        self.data_changed = True

    # Traits UI layout: run controls, execution configuration tab and
    # simulation-range / fiber-count / labeling tab
    traits_view = View(
        VGroup(
            HGroup(
                Item('run_legend',
                     resizable=False,
                     label='Run label',
                     width=80,
                     springy=False),
                Item('run', show_label=False, resizable=False)),
            Tabbed(
                VGroup(
                    Item('model.cached_dG',
                         label='Cached weight factors',
                         resizable=False,
                         springy=False),
                    Item('model.compiled_QdG_loop',
                         label='Compiled loop over the integration product',
                         springy=False),
                    Item('model.compiled_eps_loop',
                         enabled_when='model.compiled_QdG_loop',
                         label='Compiled loop over the control variable',
                         springy=False),
                    scrollable=True,
                    label='Execution configuration',
                    id='spirrid.tview.exec_params',
                    dock='tab',
                ),
                VGroup(
                    HGroup(Item('min_eps',
                                label='Min',
                                springy=False,
                                resizable=False),
                           Item('max_eps',
                                label='Max',
                                springy=False,
                                resizable=False),
                           Item('n_eps',
                                label='N',
                                springy=False,
                                resizable=False),
                           label='Simulation range',
                           show_border=True),
                    HGroup(Item('stdev', label='plot standard deviation'), ),
                    HSplit(
                        HGroup(
                            VGroup(
                                Item('mean_parallel_links',
                                     label='mean No of fibers'),
                                Item('stdev_parallel_links',
                                     label='stdev No of fibers'),
                            )),
                        VGroup(
                            Item('@ins',
                                 label='evaluate No of fibers',
                                 show_label=False),
                            VGroup(
                                HGroup(
                                    Item('eval',
                                         show_label=False,
                                         resizable=False,
                                         label='Evaluate No of Fibers'),
                                    Item('no_of_fibers_in_specimen',
                                         label='No of Fibers in specimen',
                                         style='readonly')))),
                        label='number of parralel fibers',
                        show_border=True,
                        scrollable=True,
                    ),
                    VGroup(
                        Item('plot_title',
                             label='title',
                             resizable=False,
                             springy=False),
                        Item('label_x',
                             label='x',
                             resizable=False,
                             springy=False),
                        Item('label_y',
                             label='y',
                             resizable=False,
                             springy=False),
                        label='title and axes labels',
                        show_border=True,
                        scrollable=True,
                    ),
                    label='Execution control',
                    id='spirrid.tview.view_params',
                    dock='tab',
                ),
                scrollable=True,
                id='spirrid.tview.tabs',
                dock='tab',
            ),
        ),
        title='SPIRRID',
        id='spirrid.viewmodel',
        dock='tab',
        resizable=True,
        height=1.0,
        width=1.0)
class RandomField(HasTraits):
    '''
    This class implements a 3D random field on a regular grid
    and allows for interpolation using the EOLE method
    '''

    # (nD,1) array of autocorrelation lengths
    lacor_arr = Array(Float, modified=True)
    # list of nD entries: each entry is an array of points in the part. dimension
    nDgrid = List(Array, modified=True)
    # fire to force a fresh sample of the random vector
    reevaluate = Event
    # fix the RNG seed for reproducible realizations
    seed = Bool(False)
    # marginal distribution of the field values
    distr_type = Enum('Gauss', 'Weibull', modified=True)
    # Gauss parameters
    stdev = Float(1.0, modified=True)
    mean = Float(0.0, modified=True)
    # Weibull parameters
    shape = Float(5.0, modified=True)
    scale = Float(1.0, modified=True)
    loc = Float(0.0, modified=True)

    def acor(self, dx, lacor):
        '''autocorrelation function (squared-exponential kernel)'''
        C = e ** (-(dx / lacor) ** 2)
        return C

    eigenvalues = Property(depends_on='+modified')

    @cached_property
    def _get_eigenvalues(self):
        '''evaluates the eigenvalues and eigenvectors of the covariance matrix'''
        # creating distances from the first coordinate
        # NOTE(review): this mutates self.nDgrid in place inside a cached
        # property (shifts every grid to start at 0) — side effect to be
        # aware of when reusing the grids elsewhere.
        for i, grid_i in enumerate(self.nDgrid):
            self.nDgrid[i] -= grid_i[0]
        # creating a symm. toeplitz matrix with (xgrid, xgrid) data points
        coords_lst = [toeplitz(grid_i) for grid_i in self.nDgrid]
        # apply the autocorrelation func. on the coord matrices to obtain
        # the covariance matrices
        C_matrices = [self.acor(coords_i, self.lacor_arr[i])
                      for i, coords_i in enumerate(coords_lst)]
        # evaluate the eigenvalues and eigenvectors of the autocorrelation
        # matrices (one per dimension)
        eigen_lst = []
        for i, C_i in enumerate(C_matrices):
            print 'evaluating eigenvalues for dimension ' + str(i+1)
            lambda_i, Phi_i = eigh(C_i)
            # truncate the eigenvalues at 99% of tr(C)
            truncation_limit = 0.99 * np.trace(C_i)
            argsort = np.argsort(lambda_i)
            cum_sum_lambda = np.cumsum(np.sort(lambda_i)[::-1])
            idx_trunc = int(np.sum(cum_sum_lambda < truncation_limit))
            # keep eigenpairs sorted by descending eigenvalue, truncated
            eigen_lst.append([lambda_i[argsort[::-1]][:idx_trunc],
                              Phi_i[:, argsort[::-1]][:,:idx_trunc]])
        print 'complete'
        # Kronecker-assemble the nD eigenstructure from the 1D factors
        Lambda_C = 1.0
        Phi_C = 1.0
        for lambda_i, Phi_i in eigen_lst:
            Lambda_i = np.diag(lambda_i)
            Lambda_C = np.kron(Lambda_C, Lambda_i)
            Phi_C = np.kron(Phi_C, Phi_i)
        return Lambda_C, Phi_C

    generated_random_vector = Property(Array, depends_on='reevaluate')

    @cached_property
    def _get_generated_random_vector(self):
        '''standard Gauss random vector via Latin-hypercube-style sampling'''
        if self.seed == True:
            np.random.seed(141)
        # points between 0 to 1 with an equidistant step for the LHS
        # No. of points = No. of truncated eigenvalues
        npts = self.eigenvalues[0].shape[0]
        randsim = np.linspace(0.5/npts, 1 - 0.5/npts, npts)
        # shuffling points for the simulation
        np.random.shuffle(randsim)
        # matrix containing standard Gauss distributed random numbers
        xi = norm().ppf(randsim)
        return xi

    random_field = Property(Array, depends_on='+modified')

    @cached_property
    def _get_random_field(self):
        '''simulates the Gaussian random field'''
        # evaluate the eigenvalues and eigenvectors of the autocorrelation
        # matrix
        Lambda_C_sorted, Phi_C_sorted = self.eigenvalues
        # generate the RF with standardized Gaussian distribution
        ydata = np.dot(np.dot(Phi_C_sorted, (Lambda_C_sorted) ** 0.5),
                       self.generated_random_vector)
        # transform the standardized Gaussian distribution
        if self.distr_type == 'Gauss':
            # scaling the std. distribution
            scaled_ydata = ydata * self.stdev + self.mean
        elif self.distr_type == 'Weibull':
            # map Gauss quantiles onto the Weibull distribution
            Pf = norm().cdf(ydata)
            scaled_ydata = weibull_min(self.shape,
                                       scale=self.scale,
                                       loc=self.loc).ppf(Pf)
        # reshape the flat sample back onto the nD grid
        shape = tuple([len(grid_i) for grid_i in self.nDgrid])
        rf = np.reshape(scaled_ydata, shape)
        return rf

    def interpolate_rf(self, coords):
        '''interpolate RF values using the EOLE method
        coords = list of 1d arrays of coordinates'''
        # check consistency of dimensions
        if len(coords) != len(self.nDgrid):
            raise ValueError('point dimension differs from random field dimension')
        # create the covariance matrix between query points and grid points
        C_matrices = [self.acor(coords_i.reshape(1, len(coords_i)) -
                                self.nDgrid[i].reshape(len(self.nDgrid[i]),1),
                                self.lacor_arr[i])
                      for i, coords_i in enumerate(coords)]
        # fold the per-dimension covariances into a single (grid x query)
        # matrix via broadcasting and reshaping
        C_u = 1.0
        for i, C_ui in enumerate(C_matrices):
            if i == 0:
                C_u *= C_ui
            else:
                C_u = C_u.reshape(C_u.shape[0], 1, C_u.shape[1]) * C_ui
                grid_size = 1.0
                for j in np.arange(i+1):
                    grid_size *= len(self.nDgrid[j])
                # NOTE(review): grid_size is a float here; newer numpy
                # versions reject non-integer reshape arguments — verify
                # against the numpy version in use.
                C_u = C_u.reshape(grid_size,len(coords[0]))
        Lambda_Cx, Phi_Cx = self.eigenvalues
        # values interpolated in the standardized Gaussian rf
        u = np.sum(self.generated_random_vector / np.diag(Lambda_Cx) ** 0.5
                   * np.dot(C_u.T, Phi_Cx), axis=1)
        # transform back to the requested marginal distribution
        if self.distr_type == 'Gauss':
            scaled_u = u * self.stdev + self.mean
        elif self.distr_type == 'Weibull':
            Pf = norm().cdf(u)
            scaled_u = weibull_min(self.shape,
                                   scale=self.scale,
                                   loc=self.loc).ppf(Pf)
        return scaled_u
class YMBView2D(HasTraits):
    '''2D scatter view of one cut through the yarn data.

    Plots the raw (identified) filament positions and the connected
    filaments of the cut selected by 'cut_slider', colored by the
    variable chosen in 'var_enum'.
    '''

    # yarn measurement data source
    data = Instance(YMBData)

    # lower bound for the cut slider
    zero = Constant(0)
    # upper bound for the cut slider: last cut index
    slider_max = Property()

    def _get_slider_max(self):
        return self.data.n_cuts - 1

    # variable used to color the scatter points
    var_enum = Trait('radius', var_dict, modified=True)
    # index of the displayed cut
    cut_slider = Range('zero',
                       'slider_max',
                       mode='slider',
                       auto_set=False,
                       enter_set=True,
                       modified=True)
    # marker size of the scatter points
    circle_diameter = Float(20, enter_set=True, auto_set=False, modified=True)
    underlay = Bool(False, modified=True)

    # data array of the selected variable (resolved via the trait mapping)
    variable = Property(Array, depends_on='var_enum')

    @cached_property
    def _get_variable(self):
        return getattr(self.data, self.var_enum_)

    figure = Instance(Figure)

    def _figure_default(self):
        figure = Figure()
        figure.add_axes([0.1, 0.1, 0.8, 0.8])
        return figure

    # fired after each redraw so embedding views can update
    data_changed = Event(True)

    @on_trait_change('+modified, data.input_changed')
    def _redraw(self):
        '''Redraw the scatter plot for the currently selected cut.'''
        # TODO: set correct ranges, fix axis range (axes.xlim)
        self.figure.clear()
        self.figure.add_axes([0.1, 0.1, 0.8, 0.8])
        figure = self.figure
        axes = figure.axes[0]
        axes.clear()

        # y/z positions of connected filaments and of the raw cut data
        y_arr, z_arr = self.data.cut_data[1:3]
        y_raw_arr, z_raw_arr = self.data.cut_raw_data[0:2]
        # offsets delimiting each cut in the flat raw arrays
        offset = hstack([0, self.data.cut_raw_data[5]])
        scalar_arr = self.variable
        # valid filaments in this cut are flagged by positions > -1
        mask = y_arr[:, self.cut_slider] > -1
        axes.scatter(
            y_raw_arr[offset[self.cut_slider]:offset[self.cut_slider + 1]],
            z_raw_arr[offset[self.cut_slider]:offset[self.cut_slider + 1]],
            s=self.circle_diameter,
            color='k',
            marker='x',
            label='identified filament in cut')
        scat = axes.scatter(y_arr[:, self.cut_slider][mask],
                            z_arr[:, self.cut_slider][mask],
                            s=self.circle_diameter,
                            c=scalar_arr[:, self.cut_slider][mask],
                            cmap=my_cmap_lin,
                            label='connected filaments')
        axes.set_xlabel('$y\, [\mathrm{mm}]$', fontsize=16)
        axes.set_ylabel('$z\, [\mathrm{mm}]$', fontsize=16)
        # NOTE(review): max() over the 2D arrays relies on numpy's max
        # being in scope via star import — confirm, builtin max on a 2D
        # array would misbehave.
        axes.set_xlim([0, ceil(max(y_arr))])
        axes.set_ylim([0, ceil(max(z_arr))])
        axes.legend()
        figure.colorbar(scat)
        if self.underlay == True:
            axes.text(axes.get_xlim()[0],
                      axes.get_ylim()[0],
                      'That\'s all at this moment :-)',
                      color='red',
                      fontsize=20)
        self.data_changed = True

    # view: controls on top, matplotlib canvas below
    traits_view = View(
        Group(
            Item('var_enum'),
            Item('cut_slider', springy=True),
            Item('circle_diameter', springy=True),
            Item('underlay', springy=True),
        ),
        Item('figure',
             style='custom',
             editor=MPLFigureEditor(),
             show_label=False),
        resizable=True,
    )
class LS(HasTraits):
    '''Limit state class.

    Collects the geometry, state and stress-resultant columns of the
    associated ls_table, offers sorting/maximum queries over them and a
    mayavi plot of a selected column.
    '''

    # backward link to the info shell to access the
    # input data when calculating
    # the limit-state-specific values
    #
    ls_table = WeakRef

    # parameters of the limit state
    #
    dir = Enum(DIRLIST)
    stress_res = Enum(SRLIST)

    #-------------------------------
    # ls columns
    #-------------------------------
    # defined in the subclasses
    #
    ls_columns = List
    show_ls_columns = Bool(True)

    #-------------------------------
    # sr columns
    #-------------------------------
    # stress resultant columns - for ULS this is defined in the subclasses
    #
    sr_columns = List(['m', 'n'])
    show_sr_columns = Bool(True)

    # stress resultant columns - generated from the parameter combination
    # dir and stress_res - one of MX, NX, MY, NY
    #
    m_varname = Property(Str)

    def _get_m_varname(self):
        # e.g. mx_N
        appendix = self.dir + '_' + self.stress_res
        return 'm' + appendix

    n_varname = Property(Str)

    def _get_n_varname(self):
        # e.g. nx_N
        appendix = self.dir + '_' + self.stress_res
        return 'n' + appendix

    n = Property(Float)

    def _get_n(self):
        return getattr(self.ls_table, self.n_varname)

    m = Property(Float)

    def _get_m(self):
        return getattr(self.ls_table, self.m_varname)

    #-------------------------------
    # geo columns form info shell
    #-------------------------------
    geo_columns = List(['elem_no', 'X', 'Y', 'Z', 'D_elem'])
    show_geo_columns = Bool(True)

    elem_no = Property(Float)

    def _get_elem_no(self):
        return self.ls_table.elem_no

    X = Property(Float)

    def _get_X(self):
        return self.ls_table.X

    Y = Property(Float)

    def _get_Y(self):
        return self.ls_table.Y

    Z = Property(Float)

    def _get_Z(self):
        return self.ls_table.Z

    D_elem = Property(Float)

    def _get_D_elem(self):
        return self.ls_table.D_elem

    #-------------------------------
    # state columns form info shell
    #-------------------------------
    # state_columns = List( ['mx', 'my', 'mxy', 'nx', 'ny', 'nxy' ] )
    state_columns = List([
        'mx',
        'my',
        'mxy',
        'nx',
        'ny',
        'nxy',
        'sigx_lo',
        'sigy_lo',
        'sigxy_lo',
        'sig1_lo',
        'sig2_lo',
        'alpha_sig_lo',
        'sigx_up',
        'sigy_up',
        'sigxy_up',
        'sig1_up',
        'sig2_up',
        'alpha_sig_up',
    ])
    show_state_columns = Bool(True)

    mx = Property(Float)

    def _get_mx(self):
        return self.ls_table.mx

    my = Property(Float)

    def _get_my(self):
        return self.ls_table.my

    mxy = Property(Float)

    def _get_mxy(self):
        return self.ls_table.mxy

    nx = Property(Float)

    def _get_nx(self):
        return self.ls_table.nx

    ny = Property(Float)

    def _get_ny(self):
        return self.ls_table.ny

    nxy = Property(Float)

    def _get_nxy(self):
        return self.ls_table.nxy

    # evaluate principal stresses
    # upper face:
    #
    sigx_up = Property(Float)

    def _get_sigx_up(self):
        return self.ls_table.sigx_up

    sigy_up = Property(Float)

    def _get_sigy_up(self):
        return self.ls_table.sigy_up

    sigxy_up = Property(Float)

    def _get_sigxy_up(self):
        return self.ls_table.sigxy_up

    sig1_up = Property(Float)

    def _get_sig1_up(self):
        return self.ls_table.sig1_up

    sig2_up = Property(Float)

    def _get_sig2_up(self):
        return self.ls_table.sig2_up

    alpha_sig_up = Property(Float)

    def _get_alpha_sig_up(self):
        return self.ls_table.alpha_sig_up

    # lower face:
    #
    sigx_lo = Property(Float)

    def _get_sigx_lo(self):
        return self.ls_table.sigx_lo

    sigy_lo = Property(Float)

    def _get_sigy_lo(self):
        return self.ls_table.sigy_lo

    sigxy_lo = Property(Float)

    def _get_sigxy_lo(self):
        return self.ls_table.sigxy_lo

    sig1_lo = Property(Float)

    def _get_sig1_lo(self):
        return self.ls_table.sig1_lo

    sig2_lo = Property(Float)

    def _get_sig2_lo(self):
        return self.ls_table.sig2_lo

    alpha_sig_lo = Property(Float)

    def _get_alpha_sig_lo(self):
        return self.ls_table.alpha_sig_lo

    #-------------------------------
    # ls table
    #-------------------------------
    # all columns associated with the limit state including the
    # corresponding stress resultants
    #
    columns = Property(List, depends_on='show_geo_columns, show_state_columns,\
                                         show_sr_columns, show_ls_columns')

    @cached_property
    def _get_columns(self):
        '''Assemble the visible column names in display order.'''
        columns = []
        if self.show_geo_columns:
            columns += self.geo_columns
        if self.show_state_columns:
            columns += self.state_columns
        if self.show_sr_columns:
            columns += self.sr_columns
        if self.show_ls_columns:
            columns += self.ls_columns
        return columns

    # select column used for sorting the data in selected sorting order
    #
    sort_column = Enum(values='columns')

    def _sort_column_default(self):
        return self.columns[-1]

    sort_order = Enum('descending', 'ascending', 'unsorted')

    #-------------------------------------------------------
    # get the maximum value of the selected variable
    # 'max_in_column' of the current sheet (only one sheet)
    #-------------------------------------------------------
    # get the maximum value of the chosen column
    #
    max_in_column = Enum(values='columns')

    def _max_in_column_default(self):
        return self.columns[-1]

    max_value = Property(depends_on='max_in_column')

    def _get_max_value(self):
        col = getattr(self, self.max_in_column)[:, 0]
        return max(col)

    #-------------------------------------------------------
    # get the maximum value and the corresponding case of
    # the selected variable 'max_in_column' in all (!) sheets
    #-------------------------------------------------------
    max_value_all = Property(depends_on='max_in_column')

    def _get_max_value_all(self):
        return self.ls_table.max_value_and_case[
            self.max_in_column]['max_value']

    max_case = Property(depends_on='max_in_column')

    def _get_max_case(self):
        return self.ls_table.max_value_and_case[self.max_in_column]['max_case']

    #-------------------------------------------------------
    # get ls_table for View
    #-------------------------------------------------------
    # stack columns together for table used by TabularEditor
    #
    ls_array = Property(
        Array,
        depends_on='sort_column, sort_order, show_geo_columns, \
                    show_state_columns, show_sr_columns, show_ls_columns')

    @cached_property
    def _get_ls_array(self):
        '''Stack the visible columns and order rows by sort_column.'''
        arr_list = [getattr(self, col) for col in self.columns]
        # get the array currently selected by the sort_column enumeration
        #
        sort_arr = getattr(self, self.sort_column)[:, 0]
        sort_idx = argsort(sort_arr)
        ls_array = hstack(arr_list)
        if self.sort_order == 'descending':
            return ls_array[sort_idx[::-1]]
        if self.sort_order == 'ascending':
            return ls_array[sort_idx]
        if self.sort_order == 'unsorted':
            return ls_array

    #---------------------------------
    # plot outputs in mlab-window
    #---------------------------------
    plot_column = Enum(values='columns')
    plot = Button

    def _plot_fired(self):
        '''Render the selected column as a mayavi cube plot.'''
        X = self.ls_table.X[:, 0]
        Y = self.ls_table.Y[:, 0]
        Z = self.ls_table.Z[:, 0]
        plot_col = getattr(self, self.plot_column)[:, 0]
        # negative reinforcement counts are clipped to zero for display
        if self.plot_column == 'n_tex':
            plot_col = where(plot_col < 0, 0, plot_col)
        mlab.figure(figure="SFB532Demo",
                    bgcolor=(1.0, 1.0, 1.0),
                    fgcolor=(0.0, 0.0, 0.0))
        mlab.points3d(
            X,
            Y,
            (-1.0) * Z,
            plot_col,
            # colormap = "gist_rainbow",
            # colormap = "Reds",
            colormap="YlOrBr",
            mode="cube",
            scale_factor=0.15)
        mlab.scalarbar(title=self.plot_column, orientation='vertical')
        # BUGFIX: the original read the attribute 'mlab.show' without
        # calling it, which is a no-op - the window was never shown.
        mlab.show()

    # name of the trait that is used to assess the evaluated design
    #
    assess_name = Str('')

    #-------------------------------
    # ls group
    #-------------------------------
    # @todo: the dynamic selection of the columns to be displayed
    # does not work in connection with the LSArrayAdapter
    ls_group = VGroup(
        HGroup(
            #Item( 'assess_name' ),
            Item('max_in_column'),
            Item('max_value', style='readonly', format_str='%6.2f'),
            Item('max_value_all', style='readonly', format_str='%6.2f'),
            Item('max_case', style='readonly', label='found in case: '),
        ),
        HGroup(
            Item('sort_column'),
            Item('sort_order'),
            Item('show_geo_columns', label='show geo'),
            Item('show_state_columns', label='show state'),
            Item('show_sr_columns', label='show sr'),
            Item('plot_column'),
            Item('plot'),
        ),
    )
class ImageProcessing(HasTraits): def __init__(self, **kw): super(ImageProcessing, self).__init__(**kw) self.on_trait_change(self.refresh, '+params') self.refresh() image_path = Str def rgb2gray(self, rgb): return np.dot(rgb[..., :3], [0.299, 0.587, 0.144]) filter = Bool(False, params=True) block_size = Range(1, 100, params=True) offset = Range(1, 20, params=True) denoise = Bool(False, params=True) denoise_spatial = Range(1, 100, params=True) processed_image = Property def _get_processed_image(self): # read image image = mpimg.imread(self.image_path) mask = image[:, :, 1] > 150. image[mask] = 255. #plt.imshow(image) #plt.show() # convert to grayscale image = self.rgb2gray(image) # crop image image = image[100:1000, 200:1100] mask = mask[100:1000, 200:1100] image = image - np.min(image) image[mask] *= 255. / np.max(image[mask]) if self.filter == True: image = denoise_bilateral(image, sigma_spatial=self.denoise_spatial) if self.denoise == True: image = threshold_adaptive(image, self.block_size, offset=self.offset) return image, mask edge_detection_method = Enum('canny', 'sobel', 'roberts', params=True) canny_sigma = Range(2.832, 5, params=True) canny_low = Range(5.92, 100, params=True) canny_high = Range(0.1, 100, params=True) edges = Property def _get_edges(self): img_edg, mask = self.processed_image if self.edge_detection_method == 'canny': img_edg = canny(img_edg, sigma=self.canny_sigma, low_threshold=self.canny_low, high_threshold=self.canny_high) elif self.edge_detection_method == 'roberts': img_edg = roberts(img_edg) elif self.edge_detection_method == 'sobel': img_edg = sobel(img_edg) img_edg = img_edg > 0.0 return img_edg radii = Int(80, params=True) radius_low = Int(40, params=True) radius_high = Int(120, params=True) step = Int(2, params=True) hough_circles = Property def _get_hough_circles(self): hough_radii = np.arange(self.radius_low, self.radius_high, self.step)[::-1] hough_res = hough_circle(self.edges, hough_radii) centers = [] accums = [] radii = [] # 
For each radius, extract num_peaks circles num_peaks = 3 for radius, h in zip(hough_radii, hough_res): peaks = peak_local_max(h, num_peaks=num_peaks) centers.extend(peaks) print 'circle centers = ', peaks accums.extend(h[peaks[:, 0], peaks[:, 1]]) radii.extend([radius] * num_peaks) im = mpimg.imread(self.image_path) # crop image im = im[100:1000, 200:1100] for idx in np.arange(len(centers)): center_x, center_y = centers[idx] radius = radii[idx] cx, cy = circle_perimeter(center_y, center_x, radius) mask = (cx < im.shape[0]) * (cy < im.shape[1]) im[cy[mask], cx[mask]] = (220., 20., 20.) return im eval_edges = Button def _eval_edges_fired(self): edges = self.figure_edges edges.clear() axes_edges = edges.gca() axes_edges.imshow(self.edges, plt.gray()) self.data_changed = True eval_circles = Button def _eval_circles_fired(self): circles = self.figure_circles circles.clear() axes_circles = circles.gca() axes_circles.imshow(self.hough_circles, plt.gray()) self.data_changed = True figure = Instance(Figure) def _figure_default(self): figure = Figure(facecolor='white') return figure figure_edges = Instance(Figure) def _figure_edges_default(self): figure = Figure(facecolor='white') return figure figure_circles = Instance(Figure) def _figure_circles_default(self): figure = Figure(facecolor='white') return figure data_changed = Event def plot(self, fig, fig2): figure = fig figure.clear() axes = figure.gca() img, mask = self.processed_image axes.imshow(img, plt.gray()) def refresh(self): self.plot(self.figure, self.figure_edges) self.data_changed = True traits_view = View(HGroup( Group(Item('filter', label='filter'), Item('block_size'), Item('offset'), Item('denoise', label='denoise'), Item('denoise_spatial'), label='Filters'), Group( Item('figure', editor=MPLFigureEditor(), show_label=False, resizable=True), scrollable=True, label='Plot', ), ), Tabbed( VGroup(Item('edge_detection_method'), Item('canny_sigma'), Item('canny_low'), Item('canny_high'), Item('eval_edges', 
label='Evaluate'), Item('figure_edges', editor=MPLFigureEditor(), show_label=False, resizable=True), scrollable=True, label='Plot_edges'), ), Tabbed( VGroup(Item('radii'), Item('radius_low'), Item('radius_high'), Item('step'), Item('eval_circles'), Item('figure_circles', editor=MPLFigureEditor(), show_label=False, resizable=True), scrollable=True, label='Plot_circles'), ), id='imview', dock='tab', title='Image processing', scrollable=True, resizable=True, width=600, height=400)
class MushRoofModel(IBVModel):
    '''Basis Class for Mushroof models
    (mr_quarter, mr_one, mr_one_free, mr_two, mr_four)
    '''

    #===========================================================================
    # initial strain
    #===========================================================================
    # plain (non-trait) switches enabling thermal prestrain in roof / column
    initial_strain_roof = False
    initial_strain_col = False
    alpha = Float(1.3e-5)  # thermal expansion coefficient
    t_up = Float(-100.)  # temperature at the upper surface
    t_lo = Float(-100.)  # temperature at the lower surface

    def temperature_strain_z(self, X_pnt, x_pnt):
        '''Initial strain callback: return the 3x3 diagonal volumetric
        strain tensor for a temperature interpolated linearly between
        t_lo and t_up along the local z-coordinate x_pnt[2].
        X_pnt is unused but kept for the callback signature.
        '''
        alpha, t_up, t_lo = self.alpha, self.t_up, self.t_lo
        delta_t = t_lo + (t_up - t_lo) * x_pnt[2]
        epsilon_0 = alpha * delta_t
        # return the initial volumetric strain tensor with n_dims
        return diag([epsilon_0 for i in range(3)])

    #===========================================================================
    # material model
    #===========================================================================
    E_roof = Float(28700)  # [MN/m^2]
    E_column = Float(32800)  # [MN/m^2] E_cm for concrete C45/55
    E_plate = Float(210000)  # [MN/m^2] steel plate
    nu = Float(0.2)  # [-]

    mats_roof = Property(Instance(MATS3DElastic), depends_on='+input')

    @cached_property
    def _get_mats_roof(self):
        # attach the thermal prestrain callback only when requested
        if self.initial_strain_roof == True:
            return MATS3DElastic(E=self.E_roof, nu=self.nu,
                                 initial_strain=self.temperature_strain_z)
        else:
            return MATS3DElastic(E=self.E_roof, nu=self.nu)

    mats_column = Property(Instance(MATS3DElastic), depends_on='+input')

    @cached_property
    def _get_mats_column(self):
        if self.initial_strain_col == True:
            return MATS3DElastic(E=self.E_column, nu=self.nu,
                                 initial_strain=self.temperature_strain_z)
        else:
            return MATS3DElastic(E=self.E_column, nu=self.nu)

    mats_plate = Property(Instance(MATS3DElastic), depends_on='+input')

    @cached_property
    def _get_mats_plate(self):
        return MATS3DElastic(E=self.E_plate, nu=self.nu)

    #===========================================================================
    # finite elements
    #===========================================================================
    fe_linear_roof = Property(Instance(FETSEval, transient=True),
                              depends_on='+input')

    def _get_fe_linear_roof(self):
        return FETS3D8H(mats_eval=self.mats_roof)

    fe_quad_serendipity_roof = Property(Instance(FETSEval, transient=True),
                                        depends_on='+input')

    def _get_fe_quad_serendipity_roof(self):
        return FETS3D8H20U(mats_eval=self.mats_roof)

    fe_quad_serendipity_column = Property(Instance(FETSEval, transient=True),
                                          depends_on='+input')

    def _get_fe_quad_serendipity_column(self):
        return FETS3D8H20U(mats_eval=self.mats_column)

    fe_linear_plate = Property(Instance(FETSEval, transient=True),
                               depends_on='+input')

    def _get_fe_linear_plate(self):
        return FETS3D8H(mats_eval=self.mats_plate)

    fe_quad_serendipity_plate = Property(Instance(FETSEval, transient=True),
                                         depends_on='+input')

    def _get_fe_quad_serendipity_plate(self):
        return FETS3D8H20U(mats_eval=self.mats_plate)

    #===========================================================================
    # geometric dimensions
    #===========================================================================
    # dimensions of one quarter of the shell structure [m]
    #
    length_xy_quarter = Float(3.5, input=True)  # , ps_levels = [4, 16, 5] )
    length_z = Float(0.927, input=True)  # , ps_levels = [1, 2, 1] )

    # shell thickness is used only by option 'const_reinf_layer_elem'
    #
    t_shell = Float(0.06, input=True)

    # dimensions of the steel plate
    #
    t_plate = Float(0.03, unit='m', input=True)

    # dimensions of the column
    # NOTE: width of column at top is used also by option 'shift_elem' of 'HPShell'
    #
    width_top_col = Float(0.45, unit='m', input=True)
    width_bottom_col = Float(0.35, unit='m', input=True)

    # column length (from lowest point of the shell to the upper edge of the foundation:
    #
    h_col = Float(3.60, unit='m', input=True)

    # r_pipe = Float( 0.1, unit = 'm', input = True )

    scalefactor_delta_h = Float(1.00, input=True)  # [-]
    scalefactor_length_xy = Float(1.00, input=True)  # [-]

    #-----------------------------------------------------------------
    # specify the relation of the total structure (in the 'mushroof'-model)
    # with respect to a quarter of one shell defined in 'HPShell'
    #-----------------------------------------------------------------

    # choose model and discretization for roof shell
    #
    mushroof_part = Enum('one', 'quarter', 'four', input=True)

    def _mushroof_part_default(self):
        return 'one'

    # @todo: add comment!
    # this is no input!
    #
    X0 = List([0, 0, 0], input=True)

    #-----------------------------------------------------------------
    # discretization
    #-----------------------------------------------------------------

    # shell discretization:
    #
    n_elems_xy_quarter = Int(10, input=True)
    n_elems_z = Int(1, input=True)

    # @todo: remove "+input", use mapped traits instead!
    #
    n_elems_xy_dict = Property(Dict, depends_on='+ps_levels, +input')

    def _get_n_elems_xy_dict(self):
        # map the model variant to its in-plane element count
        return {
            'quarter': self.n_elems_xy_quarter,
            'one': self.n_elems_xy_quarter * 2,
            # @todo: include "scale_size" parameter used by HPShell!
            'detail': self.n_elems_xy_quarter * 2
        }

    n_elems_xy = Property(Int, depends_on='+ps_levels, +input')

    def _get_n_elems_xy(self):
        # @todo: mushroff_part == "four" is not supported! (see HPShell)
        return int(self.n_elems_xy_dict[self.mushroof_part])

    # column discretization:
    #
    n_elems_col_xy = Int(2, input=True)  # , ps_levels = [5, 20, 3 ] )

    #-----------------------------------------------------------------
    # option 'shift_elem'
    #-----------------------------------------------------------------

    # optional parameters
    #
    shift_elems = Bool(True, input=True)

    # set fixed points for shell discretization
    # NOTE: method is overwritten in subclass, e.g. 'MRtwo'
    #
    # shift_array = Array
    # def _get_shift_array( self ):
    #     return array( [[self.width_top_col / 2 ** 0.5,
    #                     self.width_top_col / 2 ** 0.5,
    #                     self.n_elems_col_xy / 2], ] )

    #-----------------------------------------------------------------
    # option 'const_reinf_layer_elem'
    #-----------------------------------------------------------------

    # element thickness defined (e.g to 3 cm) for bottom and top layer of the roof
    # needed to simulate reinforced area of the shell for non-linear simulation
    # @todo: remove "+input"
    #
    const_reinf_layer_elem = Bool(False, input=True)

    #-----------------------------------------------------------------
    # geometric transformations
    #-----------------------------------------------------------------
    # @todo: remove! geo_transforme is performed in the subclass 'MRTwo'

    #===========================================================================
    # evaluation
    #===========================================================================

    tline = Instance(TLine)

    def _tline_default(self):
        return TLine(min=0.0, step=1.0, max=1.0)

    max_princ_stress = Instance(RTraceDomainListField)

    def _max_princ_stress_default(self):
        return RTraceDomainListField(
            name='max principal stress',
            idx=0,
            var='max_principle_sig',
            warp=True,
            # position = 'int_pnts',
            record_on='update', )

    # degree of freedom tracked by the force-displacement trace
    dof = Int(1)

    f_dof = Property(Instance(RTraceGraph), depends_on='+ps_levels, +input')

    def _get_f_dof(self):
        return RTraceGraph(name='Fi,right over u_right (iteration)',
                           var_y='F_int', idx_y=self.dof,
                           var_x='U_k', idx_x=self.dof + 2,
                           record_on='update')

    sig_app = Property(Instance(RTraceDomainListField),
                       depends_on='+ps_levels, +input')

    @cached_property
    def _get_sig_app(self):
        return RTraceDomainListField(
            name='sig_app',
            # position='int_pnts',
            var='sig_app',
            record_on='update', )

    eps_app = Property(Instance(RTraceDomainListField),
                       depends_on='+ps_levels, +input')

    @cached_property
    def _get_eps_app(self):
        return RTraceDomainListField(
            name='eps_app',
            # position = 'int_pnts',
            var='eps_app',
            record_on='update', )

    u = Property(Instance(RTraceDomainListField),
                 depends_on='+ps_levels, +input')

    @cached_property
    def _get_u(self):
        return RTraceDomainListField(
            name='displacement',
            var='u',
            warp=True,
            record_on='update', )

    damage = Property(Instance(RTraceDomainListField),
                      depends_on='+ps_levels, +input')

    @cached_property
    def _get_damage(self):
        return RTraceDomainListField(name='damage',
                                     var='omega_mtx',
                                     idx=0,
                                     warp=False,
                                     record_on='update')

    phi_pdc = Property(Instance(RTraceDomainListField),
                       depends_on='+ps_levels, +input')

    @cached_property
    def _get_phi_pdc(self):
        return RTraceDomainListField(
            name='principal damage',
            # position = 'int_pnts',
            var='phi_pdc',
            record_on='update', )

    max_omega_i = Property(Instance(RTraceDomainListField),
                           depends_on='+ps_levels, +input')

    @cached_property
    def _get_max_omega_i(self):
        return RTraceDomainListField(
            name='max_omega_i',
            # position = 'int_pnts',
            var='max_omega_i',
            record_on='update', )

    fracture_energy = Property(Instance(RTraceDomainListField),
                               depends_on='+ps_levels, +input')

    @cached_property
    def _get_fracture_energy(self):
        return RTraceDomainListField(
            name='Fracture energy',
            # position = 'int_pnts',
            var='fracture_energy',
            record_on='update', )

    # sorting force of slice by number of dofs internal forces
    #
    def sort_by_dofs(self, dofs, unsorted):
        """Reorder 'unsorted' values so that they follow the global dof
        numbering given in 'dofs'.

        For dof slices, the slice for one edge results in a
        multi-dimensional array of form (a, b, c) where

            a = elements connected to slice
            b = nodes of each element
            c = degree of freedom at each node

        The degrees of freedom are ordered on the local element basis,
        not globally; sorting by the dof number (which is ordered
        globally) makes operations like sum([:, :-1, :]) meaningful.

        Raises ValueError when the two arrays differ in size.
        """
        # BUGFIX: raising a plain string ('raise "..."') is illegal since
        # Python 2.6 and would raise a TypeError instead of the intended
        # error -- raise a proper exception with the same message.
        if size(unsorted) != size(dofs):
            raise ValueError("--- array must have same size ---")
        # order of dofs
        order = argsort(dofs, axis=1)[:, :, 0]
        # renamed from 'sorted' to avoid shadowing the builtin
        sorted_arr = zeros_like(unsorted)
        for i, elem in enumerate(order):
            sorted_arr[i] = unsorted[i][elem]
        return sorted_arr
class CBClampedRandXi(RF):
    '''Crack bridged by a fiber with constant frictional interface
    to a rigid matrix; free fiber end.

    Response function: composite bridging stress as a function of the
    crack opening w, combining freely debonding fibers and fibers
    clamped at distance lm/2 from the crack plane.
    '''
    implements(IRF)

    title = Str('crack bridge with rigid matrix')

    # bond shear stress; candidate random variable
    tau = Float(2.5, auto_set=False, enter_set=True, input=True,
                distr=['uniform', 'norm'])
    # fiber radius; candidate random variable
    r = Float(0.013, auto_set=False, enter_set=True, input=True,
              distr=['uniform', 'norm'],
              desc='fiber radius')
    # fiber modulus of elasticity
    E_f = Float(72e3, auto_set=False, enter_set=True, input=True,
                distr=['uniform'])
    # Weibull shape parameter of the fiber strength
    m = Float(5., auto_set=False, enter_set=True, input=True,
              distr=['uniform'])
    # Weibull scale parameter (related to a reference volume)
    sV0 = Float(3.e-3, auto_set=False, enter_set=True, input=True,
                distr=['uniform'])
    # fiber volume fraction
    V_f = Float(0.0175, auto_set=False, enter_set=True, input=True,
                distr=['uniform'])
    # clamp distance; np.inf means free debonding only
    lm = Float(np.inf, auto_set=False, enter_set=True, input=True,
               distr=['uniform'])
    # control variable: crack opening
    w = Float(auto_set=False, enter_set=True, input=True,
              distr=['uniform'], desc='crack width',
              ctrl_range=(0.0, 1.0, 10))

    x_label = Str('crack opening [mm]')
    y_label = Str('composite stress [MPa]')
    C_code = Str('')
    # include the pulled-out broken-fiber contribution in the response
    pullout = Bool(False)

    def mu_broken(self, e, depsf, r, lm, m, sV0, mask):
        '''Mean contribution of broken fibers, obtained by numerically
        integrating the residual strain of broken fibers weighted by the
        breakage pdf over the strain history [0, e].

        The pdf is approximated as the gradient of the strength cdf on a
        grid of n points appended as a trailing broadcast axis.
        '''
        n = 200
        # append an integration axis of length n to every array argument
        shape = [1] * len(mask.shape) + [n]
        fact = np.linspace(0.0, 1.0, n).reshape(tuple(shape))
        e = e.reshape(tuple(list(e.shape) + [1]))
        if isinstance(depsf, np.ndarray):
            depsf = depsf.reshape(tuple(list(depsf.shape) + [1]))
        if isinstance(r, np.ndarray):
            r = r.reshape(tuple(list(r.shape) + [1]))
        # strain history from 0 to the current strain e
        e_arr = e * fact
        # debonded length; mask distinguishes debonding from clamped state
        a0 = (e_arr + 1e-15) / depsf
        mask = a0 < lm / 2.0
        # pdf of fiber breakage as the derivative of the strength cdf
        # (np.gradient returns a list for multi-dim input -> take the
        # derivative along the integration axis)
        pdf = np.gradient(self.cdf(e_arr, depsf, r, lm, m, sV0),
                          e / float(n))
        if isinstance(pdf, list):
            pdf = pdf[-1]
        # residual strain of a broken fiber: e/(m+1) while debonding,
        # e/2 once clamped
        e_broken = e_arr / (m + 1) * mask + e_arr / 2. * (mask == False)
        # numerical integration along the appended axis
        return np.trapz(np.nan_to_num(pdf) * e_broken, e_arr)

    def cdf(self, e, depsf, r, lm, m, sV0):
        '''weibull_fibers_cdf_mc

        Weibull cdf of fiber failure at strain e, combining the
        debonding branch (Gxi_deb, while the debonded length a0 < lm/2)
        and the clamped branch (Gxi_clamp, afterwards); the two branches
        are mutually exclusive via the mask, nan_to_num guards the
        division by zero in the unused branch.
        '''
        # scale parameter transformed to the actual stressed volume
        s = ((depsf * (m + 1) * sV0 ** m) /
             (2. * pi * r ** 2)) ** (1. / (m + 1))
        a0 = (e + 1e-15) / depsf
        mask = a0 < lm / 2.0
        ef0cb = e * mask
        ef0lin = e * (mask == False)
        Gxi_deb = 1 - np.exp(-(ef0cb / s) ** (m + 1))
        Gxi_clamp = 1 - np.exp(-(ef0lin / s) ** (m + 1) *
                               (1 - (1 - lm /
                                     (2. * (ef0lin / depsf))) ** (m + 1)))
        return np.nan_to_num(Gxi_deb) + np.nan_to_num(Gxi_clamp)

    def __call__(self, w, tau, E_f, V_f, r, m, sV0, lm):
        '''free and fixed fibers combined
        the failure probability of fixed fibers
        is evaluated by integrating only
        between -lm/2 and lm/2.
        Returns the mean bridging stress contribution
        (scaled by E_f * V_f * r**2).
        '''
        # frictional strain gradient along the fiber (+1e-10 avoids /0)
        T = 2. * tau / r + 1e-10
        k = np.sqrt(T / E_f)
        # crack-bridge strain for free debonding
        ef0cb = k * np.sqrt(w)
        # strain of a fiber clamped at lm/2
        ef0lin = w / lm + T * lm / 4. / E_f
        depsf = T / E_f
        a0 = ef0cb / depsf
        # select the branch depending on whether debonding reached lm/2
        mask = a0 < lm / 2.0
        e = ef0cb * mask + ef0lin * (mask == False)
        Gxi = self.cdf(e, depsf, r, lm, m, sV0)
        # mean contribution of intact fibers
        mu_int = e * (1 - Gxi)
        if self.pullout:
            mu_broken = self.mu_broken(e, depsf, r, lm, m, sV0, mask)
            return (mu_int + mu_broken) * E_f * V_f * r ** 2
        else:
            return mu_int * E_f * V_f * r ** 2

    def free_deb(self, w, tau, E_f, V_f, r, m, sV0):
        '''free debonding only = __call__ with lm=infty

        Closed-form variant: the broken-fiber contribution is evaluated
        analytically via the incomplete gamma function instead of
        numerical integration.
        '''
        T = 2. * tau / r
        # scale parameter with respect to a reference volume
        s = ((T * (m + 1) * sV0 ** m) /
             (2. * E_f * pi * r ** 2)) ** (1. / (m + 1))
        ef0 = np.sqrt(w * T / E_f)
        Gxi = 1 - np.exp(-(ef0 / s) ** (m + 1))
        mu_int = ef0 * (1 - Gxi)
        # analytic mean residual strain of broken fibers
        I = s * gamma(1 + 1. / (m + 1)) * \
            gammainc(1 + 1. / (m + 1), (ef0 / s) ** (m + 1))
        mu_broken = I / (m + 1)
        if self.pullout:
            return (mu_int + mu_broken) * E_f * V_f * r ** 2
        else:
            return mu_int * E_f * V_f * r ** 2
class YMBHist(HasTraits):
    '''Histogram view of the yarn statistical data selected by the
    attached YMBSlider. Traits tagged 'modified' trigger a redraw.
    '''

    slider = Instance(YMBSlider)
    figure = Instance(Figure)

    # histogram configuration (tagged 'modified' -> auto redraw)
    bins = Int(20, auto_set=False, enter_set=True, modified=True)
    xlimit_on = Bool(False, modified=True)
    ylimit_on = Bool(False, modified=True)
    xlimit = Float(100, auto_set=False, enter_set=True, modified=True)
    ylimit = Float(100, auto_set=False, enter_set=True, modified=True)
    multi_hist_on = Bool(False, modified=True)
    stats_on = Bool(False, modified=True)
    normed_on = Bool(False, modified=True)

    normed_hist = Property(depends_on='+modified, slider.input_change')

    @cached_property
    def _get_normed_hist(self):
        '''Normalized histogram (values, bin edges) of the slider data.'''
        data = self.slider.stat_data
        h, b = histogram(data, bins=self.bins, normed=True)
        return h, b

    # NOTE: this Property shadows the builtin 'range' at class scope
    range = Property

    def _get_range(self):
        h, b = self.normed_hist
        return (min(b), max(b))

    bin_width = Property

    def _get_bin_width(self):
        return (self.range[1] - self.range[0]) / self.bins

    def _figure_default(self):
        figure = Figure()
        figure.add_axes(self.axes_adjust)
        return figure

    edge_color = Str(None)
    face_color = Str(None)
    axes_adjust = List([0.1, 0.1, 0.8, 0.8])

    data_changed = Event(True)

    @on_trait_change('+modified, slider.input_change')
    def _redraw(self):
        '''Redraw the histogram according to the current configuration.'''
        figure = self.figure
        axes = figure.axes[0]
        axes.clear()
        # BUGFIX: var_data must be available regardless of multi_hist_on
        # because the stats_on branch below reads it; previously it was
        # assigned only in the multi_hist_on == False branch, raising a
        # NameError when multi_hist_on and stats_on were both True.
        var_data = self.slider.stat_data
        if self.multi_hist_on == True:
            histtype = 'step'
            lw = 3
            plot_data = getattr(self.slider.data, self.slider.var_enum_)
            for i in range(0, plot_data.shape[1]):
                axes.hist(plot_data[:, i].compressed(),
                          bins=self.bins,
                          histtype=histtype,
                          color='gray')
        if self.multi_hist_on == False:
            histtype = 'bar'
            lw = 1
            axes.hist(var_data, bins=self.bins, histtype=histtype,
                      linewidth=lw,
                      normed=self.normed_on,
                      edgecolor=self.edge_color,
                      facecolor=self.face_color)
        if self.stats_on == True:
            xint = axes.xaxis.get_view_interval()
            yint = axes.yaxis.get_view_interval()
            axes.text(
                xint[0], yint[0], 'mean = %e, std = %e' %
                (mean(var_data), sqrt(var(var_data))))

        # redefine xticks labels
        # inter = axes.xaxis.get_view_interval()
        # axes.set_xticks( linspace( inter[0], inter[1], 5 ) )
        axes.set_xlabel(self.slider.var_enum)
        axes.set_ylabel('frequency')  # , fontsize = 16
        setp(axes.get_xticklabels(), position=(0, -.025))
        if self.xlimit_on == True:
            axes.set_xlim(0, self.xlimit)
        if self.ylimit_on == True:
            axes.set_ylim(0, self.ylimit)
        self.data_changed = True

    view = View(
        Group(
            Item('figure',
                 style='custom',
                 editor=MPLFigureEditor(),
                 show_label=False,
                 id='figure.view'),
            HGroup(
                Item('bins'),
                Item('ylimit_on', label='Y limit'),
                Item('ylimit',
                     enabled_when='ylimit_on == True',
                     show_label=False),
                Item('stats_on', label='stats'),
                Item('normed_on', label='norm'),
                Item('multi_hist_on', label='multi')),
            label='histogram',
            dock='horizontal',
            id='yarn_hist.figure',
        ),
        Group(
            Item('slider', style='custom', show_label=False),
            label='yarn data',
            dock='horizontal',
            id='yarn_hist.config',
        ),
        id='yarn_structure_view',
        resizable=True,
        scrollable=True,
        # width = 0.8,
        # height = 0.4
    )
class RIDVariable(HasTraits): """ Association between a random variable and distribution. """ title = Str('RIDvarible') s = WeakRef rf = WeakRef n_int = Int(20, enter_set=True, auto_set=False, desc='Number of integration points') def _n_int_changed(self): if self.pd: self.pd.n_segments = self.n_int # should this variable be randomized random = Bool(False, randomization_changed=True) def _random_changed(self): # get the default distribution if self.random: self.s.rv_dict[self.varname] = RV(pd=self.pd, name=self.varname, n_int=self.n_int) else: del self.s.rv_dict[self.varname] # name of the random variable (within the response function) # varname = String source_trait = Trait trait_value = Float pd = Property(Instance(IPDistrib), depends_on='random') @cached_property def _get_pd(self): if self.random: tr = self.rf.trait(self.varname) pd = PDistrib(distr_choice=tr.distr[0], n_segments=self.n_int) trait = self.rf.trait(self.varname) # get the distribution parameters from the metadata # distr_params = { 'scale': trait.scale, 'loc': trait.loc, 'shape': trait.shape } dparams = {} for key, val in list(distr_params.items()): if val: dparams[key] = val pd.distr_type.set(**dparams) return pd else: return None value = Property def _get_value(self): if self.random: return '' else: return '%g' % self.trait_value # -------------------------------------------- # default view specification def default_traits_view(self): return View(HGroup(Item( 'n_int', visible_when='random', label='NIP', ), Spring(), show_border=True, label='Variable name: %s' % self.varname), Item('pd@', show_label=False), resizable=True, id='rid_variable', height=800)
class RandomField(HasStrictTraits):
    '''Class for generating a 1D random field by scaling
    a standardized normally distributed random field.
    The random field array is stored in the property
    random_field. Gaussian or Weibull local distributions
    are available. The parameters of the Weibull random
    field are related to the minimum extreme along the
    whole length of the field.
    '''
    # Parameters to be set (traits tagged 'modified' invalidate the field)
    lacor = Float(1., auto_set=False, enter_set=True,
                  desc='autocorrelation length', modified=True)
    nsim = Int(1, auto_set=False, enter_set=True,
               desc='No of fields to be simulated', modified=True)
    mean = Float(0, auto_set=False, enter_set=True,
                 desc='mean value', modified=True)
    stdev = Float(1., auto_set=False, enter_set=True,
                  desc='standard deviation', modified=True)
    shape = Float(10., auto_set=False, enter_set=True,
                  desc='shape for Weibull distr', modified=True)
    scale = Float(5., auto_set=False, enter_set=True,
                  desc='scale for Weibull distr. corresp. to a length < lacor',
                  modified=True)
    loc = Float(auto_set=False, enter_set=True,
                desc='location for 3 params weibull', modified=True)
    length = Float(1000., auto_set=False, enter_set=True,
                   desc='length of the random field', modified=True)
    nx = Int(200, auto_set=False, enter_set=True,
             desc='number of discretization points', modified=True)
    # plain attribute: raise on negative field values when True
    non_negative_check = False
    reevaluate = Event
    # fix the numpy random seed for reproducible fields
    seed = Bool(False)
    distr_type = Enum('Weibull', 'Gauss', modified=True)

    xgrid = Property(Array, depends_on='length,nx')

    @cached_property
    def _get_xgrid(self):
        '''get the discretized grid for the random field'''
        return np.linspace(0, self.length, self.nx)

    gridpoint_scale = Property(depends_on='scale,shape,length,nx,lacor')

    @cached_property
    def _get_gridpoint_scale(self):
        '''Scaling of the defined distribution to the distribution
        of a single grid point. This option is only available for
        Weibull random field'''
        delta_x = self.xgrid[1] - self.xgrid[0]
        return self.scale * (self.lacor /
                             (delta_x + self.lacor)) ** (-1. / self.shape)

    def acor(self, dx, lcorr):
        '''autocorrelation function (squared-exponential kernel)'''
        return e ** (-(dx / lcorr) ** 2)

    eigenvalues = Property(depends_on='lacor,length,nx')

    @cached_property
    def _get_eigenvalues(self):
        '''evaluates the eigenvalues and eigenvectors
        of the autocorrelation matrix'''
        # creating a symm. toeplitz matrix with (xgrid, xgrid) data points
        Rdist = toeplitz(self.xgrid, self.xgrid)
        # apply the autocorrelation func. to get the correlation matrix
        R = self.acor(Rdist, self.lacor)
        # evaluate the eigenvalues and eigenvectors of the autocorrelation
        # matrix (returned as the (values, vectors) pair from eig)
        print 'evaluating eigenvalues for random field...'
        eigenvalues = eig(R)
        print 'complete'
        return eigenvalues

    random_field = Property(Array, depends_on='+modified, reevaluate')

    @cached_property
    def _get_random_field(self):
        '''simulates the Gaussian random field via the
        Karhunen-Loeve expansion of the autocorrelation matrix and
        transforms it to the target local distribution.
        NOTE(review): the final reshape to len(xgrid) appears to assume
        nsim == 1 -- confirm before simulating multiple fields.
        '''
        if self.seed == True:
            np.random.seed(101)
        # evaluate the eigenvalues and eigenvectors of the
        # autocorrelation matrix
        _lambda, phi = self.eigenvalues
        # simulation points from 0 to 1 with an equidistant step
        # for the LHS
        randsim = linspace(0, 1, len(self.xgrid) + 1) - \
            0.5 / (len(self.xgrid))
        randsim = randsim[1:]
        # shuffling points for the simulation
        shuffle(randsim)
        # matrix containing standard Gauss distributed random numbers
        xi = transpose(ones((self.nsim, len(self.xgrid))) *
                       array([norm().ppf(randsim)]))
        # eigenvalue matrix
        LAMBDA = eye(len(self.xgrid)) * _lambda
        # cutting out the real part
        ydata = dot(dot(phi, (LAMBDA) ** 0.5), xi).real
        if self.distr_type == 'Gauss':
            # scaling the std. distribution
            scaled_ydata = ydata * self.stdev + self.mean
        elif self.distr_type == 'Weibull':
            # setting Weibull params: map the Gaussian field through its
            # cdf and the Weibull inverse cdf
            Pf = norm().cdf(ydata)
            scaled_ydata = weibull_min(self.shape,
                                       scale=self.scale,
                                       loc=self.loc).ppf(Pf)
        # reset the reevaluation event after recomputation
        self.reevaluate = False
        rf = reshape(scaled_ydata, len(self.xgrid))
        if self.non_negative_check == True:
            if (rf < 0).any():
                raise ValueError, 'negative value(s) in random field'
        return rf

    view_traits = View(Item('lacor'),
                       Item('nsim'),
                       Item('shape'),
                       Item('scale'),
                       Item('length'),
                       Item('nx'),
                       Item('distr_type'), )
class YMBFieldVar(HasTraits):
    '''2D field plot of one scalar yarn variable over the cut positions,
    optionally sorted by the distance from the yarn edge.
    NOTE: relies on Python-2 zip() returning a subscriptable list.
    '''
    data = Instance(IYMBData)

    # number of measurement cuts taken from the data source
    n_cols = Property()

    def _get_n_cols(self):
        return self.data.n_cuts

    # selected scalar variable (mapped trait; var_enum_ holds the
    # mapped attribute name on the data object)
    var_enum = Trait('radius', var_dict, modified=True)
    scalar_arr = Property(depends_on='var_enum')

    def _get_scalar_arr(self):
        return getattr(self.data, self.var_enum_)

    sorted_on = Bool(False, modified=True)

    scalar_arr_sorted = Property(depends_on='var_enum')

    def _get_scalar_arr_sorted(self):
        ''' Return scalar array sorted by the shortest distance
        from the edge
        '''
        scalar_arr = zeros_like(getattr(self.data, self.var_enum_))
        scalar_mask_arr = zeros_like(getattr(self.data, self.var_enum_))
        distance_arr = self.data.edge_distance.filled()
        # column-wise: sort both the mask and the values by descending
        # edge distance (zip/sort/unzip idiom)
        for i in range(0, self.n_cols):
            scalar_mask_arr[:, i] = zip(
                *sorted(zip(distance_arr[:, i],
                            getattr(self.data,
                                    self.var_enum_).mask[:, i]),
                        reverse=True))[1]
            scalar_arr[:, i] = zip(
                *sorted(zip(distance_arr[:, i],
                            getattr(self.data,
                                    self.var_enum_).filled()[:, i]),
                        reverse=True))[1]
        return ma.array(scalar_arr, mask=array(scalar_mask_arr,
                                               dtype=bool))

    figure = Instance(Figure, ())

    def _figure_default(self):
        figure = Figure()
        figure.add_axes([0.1, 0.1, 0.8, 0.8])
        return figure

    data_changed = Event(True)

    @on_trait_change('+modified, data')
    def _redraw(self):
        '''Interpolate the scattered (cut position, fiber row) data onto
        a regular grid and draw a filled contour plus a scatter overlay.
        '''
        self.figure.clear()
        self.figure.add_axes([0.1, 0.1, 0.8, 0.8])
        figure = self.figure
        axes = figure.axes[0]
        axes.clear()

        if self.sorted_on == True:
            scalar_arr = self.scalar_arr_sorted
        else:
            scalar_arr = self.scalar_arr

        # regular grid along the cut positions
        xi = linspace(min(self.data.cut_x), max(self.data.cut_x), 100)

        x = (ones_like(scalar_arr) * self.data.cut_x).flatten()
        ny_row = scalar_arr.shape[0]
        dy = max(diff(self.data.cut_x))
        yi = linspace(0, ny_row * dy, ny_row)
        y = (ones_like(scalar_arr).T *
             linspace(0, ny_row * dy, ny_row)).T.flatten()
        z = scalar_arr.flatten()
        # natural-neighbor interpolation (deprecated matplotlib.mlab API)
        zi = griddata(x, y, z, xi, yi, interp='nn')

        # contour the gridded data, plotting dots
        # at the nonuniform data points
        # axes.contour( xi, yi, zi, 20, linewidths = .5, colors = 'k' )
        # plotting filled contour
        axes.contourf(xi, yi, zi, 200, cmap=my_cmap_lin)  # my_cmap_lin
        scat = axes.scatter(x, y, marker='o', c=z, s=20,
                            linewidths=0, cmap=my_cmap_lin)
        figure.colorbar(scat)

        self.data_changed = True

    view = View(
        'var_enum',
        'sorted_on',
        Item('figure', style='custom',
             editor=MPLFigureEditor(),
             show_label=False),
        id='yarn_structure_view',
        resizable=True,
        scrollable=True,
        dock='tab',
        width=0.8,
        height=0.4)
class SPIRRID(HasTraits):
    '''Multidimensional statistical integration.

    Its name SPIRRID is an acronym for
    Set of Parallel Independent Random Responses with Identical Distributions

    The package implements the evaluation of an integral over a set of
    random variables affecting a response function RF and distributed
    according to a probabilistic distribution PDistrib.

    The input parameters are divided in four categories in order
    to define state consistency of the evaluation. The outputs
    are defined as cached properties that are reevaluated in response
    to changes in the inputs.

    The following events accumulate changes in the input parameters of spirrid:
    rf_change   - change in the response function
    rand_change - change in the randomization
    conf_change - change in the configuration of the algorithm
    eps_change  - change in the studied range of the process control variable
    '''
    #--------------------------------------------------------------------
    # Response function
    #--------------------------------------------------------------------
    #
    rf = Instance(IRF)

    def _rf_changed(self):
        # re-wire the change notification to the new response function and
        # reset the randomization - old RVs refer to parameters of the old rf
        self.on_trait_change(self._set_rf_change, 'rf.changed')
        self.rv_dict = {}

    #--------------------------------------------------------------------
    # Specification of random parameters
    #--------------------------------------------------------------------
    #
    rv_dict = Dict

    def add_rv(self, variable, distribution='uniform', loc=0., scale=1.,
               shape=1., n_int=30):
        '''Declare a variable as random.

        Parameters: variable - name of an rf parameter; distribution -
        distribution type accepted by the parameter's `distr` metadata;
        loc/scale/shape - distribution parameters; n_int - number of
        discretization points of the random domain.
        Raises AssertionError if the parameter is unknown or the
        distribution type is not allowed for it.
        '''
        if variable not in self.rf.param_keys:
            raise AssertionError('parameter %s not defined by the response function' \
                % variable)
        params_with_distr = self.rf.traits(
            distr=lambda x: type(x) == ListType and distribution in x)
        if variable not in params_with_distr:
            raise AssertionError('distribution type %s not allowed for parameter %s' \
                % (distribution, variable))
        # @todo - let the RV take care of PDistrib specification.
        # isolate the dirty two-step definition of the distrib from spirrid
        #
        pd = PDistrib(distr_choice=distribution, n_segments=n_int)
        pd.distr_type.set(scale=scale, shape=shape, loc=loc)
        self.rv_dict[variable] = RV(name=variable, pd=pd, n_int=n_int)

    def del_rv(self, variable):
        '''Delete declaration of random variable.
        '''
        del self.rv_dict[variable]

    def clear_rv(self):
        '''Forget all random variable declarations.'''
        self.rv_dict = {}

    # subsidiary methods for sorted access to the random variables.
    # (note dictionary has not defined order of its items)
    rv_keys = Property(List, depends_on='rv_dict')

    @cached_property
    def _get_rv_keys(self):
        rv_keys = sorted(self.rv_dict.keys())
        # the random variable gets an index based on the sorted keys
        # NOTE: side effect - the idx attribute of every RV is (re)assigned
        # here; theta_ogrid / pdf_ogrid rely on these indices
        for idx, key in enumerate(rv_keys):
            self.rv_dict[key].idx = idx
        return rv_keys

    rv_list = Property(List, depends_on='rv_dict')

    @cached_property
    def _get_rv_list(self):
        # RVs in the order of their sorted keys
        return list(map(self.rv_dict.get, self.rv_keys))

    #--------------------------------------------------------------------
    # Define which changes in the response function and in the
    # statistical parameters are relevant for reevaluation of the response
    #--------------------------------------------------------------------
    rf_change = Event

    @on_trait_change('rf.changed')
    def _set_rf_change(self):
        self.rf_change = True

    rand_change = Event

    @on_trait_change('rv_dict, rv_dict.changed')
    def _set_rand_change(self):
        self.rand_change = True

    conf_change = Event

    @on_trait_change('+alg_option')
    def _set_conf_change(self):
        self.conf_change = True

    eps_change = Event

    @on_trait_change('+eps_range')
    def _set_eps_change(self):
        self.eps_change = True

    # Dictionary with key = rf parameters
    # and values = default param values for the resp func
    #
    param_dict = Property(Dict, depends_on='rf_change, rand_change')

    @cached_property
    def _get_param_dict(self):
        '''Gather all the traits with the metadata distr specified.

        Deterministic parameters map to their scalar default value,
        randomized ones to their orthogonalized discretization array.
        '''
        pdict = {}
        for name, value in zip(self.rf.param_keys, self.rf.param_values):
            rv = self.rv_dict.get(name, None)
            if rv is None:
                pdict[name] = value
            else:
                pdict[name] = self.theta_ogrid[rv.idx]
        return pdict

    ##### - experimental #####
    # @deprecated: full coverage of the sampling domain - for orientation
    full_theta_arr_list = Property(depends_on='rf_change, rand_change')

    @cached_property
    def _get_full_theta_arr_list(self):
        '''Get list of arrays with both deterministic and statistic arrays.
        '''
        param_arr_list = [
            array([value], dtype='float_') for value in self.rf.param_values
        ]
        for idx, name in enumerate(self.rf.param_keys):
            rv = self.rv_dict.get(name, None)
            if rv:
                param_arr_list[idx] = rv.theta_arr
        return param_arr_list

    def get_rvs_theta_arr(self, n_samples):
        '''Return a (n_params, n_samples) array of parameter realizations -
        constants repeated, random variables sampled from their RV.
        '''
        rvs_theta_arr = array(
            [repeat(value, n_samples) for value in self.rf.param_values])
        for idx, name in enumerate(self.rf.param_keys):
            rv = self.rv_dict.get(name, None)
            if rv:
                rvs_theta_arr[idx, :] = rv.get_rvs_theta_arr(n_samples)
        return rvs_theta_arr

    # Constant parameters
    #
    const_param_dict = Property(Dict, depends_on='rf_change, rand_change')

    @cached_property
    def _get_const_param_dict(self):
        # parameters that were NOT declared random keep their default value
        const_param_dict = {}
        for name, v in zip(self.rf.param_keys, self.rf.param_values):
            if name not in self.rv_keys:
                const_param_dict[name] = v
        return const_param_dict

    # List of discretized statistical domains
    #
    theta_arr_list = Property(depends_on='rf_change, rand_change')

    @cached_property
    def _get_theta_arr_list(self):
        '''Get list of arrays with discretized RVs.
        '''
        return [rv.theta_arr for rv in self.rv_list]

    # Discretized statistical domain
    #
    theta_ogrid = Property(depends_on='rf_change, rand_change')

    @cached_property
    def _get_theta_ogrid(self):
        '''Get orthogonal list of arrays with discretized RVs.
        '''
        return orthogonalize(self.theta_arr_list)

    #---------------------------------------------------------------------------------
    # PDF arrays oriented in enumerated dimensions - broadcasting possible
    #---------------------------------------------------------------------------------
    pdf_ogrid = Property(depends_on='rf_change, rand_change')

    @cached_property
    def _get_pdf_ogrid(self):
        '''Get orthogonal list of arrays with PDF values of RVs.
        '''
        pdf_arr_list = [rv.pdf_arr for rv in self.rv_list]
        return orthogonalize(pdf_arr_list)

    #---------------------------------------------------------------------------------
    # PDF * Theta arrays oriented in enumerated dimensions - broadcasting possible
    #---------------------------------------------------------------------------------
    dG_ogrid = Property(depends_on='rf_change, rand_change')

    @cached_property
    def _get_dG_ogrid(self):
        '''Get orthogonal list of arrays with PDF * Theta product of.
        '''
        dG_arr_list = [rv.dG_arr for rv in self.rv_list]
        return orthogonalize(dG_arr_list)

    #---------------------------------------------------------------------------------
    # PDF grid - mutually multiplied arrays of PDF
    #---------------------------------------------------------------------------------
    dG_grid = Property(depends_on='rf_change, rand_change')

    @cached_property
    def _get_dG_grid(self):
        # broadcasting the orthogonal dG arrays yields the full n-dim grid;
        # with no random variables the weight degenerates to 1.0
        if len(self.dG_ogrid):
            return reduce(lambda x, y: x * y, self.dG_ogrid)
        else:
            return 1.0

    #------------------------------------------------------------------------------------
    # Configuration of the algorithm
    #------------------------------------------------------------------------------------
    #
    # cached_dG_grid:
    # If set to True, the cross product between the pdf values of all random variables
    # will be precalculated and stored in an n-dimensional grid
    # otherwise the product is performed for every epsilon in the inner loop anew
    #
    cached_dG = Bool(True, alg_option=True)

    # compiled_eps_loop:
    # If set True, the loop over the control variable epsilon is compiled
    # otherwise, python loop is used.
    compiled_eps_loop = Bool(True, alg_option=True)

    # compiled_QdG_loop:
    # If set True, the integration loop over the product between the response function
    # and the pdf . theta product is performed in c
    # otherwise the numpy arrays are used.
    compiled_QdG_loop = Bool(True, alg_option=True)

    def _compiled_QdG_loop_changed(self):
        '''If the inner loop is not compiled, the outer loop
        must not be compiled as well.
        '''
        if self.compiled_QdG_loop == False:
            # BUG FIX: the trait is named `compiled_eps_loop`; the original
            # assignment to the non-existent `compiled_eps` silently created
            # a new attribute and never enforced the invariant above.
            self.compiled_eps_loop = False

    arg_list = Property(depends_on='rf_change, rand_change, conf_change')

    @cached_property
    def _get_arg_list(self):
        '''Argument names handed to the weave.inline C function - must match
        the identifiers used in the generated C_code.
        '''
        arg_list = []
        # create argument string for inline function
        if self.compiled_eps_loop:
            arg_list += ['mu_q_arr', 'e_arr']
        else:
            arg_list.append('e')
        arg_list += ['%s_flat' % name for name in self.rv_keys]
        if self.cached_dG:
            arg_list += ['dG_grid']
        else:
            arg_list += ['%s_pdf' % name for name in self.rv_keys]
        return arg_list

    C_code_qg = Property(depends_on='rf_change, rand_change, conf_change')

    @cached_property
    def _get_C_code_qg(self):
        '''C snippet computing the integration weight `pdf` for the current
        multi-index - either a cached-grid lookup or an on-the-fly product.
        '''
        if self.cached_dG:  # q_g - blitz matrix used to store the grid
            code_str = '\tdouble pdf = dG_grid(' + \
                ','.join(['i_%s' % name for name in self.rv_keys]) + \
                ');\n'
        else:  # qg
            code_str = '\tdouble pdf = ' + \
                '*'.join([' *( %s_pdf + i_%s)' % (name, name)
                          for name in self.rv_keys]) + \
                ';\n'
        return code_str

    #------------------------------------------------------------------------------------
    # Configurable generation of C-code for mean curve evaluation
    #------------------------------------------------------------------------------------
    C_code = Property(
        depends_on='rf_change, rand_change, conf_change, eps_change')

    @cached_property
    def _get_C_code(self):
        '''Assemble the C source for weave.inline: optional eps loop, nested
        loops over the random discretizations, rf evaluation, accumulation.
        '''
        code_str = ''
        if self.compiled_eps_loop:
            # create code string for inline function
            # NOTE(review): `self.n_eps` is not defined in this class -
            # presumably supplied by a subclass; confirm before using the
            # compiled eps loop.
            code_str += 'for( int i_eps = 0; i_eps < %g; i_eps++){\n' % self.n_eps
            if self.cached_dG:
                # multidimensional index needed for dG_grid
                # use blitz arrays must be used also for other arrays
                #
                code_str += 'double eps = e_arr( i_eps );\n'
            else:
                # pointer access possible for single dimensional arrays
                # use the pointer arithmetics for accessing the pdfs
                code_str += '\tdouble eps = *( e_arr + i_eps );\n'
        else:
            # create code string for inline function
            #
            code_str += 'double eps = e;\n'
        code_str += 'double mu_q(0);\n'
        code_str += 'double q(0);\n'
        code_str += '#line 100\n'
        # create code for constant params
        for name, value in list(self.const_param_dict.items()):
            code_str += 'double %s = %g;\n' % (name, value)
        # generate loops over random params
        for rv in self.rv_list:
            name = rv.name
            n_int = rv.n_int
            # create the loop over the random variable
            #
            code_str += 'for( int i_%s = 0; i_%s < %g; i_%s++){\n' % (
                name, name, n_int, name)
            if self.cached_dG:
                # multidimensional index needed for pdf_grid - use blitz arrays
                #
                code_str += '\tdouble %s = %s_flat( i_%s );\n' % (name, name,
                                                                  name)
            else:
                # pointer access possible for single dimensional arrays
                # use the pointer arithmetics for accessing the pdfs
                code_str += '\tdouble %s = *( %s_flat + i_%s );\n' % (
                    name, name, name)
        if len(self.rv_keys) > 0:
            code_str += self.C_code_qg
            code_str += self.rf.C_code + \
                '// Store the values in the grid\n' + \
                '\tmu_q += q * pdf;\n'
        else:
            code_str += self.rf.C_code + \
                '\tmu_q += q;\n'
        # close the random loops
        #
        for name in self.rv_keys:
            code_str += '};\n'
        if self.compiled_eps_loop:
            if self.cached_dG:  # blitz matrix
                code_str += 'mu_q_arr(i_eps) = mu_q;\n'
            else:
                code_str += '*(mu_q_arr + i_eps) = mu_q;\n'
            code_str += '};\n'
        else:
            code_str += 'return_val = mu_q;'
        return code_str

    eps_grid_shape = Property(depends_on='eps_change')

    @cached_property
    def _get_eps_grid_shape(self):
        return tuple([len(eps) for eps in self.eps_list])

    eps_list = Property(depends_on='eps_change')

    @cached_property
    def _get_eps_list(self):
        '''One linspace per control variable of the response function.'''
        ctrl_list = self.rf.ctrl_traits
        # generate the slices to produce the grid of the control values
        eps_list = [linspace(*cv.ctrl_range) for cv in ctrl_list]
        return eps_list

    eps_grid = Property(depends_on='eps_change')

    @cached_property
    def _get_eps_grid(self):
        '''Generate the grid of control variables.

        The array can be multidimensional depending on the
        dimension of the input variable of the current response function.
        '''
        ctrl_list = self.rf.ctrl_traits
        # generate the slices to produce the grid of the control values
        slices = [
            slice(cv.ctrl_range[0], cv.ctrl_range[1],
                  complex(0, cv.ctrl_range[2])) for cv in ctrl_list
        ]
        # produce the tuple of expanded arrays with n-dimensions - values broadcasted
        return mgrid[tuple(slices)]

    eps_arr = Property(depends_on='eps_change')

    @cached_property
    def _get_eps_arr(self):
        ''' flatten the arrays and order them as columns of an array
        containing all combinations of the control variable values.
        '''
        return c_[tuple([eps_arr.flatten() for eps_arr in self.eps_grid])]

    # verbosity passed through to weave.inline
    compiler_verbose = Int(0)
    # C compiler used by weave.inline
    compiler = Str('gcc')

    def _eval(self):
        '''Evaluate the integral based on the configuration of algorithm.

        Returns (mu_q_arr, duration) - the mean response per control value
        and the wall-clock time of the integration.
        '''
        if self.cached_dG == False and self.compiled_QdG_loop == False:
            raise NotImplementedError(
                'Configuration for pure Python integration is too slow and is not implemented'
            )
        self._set_compiler()
        # prepare the array of the control variable discretization
        #
        eps_arr = self.eps_arr
        mu_q_arr = zeros((eps_arr.shape[0], ), dtype='float_')
        # prepare the parameters for the compiled function in
        # a separate dictionary
        c_params = {}
        if self.compiled_eps_loop:
            # for compiled eps_loop the whole input and output array
            # must be passed to c
            #
            c_params['e_arr'] = eps_arr
            c_params['mu_q_arr'] = mu_q_arr
            #c_params['n_eps' ] = n_eps
        if self.compiled_QdG_loop:
            # prepare the lengths of the arrays to set the iteration bounds
            #
            for rv in self.rv_list:
                c_params['%s_flat' % rv.name] = rv.theta_arr
            if len(self.rv_list) > 0:
                if self.cached_dG:
                    c_params['dG_grid'] = self.dG_grid
                else:
                    for rv in self.rv_list:
                        c_params['%s_pdf' % rv.name] = rv.dG_arr
            else:
                c_params['dG_grid'] = self.dG_grid
        if self.cached_dG:
            conv = converters.blitz
        else:
            conv = converters.default
        # NOTE(review): time.clock() is Python-2 era API (removed in 3.8)
        t = time.clock()
        if self.compiled_eps_loop:
            # C loop over eps, all inner loops must be compiled as well
            #
            inline(self.C_code,
                   self.arg_list,
                   local_dict=c_params,
                   type_converters=conv,
                   compiler=self.compiler,
                   verbose=self.compiler_verbose)
        else:
            # Python loop over eps
            #
            for idx, e in enumerate(eps_arr):
                if self.compiled_QdG_loop:
                    # C loop over random dimensions
                    #
                    c_params['e'] = e  # prepare the parameter
                    mu_q = inline(self.C_code,
                                  self.arg_list,
                                  local_dict=c_params,
                                  type_converters=conv,
                                  compiler=self.compiler,
                                  verbose=self.compiler_verbose)
                else:
                    # Numpy loops over random dimensions
                    #
                    # get the rf grid for all combinations of
                    # parameter values
                    #
                    Q_grid = self.rf(*e, **self.param_dict)
                    # multiply the response grid with the contributions
                    # of pdf distributions (weighted by the delta of the
                    # random variable disretization)
                    #
                    Q_grid *= self.dG_grid
                    # sum all the values to get the integral
                    mu_q = sum(Q_grid)
                # add the value to the return array
                mu_q_arr[idx] = mu_q
        duration = time.clock() - t
        return mu_q_arr, duration

    def eval_i_dG_grid(self):
        '''Get the integral of the pdf * theta grid.
        '''
        return sum(self.dG_grid)

    def _eval_mu_q(self):
        # configure eval and call it
        pass

    def _eval_stdev_q(self):
        # configure eval and call it
        pass

    #--------------------------------------------------------------------------------------------
    # Numpy implementation
    #--------------------------------------------------------------------------------------------
    def get_rf(self, eps):
        '''
        Numpy based evaluation of the response function.
        '''
        return self.rf(eps, **self.param_dict)

    #---------------------------------------------------------------------------------------------
    # Output properties
    #---------------------------------------------------------------------------------------------

    # container for the data obtained in the integration
    #
    # This is not only the mean curve but also stdev and
    # execution statistics. Such an implementation
    # concentrates the critical part of the algorithmic
    # evaluation and avoids duplication of code and
    # repeated calls. The results are cached in the tuple.
    # They are accessed by the convenience properties defined
    # below.
    #
    results = Property(
        depends_on='rf_change, rand_change, conf_change, eps_change')

    @cached_property
    def _get_results(self):
        return self._eval()

    #---------------------------------------------------------------------------------------------
    # Output accessors
    #---------------------------------------------------------------------------------------------
    # the properties that access the cached results and give them a name

    mu_q_arr = Property()

    def _get_mu_q_arr(self):
        return self.results[0]

    mu_q_grid = Property()

    def _get_mu_q_grid(self):
        return self.mu_q_arr.reshape(self.eps_grid_shape)

    exec_time = Property()

    def _get_exec_time(self):
        '''Execution time of the last evaluation.
        '''
        return self.results[1]

    mean_curve = Property()

    def _get_mean_curve(self):
        '''Mean response curve.
        '''
        return MFnLineArray(xdata=self.eps_arr, ydata=self.mu_q_arr)

    mu_q_peak_idx = Property()

    def _get_mu_q_peak_idx(self):
        '''Get the index of the mean peak response value'''
        return argmax(self.mu_q_arr)

    mu_q_peak = Property()

    def _get_mu_q_peak(self):
        '''Get mean peak response value'''
        return self.mu_q_arr[self.mu_q_peak_idx]

    eps_at_peak = Property()

    def _get_eps_at_peak(self):
        '''Get strain at maximum middle response mu_q
        '''
        return self.eps_arr[self.mu_q_peak_idx]

    stdev_mu_q_peak = Property()

    def _get_stdev_mu_q_peak(self):
        '''
        Numpy based evaluation of the standard deviation at the peak:
        sqrt(E[q^2] - E[q]^2) with E[.] taken over the dG weights.
        '''
        mu_q_peak = self.mu_q_peak
        eps_at_peak = self.eps_at_peak
        q_quad_grid = self.get_rf(eps_at_peak)**2
        q_quad_grid *= self.dG_grid
        q_quad_peak = sum(q_quad_grid)
        stdev_mu_q_peak = sqrt(q_quad_peak - mu_q_peak**2)
        return stdev_mu_q_peak

    #---------------------------------------------------------------------------------------------
    # Auxiliary methods
    #---------------------------------------------------------------------------------------------
    def _set_compiler(self):
        '''Catch eventual mismatch between scipy.weave and compiler
        '''
        try:
            # probe for os.uname - only available on POSIX platforms
            os.uname()
        except Exception:
            # it is not Linux - just let it go and suffer
            return
        #if self.compiler == 'gcc':
        #os.environ['CC'] = 'gcc-4.1'
        #os.environ['CXX'] = 'g++-4.1'
        #os.environ['OPT'] = '-DNDEBUG -g -fwrapv -O3'

    traits_view = View(
        Item('rf@', show_label=False),
        width=0.3,
        height=0.3,
        resizable=True,
        scrollable=True,
    )
class ExpBT3PT(ExType): '''Experiment: Bending Test Three Point ''' # label = Str('three point bending test') implements(IExType) file_ext = 'raw' #-------------------------------------------------------------------- # register a change of the traits with metadata 'input' #-------------------------------------------------------------------- input_change = Event @on_trait_change('+input, ccs.input_change, +ironing_param') def _set_input_change(self): self.input_change = True #-------------------------------------------------------------------------------- # specify inputs: #-------------------------------------------------------------------------------- length = Float(0.46, unit='m', input=True, table_field=True, auto_set=False, enter_set=True) width = Float(0.1, unit='m', input=True, table_field=True, auto_set=False, enter_set=True) thickness = Float(0.02, unit='m', input=True, table_field=True, auto_set=False, enter_set=True) # age of the concrete at the time of testing age = Int(33, unit='d', input=True, table_field=True, auto_set=False, enter_set=True) loading_rate = Float(4.0, unit='mm/min', input=True, table_field=True, auto_set=False, enter_set=True) #-------------------------------------------------------------------------- # composite cross section #-------------------------------------------------------------------------- ccs = Instance(CompositeCrossSection) def _ccs_default(self): '''default settings ''' # fabric_layout_key = 'MAG-07-03' # fabric_layout_key = '2D-02-06a' fabric_layout_key = '2D-05-11' # fabric_layout_key = '2D-09-12' # concrete_mixture_key = 'PZ-0708-1' # concrete_mixture_key = 'FIL-10-09' concrete_mixture_key = 'barrelshell' orientation_fn_key = 'all0' # orientation_fn_key = 'all90' # orientation_fn_key = '90_0' n_layers = 6 s_tex_z = 0.020 / (n_layers + 1) ccs = CompositeCrossSection(fabric_layup_list=[ plain_concrete(s_tex_z * 0.5), FabricLayUp(n_layers=n_layers, orientation_fn_key=orientation_fn_key, s_tex_z=s_tex_z, 
fabric_layout_key=fabric_layout_key), plain_concrete(s_tex_z * 0.5) ], concrete_mixture_key=concrete_mixture_key) return ccs #-------------------------------------------------------------------------- # Get properties of the composite #-------------------------------------------------------------------------- # E-modulus of the composite at the time of testing E_c = Property(Float, unit='MPa', depends_on='input_change', table_field=True) def _get_E_c(self): return self.ccs.get_E_c_time(self.age) # E-modulus of the composite after 28 days E_c28 = DelegatesTo('ccs', listenable=False) # reinforcement ration of the composite rho_c = DelegatesTo('ccs', listenable=False) #-------------------------------------------------------------------------------- # define processing #-------------------------------------------------------------------------------- # flag distinguishes weather data from a displacement gauge is available # stored in a separate ASC-file with a corresponding file name # flag_ASC_file = Bool(False) def _read_data_array(self): ''' Read the experiment data. ''' if exists(self.data_file): print 'READ FILE' file_split = self.data_file.split('.') # first check if a '.csv' file exists. If yes use the # data stored in the '.csv'-file and ignore # the data in the '.raw' file! # file_name = file_split[0] + '.csv' if not os.path.exists(file_name): file_name = file_split[0] + '.raw' if not os.path.exists(file_name): raise IOError, 'file %s does not exist' % file_name print 'file_name', file_name _data_array = loadtxt_bending(file_name) self.data_array = _data_array # check if a '.ASC'-file exists. If yes append this information # to the data array. # file_name = file_split[0] + '.ASC' if not os.path.exists(file_name): print 'NOTE: no data from displacement gauge is available (no .ASC file)' self.flag_ASC_file = False else: print 'NOTE: additional data from displacement gauge for center deflection is available (.ASC-file loaded)!' 
self.flag_ASC_file = True # add data array read in from .ASC-file; the values are assigned by '_set_array_attribs' based on the # read in values in 'names_and_units' read in from the corresponding .DAT-file # self.data_array_ASC = loadtxt(file_name, delimiter=';') else: print 'WARNING: data_file with path %s does not exist == False' % ( self.data_file) names_and_units = Property(depends_on='data_file') @cached_property def _get_names_and_units(self): '''names and units corresponding to the returned '_data_array' by 'loadtxt_bending' ''' names = ['w_raw', 'eps_c_raw', 'F_raw'] units = ['mm', '1*E-3', 'N'] print 'names, units from .raw-file', names, units return names, units names_and_units_ASC = Property(depends_on='data_file') @cached_property def _get_names_and_units_ASC(self): ''' Extract the names and units of the measured data. The order of the names in the .DAT-file corresponds to the order of the .ASC-file. ''' file_split = self.data_file.split('.') file_name = file_split[0] + '.DAT' data_file = open(file_name, 'r') lines = data_file.read().split() names = [] units = [] for i in range(len(lines)): if lines[i] == '#BEGINCHANNELHEADER': name = lines[i + 1].split(',')[1] unit = lines[i + 3].split(',')[1] names.append(name) units.append(unit) print 'names, units extracted from .DAT-file', names, units return names, units factor_list_ASC = Property(depends_on='data_file') def _get_factor_list_ASC(self): return self.names_and_units_ASC[0] def _set_array_attribs(self): '''Set the measured data as named attributes defining slices into the processed data array. 
''' for i, factor in enumerate(self.factor_list): self.add_trait( factor, Array(value=self.processed_data_array[:, i], transient=True)) if self.flag_ASC_file: for i, factor in enumerate(self.factor_list_ASC): self.add_trait( factor, Array(value=self.data_array_ASC[:, i], transient=True)) elastomer_law = Property(depends_on='input_change') @cached_property def _get_elastomer_law(self): elastomer_path = os.path.join(simdb.exdata_dir, 'bending_tests', 'three_point', '2011-06-10_BT-3PT-12c-6cm-0-TU_ZiE', 'elastomer_f-w.raw') _data_array_elastomer = loadtxt_bending(elastomer_path) # force [kN]: # NOTE: after conversion 'F_elastomer' is a positive value # F_elastomer = -0.001 * _data_array_elastomer[:, 2].flatten() # displacement [mm]: # NOTE: after conversion 'w_elastomer' is a positive value # w_elastomer = -1.0 * _data_array_elastomer[:, 0].flatten() mfn_displacement_elastomer = MFnLineArray(xdata=F_elastomer, ydata=w_elastomer) return frompyfunc(mfn_displacement_elastomer.get_value, 1, 1) w_wo_elast = Property(depends_on='input_change') @cached_property def _get_w_wo_elast(self): # use the machine displacement for the center displacement: # subtract the deformation of the elastomer cushion between the cylinder # and change sign in positive values for vertical displacement [mm] # return self.w_raw - self.elastomer_law(self.F_raw) M_ASC = Property(Array('float_'), depends_on='input_change') @cached_property def _get_M_ASC(self): return self.F_ASC * self.length / 4.0 M_raw = Property(Array('float_'), depends_on='input_change') @cached_property def _get_M_raw(self): return self.F_raw * self.length / 4.0 # # get only the ascending branch of the response curve # # # max_force_idx = Property(Int) # def _get_max_force_idx(self): # '''get the index of the maximum force''' # return argmax(-self.Kraft) # # f_asc = Property(Array) # def _get_f_asc(self): # '''get only the ascending branch of the response curve''' # return -self.Kraft[:self.max_force_idx + 1] K_bending_elast = 
Property(Array('float_'), depends_on='input_change') @cached_property def _get_K_bending_elast(self): '''calculate the analytical bending stiffness of the beam (3 point bending) ''' t = self.thickness w = self.width L = self.length # coposite E-modulus # E_c = self.E_c # moment of inertia # I_yy = t**3 * w / 12. delta_11 = (L**3) / 48 / E_c / I_yy # [MN/m]=[kN/mm] bending stiffness with respect to a force applied at center of the beam # K_bending_elast = 1 / delta_11 # print 'K_bending_elast', K_bending_elast return K_bending_elast F_cr = Property(Array('float_'), depends_on='input_change') @cached_property def _get_F_cr(self): '''calculate the analytical cracking load of the beam ''' t = self.thickness w = self.width L = self.length # approx. flectural tensile strength # f_cfl = 6. # MPa # resistant moment # W_yy = t**2 * w / 6. # analytical cracking load of the beam # corresponds to l = 0.46m and f_cfl = approx. 8.4 MPa# # F_cr = W_yy * f_cfl * 1000. / L # [kN] return F_cr def process_source_data(self): '''read in the measured data from file and assign attributes after array processing. 
''' super(ExpBT3PT, self).process_source_data() #--------------------------------------------- # process data from .raw file (machine data) #--------------------------------------------- # convert machine force [N] to [kN] and return only positive values # self.F_raw *= -0.001 # convert machine displacement [mm] to positive values # and remove offset # self.w_raw *= -1.0 self.w_raw -= self.w_raw[0] # convert [permille] to [-] and return only positive values # self.eps_c_raw *= -0.001 # access the derived arrays to initiate their processing # self.w_wo_elast self.M_raw #--------------------------------------------- # process data from .ASC file (displacement gauge) #--------------------------------------------- # only if separate ASC.-file with force-displacement data from displacement gauge is available # if self.flag_ASC_file == True: self.F_ASC = -1.0 * self.Kraft # remove offset and change sign to return positive displacement values # if hasattr(self, "WA50"): self.WA50 *= -1 self.WA50 -= self.WA50[0] WA50_avg = np.average(self.WA50) if hasattr(self, "W10_u"): self.W10_u *= -1 self.W10_u -= self.W10_u[0] W10_u_avg = np.average(self.W10_u) # check which displacement gauge has been used depending on weather two names are listed in .DAT file or only one # and assign values to 'w_ASC' # if hasattr(self, "W10_u") and hasattr(self, "WA50"): if W10_u_avg > WA50_avg: self.w_ASC = self.W10_u print 'self.W10_u assigned to self.w_ASC' else: self.w_ASC = self.WA50 print 'self.WA50 assigned to self.w_ASC' elif hasattr(self, "W10_u"): self.w_ASC = self.W10_u print 'self.W10_u assigned to self.w_ASC' elif hasattr(self, "WA50"): self.w_ASC = self.WA50 print 'self.WA50 assigned to self.w_ASC' # convert strain from [permille] to [-], # switch to positive values for compressive strains # and remove offset # self.eps_c_ASC = -0.001 * self.DMS_l self.eps_c_ASC -= self.eps_c_ASC[0] # access the derived arrays to initiate their processing # self.M_ASC 
    #--------------------------------------------------------------------------------
    # plot templates
    #--------------------------------------------------------------------------------

    # Map of plot labels (offered in the UI) to the name of the plotting
    # method on this class that renders the corresponding diagram.
    plot_templates = {
        'force / machine displacement (incl. w_elast)':
            '_plot_force_machine_displacement',
        'force / machine displacement (without w_elast)':
            '_plot_force_machine_displacement_wo_elast',
        'force / machine displacement (without w_elast, interpolated)':
            '_plot_force_machine_displacement_wo_elast_interpolated',
        'force / machine displacement (analytical offset)':
            '_plot_force_machine_displacement_wo_elast_analytical_offset',
        'force / gauge displacement':
            '_plot_force_gauge_displacement',
        'force / gauge displacement (analytical offset)':
            '_plot_force_gauge_displacement_with_analytical_offset',
        'force / gauge displacement (interpolated)':
            '_plot_force_gauge_displacement_interpolated',
        # 'smoothed force / gauge displacement' : '_plot_smoothed_force_gauge_displacement',
        # 'smoothed force / machine displacement' : '_plot_smoothed_force_machine_displacement_wo_elast',
        # 'moment / eps_c (ASC)': '_plot_moment_eps_c_ASC',
        'moment / eps_c (raw)': '_plot_moment_eps_c_raw',
        # 'smoothed moment / eps_c (ASC)' : '_plot_smoothed_moment_eps_c_ASC',
        # 'smoothed moment / eps_c (raw)' : '_plot_smoothed_moment_eps_c_raw',
        # 'analytical bending stiffness' : '_plot_analytical_bending_stiffness'
    }

    # NOTE(review): this default label does not match any key in
    # 'plot_templates' above -- confirm which template is actually meant.
    default_plot_template = 'force / deflection (displacement gauge)'

    def _plot_analytical_bending_stiffness(self, axes, color='red',
                                           linewidth=1., linestyle='--'):
        '''plot the analytical bending stiffness of the beam (3 point bending)
        '''
        t = self.thickness
        w = self.width
        L = self.length
        # composite E-modulus
        #
        E_c = self.E_c
        # moment of inertia
        #
        I_yy = t**3 * w / 12.
        # compliance of the simply supported beam loaded at midspan
        delta_11 = L**3 / 48 / E_c / I_yy
        # [MN/m] bending stiffness with respect to a force applied at center of the beam
        K_linear = 1 / delta_11
        w_linear = 2 * np.array([0., 1.])
        F_linear = 2 * np.array([0., K_linear])
        axes.plot(w_linear, F_linear, linestyle='--')

    def _plot_force_machine_displacement_wo_elast(self, axes, color='blue',
                                                  linewidth=1., linestyle='-'):
        '''Plot the ascending branch of force vs. machine displacement
        with the elastic deformation removed (self.w_wo_elast).
        '''
        # get the index of the maximum stress
        #
        max_force_idx = argmax(self.F_raw)
        # get only the ascending branch of the response curve
        #
        f_asc = self.F_raw[:max_force_idx + 1]
        w_asc = self.w_wo_elast[:max_force_idx + 1]
        axes.plot(w_asc, f_asc, color=color, linewidth=linewidth,
                  linestyle=linestyle)
        # plot analytical bending stiffness
        #
        w_linear = 2 * np.array([0., 1.])
        F_linear = 2 * np.array([0., self.K_bending_elast])
        axes.plot(w_linear, F_linear, linestyle='--')
        # xkey = 'deflection [mm]'
        # ykey = 'force [kN]'
        # axes.set_xlabel('%s' % (xkey,))
        # axes.set_ylabel('%s' % (ykey,))

    def _plot_force_machine_displacement_wo_elast_interpolated(
            self, axes, color='green', linewidth=1., linestyle='-'):
        '''Plot the ascending F-w branch (without elastic deformation),
        with the initial displacement offset removed by linearly
        extrapolating the 8%-10% force interval back to zero force.
        '''
        # get the index of the maximum stress
        #
        max_force_idx = argmax(self.F_raw)
        # get only the ascending branch of the response curve
        #
        f_asc = self.F_raw[:max_force_idx + 1]
        w_asc = np.copy(self.w_wo_elast[:max_force_idx + 1])
        # interpolate the starting point of the center deflection curve based on the slope of the curve
        # (remove offset in measured displacement where there is still no force measured)
        #
        idx_10 = np.where(f_asc > f_asc[-1] * 0.10)[0][0]
        idx_8 = np.where(f_asc > f_asc[-1] * 0.08)[0][0]
        f8 = f_asc[idx_8]
        f10 = f_asc[idx_10]
        w8 = w_asc[idx_8]
        w10 = w_asc[idx_10]
        # slope of the secant between the 8% and 10% force levels
        m = (f10 - f8) / (w10 - w8)
        delta_w = f8 / m
        w0 = w8 - delta_w * 0.9
        # print 'w0', w0
        f_asc_interpolated = np.hstack([0., f_asc[idx_8:]])
        w_asc_interpolated = np.hstack([w0, w_asc[idx_8:]])
        # print 'type( w_asc_interpolated )', type(w_asc_interpolated)
        w_asc_interpolated -= float(w0)
        axes.plot(w_asc_interpolated, f_asc_interpolated, color=color,
                  linewidth=linewidth, linestyle=linestyle)
        # fw_arr = np.hstack([f_asc_interpolated[:, None], w_asc_interpolated[:, None]])
        # print 'fw_arr.shape', fw_arr.shape
        # np.savetxt('BT-3PT-12c-6cm-TU_f-w_interpolated.csv', fw_arr, delimiter=';')
        # plot analytical bending stiffness
        #
        w_linear = 2 * np.array([0., 1.])
        F_linear = 2 * np.array([0., self.K_bending_elast])
        axes.plot(w_linear, F_linear, linestyle='--')

    def _plot_force_machine_displacement_wo_elast_analytical_offset(
            self, axes, color='green', linewidth=1., linestyle='-'):
        '''Plot the ascending F-w branch shifted so that its start agrees
        with the analytical (uncracked) bending stiffness.
        '''
        # get the index of the maximum stress
        #
        max_force_idx = argmax(self.F_raw)
        # get only the ascending branch of the response curve
        #
        f_asc = self.F_raw[:max_force_idx + 1]
        w_asc = np.copy(self.w_wo_elast[:max_force_idx + 1])
        # center moment of the 3-point bending beam
        M_asc = f_asc * self.length / 4.
        eps_c_asc = self.eps_c_raw[:max_force_idx + 1]
        t = self.thickness
        w = self.width
        # composite E-modulus
        #
        E_c = self.E_c
        # resistant moment
        #
        W_yy = t**2 * w / 6.
        # [MN/m] bending stiffness with respect to center moment
        K_I_analytic = W_yy * E_c
        # [kN/m] bending stiffness with respect to center moment
        K_I_analytic *= 1000.
        # interpolate the starting point of the center deflection curve based on the slope of the curve
        # (remove offset in measured displacement where there is still no force measured)
        #
        idx_lin = np.where(M_asc <= K_I_analytic * eps_c_asc)[0][0]
        idx_lin = int(idx_lin * 0.7)
        # idx_lin = 50
        # idx_lin = np.where(M_asc - M_asc[0] / eps_c_asc <= 0.90 * K_I_analytic)[0][0]
        print 'idx_lin', idx_lin
        print 'F_asc[idx_lin]', f_asc[idx_lin]
        print 'M_asc[idx_lin]', M_asc[idx_lin]
        print 'w_asc[idx_lin]', w_asc[idx_lin]
        w_lin_epsc = w_asc[idx_lin]
        # deflection the analytical stiffness predicts at that force level
        w_lin_analytic = f_asc[idx_lin] / self.K_bending_elast
        f_asc_offset_analytic = f_asc[idx_lin:]
        w_asc_offset_analytic = w_asc[idx_lin:]
        w_asc_offset_analytic -= np.array([w_lin_epsc])
        w_asc_offset_analytic += np.array([w_lin_analytic])
        axes.plot(w_asc_offset_analytic, f_asc_offset_analytic, color=color,
                  linewidth=linewidth, linestyle=linestyle)
        # plot analytical bending stiffness
        #
        w_linear = 2 * np.array([0., 1.])
        F_linear = 2 * np.array([0., self.K_bending_elast])
        axes.plot(w_linear, F_linear, linestyle='--')

    def _plot_force_machine_displacement(self, axes, color='black',
                                         linewidth=1., linestyle='-'):
        '''Plot raw force vs. raw machine displacement, plus the
        analytical elastic stiffness as a dashed reference line.
        '''
        xdata = self.w_raw
        ydata = self.F_raw
        axes.plot(xdata, ydata, color=color, linewidth=linewidth,
                  linestyle=linestyle)
        # xkey = 'deflection [mm]'
        # ykey = 'force [kN]'
        # axes.set_xlabel('%s' % (xkey,))
        # axes.set_ylabel('%s' % (ykey,))
        # plot analytical bending stiffness
        #
        w_linear = 2 * np.array([0., 1.])
        F_linear = 2 * np.array([0., self.K_bending_elast])
        axes.plot(w_linear, F_linear, linestyle='--')

    def _plot_force_gauge_displacement_with_analytical_offset(
            self, axes, color='black', linewidth=1., linestyle='-'):
        '''Plot force vs. gauge displacement; the initial seconds of the
        record are cut away and replaced by an offset derived from the
        analytical bending stiffness.
        '''
        # skip the first values (= first seconds of testing)
        # and start with the analytical bending stiffness instead to avoid artificial offset of F-w-diagram
        #
        # w_max = np.max(self.w_ASC)
        # cut_idx = np.where(self.w_ASC > 0.001 * w_max)[0]
        cut_idx = np.where(self.w_ASC > 0.01)[0]
        print 'F_cr ', self.F_cr
        # cut_idx = np.where(self.F_ASC > 0.6 * self.F_cr)[0]
        print 'cut_idx', cut_idx[0]
        print 'w_ASC[cut_idx[0]]', self.w_ASC[cut_idx[0]]
        xdata = np.copy(self.w_ASC[cut_idx])
        ydata = np.copy(self.F_ASC[cut_idx])
        # specify offset if force does not start at the origin with value 0.
        F_0 = ydata[0]
        print 'F_0 ', F_0
        offset_w = F_0 / self.K_bending_elast
        xdata -= xdata[0]
        xdata += offset_w
        f_asc_interpolated = np.hstack([0, ydata])
        w_asc_interpolated = np.hstack([0, xdata])
        # fw_arr = np.hstack([f_asc_interpolated[:, None], w_asc_interpolated[:, None]])
        # print 'fw_arr.shape', fw_arr.shape
        # np.savetxt('BT-3PT-6c-2cm-TU_f-w_interpolated.csv', fw_arr, delimiter=';')
        xdata = self.w_raw
        ydata = self.F_raw
        axes.plot(xdata, ydata, color='blue', linewidth=linewidth,
                  linestyle=linestyle)
        axes.plot(w_asc_interpolated, f_asc_interpolated, color=color,
                  linewidth=linewidth, linestyle=linestyle)
        # xkey = 'deflection [mm]'
        # ykey = 'force [kN]'
        # axes.set_xlabel('%s' % (xkey,))
        # axes.set_ylabel('%s' % (ykey,))
        # plot analytical bending stiffness
        #
        w_linear = 2 * np.array([0., 1.])
        F_linear = 2 * np.array([0., self.K_bending_elast])
        axes.plot(w_linear, F_linear, linestyle='--')

    def _plot_force_gauge_displacement(self, axes, offset_w=0., color='black',
                                       linewidth=1., linestyle='-'):
        '''Plot force vs. gauge displacement (ascending branch arrays),
        optionally shifted by 'offset_w'.
        '''
        xdata = self.w_ASC
        ydata = self.F_ASC
        # specify offset if force does not start at the origin
        # NOTE(review): '+=' mutates self.w_ASC in place when offset_w != 0
        # (no copy is taken, unlike the sibling method above) -- verify.
        xdata += offset_w
        axes.plot(xdata, ydata, color=color, linewidth=linewidth,
                  linestyle=linestyle)
        # xkey = 'deflection [mm]'
        # ykey = 'force [kN]'
        # axes.set_xlabel('%s' % (xkey,))
        # axes.set_ylabel('%s' % (ykey,))
        # fw_arr = np.hstack([xdata[:, None], ydata[:, None]])
        # print 'fw_arr.shape', fw_arr.shape
        # np.savetxt('BT-3PT-6c-2cm-TU-80cm-V3_f-w_asc.csv', fw_arr, delimiter=';')
        # plot analytical bending stiffness
        #
        w_linear = 2 * np.array([0., 1.])
        F_linear = 2 * np.array([0., self.K_bending_elast])
        axes.plot(w_linear, F_linear, linestyle='--')

    def _plot_smoothed_force_gauge_displacement(self, axes):
        '''Plot a smoothed ('flat' window) version of the ascending
        force-gauge-displacement branch.
        '''
        # get the index of the maximum stress
        #
        max_force_idx = argmax(self.F_ASC)
        # get only the ascending branch of the response curve
        #
        F_asc = self.F_ASC[:max_force_idx + 1]
        w_asc = self.w_ASC[:max_force_idx + 1]
        n_points = int(self.n_fit_window_fraction * len(w_asc))
        F_smooth = smooth(F_asc, n_points, 'flat')
        w_smooth = smooth(w_asc, n_points, 'flat')
        axes.plot(w_smooth, F_smooth, color='blue', linewidth=2)

    def _plot_force_gauge_displacement_interpolated(self, axes, color='green',
                                                    linewidth=1.,
                                                    linestyle='-'):
        '''Get only the ascending branch of the measured load-displacement
        curve and remove the initial displacement offset by linear
        back-extrapolation of the 8%-10% force interval.
        '''
        # get the index of the maximum stress
        #
        max_force_idx = argmax(self.F_ASC)
        # get only the ascending branch of the response curve
        #
        f_asc = self.F_ASC[:max_force_idx + 1]
        w_asc = self.w_ASC[:max_force_idx + 1]
        # interpolate the starting point of the center deflection curve based on the slope of the curve
        # (remove offset in measured displacement where there is still no force measured)
        #
        idx_10 = np.where(f_asc > f_asc[-1] * 0.10)[0][0]
        idx_8 = np.where(f_asc > f_asc[-1] * 0.08)[0][0]
        f8 = f_asc[idx_8]
        f10 = f_asc[idx_10]
        w8 = w_asc[idx_8]
        w10 = w_asc[idx_10]
        m = (f10 - f8) / (w10 - w8)
        delta_w = f8 / m
        w0 = w8 - delta_w * 0.9
        print 'w0', w0
        f_asc_interpolated = np.hstack([0., f_asc[idx_8:]])
        w_asc_interpolated = np.hstack([w0, w_asc[idx_8:]])
        print 'type( w_asc_interpolated )', type(w_asc_interpolated)
        w_asc_interpolated -= float(w0)
        # w_offset = f_asc[idx_10] / self.K_bending_elast
        # f_asc_interpolated = np.hstack([0., f_asc[ idx_10: ]])
        # w_asc_interpolated = np.hstack([0, w_asc[ idx_10: ] - w_asc[idx_10] + w_offset])
        axes.plot(w_asc_interpolated, f_asc_interpolated, color=color,
                  linewidth=linewidth, linestyle=linestyle)
        # plot analytical bending stiffness
        #
        w_linear = 2 * np.array([0., 1.])
        F_linear = 2 * np.array([0., self.K_bending_elast])
        axes.plot(w_linear, F_linear, linestyle='--')

    def _plot_smoothed_force_machine_displacement_wo_elast(self, axes):
        '''Plot a smoothed version of the ascending force vs. machine
        displacement (without elastic deformation) branch.
        '''
        # get the index of the maximum stress
        #
        max_force_idx = argmax(self.F_raw)
        # get only the ascending branch of the response curve
        #
        F_asc = self.F_raw[:max_force_idx + 1]
        w_asc = self.w_wo_elast[:max_force_idx + 1]
        n_points = int(self.n_fit_window_fraction * len(w_asc))
        F_smooth = smooth(F_asc, n_points, 'flat')
        w_smooth = smooth(w_asc, n_points, 'flat')
        axes.plot(w_smooth, F_smooth, color='blue', linewidth=2)
        # plot analytical bending stiffness
        #
        w_linear = 2 * np.array([0., 1.])
        F_linear = 2 * np.array([0., self.K_bending_elast])
        axes.plot(w_linear, F_linear, linestyle='--')
        # secant_stiffness_w10 = ( f_smooth[10] - f_smooth[0] ) / ( w_smooth[10] - w_smooth[0] )
        # w0_lin = array( [0.0, w_smooth[10] ], dtype = 'float_' )
        # f0_lin = array( [0.0, w_smooth[10] * secant_stiffness_w10 ], dtype = 'float_' )
        # axes.plot( w0_lin, f0_lin, color = 'black' )

    def _plot_moment_eps_c_ASC(self, axes):
        '''Plot center moment vs. compressive strain (ascending branch)
        together with the uncracked-state stiffness line.
        '''
        xkey = 'compressive strain [1*E-3]'
        ykey = 'moment [kNm]'
        xdata = self.eps_c_ASC
        ydata = self.M_ASC
        axes.set_xlabel('%s' % (xkey, ))
        axes.set_ylabel('%s' % (ykey, ))
        axes.plot(xdata, ydata)
        # plot stiffness in uncracked state
        t = self.thickness
        w = self.width
        # composite E-modulus
        #
        E_c = self.E_c
        # resistant moment
        #
        W_yy = t**2 * w / 6.
        max_M = np.max(self.M_raw)
        # [MN/m] bending stiffness with respect to center moment
        K_linear = W_yy * E_c
        # [kN/m] bending stiffness with respect to center moment
        K_linear *= 1000.
        w_linear = np.array([0., max_M / K_linear])
        M_linear = np.array([0., max_M])
        axes.plot(w_linear, M_linear, linestyle='--')

    def _plot_moment_eps_c_raw(self, axes, color='black', linewidth=1.5,
                               linestyle='-'):
        '''Plot center moment vs. compressive strain (raw record)
        together with the uncracked-state stiffness line.
        '''
        xkey = 'compressive strain [1*E-3]'
        ykey = 'moment [kNm]'
        xdata = self.eps_c_raw
        ydata = self.M_raw
        axes.set_xlabel('%s' % (xkey, ))
        axes.set_ylabel('%s' % (ykey, ))
        axes.plot(xdata, ydata, color=color, linewidth=linewidth,
                  linestyle=linestyle)
        # plot stiffness in uncracked state
        t = self.thickness
        w = self.width
        # composite E-modulus
        #
        E_c = self.E_c
        # resistant moment
        #
        W_yy = t**2 * w / 6.
        max_M = np.max(self.M_raw)
        # [MN/m] bending stiffness with respect to center moment
        K_linear = W_yy * E_c
        # [kN/m] bending stiffness with respect to center moment
        K_linear *= 1000.
        w_linear = np.array([0., max_M / K_linear])
        M_linear = np.array([0., max_M])
        axes.plot(w_linear, M_linear, linestyle='--')

    # fraction of the record length used as smoothing window
    n_fit_window_fraction = Float(0.1)

    # smoothed (moment, strain) pair for the ascending branch
    smoothed_M_eps_c_ASC = Property(depends_on='input_change')

    @cached_property
    def _get_smoothed_M_eps_c_ASC(self):
        # get the index of the maximum stress
        max_idx = argmax(self.M_ASC)
        # get only the ascending branch of the response curve
        m_asc = self.M_ASC[:max_idx + 1]
        eps_c_asc = self.eps_c_ASC[:max_idx + 1]
        n_points = int(self.n_fit_window_fraction * len(eps_c_asc))
        m_smoothed = smooth(m_asc, n_points, 'flat')
        eps_c_smoothed = smooth(eps_c_asc, n_points, 'flat')
        return m_smoothed, eps_c_smoothed

    smoothed_eps_c_ASC = Property

    def _get_smoothed_eps_c_ASC(self):
        return self.smoothed_M_eps_c_ASC[1]

    smoothed_M_ASC = Property

    def _get_smoothed_M_ASC(self):
        return self.smoothed_M_eps_c_ASC[0]

    def _plot_smoothed_moment_eps_c_ASC(self, axes):
        '''Plot the smoothed moment-strain curve (ascending branch).'''
        axes.plot(self.smoothed_eps_c_ASC, self.smoothed_M_ASC,
                  color='blue', linewidth=2)

    # smoothed (moment, strain) pair for the raw record
    smoothed_M_eps_c_raw = Property(depends_on='input_change')

    @cached_property
    def _get_smoothed_M_eps_c_raw(self):
        # get the index of the maximum stress
        max_idx = argmax(self.M_raw)
        # get only the ascending branch of the response curve
        m_asc = self.M_raw[:max_idx + 1]
        eps_c_asc = self.eps_c_raw[:max_idx + 1]
        n_points = int(self.n_fit_window_fraction * len(eps_c_asc))
        m_smoothed = smooth(m_asc, n_points, 'flat')
        eps_c_smoothed = smooth(eps_c_asc, n_points, 'flat')
        return m_smoothed, eps_c_smoothed

    smoothed_eps_c_raw = Property

    def _get_smoothed_eps_c_raw(self):
        return self.smoothed_M_eps_c_raw[1]

    smoothed_M_raw = Property

    def _get_smoothed_M_raw(self):
        return self.smoothed_M_eps_c_raw[0]

    def _plot_smoothed_moment_eps_c_raw(self, axes):
        '''Plot the smoothed moment-strain curve (raw record).'''
        axes.plot(self.smoothed_eps_c_raw, self.smoothed_M_raw,
                  color='blue', linewidth=2)

    #--------------------------------------------------------------------------------
    # view
    #--------------------------------------------------------------------------------
    traits_view = View(VGroup(
        Group(Item('length', format_str="%.3f"),
              Item('width', format_str="%.3f"),
              Item('thickness', format_str="%.3f"),
              label='geometry'),
        Group(Item('loading_rate'),
              Item('age'),
              label='loading rate and age'),
        Group(Item('E_c', show_label=True, style='readonly',
                   format_str="%.0f"),
              Item('ccs@', show_label=False),
              label='composite cross section')),
        scrollable=True,
        resizable=True,
        height=0.8,
        width=0.6)
class HPShell(HasTraits):
    '''Geometry definition.
    '''

    #===========================================================================
    # geometric variables and values
    #===========================================================================

    # part of mushroof
    #
    # @todo: "four" is not supported by "n_elems_xy_dict"in mushroff_model
    mushroof_part = Enum('detail', 'quarter', 'one', 'four')

    # origin
    # @todo: define as "_default"
    #
    X0 = Array(float, value=[0., 0., 0.])

    # element properties of grid
    #
    n_elems_xy = Int(6)
    n_elems_z = Int(3)

    # number of elements along xy for a quarter of the structure
    n_elems_xy_quarter = Property(Int)

    @cached_property
    def _get_n_elems_xy_quarter(self):
        # NOTE(review): integer division in py2 when scale_size is an int
        # -- here scale_size is a float from scale_dict, so true division.
        return self.n_elems_xy / self.scale_size

    # standard array for column shift
    # shift array shifts the elements towards the defined coordinates
    # [x_shift,y_shift,element_between]
    # array needs to have the right shape (:,3)!!
    #
    # @todo: define as "_default"
    #
    shift_array = Array(float, shape=(None, 3),
                        value=[[0.45 / 2**0.5, 0.45 / 2**0.5, 1]])

    # dimensions of the shell for one quarter of mush_roof
    # @todo: add "_quarter"
    #
    length_x = Float(4.0)  # [m]
    length_y = Float(4.0)  # [m]
    length_z = Float(1.062)  # [m]
    t_shell = Float(0.06)  # [m]
    width_column = Float(0.45)  # [m]

    length_x_detail = Float(1.5)  # [m]
    length_y_detail = Float(1.5)  # [m]

    # ratio of the detail length to the quarter length (factor 2 maps the
    # quarter to the full extent)
    scale_size_detail_x = Property(Float)

    def _get_scale_size_detail_x(self):
        return self.length_x_detail / self.length_x * 2.

    scale_size_detail_y = Property(Float)

    def _get_scale_size_detail_y(self):
        return self.length_y_detail / self.length_y * 2.

    # scale factor for different mushroof parts
    # Defines the proportion between the lenght of the model
    # with respect to the length of a quarter shell as the
    # basic substructure of which the model consists of.
    # @todo: add "depend_on" or remove "cached_property"
    # @todo: move to class definition of "mushroof_model" not in "HPShell"
    #        (also see there "n_elems_dict" with implicit "scale_factor")
    #
    scale_size = Property(Float)

    # @cached_property
    def _get_scale_size(self):
        # scale_size_detail = self.lenght_x_detail / self.length_x
        scale_dict = {
            'detail': self.scale_size_detail_x,
            'quarter': 1.0,
            'one': 2.0,
            'four': 4.0
        }
        return scale_dict[self.mushroof_part]

    # factor to scale delta_h (inclination of the shell)
    # The thickness remains unchanged by this scaling, e.g. self.thickness = 0.06 [m]
    #
    delta_h_scalefactor = Float(1.00)  # [-]

    # shift of column elements
    #
    shift_elems = Bool(True)

    # const_edge element operator
    # (for non-linear analysis only, where an element layer of constant
    # thickness is needed to simulate the reinforced behaviour of the
    # concrete.
    #
    const_edge_elem = Bool(False)
    t_edge = Float(0.03)  # [m]
    n_elems_edge = Int(1)  # number of dofs used for edge refinement

    #===========================================================================
    # reading options
    #===========================================================================

    # "lowerface_cut_off" - option replaces constant height for the coordinates
    # which connect to the column (this cuts of the shell geometry horizontally
    # at the bottom of the lower face of the shell geometry.
    # Option should be used for the robj-file with 4x4m geometry
    #
    cut_off_lowerface = Bool(True)

    # corresponds to the delta in the geometry .obj-file with name '4x4m' as a cut off
    #
    delta_h = Float(1.00)  # [m]

    # choose geometric file (obj-data file)
    #
    geo_input_name = Enum('4x4m', '02')

    # filter for '4x4m' file needs to be done to have regular grid
    # in order to rbf-function leading to stable solution without oscilation
    #
    geo_filter = Dict({'4x4m': delete_second_rows})

    def _read_arr(self, side='lowerface_'):
        '''Read the vertex array for the given shell side ('lowerface_' /
        'upperface_') from the .robj file, applying the per-file filter.
        '''
        file_name = side + self.geo_input_name + '.robj'
        file_path = join('geometry_files', file_name)
        v_arr = read_rsurface(file_path)
        filter = self.geo_filter.get(self.geo_input_name, None)
        if filter != None:
            v_arr = filter(v_arr)
        return v_arr

    # array of the vertex positions in global
    # x,y,z-coordinates defining the lower surface of the shell
    vl_arr = Property(Array(float))

    @cached_property
    def _get_vl_arr(self):
        vl_arr = self._read_arr('lowerface_')
        if self.cut_off_lowerface == True:
            print '--- lower face z-coords cut off ---'
            # z-values of the coords from the lower face are cut off.
            # From the highest z-coordinate of the lower face the vertical
            # distance is 1 m (=delta h). At this limit the lower face is
            # cut off. Global z coordinate is assumed to point up.
            #
            vl_z_max = max(vl_arr[:, 2])
            vl_z_min = vl_z_max - self.delta_h
            vl_arr_z = where(vl_arr[:, 2] < vl_z_min, vl_z_min, vl_arr[:, 2])
            vl_arr = c_[vl_arr[:, 0:2], vl_arr_z]
        return vl_arr

    # array of the vertex positions in global
    # x,y,z-coordinates defining the upper surface of the shell
    vu_arr = Property(Array(float))

    @cached_property
    def _get_vu_arr(self):
        return self._read_arr('upperface_')

    #------------------------------------------------------------------------------
    # hp_shell geometric transformation
    #------------------------------------------------------------------------------

    def __call__(self, points):
        '''Return the global coordinates of the supplied local points.
        '''
        # number of local grid points for each coordinate direction
        # values must range between 0 and 1
        #
        xi, yi, zi = points[:, 0], points[:, 1], points[:, 2]
        print "xi", xi
        print "xi.shape", xi.shape

        # size of total structure
        #
        # @todo: move to class definition of "mushroof_model" and send to "__call__"
        scale_size = self.scale_size
        print "scale_size", scale_size

        # @todo: add "_quarter" (see above)
        length_x_tot = self.length_x * scale_size
        length_y_tot = self.length_y * scale_size
        n_elems_xy_quarter = self.n_elems_xy_quarter

        # distance from origin for each mushroof_part
        #
        def d_origin_fn(self, coords):
            # if self.mushroof_part == 'quarter':
            #     return coords
            # if self.mushroof_part == 'one':
            #     return abs( 2.0 * coords - 1.0 )
            # NOTE(review): only the 'detail' branch is live -- for any
            # other mushroof_part this returns None; confirm intended.
            if self.mushroof_part == 'detail':
                print 'in d_origin_fn'
                return abs(1.0 * coords - 0.5) * scale_size
            # @todo: corresponding "scale_factor" needs to be added
            #        in order for this to work
            # if self.mushroof_part == 'four':
            #     return where( coords < 0.5, abs( 4 * coords - 1 ), abs( -4 * coords + 3 ) )

        # values are used to calculate the z-coordinate using RBF-function of the quarter
        # (= values of the distance to the origin as absolute value)
        #
        xi_rbf = d_origin_fn(self, xi)
        print 'xi_rbf', xi_rbf
        yi_rbf = d_origin_fn(self, yi)

        # normalized coordinates of the vertices for lower- and upperface
        # NOTE: the underline character indicates a normalized value
        #
        vl_arr_, vu_arr_ = normalize_rsurfaces(self.vl_arr, self.vu_arr)

        # use a radial basis function approximation (rbf) (i.e. interpolation of
        # scattered data) based on the normalized vertex points of the lower face
        #
        x_ = vl_arr_[:, 0]

        # flip the orientation of the local coordinate system in the
        # corresponding y-direction depending on the data file
        #
        geo_input_name = self.geo_input_name
        if geo_input_name == '4x4m':
            y_ = vl_arr_[:, 1]
        else:
            y_ = 1 - vl_arr_[:, 1]

        z_ = vl_arr_[:, 2]
        rbf = Rbf(x_, y_, z_, function='cubic')

        # get the z-value at the supplied local grid points
        # of the lower face
        #
        zi_lower_ = rbf(xi_rbf, yi_rbf)

        # use a radial basis function approximation (rbf) (i.e. interpolation of
        # scattered data) based on the normalized vertex points of the upper face
        #
        x_ = vu_arr_[:, 0]

        # flip the orientation of the local coordinate system in the
        # corresponding y-direction depending on the data file
        #
        geo_input_name = self.geo_input_name
        if geo_input_name == '4x4m':
            y_ = vu_arr_[:, 1]
        else:
            y_ = 1 - vu_arr_[:, 1]

        z_ = vu_arr_[:, 2]
        rbf = Rbf(x_, y_, z_, function='cubic')

        # get the z-value at the supplied local grid points
        # of the upper face
        #
        # note that zi_upper_ is a normalized coordinate!
        #
        zi_upper_ = rbf(xi_rbf, yi_rbf)

        # thickness is multiplied by the supplied zi coordinate
        #
        z_ = (zi_lower_ +
              (zi_upper_ - zi_lower_) * zi /
              self.delta_h_scalefactor) * self.delta_h_scalefactor

        # coordinates of origin
        #
        X, Y, Z = self.X0

        print '--- geometric transformation done ---'

        # multiply the local grid points with the real dimensions in order to obtain the
        # global coordinates of the mushroof_part:
        #
        return c_[X + xi * length_x_tot,
                  Y + yi * length_y_tot,
                  Z + z_ * self.length_z]
class CBRandXi(RF):
    '''
    Crack bridged by a fiber with constant
    frictional interface to rigid; free fiber end;

    The response combines the contribution of intact fibers with the
    contribution of fibers that have already broken, using Weibull
    statistics (shape m, scale sV0) for the fiber strength.
    '''

    implements(IRF)

    title = Str('crack bridge with rigid matrix')

    # frictional bond strength
    tau = Float(2.5, auto_set=False, enter_set=True, input=True,
                distr=['uniform', 'norm'])

    r = Float(0.013, auto_set=False, enter_set=True, input=True,
              distr=['uniform', 'norm'], desc='fiber radius')

    # fiber modulus of elasticity
    E_f = Float(72e3, auto_set=False, enter_set=True, input=True,
                distr=['uniform'])

    # Weibull shape parameter of the fiber strength distribution
    m = Float(5., auto_set=False, enter_set=True, input=True,
              distr=['uniform'])

    # Weibull scale parameter of the fiber strength distribution
    sV0 = Float(3.e-3, auto_set=False, enter_set=True, input=True,
                distr=['uniform'])

    # fiber volume fraction
    V_f = Float(0.0175, auto_set=False, enter_set=True, input=True,
                distr=['uniform'])

    # control variable: crack opening displacement
    w = Float(auto_set=False, enter_set=True, input=True,
              distr=['uniform'], desc='crack width',
              ctrl_range=(0.0, 1.0, 10))

    include_pullout = Bool(True)

    x_label = Str('crack opening [mm]')
    y_label = Str('composite stress [MPa]')

    C_code = Str('')

    def __call__(self, w, tau, E_f, V_f, r, m, sV0):
        '''Evaluate the bridging response at crack opening w.

        All parameters may be scalars or numpy arrays (broadcasting).
        NOTE: the original implementation computed an unused duplicate
        'ef0_inf = np.sqrt(T * w / E_f)' (identical to ef0); it has been
        removed -- the returned value is unchanged.
        '''
        # shear flow per unit fiber cross-sectional stiffness
        T = 2. * tau / r
        # scale parameter with respect to a reference volume
        s0 = ((T * (m + 1) * sV0**m) /
              (2. * E_f * pi * r**2))**(1. / (m + 1))
        # strain of intact fibers at the crack plane
        ef0 = np.sqrt(T / E_f) * np.sqrt(w)
        # probability that a fiber has broken at strain ef0
        G = 1 - np.exp(-(ef0 / s0)**(m + 1))
        # contribution of the intact fibers
        mu_int = ef0 * E_f * V_f * (1 - G)
        # mean strain contribution of the broken fibers
        # (lower incomplete gamma, regularized by scipy's gammainc)
        I = s0 * gamma(1 + 1. / (m + 1)) * gammainc(
            1 + 1. / (m + 1), (ef0 / s0)**(m + 1))
        mu_broken = E_f * V_f * I / (m + 1)
        res = mu_int + mu_broken
        return res * r**2
class DoublePulloutSym(RF):
    '''Symmetrical yarn pullout response function.

    Fix against original: 'group_attribs' had a stray trailing comma
    after the closing parenthesis of VGroup(...), turning it into a
    1-tuple instead of a VGroup instance before being passed to the
    View definitions below.
    '''

    implements(IRF)

    title = Str('symetrical yarn pullout')

    # breaking strain of the yarn
    xi = Float(0.0179, auto_set=False, enter_set=True, input=True,
               distr=['weibull_min', 'uniform'])

    # frictional bond stress
    tau_fr = Float(2.5, auto_set=False, enter_set=True, input=True,
                   distr=['uniform', 'norm'])
    # free length
    l = Float(0.0, auto_set=False, enter_set=True, input=True,
              distr=['uniform'])

    # yarn diameter
    d = Float(26e-3, auto_set=False, input=True, enter_set=True,
              distr=['uniform', 'weibull_min'])

    E_mod = Float(72.0e3, auto_set=False, enter_set=True, input=True,
                  distr=['uniform'])
    # slack
    theta = Float(0.01, auto_set=False, enter_set=True, input=True,
                  distr=['uniform', 'norm'])

    phi = Float(1., auto_set=False, enter_set=True, input=True,
                distr=['uniform', 'norm'])

    # embedded length
    L = Float(1., auto_set=False, enter_set=True, input=True,
              distr=['uniform'])

    free_fiber_end = Bool(True, input=True)

    # control variable: crack opening displacement
    w = Float(enter_set=True, input=True, ctrl_range=(0, 1, 10))

    weave_code = '''
        '''

    def __call__(self, w, tau_fr, l, d, E_mod, theta, xi, phi, L):
        '''Return the force for a prescribed crack opening displacement w.
        '''
        # yarn cross-sectional area
        A = pi * d**2 / 4.
        # slack consumes part of the free length and opening
        l = l * (1 + theta)
        w = w - theta * l
        # bond flow along the perimeter
        Tau = tau_fr * phi * d * pi
        # symmetric (double-sided) pullout force; H is the Heaviside step
        P_ = 0.5 * (-l * Tau +
                    sqrt(l**2 * Tau**2 + 4 * w * H(w) * E_mod * A * Tau))
        # one sided pullout P_ = ( -l * Tau + sqrt( l ** 2 * Tau ** 2 + 2 * w * H( w ) * E_mod * A * Tau ) )

        if self.free_fiber_end:
            # ------ FREE LENGTH -------
            # frictional force along the bond length
            P_fr = Tau * (L - l)
            # if pullout_criterion positive - embedded
            # otherwise pulled out
            #
            pull_out_criterion = P_fr - P_
            P_ = P_ * H(pull_out_criterion) + P_fr * H(-pull_out_criterion)
        else:
            # ------ clamped fiber end ---------
            v = L * (l * Tau + Tau * L) / E_mod / A
            P_ = P_ * H(Tau * L - P_) + \
                (Tau * L + (w - v) / (l + 2 * L) * A * E_mod) * H(P_ - Tau * L)
            # ----------------------------------

        # cap the force at the yarn breaking load
        P = P_ * H(A * E_mod * xi - P_)
        return P

    figure = Instance(Figure)

    def _figure_default(self):
        figure = Figure(facecolor='white')
        return figure

    changed = Event

    @on_trait_change('+input')
    def _set_changed(self):
        self.changed = True

    data_changed = Event

    @on_trait_change('+input')
    def refresh(self):
        '''Re-plot the response P(w) over w in [0, 1].'''
        figure = self.figure
        figure.clear()
        axes = figure.gca()
        P_fn = lambda w: self.__call__(w, self.tau_fr, self.l, self.d,
                                       self.E_mod, self.theta, self.xi,
                                       self.phi, self.L)
        pyF = frompyfunc(P_fn, 1, 1)
        w_arr = linspace(0.0, 1.0, 100)
        P_arr = array(pyF(w_arr), dtype='float_')
        axes.plot(w_arr, P_arr, lw=1.0, color='blue')
        self.data_changed = True

    # NOTE: no trailing comma here -- it previously made this a tuple
    group_attribs = VGroup(Item('tau_fr'),
                           Item('l'),
                           Item('d'),
                           Item('E_mod'),
                           Item('theta'),
                           Item('xi'),
                           Item('phi'),
                           Item('L'),
                           Item('free_fiber_end'),
                           )

    traits_view = View(group_attribs,
                       scrollable=True,
                       resizable=True,
                       id='mk.figure.attribs',
                       dock='tab',
                       )

    traits_view_diag = View(HSplit(group_attribs,
                                   VGroup(Item('figure',
                                               editor=MPLFigureEditor(),
                                               show_label=False,
                                               resizable=True),
                                          id='mk.figure.view'),
                                   ),
                            id='mk.view',
                            buttons=['OK', 'Cancel'],
                            resizable=True,
                            width=600,
                            height=400)
class HPShell(HasTraits): '''Geometry definition of a hyperbolic parabolid shell. ''' #----------------------------------------------------------------- # geometric parameters of the shell #----------------------------------------------------------------- # dimensions of the shell for one quarter of mush_roof # length_xy_quarter = Float(3.5, input=True) # [m] length_z = Float(0.927, input=True) # [m] # corresponds to the delta in the geometry .obj-file with name '4x4m' as a cut off # delta_h = Float(0.865, input=True) # [m] # scale factors for geometric dimensions # NOTE: parameters can be scaled separately, i.e. scaling of 'delta_h' (inclination of the shell) # does not effect the scaling of the thickness # scalefactor_delta_h = Float(1.00, input=True) # [-] scalefactor_length_xy = Float(1.00, input=True) # [-] # thickness of the shell # NOTE: only used by the option 'const_reinf_layer' # t_shell = Float(0.06, input=True) # [m] width_top_col = Float(0.45, input=True) # [m] # factor shifting the z coordinates at the middle nodes of the edges upwards # in order to include the effect of imperfection in the formwork z_imperfection_factor = Float(0.0) #----------------------------------------------------------------- # specify the relation of the total structure (in the 'mushroof'-model) # with respect to a quarter of one shell defined in 'HPShell' #----------------------------------------------------------------- # @todo: "four" is not supported by "n_elems_xy_dict"in mushroff_model mushroof_part = Enum('one', 'quarter', 'four', input=True) # 'scale_size' parameter is used as scale factor for different mushroof parts # Defines the proportion between the length of the total model # with respect to the length of a quarter shell as the # basic substructure of which the model consists of. 
# @todo: add "depends_on" or remove "cached_property" # @todo: move to class definition of "mushroof_model" not in "HPShell" # (also see there "n_elems_dict" with implicit "scale_factor") # scale_size = Property(Float, depends_on='mushroof_part') @cached_property def _get_scale_size(self): scale_dict = {'quarter': 1.0, 'one': 2.0, 'four': 4.0} return scale_dict[self.mushroof_part] # origin of the shell # X0 = Array(float, input=True) def _X0_default(self): return array([0., 0., 0.]) #----------------------------------------------------------------- # discretisation #----------------------------------------------------------------- # number of element used for the discretisation ( dimensions of the entire model) # n_elems_xy_quarter = Int(5, input=True) n_elems_z = Int(3, input=True) n_elems_xy = Property(Int, depends_on='n_elems_xy_quarter, +input') @cached_property def _get_n_elems_xy(self): return self.n_elems_xy_quarter * self.scale_size #----------------------------------------------------------------- # option: 'shift_elems' #----------------------------------------------------------------- # shift of column elements # if set to "True" (default) the information defined in 'shift_array' is used. # shift_elems = Bool(True, input=True) # 'shift_array' is used to place element corners at a defined global # position in order to connect the shell with the corner nodes of the column. # [x_shift, y_shift, number of element s between the coordinate position] # NOTE: 'shift_array' needs to have shape (:,3)! 
# shift_array = Array(float, input=True) def _shift_array_default(self): return array( [[self.width_top_col / 2**0.5, self.width_top_col / 2**0.5, 1]]) #----------------------------------------------------------------- # option: 'const_reinf_layer' #----------------------------------------------------------------- # 'const_reinf_layer' - parameter is used only for the non-linear analysis, # where an element layer with a constant thickness is needed to simulate the # reinforced concrete at the top and bottom of the TRC-shell. # const_reinf_layer_elem = Bool(False, input=True) t_reinf_layer = Float(0.03, input=True) # [m] n_elems_reinf_layer = Int( 1, input=True) #number of dofs used for edge refinement #----------------------------------------------------------------- # read vertice points of the shell and derive a normalized # RBF-function for the shell approximation #----------------------------------------------------------------- # "lowerface_cut_off" - option replaces constant height for the coordinates # which connect to the column (this cuts of the shell geometry horizontally # at the bottom of the lower face of the shell geometry. 
    # option: cut off the z-coordinates of the lower face at a limit height
    #
    cut_off_lowerface = Bool(True, input=True)

    # choose geometric file (obj-data file)
    #
    geo_input_name = Enum('350x350cm', '4x4m', '02', input=True)

    # filter for '4x4m' file needs to be done to have regular grid
    # in order to rbf-function leading to stable solution without oscilation
    #
    geo_filter = Dict({'4x4m': delete_second_rows})
    #                  # ,'350x350cm' : delete_second_rows} )

    def _read_arr(self, side='lowerface_'):
        '''read the robj-file saved in the subdirectory 'geometry_files'
        and return the vertex array, optionally filtered by 'geo_filter'.
        '''
        file_name = side + self.geo_input_name + '.robj'
        file_path = join('geometry_files', file_name)
        # get an array with the vertice coordinates
        #
        v_arr = read_rsurface(file_path)
        # print 'v_arr before filtering \n', v_arr
        # print 'v_arr.shape before filtering \n', v_arr.shape
        # NOTE(review): 'filter' shadows the builtin of the same name
        filter = self.geo_filter.get(self.geo_input_name, None)
        if filter != None:
            v_arr = filter(v_arr)
        # print 'v_arr after filtering \n', v_arr
        # print 'v_arr.shape after filtering \n', v_arr.shape
        return v_arr

    # array of the vertex positions in global
    # x,y,z-coordinates defining the lower surface of the shell
    #
    vl_arr = Property(Array(float), depends_on='geo_input_name')

    @cached_property
    def _get_vl_arr(self):
        '''vertex array of the lower face; optionally with the z-coordinates
        clipped at 'delta_h' below the highest point of the lower face.
        '''
        vl_arr = self._read_arr('lowerface_')
        if self.cut_off_lowerface == True:
            print '--- lower face z-coords cut off ---'

            # z-values of the coords from the lower face are cut off.
            # From the highest z-coordinate of the lower face the vertical
            # distance is 'delta h (for 4x4m: delta_h = 1.0m).
            # At this limit the lower face is cut off.
            # NOTE: the global z coordinate is assumed to point up
            # and must be given in the same unite as 'delta_h', i.e. in [m].
            #
            vl_z_max = max(vl_arr[:, 2])
            if self.geo_input_name == '4x4m':
                # NOTE: the global z-coordinates are given in the geo data file in [m]
                # no conversion of unites necessary (self.delta_h is given in [m])
                delta_h = self.delta_h
            elif self.geo_input_name == '350x350cm':
                # NOTE: the global z-coordinates are given in the geo data file in [cm]
                # convert delta_h from [m] to [cm]
                #
                delta_h = self.delta_h * 100.
            # NOTE(review): for geo_input_name == '02' neither branch runs and
            # 'delta_h' is unbound -> NameError; confirm '02' is never used with
            # cut_off_lowerface == True
            vl_z_min = vl_z_max - delta_h
            vl_arr_z = where(vl_arr[:, 2] < vl_z_min, vl_z_min, vl_arr[:, 2])
            vl_arr = c_[vl_arr[:, 0:2], vl_arr_z]
        return vl_arr

    # array of the vertex positions in global
    # x,y,z-coordinates defining the upper surface of the shell
    #
    vu_arr = Property(Array(float), depends_on='geo_input_name')

    @cached_property
    def _get_vu_arr(self):
        return self._read_arr('upperface_')

    # normalized coordinates of the vertices for the lowerface
    # NOTE: the underline character indicates a normalized value
    # @todo: 'normalize_rsurfaces' is called twice for 'vl_arr_' and 'vu_arr_'
    #
    vl_arr_ = Property(Array(float), depends_on='geo_input_name')

    @cached_property
    def _get_vl_arr_(self):
        vl_arr_, vu_arr_ = normalize_rsurfaces(self.vl_arr, self.vu_arr)
        return vl_arr_

    # normalized coordinates of the vertices for the lowerface
    # NOTE: the underline character indicates a normalized value
    #
    vu_arr_ = Property(Array(float), depends_on='geo_input_name')

    @cached_property
    def _get_vu_arr_(self):
        vl_arr_, vu_arr_ = normalize_rsurfaces(self.vl_arr, self.vu_arr)
        return vu_arr_

    rbf_l_ = Property(Instance(Rbf), depends_on='geo_input_name')

    @cached_property
    def _get_rbf_l_(self):
        # use a radial basis function approximation (rbf) (i.e. interpolation of
        # scattered data) based on the normalized vertex points of the lower face
        #
        xl_ = self.vl_arr_[:, 0]
        yl_ = self.vl_arr_[:, 1]
        zl_ = self.vl_arr_[:, 2]

        # flip the orientation of the local coordinate axis
        # depending on the geometry file used
        #
        if self.geo_input_name == '350x350cm':
            xl_ = 1 - self.vl_arr_[:, 0]
        if self.geo_input_name == '4x4m':
            yl_ = 1 - self.vl_arr_[:, 1]

        rbf_l_ = Rbf(xl_, yl_, zl_, function='cubic')
        # rbf_l_ = Rbf( xl_, yl_, zl_, function = 'linear' )
        return rbf_l_

    rbf_u_ = Property(Instance(Rbf), depends_on='geo_input_name')

    @cached_property
    def _get_rbf_u_(self):
        # use a radial basis function approximation (rbf) (i.e. interpolation of
        # scattered data) based on the normalized vertex points of the upper face
        #
        xu_ = self.vu_arr_[:, 0]
        yu_ = self.vu_arr_[:, 1]
        zu_ = self.vu_arr_[:, 2]

        # flip the orientation of the local coordinate axis
        # depending on the geometry file used
        #
        if self.geo_input_name == '350x350cm':
            xu_ = 1 - self.vu_arr_[:, 0]
        if self.geo_input_name == '4x4m':
            yu_ = 1 - self.vu_arr_[:, 1]

        rbf_u_ = Rbf(xu_, yu_, zu_, function='cubic')
        # rbf_u_ = Rbf( xu_, yu_, zu_, function = 'linear' )
        return rbf_u_

    #------------------------------------------------------------------------------
    # hp_shell geometric transformation
    # NOTE: returns the global coordinates of the shell based on the supplied local
    #       grid points
    #------------------------------------------------------------------------------

    def __call__(self, points):
        '''Return the global coordinates of the supplied local points.
        '''
        # number of local grid points for each coordinate direction
        # NOTE: values must range between 0 and 1
        #
        xi_, yi_, zi_ = points[:, 0], points[:, 1], points[:, 2]

        # insert imperfection (shift the middle node of the shell upwards)
        # NOTE(review): 'zi_' is a view into 'points', so this in-place update
        # also mutates the caller's array -- confirm this is intended
        imp = self.z_imperfection_factor
        zi_ += imp * xi_ + imp * yi_ - 2 * imp * xi_ * yi_

        # size of total structure
        #
        # @todo: move to class definition of "mushroof_model" and send to "__call__"
        scale_size = self.scale_size
        # @todo: add "_quarter" (see above)
        length_xy_tot = self.length_xy_quarter * scale_size
        n_elems_xy_quarter = self.n_elems_xy_quarter
        # print 'HPShell n_elems_xy_quarter', n_elems_xy_quarter

        # distance from origin for each mushroof_part
        #
        def d_origin_fn(self, coords):
            if self.mushroof_part == 'quarter':
                return coords
            if self.mushroof_part == 'one':
                return abs(2.0 * coords - 1.0)
            # @todo: corresponding "scale_factor" needs to be added
            #        in order for this to work
            if self.mushroof_part == 'four':
                return where(coords < 0.5,
                             abs(4 * coords - 1),
                             abs(-4 * coords + 3))

        # element at column shift
        #
        if self.shift_elems == True:

            # define the origin for each model part
            #
            def origin_fn(self, coords):
                if self.mushroof_part == 'quarter':
                    return zeros_like(coords)
                if self.mushroof_part == 'one':
                    return ones_like(xi_) * 0.5
                if self.mushroof_part == 'four':
                    return where(coords < 0.5, 0.25, 0.75)

            def piecewise_linear_fn(x, x_fix_arr_, y_fix_arr_):
                '''creates a piecewise linear_fn going through the fix_points
                values need to be normed running between 0..1
                and values have to be unique'''
                x_fix_arr_ = hstack((0, x_fix_arr_, 1))
                y_fix_arr_ = hstack((0, y_fix_arr_, 1))
                # rbf has to be linear
                rbf_fn_ = Rbf(x_fix_arr_, y_fix_arr_, function='linear')
                return rbf_fn_(x)

            # define origin for quarter
            #
            xi_origin_arr_ = origin_fn(self, xi_)
            yi_origin_arr_ = origin_fn(self, yi_)
            # print 'xi_origin_arr_', xi_origin_arr_

            # delta towards origin
            #
            xi_delta_arr_ = (xi_ - xi_origin_arr_) * scale_size
            yi_delta_arr_ = (yi_ - yi_origin_arr_) * scale_size
            # print 'xi_delta_arr_', xi_delta_arr

            # define sign
            #
            xi_sign_arr = where(xi_delta_arr_ == 0., 0.,
                                xi_delta_arr_ / abs(xi_delta_arr_))
            yi_sign_arr = where(yi_delta_arr_ == 0., 0.,
                                yi_delta_arr_ / abs(yi_delta_arr_))
            # print 'xi_sign_arr', xi_sign_arr

            # fix points defined in shift array as normelized values
            #
            x_fix_ = self.shift_array[:, 0] / self.length_xy_quarter
            # print 'x_fix_', x_fix_
            y_fix_ = self.shift_array[:, 1] / self.length_xy_quarter
            n_fix_ = add.accumulate(self.shift_array[:, 2]) / n_elems_xy_quarter
            # print 'add.accumulate( self.shift_array[:, 2] )', add.accumulate( self.shift_array[:, 2] )
            # print 'n_fix_', n_fix_
            # print 'piecewise_linear_fn', piecewise_linear_fn( abs( xi_delta_arr_ ),
            #                                                   n_fix_,
            #                                                   x_fix_ ) / scale_size

            # new xi_
            #
            xi_ = xi_origin_arr_ + xi_sign_arr * piecewise_linear_fn(
                abs(xi_delta_arr_), n_fix_, x_fix_) / scale_size
            # print 'xi_new', xi_

            # new yi
            #
            yi_ = yi_origin_arr_ + yi_sign_arr * piecewise_linear_fn(
                abs(yi_delta_arr_), n_fix_, y_fix_) / scale_size

        #-------------------------------------------------------------------------------------
        # values are used to calculate the z-coordinate using RBF-function of the quarter
        # (= values of the distance to the origin as absolute value)
        #
        xi_rbf_ = d_origin_fn(self, xi_)
        # print 'xi_rbf_', xi_rbf_
        yi_rbf_ = d_origin_fn(self, yi_)

        # get the z-value at the supplied local grid points
        # of the lower face
        #
        zi_lower_ = self.rbf_l_(xi_rbf_, yi_rbf_)

        # get the z-value at the supplied local grid points
        # of the upper face
        #
        zi_upper_ = self.rbf_u_(xi_rbf_, yi_rbf_)

        # constant edge element transformation
        #
        if self.const_reinf_layer_elem == True:
            # arrange and check data
            #
            if self.t_reinf_layer > self.t_shell / 2. or self.n_elems_z < 3:
                print '--- constant edge element transformation canceled ---'
                print 'the following condition needs to be fullfilled: \n'
                print 'self.t_reinf_layer <= self.t_shell/2 and self.n_elems_z >= 3'
            else:
                n_elems_z = float(self.n_elems_z)
                # normed thickness will evaluate as t_reinf_layer at each element
                t_reinf_layer_ = self.t_reinf_layer / self.length_z / (
                    zi_upper_ - zi_lower_)
                # zi_old set off from top which needs to be shifted
                delta_ = self.n_elems_reinf_layer / n_elems_z

                # get upper, lower and internal coordinates, that need to be shifted
                zi_lower = where(zi_ <= delta_)
                zi_upper = where(abs(1 - zi_) <= delta_ + 1e-10)
                zi_inter = where(abs(zi_ - 0.5) < 0.5 - (delta_ + 1e-10))

                # narrowing of coordinates
                zi_[zi_lower] = zi_[zi_lower] * t_reinf_layer_[zi_lower] / delta_
                zi_[zi_upper] = 1 - (
                    1 - zi_[zi_upper]) * t_reinf_layer_[zi_upper] / delta_
                zi_[zi_inter] = t_reinf_layer_[zi_inter] + \
                    (zi_[zi_inter] - delta_) / (1 - 2 * delta_)\
                    * (1 - 2 * t_reinf_layer_[zi_inter])
                print '--- constant edge elements transformation done ---'

        # thickness is multiplied by the supplied zi coordinate
        #
        z_ = (zi_lower_ * self.scalefactor_delta_h +
              (zi_upper_ - zi_lower_) * zi_)

        # coordinates of origin
        #
        X_0, Y_0, Z_0 = self.X0

        print '--- geometric transformation done ---'

        # multiply the local grid points with the real dimensions in order to obtain the
        # global coordinates of the mushroof_part:
        #
        return c_[X_0 + (xi_ * length_xy_tot) * self.scalefactor_length_xy,
                  Y_0 + (yi_ * length_xy_tot) * self.scalefactor_length_xy,
                  Z_0 + z_ * self.length_z]
class LCCTable(HasTraits):
    '''Loading Case Manager.
    Generates and sorts the loading case combinations
    of all specified loading cases.
    '''

    # define ls (limit state): maps the selected key to the LS class
    #
    ls = Trait('ULS', {'ULS': ULS, 'SLS': SLS})

    # lcc-instance for the view
    #
    lcc = Instance(LCC)

    #-------------------------------
    # Define loading cases:
    #-------------------------------

    # path to the directory containing the state data files
    #
    data_dir = Directory

    # list of load cases
    #
    lc_list_ = List(Instance(LC))

    # public accessor for 'lc_list_' (getter/setter pair below)
    lc_list = Property(List, depends_on='+filter')

    def _set_lc_list(self, value):
        self.lc_list_ = value

    def _get_lc_list(self):
        # for lc in self.lc_list_:
        #     if lc.data_filter != self.data_filter:
        #         lc.data_filter = self.data_filter
        return self.lc_list_

    lcc_table_columns = Property(depends_on='lc_list_, +filter')

    def _get_lcc_table_columns(self):
        '''table columns for the view: id, one column per load case,
        and the assessment value.
        '''
        return [ ObjectColumn(label='Id', name='lcc_id') ] + \
               [ ObjectColumn(label=lc.name, name=lc.name)
                 for idx, lc in enumerate(self.lc_list) ] + \
               [ ObjectColumn(label='assess_value', name='assess_value') ]

    geo_columns = Property(List(Str), depends_on='lc_list_, +filter')

    def _get_geo_columns(self):
        '''derive the order of the geo columns
        from the first element in 'lc_list'. The internal
        consistency is checked separately in the
        'check_consistency' method.
        '''
        return self.lc_list[0].geo_columns

    sr_columns = Property(List(Str), depends_on='lc_list_, +filter')

    def _get_sr_columns(self):
        '''derive the order of the stress resultants
        from the first element in 'lc_list'. The internal
        consistency is checked separately in the
        'check_consistency' method.
        '''
        return self.lc_list[0].sr_columns

    #-------------------------------
    # check consistency
    #-------------------------------

    def _check_for_consistency(self):
        '''check input files for consistency:
        NOTE(review): currently a stub that always returns True
        '''
        return True

    #-------------------------------
    # lc_arr
    #-------------------------------

    lc_arr = Property(Array)

    def _get_lc_arr(self):
        '''stack stress resultants arrays of all loading cases together.
        This yields an array of shape ( n_lc, n_elems, n_sr )
        '''
        sr_arr_list = [lc.sr_arr for lc in self.lc_list]
        # for x in sr_arr_list:
        #     print x.shape
        return array(sr_arr_list)

    #-------------------------------
    # Array dimensions:
    #-------------------------------

    # number of stress resultants
    n_sr = Property(Int)

    def _get_n_sr(self):
        return len(self.sr_columns)

    # number of loading cases
    n_lc = Property(Int)

    def _get_n_lc(self):
        return len(self.lc_list)

    # number of loading case combinations
    n_lcc = Property(Int)

    def _get_n_lcc(self):
        return self.combi_arr.shape[0]

    # number of elements (taken from the first loading case)
    n_elems = Property(Int)

    def _get_n_elems(self):
        return self.lc_list[0].sr_arr.shape[0]

    #-------------------------------
    # auxilary method for get_combi_arr
    #-------------------------------

    def _product(self, args):
        """
        Get all possible permutations of the security factors
        without changing the order of the loading cases.
        The method corresponds to the build-in function 'itertools.product'.
        Instead of returning a generator object a list of all
        possible permutations is returned. As argument a list of list
        needs to be defined. In the original version of 'itertools.product'
        the function takes a tuple as argument ("*args").
        """
        # within original version args defined as *args
        # NOTE(review): relies on Python-2 'map' returning a list
        pools = map(tuple, args)
        result = [[]]
        for pool in pools:
            result = [x + [y] for x in result for y in pool]
        return result

    # ------------------------------------------------------------
    # 'combi_arr' - array containing indices of all loading case combinations:
    # ------------------------------------------------------------

    # list of indices of the position of the imposed loads in 'lc_list'
    #
    # imposed_idx_list = Property( List, depends_on = 'lc_list_, lc_list_.+input' )
    imposed_idx_list = Property(List, depends_on='lc_list_')

    @cached_property
    def _get_imposed_idx_list(self):
        '''list of indices for the imposed loads
        '''
        imposed_idx_list = []
        for i_lc, lc in enumerate(self.lc_list):
            cat = lc.category
            if cat == 'imposed-load':
                imposed_idx_list.append(i_lc)
        return imposed_idx_list

    # array containing the psi with name 'psi_key' for the specified
    # loading cases defined in 'lc_list'. For dead-loads no value for
    # psi exists. In this case a value of 1.0 is defined.
    # This yields an array of shape ( n_lc, )
    #
    def _get_psi_arr(self, psi_key):
        '''psi_key must be defined as:
        'psi_0', 'psi_1', or 'psi_2'
        Returns an 1d-array of shape ( n_lc, )
        '''
        # get list of ones (used for dead-loads):
        #
        psi_list = [1] * len(self.lc_list)

        # overwrite ones with psi-values in case of imposed-loads:
        #
        for imposed_idx in self.imposed_idx_list:
            psi_value = getattr(self.lc_list[imposed_idx], psi_key)
            psi_list[imposed_idx] = psi_value

        return array(psi_list, dtype='float_')

    # list containing names of the loading cases
    #
    lc_name_list = Property(List, depends_on='lc_list_')

    @cached_property
    def _get_lc_name_list(self):
        '''list of names of all loading cases
        '''
        return [lc.name for lc in self.lc_list]

    # if True, the combination table is prepended with an identity matrix
    # so each load case also appears with its characteristic value
    show_lc_characteristic = Bool(True)

    # combination array:
    #
    combi_arr = Property(Array, depends_on='lc_list_, combination_SLS')

    @cached_property
    def _get_combi_arr(self):
        '''array containing the security and combination factors
        corresponding to the specified loading cases.
        This yields an array of shape ( n_lcc, n_lc )

        Properties defined in the subclasses 'LCCTableULS', 'LCCTableSLS':
        - 'gamma_list' = list of security factors (gamma)
        - 'psi_lead' = combination factors (psi) of the leading imposed load
        - 'psi_non_lead' = combination factors (psi) of the non-leading
          imposed loads
        '''
        # printouts:
        #
        if self.ls == 'ULS':
            print '*** load case combinations for limit state ULS ***'
        else:
            print '*** load case combinations for limit state SLS ***'
            print '*** SLS combination used: % s ***' % (self.combination_SLS)

        #---------------------------------------------------------------
        # get permutations of safety factors ('gamma')
        #---------------------------------------------------------------
        #
        permutation_list = self._product(self.gamma_list)

        combi_arr = array(permutation_list)

        # check if imposed loads are defined
        # if not no further processing of 'combi_arr' is necessary:
        #
        if self.imposed_idx_list == []:

            # if option is set to 'True' the loading case combination table
            # is enlarged with an identity matrix in order to see the
            # characteristic values of each loading case.
            #
            if self.show_lc_characteristic:
                combi_arr = vstack([identity(self.n_lc), combi_arr])

            return combi_arr

        #---------------------------------------------------------------
        # get leading and non leading combination factors ('psi')
        #---------------------------------------------------------------
        # go through all possible cases of leading imposed loads
        # For the currently investigated imposed loading case the
        # psi value is taken from 'psi_leading_arr' for all other
        # imposed loads the psi value is taken from 'psi_non_lead_arr'

        # Properties are defined in the subclasses
        #
        psi_lead_arr = self.psi_lead_arr
        psi_non_lead_arr = self.psi_non_lead_arr

        # for SLS limit state case 'rare' all imposed loads are multiplied
        # with 'psi_2'. In this case no distinction between leading or
        # non-leading imposed loads needs to be performed.
        #
        if all(psi_lead_arr == psi_non_lead_arr):
            combi_arr_psi = combi_arr * psi_lead_arr

        # generate a list or arrays obtained by multiplication
        # with the psi-factors.
        # This yields a list of length = number of imposed-loads.
        #
        else:
            combi_arr_psi_list = []
            for imposed_idx in self.imposed_idx_list:
                # copy in order to preserve initial state of the array
                # and avoid in place modification
                psi_arr = copy(psi_non_lead_arr)
                psi_arr[imposed_idx] = psi_lead_arr[imposed_idx]
                combi_arr_lead_i = combi_arr[where(
                    combi_arr[:, imposed_idx] != 0)] * psi_arr
                combi_arr_psi_list.append(combi_arr_lead_i)

            combi_arr_psi_no_0 = vstack(combi_arr_psi_list)

            # missing cases without any dead load have to be added
            # get combinations with all!! imposed = 0
            #
            lcc_all_imposed_zero = where(
                (combi_arr[:, self.imposed_idx_list] == 0).all(axis=1))

            # add to combinations
            #
            combi_arr_psi = vstack(
                (combi_arr[lcc_all_imposed_zero], combi_arr_psi_no_0))

        #---------------------------------------------------------------
        # get exclusive loading cases ('exclusive_to')
        #---------------------------------------------------------------

        # get a list of lists containing the indices of the loading cases
        # that are defined exclusive to each other.
        # The list still contains duplicates, e.g. [1,2] and [2,1]
        #
        exclusive_list = []
        for i_lc, lc in enumerate(self.lc_list):

            # get related load case number
            #
            for exclusive_name in lc.exclusive_to:
                if exclusive_name in self.lc_name_list:
                    exclusive_idx = self.lc_name_list.index(exclusive_name)
                    exclusive_list.append([i_lc, exclusive_idx])

        # eliminate the duplicates in 'exclusive_list'
        #
        exclusive_list_unique = []
        for exclusive_list_entry in exclusive_list:
            if sorted(exclusive_list_entry) not in exclusive_list_unique:
                exclusive_list_unique.append(sorted(exclusive_list_entry))

        # delete the rows in combination array that contain
        # loading case combinations with imposed-loads that have been defined
        # as exclusive to each other.
        #
        combi_arr_psi_exclusive = combi_arr_psi
        # print 'combi_arr_psi_exclusive', combi_arr_psi_exclusive
        for exclusive_list_entry in exclusive_list_unique:
            # check where maximum one value of the exclusive load cases is unequal to one
            #              LC1  LC2  LC3  (all LCs are defined as exclusive to each other)
            #
            # e.g.         1.5  0.9  0.8  (example of 'combi_arr_psi')
            #              1.5  0.0  0.0
            #              0.0  0.0  0.0  (combination with all imposed loads = 0 after multiplication wit psi and gamma)
            #              ...  ...  ...
            #
            # this would yield the following mask_arr (containing ones or zeros):
            # e.g.         1.0  1.0  1.0  --> sum = 3 --> true combi --> accepted combination
            #              1.0  0.0  0.0  --> sum = 1 --> false combi --> no accepted combination
            # e.g.         0.0  0.0  0.0  --> sum = 0 --> true combi --> accepted combination (only body-loads)
            #              ...  ...  ...
            #
            mask_arr = where(
                combi_arr_psi_exclusive[:, exclusive_list_entry] != 0, 1.0, 0.0)
            # print 'mask_arr', mask_arr
            true_combi = where(sum(mask_arr, axis=1) <= 1.0)
            # print 'true_combi', true_combi
            combi_arr_psi_exclusive = combi_arr_psi_exclusive[true_combi]

        #---------------------------------------------------------------
        # create array with only unique load case combinations
        #---------------------------------------------------------------
        # If the psi values of an imposed-load are defined as zero this
        # may led to zero entries in 'combi_arr'. This would yield rows
        # in 'combi_arr' which are duplicates. Those rows are removed.

        # Add first row in 'combi_arr_psi_exclusive' to '_unique' array
        # This array must have shape (1, n_lc) in order to use 'axis'-option
        #
        combi_arr_psi_exclusive_unique = combi_arr_psi_exclusive[0][None, :]

        for row in combi_arr_psi_exclusive:
            # Check if all factors in one row are equal to the rows in 'unique' array.
            # If this is not the case for any row the combination is added to 'unique'.
            # Broadcasting is used for the bool evaluation:
            # NOTE(review): 'axis=1.0' passes a float axis -- works in old numpy
            # but should be the integer 1
            #
            if (row == combi_arr_psi_exclusive_unique).all(
                    axis=1.0).any() == False:
                combi_arr_psi_exclusive_unique = vstack(
                    (combi_arr_psi_exclusive_unique, row))

        # if option is set to 'True' the loading case combination table
        # is enlarged with an identity matrix in order to see the
        # characteristic values of each loading case.
        #
        # if self.show_lc_characteristic:
        #     combi_arr_psi_exclusive_unique = vstack( [ identity( self.n_lc ), combi_arr_psi_exclusive_unique ] )

        return combi_arr_psi_exclusive_unique

    #-------------------------------
    # lcc_arr
    #-------------------------------

    lcc_arr = Property(Array, depends_on='lc_list_')

    @cached_property
    def _get_lcc_arr(self):
        '''Array of all loading case combinations following the
        loading cases defined in 'lc_list' and the combinations
        defined in 'combi_arr'.
        This yields an array of shape ( n_lcc, n_elems, n_sr )
        '''
        self._check_for_consistency()

        combi_arr = self.combi_arr

        # 'combi_arr' is of shape ( n_lcc, n_lc )
        # 'lc_arr' is of shape ( n_lc, n_elems, n_sr )
        #
        lc_arr = self.lc_arr

        # Broadcasting is used to generate array containing the multiplied lc's
        # yielding an array of shape ( n_lcc, n_lc, n_elems, n_sr )
        #
        lc_combi_arr = lc_arr[None, :, :, :] * combi_arr[:, :, None, None]

        # Then the sum over index 'n_lc' is evaluated yielding
        # an array of all loading case combinations.
        # This yields an array of shape ( n_lcc, n_elem, n_sr )
        #
        lcc_arr = sum(lc_combi_arr, axis=1)

        return lcc_arr

    #-------------------------------
    # lcc_lists
    #-------------------------------

    lcc_list = Property(List, depends_on='lc_list_')

    @cached_property
    def _get_lcc_list(self):
        '''list of loading case combinations (instances of LCC)
        '''
        combi_arr = self.combi_arr
        lcc_arr = self.lcc_arr
        sr_columns = self.sr_columns
        geo_columns = self.geo_columns

        n_lcc = self.n_lcc

        # return a dictionary of the stress resultants
        # this is used by LSTable to determine the stress
        # resultants of the current limit state
        #
        lcc_list = []
        for i_lcc in range(n_lcc):

            state_data_dict = {}
            for i_sr, name in enumerate(sr_columns):
                state_data_dict[name] = lcc_arr[i_lcc, :, i_sr][:, None]

            geo_data_dict = self.geo_data_dict

            lcc = LCC(  # lcc_table = self,
                factors=combi_arr[i_lcc, :],
                lcc_id=i_lcc,
                ls_table=LSTable(geo_data=geo_data_dict,
                                 state_data=state_data_dict,
                                 ls=self.ls))

            # add one trait per load case carrying its factor in this combination
            for idx, lc in enumerate(self.lc_list):
                lcc.add_trait(lc.name, Int(combi_arr[i_lcc, idx]))

            lcc_list.append(lcc)

        return lcc_list

    #-------------------------------
    # geo_arr
    #-------------------------------

    geo_data_dict = Property(Dict, depends_on='lc_list_')

    @cached_property
    def _get_geo_data_dict(self):
        '''Array of global coords derived from the first loading case
        defined in lc_list. Coords are identical for all LC's.
        '''
        return self.lc_list[0].geo_data_dict

    #-------------------------------
    # min/max-values
    #-------------------------------

    def get_min_max_state_data(self):
        '''get the surrounding curve of all 'lcc' values
        '''
        lcc_arr = self.lcc_arr
        min_arr = ndmin(lcc_arr, axis=0)
        max_arr = ndmax(lcc_arr, axis=0)
        return min_arr, max_arr

    #--------------------------------------
    # use for case 'max N*' according to ZiE
    # case 'maximum normal force' according to ZiE
    #--------------------------------------
    # max_sr_grouped_dict = Property( Dict )
    # @cached_property
    # def _get_max_sr_grouped_dict( self ):
    #     ''' get the surrounding curve for each stress resultant
    #         shape lcc_array ( n_lcc, n_elems, n_sr )
    #     '''
    #     sr_columns = self.sr_columns
    #     lcc_arr = self.lcc_arr
    #     dict = {}
    #     for i, sr in enumerate( self.sr_columns ):
    #         idx_1 = argmax( abs( lcc_arr[:, :, i] ), axis = 0 )
    #         idx_2 = arange( 0, idx_1.shape[0], 1 )
    #         dict[sr] = lcc_arr[idx_1, idx_2, :]
    #     return dict

    #--------------------------------------
    # use for case 'max eta' according to ZiE
    # case 'maximum utilization ratio' according to ZiE
    #--------------------------------------
    max_sr_grouped_dict = Property(Dict)

    @cached_property
    def _get_max_sr_grouped_dict(self):
        '''evaluate eta and prepare plot
        '''
        sr_columns = self.sr_columns
        lcc_arr = self.lcc_arr

        # ## N_s6cm_d results from 'V_op_d'*1.5
        # assume a distribution of stresses as for a simple
        # supported beam with cantilever corresponding
        # to the distance of the screws to each other and to the edge
        # of the TRC shell (33cm/17cm)
        #
        N_s6cm_d = lcc_arr[:, :, 2] * (17. + 33. + 1.) / 33.

        # ## V_s6cm_d results from 'N_ip_d'/2
        # assume an equal distribution (50% each) of the
        # normal forces to each screw
        #
        V_s6cm_d = lcc_arr[:, :, 0] * 0.56
        # V_s6cm_d = ( ( lcc_arr[:, :, 0] / 2 ) ** 2 + ( lcc_arr[:, :, 1] * 1.5 ) ** 2 ) ** 0.5

        # resistance ac characteristic value obtained from the
        # experiment and EN DIN 1990
        #
        N_ck = 28.3
        V_ck = 63.8

        gamma_s = 1.5

        eta_N = N_s6cm_d / (N_ck / gamma_s)
        eta_V = abs(V_s6cm_d / (V_ck / gamma_s))
        eta_inter = (eta_N) + (eta_V)

        # combination index with the maximum utilization per hinge
        idx_max_hinge = eta_inter.argmax(axis=0)

        # NOTE(review): 'dict' shadows the builtin of the same name
        dict = {}
        for i, sr in enumerate(self.sr_columns):
            idx_1 = idx_max_hinge
            idx_2 = arange(0, idx_1.shape[0], 1)
            dict[sr] = lcc_arr[idx_1, idx_2, :]
        return dict

    def export_hf_max_grouped(self, filename):
        """exports the hinge forces as consistent pairs for the two case
        'max_eta' or 'max_N*'
        """
        from matplotlib import pyplot
        sr_columns = self.sr_columns
        dict = self.max_sr_grouped_dict
        length_xy_quarter = self.length_xy_quarter

        def save_bar_plot(x, y, filename='bla', title='Title', xlabel='xlabel',
                          ylabel='ylavel', width=0.1, xmin=0, xmax=1000,
                          ymin=-1000, ymax=1000, figsize=[10, 5]):
            # helper: render one bar plot of hinge forces and save it to file
            fig = pyplot.figure(facecolor="white", figsize=figsize)
            ax1 = fig.add_subplot(1, 1, 1)
            ax1.bar(x, y, width=width, align='center', color='green')
            ax1.set_xlim(xmin, xmax)
            ax1.set_ylim(ymin, ymax)
            ax1.set_xlabel(xlabel, fontsize=22)
            ax1.set_ylabel(ylabel, fontsize=22)
            if title == 'N_ip max':
                title = 'Fall max $\eta$'
                # title = 'Fall max $N^{*}$'
            if title == 'V_ip max':
                title = 'max $V_{ip}$'
            if title == 'V_op max':
                title = 'Fall max $V^{*}$'
            ax1.set_title(title)
            fig.savefig(filename, orientation='portrait', bbox_inches='tight')
            pyplot.clf()

        X = array(self.geo_data_dict['X_hf'])
        Y = array(self.geo_data_dict['Y_hf'])

        # symmetric axes
        #
        idx_sym = where(abs(Y[:, 0] - 2.0 * length_xy_quarter) <= 0.0001)
        X_sym = X[idx_sym].reshape(-1)
        idx_r0_r1 = where(abs(X[:, 0] - 2.0 * length_xy_quarter) <= 0.0001)
        X_r0_r1 = Y[idx_r0_r1].reshape(-1)

        for sr in sr_columns:
            # first row N_ip, second V_ip third V_op
            F_int = dict[sr]
            F_sym = F_int[idx_sym, :].reshape(-1, len(sr_columns))
            F_r0_r1 = F_int[idx_r0_r1, :].reshape(-1, len(sr_columns))

            save_bar_plot(X_sym, F_sym[:, 0].reshape(-1),
                          xlabel='$X$ [m]', ylabel='$N^{*}_{Ed}$ [kN]',
                          filename=filename + 'N_ip' + '_sym_' + sr + '_max',
                          title=sr + ' max',
                          xmin=0.0, xmax=3.5 * length_xy_quarter,
                          figsize=[10, 5], ymin=-30, ymax=+30)
            if self.link_type == 'inc_V_ip':
                save_bar_plot(X_sym, F_sym[:, 1].reshape(-1),
                              xlabel='$X$ [m]', ylabel='$V_{ip}$ [kN]',
                              filename=filename + 'V_ip' + '_sym_' + sr + '_max',
                              title=sr + ' max',
                              xmin=0.0, xmax=3.5 * length_xy_quarter,
                              figsize=[10, 5], ymin=-30, ymax=+30)
            save_bar_plot(X_sym, F_sym[:, 2].reshape(-1),
                          xlabel='$X$ [m]', ylabel='$V^{*}_{Ed}$ [kN]',
                          filename=filename + 'V_op' + '_sym_' + sr + '_max',
                          title=sr + ' max',
                          xmin=0.0, xmax=3.5 * length_xy_quarter,
                          figsize=[10, 5], ymin=-10, ymax=+10)

            # r0_r1
            #
            save_bar_plot(X_r0_r1, F_r0_r1[:, 0].reshape(-1),
                          xlabel='$Y$ [m]', ylabel='$N^{*}_{Ed}$ [kN]',
                          filename=filename + 'N_ip' + '_r0_r1_' + sr + '_max',
                          title=sr + ' max',
                          xmin=0.0, xmax=2.0 * length_xy_quarter,
                          figsize=[5, 5], ymin=-30, ymax=+30)
            if self.link_type == 'inc_V_ip':
                save_bar_plot(X_r0_r1, F_r0_r1[:, 1].reshape(-1),
                              xlabel='$Y$ [m]', ylabel='$V_{ip}$ [kN]',
                              filename=filename + 'V_ip' + '_r0_r1_' + sr + '_max',
                              title=sr + ' max',
                              xmin=0.0, xmax=2.0 * length_xy_quarter,
                              figsize=[5, 5], ymin=-30, ymax=+30)
            save_bar_plot(X_r0_r1, F_r0_r1[:, 2].reshape(-1),
                          xlabel='$Y$ [m]', ylabel='$V^{*}_{Ed}$ [kN]',
                          filename=filename + 'V_op' + '_r0_r1_' + sr + '_max',
                          title=sr + ' max',
                          xmin=0.0, xmax=2.0 * length_xy_quarter,
                          figsize=[5, 5], ymin=-10, ymax=+10)

    def plot_interaction_s6cm(self):
        """get the maximum values (consistent pairs of N and V) and plot them
        in an interaction plot
        """
        lcc_arr = self.lcc_arr

        # ## F_Edt results from 'V_op_d'*1.5
        # assume a distribution of stresses as for a simple
        # supported beam with cantilever corresponding
        # to the distance of the screws to each other and to the edge
        # of the TRC shell (33cm/17cm)
        #
        F_Edt = lcc_arr[:, :, 2] * (17. + 33. + 1.) / 33.

        # ## F_EdV1 results from 'N_ip_d'/2
        # assume an equal distribution (50% each) of the
        # normal forces to each screw
        #
        F_EdV1 = lcc_arr[:, :, 0] * 0.56
        # V_s6cm_d = ( ( lcc_arr[:, :, 0] / 2 ) ** 2 + ( lcc_arr[:, :, 1] * 1.5 ) ** 2 ) ** 0.5

        # resistance ac characteristic value obtained from the
        # experiment and EN DIN 1990
        #
        F_Rkt = 28.3
        F_RkV1 = 63.8

        gamma_M = 1.5

        eta_t = abs(F_Edt / (F_Rkt / gamma_M))
        eta_V1 = abs(F_EdV1 / (F_RkV1 / gamma_M))

        print 'eta_t.shape', eta_t.shape
        print 'eta_V1.shape', eta_V1.shape

        # self.interaction_plot(abs(F_Edt), abs(F_EdV1))
        self.interaction_plot(eta_t, eta_V1)

        # eta_inter = ( eta_N ) + ( eta_V )
        #
        # idx_max_hinge = eta_inter.argmax( axis = 0 )
        # idx_hinge = arange( 0, len( idx_max_hinge ), 1 )
        # plot_eta_N = eta_N[idx_max_hinge, idx_hinge]
        # plot_eta_V = eta_V[idx_max_hinge, idx_hinge]
        # self.interaction_plot( plot_eta_N, plot_eta_V )

    def interaction_plot(self, eta_N, eta_V):
        '''scatter plot of the utilization pairs (eta_V, eta_N)
        in a normalized interaction diagram.
        '''
        from matplotlib import font_manager
        # NOTE(review): 'ticks_font' is created but never used below
        ticks_font = font_manager.FontProperties(family='Times',
                                                 style='normal',
                                                 size=18,
                                                 weight='normal',
                                                 stretch='normal')
        from matplotlib import pyplot
        fig = pyplot.figure(facecolor="white", figsize=[10, 10])
        ax1 = fig.add_subplot(1, 1, 1)
        # x = arange(0, 1.01, 0.01)
        # y15 = (1 - x ** 1.5) ** (1 / 1.5)
        # y = (1 - x)

        ax1.set_xlabel('$F_\mathrm{Ed,V1}/F_\mathrm{Rd,V1}$', fontsize=24)
        ax1.set_ylabel('$F_\mathrm{Ed,t}/F_\mathrm{Rd,t}$', fontsize=24)
        # ax1.set_xlabel('$|N_\mathrm{Ed}|$' , fontsize=32)
        # ax1.set_ylabel('$|V_\mathrm{Ed}|$', fontsize=32)

        # ax1.plot(x , y, '--', color='black'
        #          , linewidth=2.0)
        # ax1.plot(x , y15, '--', color='black'
        #          , linewidth=2.0)

        ax1.plot(eta_V, eta_N, 'wo', markersize=3)
        # ax1.plot(eta_V, eta_N, 'o', color='green', markersize=8)

        # ax1.plot( eta_V[where( limit < 1 )] , eta_N[where( limit < 1 )], 'o', markersize = 8 )
        # ax1.plot( eta_V[where( limit > 1 )] , eta_N[where( limit > 1 )], 'o', color = 'red', markersize = 8 )

        for xlabel_i in ax1.get_xticklabels():
            xlabel_i.set_fontsize(24)
            xlabel_i.set_family('serif')

        for ylabel_i in ax1.get_yticklabels():
            ylabel_i.set_fontsize(24)
            ylabel_i.set_family('serif')

        # ax1.plot( x , 1 - x, '--', color = 'black', label = 'lineare Interaktion' )

        ax1.set_xlim(0, 1.0)
        ax1.set_ylim(0, 1.0)
        ax1.legend()
        pyplot.show()
        pyplot.clf()

    # choose linking type (in-plane shear dof blocked or not)
    #
    link_type = Enum('exc_V_ip', 'inc_V_ip')

    # length of the shell (needed to plot the hinge forces plots correctly)
    #
    length_xy_quarter = 3.5  # m

    def export_hf_lc(self):
        """exports the hinge forces for each loading case separately
        """
        from matplotlib import pyplot
        sr_columns = self.sr_columns
        # NOTE(review): 'dict' is assigned here but never used in this method
        # ('F_int' below is taken from 'self.lc_arr' instead) -- confirm
        dict = self.max_sr_grouped_dict
        length_xy_quarter = self.length_xy_quarter

        def save_bar_plot(x, y, filename='bla', xlabel='xlabel',
                          ylabel='ylavel', ymin=-10, ymax=10, width=0.1,
                          xmin=0, xmax=1000, figsize=[10, 5]):
            # helper: render one bar plot of hinge forces and save it to file
            fig = pyplot.figure(facecolor="white", figsize=figsize)
            ax1 = fig.add_subplot(1, 1, 1)
            ax1.bar(x, y, width=width, align='center', color='blue')
            ax1.set_xlim(xmin, xmax)
            ax1.set_ylim(ymin, ymax)
            ax1.set_xlabel(xlabel, fontsize=22)
            ax1.set_ylabel(ylabel, fontsize=22)
            fig.savefig(filename, orientation='portrait', bbox_inches='tight')
            pyplot.clf()

        X = array(self.geo_data_dict['X_hf'])
        Y = array(self.geo_data_dict['Y_hf'])

        # symmetric axes
        #
        idx_sym = where(abs(Y[:, 0] - 2.0 * length_xy_quarter) <= 0.0001)
        X_sym = X[idx_sym].reshape(-1)
        idx_r0_r1 = where(abs(X[:, 0] - 2.0 * length_xy_quarter) <= 0.0001)
        X_r0_r1 = Y[idx_r0_r1].reshape(-1)

        F_int = self.lc_arr

        for i, lc_name in enumerate(self.lc_name_list):
            filename = self.lc_list[i].plt_export

            max_N_ip = max(int(ndmax(F_int[i, :, 0], axis=0)) + 1, 1)
            max_V_ip = max(int(ndmax(F_int[i, :, 1], axis=0)) + 1, 1)
            max_V_op = max(int(ndmax(F_int[i, :, 2], axis=0)) + 1, 1)

            # first row N_ip, second V_ip third V_op
            F_int_lc = F_int[i, :, :]
            F_sym = F_int_lc[idx_sym, :].reshape(-1, len(sr_columns))
            F_r0_r1 = F_int_lc[idx_r0_r1, :].reshape(-1, len(sr_columns))

            save_bar_plot(X_sym, F_sym[:, 0].reshape(-1),
                          # xlabel = '$X$ [m]', ylabel = '$N^{ip}$ [kN]',
                          xlabel='$X$ [m]', ylabel='$N^{*}$ [kN]',
                          filename=filename + 'N_ip' + '_sym',
                          xmin=0.0, xmax=3.5 * length_xy_quarter,
                          ymin=-max_N_ip, ymax=max_N_ip, figsize=[10, 5])
            save_bar_plot(X_sym, F_sym[:, 1].reshape(-1),
                          xlabel='$X$ [m]', ylabel='$V_{ip}$ [kN]',
                          filename=filename + 'V_ip' + '_sym',
                          xmin=0.0, xmax=3.5 * length_xy_quarter,
                          ymin=-max_V_ip, ymax=max_V_ip, figsize=[10, 5])
            save_bar_plot(X_sym, F_sym[:, 2].reshape(-1),
                          # xlabel = '$X$ [m]', ylabel = '$V_{op}$ [kN]',
                          xlabel='$X$ [m]', ylabel='$V^{*}$ [kN]',
                          filename=filename + 'V_op' + '_sym',
                          xmin=0.0, xmax=3.5 * length_xy_quarter,
                          ymin=-max_V_op, ymax=max_V_op, figsize=[10, 5])

            # r0_r1
            #
            save_bar_plot(X_r0_r1, F_r0_r1[:, 0].reshape(-1),
                          # xlabel = '$Y$ [m]', ylabel = '$N_{ip}$ [kN]',
                          xlabel='$Y$ [m]', ylabel='$N^{*}$ [kN]',
                          filename=filename + 'N_ip' + '_r0_r1',
                          xmin=0.0, xmax=2.0 * length_xy_quarter,
                          ymin=-max_N_ip, ymax=max_N_ip, figsize=[5, 5])
            save_bar_plot(X_r0_r1, F_r0_r1[:, 1].reshape(-1),
                          xlabel='$Y$ [m]', ylabel='$V_{ip}$ [kN]',
                          filename=filename + 'V_ip' + '_r0_r1',
                          xmin=0.0, xmax=2.0 * length_xy_quarter,
                          ymin=-max_V_ip, ymax=max_V_ip, figsize=[5, 5])
            save_bar_plot(X_r0_r1, F_r0_r1[:, 2].reshape(-1),
                          # xlabel = '$Y$ [m]', ylabel = '$V_{op}$ [kN]',
                          xlabel='$Y$ [m]', ylabel='$V^{*}$ [kN]',
                          filename=filename + 'V_op' + '_r0_r1',
                          xmin=0.0, xmax=2.0 * length_xy_quarter,
                          ymin=-max_V_op, ymax=max_V_op, figsize=[5, 5])

    # ------------------------------------------------------------
    # View
    # ------------------------------------------------------------

    traits_view = View(VGroup(
        VSplit(
            Item('lcc_list', editor=lcc_list_editor, show_label=False),
            Item('lcc@', show_label=False),
        ),
    ),
        resizable=True,
        scrollable=True,
        height=1.0,
        width=1.0)
class Distribution(HasTraits):
    ''' takes a scipy.stats distribution

    Wraps an rv_continuous class (not a frozen instance) and keeps its
    parameters (loc, scale, shape) and statistical moments (mean, variance,
    skewness, kurtosis) mutually consistent: editing a parameter recomputes
    the moments, editing a moment solves (fsolve) for the matching parameter.
    '''

    def __init__(self, distribution, **kw):
        # distribution: a scipy.stats rv_continuous class/generator
        super(Distribution, self).__init__(**kw)
        self.distribution = distribution
        # initial synchronisation of params and moments
        self.changes()

    distribution = Instance(rv_continuous)

    def add_listeners(self):
        '''(Re)attach self.changes to all param/moment traits.'''
        self.on_trait_change(self.changes, '+params,+moments')

    def remove_listeners(self):
        '''Detach self.changes so programmatic updates do not re-trigger it.'''
        self.on_trait_change(self.changes, '+params,+moments', remove=True)

    # precision for displayed numbers = 12 numbers corresponds with the numbers
    # displayed in the UI.
    # NOTE: executed once at class-definition time; changes the process-wide
    # decimal context.
    decimal.getcontext().prec = 12

    # event that triggers the replot in pdistrib.py
    changed = Event

    # freezes the location to 0.0
    loc_zero = Bool(True)

    # old values are compared with new values to recognize which value changed
    # order: shape, loc, scale, mean, variance, skewness, kurtosis
    old_values = Array(Float, value=zeros(7))
    new_values = Array(Float, value=zeros(7))

    # statistical parameters (metadata params=True is what the listeners
    # and the `distr` property depend on)
    loc = Float(0.0, auto_set=False, enter_set=True, params=True)
    scale = Float(1.0, auto_set=False, enter_set=True, params=True)
    shape = Float(1.0, auto_set=False, enter_set=True, params=True)

    # statistical moments
    mean = Float(0.0, auto_set=False, enter_set=True, moments=True)
    variance = Float(0.0, auto_set=False, enter_set=True, moments=True)
    skewness = Float(0.0, auto_set=False, enter_set=True, moments=True)
    kurtosis = Float(0.0, auto_set=False, enter_set=True, moments=True)

    # standard deviation, derived from the variance
    stdev = Property(depends_on='variance')

    def _get_stdev(self):
        return self.variance**(0.5)

    def get_mean(self):
        ''' Methods for evaluating the statistical moments. Decimal together
        with precision are needed in order to get the number which is actually
        displayed in the UI. Otherwise clicking in the interface or pressing
        enter on the displayed values would trigger new computation because
        these values are a representation of the computed values rounded to
        12 numbers.
        '''
        self.mean = float(Decimal(str((self.distr.stats('m')))) / 1)

    def get_variance(self):
        self.variance = float(Decimal(str((self.distr.stats('v')))) / 1)

    def get_skewness(self):
        self.skewness = float(Decimal(str((self.distr.stats('s')))) / 1)

    def get_kurtosis(self):
        self.kurtosis = float(Decimal(str((self.distr.stats('k')))) / 1)

    def get_moments(self, specify):
        ''' specify is a string containing some of the letters 'mvsk' '''
        # listeners off so that assigning moments does not recurse into
        # changes()
        self.remove_listeners()
        moments = self.distr.stats(specify)
        moment_names = ['mean', 'variance', 'skewness', 'kurtosis']
        # NOTE(review): moment_names[idx][0] assigns one-letter attributes
        # ('m', 'v', ...) rather than the 'mean'/'variance' traits, and idx
        # only lines up with moment_names when specify == 'mvsk'; the get_*
        # calls below are what actually refresh the displayed traits --
        # confirm whether this loop is intentional.
        for idx, value in enumerate(moments):
            setattr(self, moment_names[idx][0], value)
        dict = {
            'm': self.get_mean,
            'v': self.get_variance,
            's': self.get_skewness,
            'k': self.get_kurtosis
        }
        # chooses the methods to calculate the three moments which didn't
        # trigger this method
        for idx in specify:
            dict[idx]()
        self.add_listeners()

    def changes(self):
        ''' coordinates the methods for computing parameters and moments
        when a change has occurred '''
        self.remove_listeners()
        self.new_values = array([
            self.shape, self.loc, self.scale, self.mean, self.variance,
            self.skewness, self.kurtosis
        ])
        # test which parameters or moments are significant
        # (indices 0-2 are parameters, 3-6 are moments)
        indexing = arange(7)[abs(self.old_values - self.new_values) != 0]
        if len(indexing) > 0 and indexing[0] < 3:
            # a parameter changed first -> recompute all moments
            self.get_moments('mvsk')
        elif len(indexing) > 0 and indexing[0] > 2:
            # a moment changed -> solve for the corresponding parameter
            self.param_methods[indexing[0] - 3]()
        else:
            pass
        self.old_values = array([
            self.shape, self.loc, self.scale, self.mean, self.variance,
            self.skewness, self.kurtosis
        ])
        self.add_listeners()
        # notify the UI (replot in pdistrib.py)
        self.changed = True

    # callable per moment, chosen according to the number of shape
    # parameters of the wrapped distribution
    param_methods = Property(Array, depends_on='distribution')

    @cached_property
    def _get_param_methods(self):
        methods = array([
            self.mean_change, self.variance_change_scale,
            self.variance_change_shape, self.skewness_change,
            self.kurtosis_change
        ])
        if self.distribution.shapes == None:
            # no shape parameter: only mean and variance can be matched
            return methods[0:2]
        else:
            if len(self.distribution.shapes) == 1:
                # one shape parameter: variance is matched via the shape
                return hstack((methods[0], methods[2:5]))
            else:
                print 'more than 1 shape parameters'

    def shape_scale_mean_var_residuum(self, params):
        '''Residuum of (mean, variance) for simultaneous shape/scale fit.'''
        shape = params[0]
        scale = params[1]
        res_mean = self.mean - self.distribution(shape, \
            loc=self.loc, scale=scale).stats('m')
        res_var = self.variance - self.distribution(shape, \
            loc=self.loc, scale=scale).stats('v')
        return [res_mean, res_var]

    def mean_change(self):
        '''React to an edited mean value.'''
        if self.loc_zero == True and self.distribution.__dict__[
                'shapes'] != None:
            # location pinned at zero: fit shape and scale together
            self.loc = 0.0
            result = fsolve(self.shape_scale_mean_var_residuum, [1., 1.])
            self.shape = float(Decimal(str(result[0].sum())) / 1)
            self.scale = float(Decimal(str(result[1].sum())) / 1)
        else:
            # shift the location by the difference to the current mean
            self.loc += float(
                Decimal(str(self.mean - self.distr.stats('m'))) / 1)

    def scale_variance_residuum(self, scale):
        return self.variance - self.distribution(\
            loc=self.loc, scale=scale).stats('v')

    def variance_change_scale(self):
        '''Match an edited variance by adjusting the scale.'''
        self.scale = float(
            Decimal(str(fsolve(self.scale_variance_residuum, 1).sum())) / 1)

    def shape_variance_residuum(self, shape):
        return self.variance - self.distribution(shape, \
            loc=self.loc, scale=self.scale).stats('v')

    def variance_change_shape(self):
        '''Match an edited variance by adjusting the shape parameter.'''
        self.shape = float(
            Decimal(str(fsolve(self.shape_variance_residuum, 1).sum())) / 1)
        # refresh the remaining moments
        self.get_moments('msk')

    def shape_skewness_residuum(self, shape):
        return self.skewness - self.distribution(shape, \
            loc=self.loc, scale=self.scale).stats('s')

    def skewness_change(self):
        '''Match an edited skewness by adjusting the shape parameter.'''
        self.shape = float(
            Decimal(str(fsolve(self.shape_skewness_residuum, 1).sum())) / 1)
        self.get_moments('mvk')

    def shape_kurtosis_residuum(self, shape):
        return self.kurtosis - self.distribution(shape, \
            loc=self.loc, scale=self.scale).stats('k')

    def kurtosis_change(self):
        '''Match an edited kurtosis by adjusting the shape parameter.'''
        self.shape = float(
            Decimal(str(fsolve(self.shape_kurtosis_residuum, 1).sum())) / 1)
        self.get_moments('mvs')

    # the frozen scipy distribution for the current parameters
    distr = Property(depends_on='+params')

    @cached_property
    def _get_distr(self):
        if self.distribution.__dict__['numargs'] == 0:
            return self.distribution(self.loc, self.scale)
        elif self.distribution.__dict__['numargs'] == 1:
            return self.distribution(self.shape, self.loc, self.scale)

    def default_traits_view(self):
        '''checks the number of shape parameters of the distribution and
        adds them to the view instance'''
        label = str(self.distribution.name)
        if self.distribution.shapes == None:
            # no shape parameter -> no shape item in the view
            params = Item()
            if self.mean == infty:
                moments = Item(label='No finite moments defined')
            else:
                moments = Item('mean', label='mean'), \
                    Item('variance', label='variance'), \
                    Item('stdev', label='st. deviation', style='readonly')
        elif len(self.distribution.shapes) == 1:
            params = Item('shape', label='shape')
            if self.mean == infty:
                moments = Item(label='No finite moments defined')
            else:
                moments = Item('mean', label='mean'), \
                    Item('variance', label='variance'), \
                    Item('stdev', label='st. deviation', style='readonly'), \
                    Item('skewness', label='skewness'), \
                    Item('kurtosis', label='kurtosis'),
        else:
            # more than one shape parameter is not supported by the UI
            params = Item()
            moments = Item()
        view = View(VGroup(Label(label, emphasized=True),
                           Group(params,
                                 Item('loc', label='location'),
                                 Item('scale', label='scale'),
                                 Item('loc_zero', label='loc = 0.0'),
                                 show_border=True,
                                 label='parameters',
                                 id='pdistrib.distribution.params'),
                           Group(
                               moments,
                               id='pdistrib.distribution.moments',
                               show_border=True,
                               label='moments',
                           ),
                           id='pdistrib.distribution.vgroup'),
                    kind='live',
                    resizable=True,
                    id='pdistrib.distribution.view')
        return view
class YMBAutoCorrelView(HasTraits):
    '''View for YMBAutoCorrel data: either a per-cut correlation curve plot
    or, with cut_slider_on, a scatter plot of two selected cuts with
    marginal histograms.
    '''

    correl_data = Instance(YMBAutoCorrel)
    # [left, bottom, width, height] passed to figure.add_axes
    axes_adjust = List([0.1, 0.1, 0.8, 0.8])

    data = Property

    def _get_data(self):
        return self.correl_data.data

    zero = Constant(0)
    # upper bound for both cut sliders (number of cuts - 1)
    slider_max = Property()

    def _get_slider_max(self):
        return self.data.n_cuts - 1

    # indices of the two cuts compared in the scatter plot
    cut_slider = Range('zero',
                       'slider_max',
                       mode='slider',
                       auto_set=False,
                       enter_set=True,
                       modified=True)
    vcut_slider = Range('zero',
                        'slider_max',
                        mode='slider',
                        auto_set=False,
                        enter_set=True,
                        modified=True)
    # toggles between the scatter view (True) and the correlation curves
    cut_slider_on = Bool(False, modified=True)
    color = Str('blue')

    figure = Instance(Figure)

    def _figure_default(self):
        figure = Figure()
        figure.add_axes(self.axes_adjust)
        return figure

    data_changed = Event(True)

    @on_trait_change('correl_data.input_change, +modified')
    def _redraw(self):
        '''Rebuild the figure whenever the input data or a slider changes.'''
        # TODO: set correct ranges, fix axis range (axes.xlim)
        print 'redrawing xxxx'
        figure = self.figure
        figure.clear()
        var_data = self.correl_data.corr_arr
        # NOTE(review): 'id' is assigned but never used below (and shadows
        # the builtin)
        id = self.cut_slider
        if self.cut_slider_on == True:
            # scatter of cut i vs. cut j with marginal histograms
            i = self.cut_slider
            j = self.vcut_slider
            plot_data = getattr(self.data, self.correl_data.var_enum_)
            # plot_data = vstack( [plot_data[:, i], plot_data[:, j]] ).T
            # plot only values > -1
            # plot_data = plot_data[prod( plot_data >= 0, axis = 1, dtype = bool )]
            plot_data_x = plot_data[:, i]
            plot_data_y = plot_data[:, j]
            # corrcoef returns a 2x2 matrix; min() picks the off-diagonal
            # (Pearson) coefficient
            plot_data_corr = min(corrcoef(plot_data_x, plot_data_y))
            plot_data_corr_spear = spearmanr(plot_data_x, plot_data_y)[0]
            # layout: central scatter axes with histograms on top and right
            left, width = 0.1, 0.65
            bottom, height = 0.1, 0.65
            bottom_h = left_h = left + width + 0.02
            rect_scatter = [left, bottom, width, height]
            rect_histx = [left, bottom_h, width, 0.2]
            rect_histy = [left_h, bottom, 0.2, height]
            axScatter = figure.add_axes(rect_scatter)
            axHistx = figure.add_axes(rect_histx)
            axHisty = figure.add_axes(rect_histy)
            axScatter.clear()
            axHistx.clear()
            axHisty.clear()
            from matplotlib.ticker import NullFormatter
            # hide tick labels of the marginal histograms
            axHistx.xaxis.set_major_formatter(NullFormatter())
            axHisty.yaxis.set_major_formatter(NullFormatter())
            axScatter.scatter(plot_data_x, plot_data_y)
            # binwidth = 0.25
            # xymax = max( [max( abs( self.data.cf[:, j] ) ), max( abs( self.data.cf[:, i] ) )] )
            # lim = ( int( xymax / binwidth ) + 1 ) * binwidth
            # axScatter.set_xlim( ( -lim, lim ) )
            # axScatter.set_ylim( ( -lim, lim ) )
            # bins = arange( -lim, lim + binwidth, binwidth )
            # .compressed() -- the cut data are masked arrays; drop masked
            # entries before histogramming
            axHistx.hist(plot_data_x.compressed(), bins=40)
            axHisty.hist(plot_data_y.compressed(),
                         bins=40,
                         orientation='horizontal')
            axHistx.set_xlim(axScatter.get_xlim())
            axHisty.set_ylim(axScatter.get_ylim())
            axScatter.set_xlabel('$\mathrm{cut\, %i}$' % self.cut_slider,
                                 fontsize=16)
            axScatter.set_ylabel('$\mathrm{cut\, %i}$' % self.vcut_slider,
                                 fontsize=16)
            axScatter.text(
                axScatter.get_xlim()[0],
                axScatter.get_ylim()[0],
                'actual set correlation %.3f (Pearson), %.3f (Spearman)' %
                (plot_data_corr, plot_data_corr_spear),
                color='r')
        if self.cut_slider_on == False:
            # correlation vs. distance curves, one per starting cut
            figure.add_axes(self.axes_adjust)
            axes = figure.axes[0]
            axes.clear()
            x_coor = self.data.x_coord
            axes.grid()
            for i in range(0, var_data.shape[1]):
                axes.plot(x_coor[i:] - x_coor[i],
                          var_data[i, (i):],
                          '-x',
                          color=self.color)
            # approximate by the polynomial (of the i-th order)
            # axes.plot( x_coor, self.correl_data.peval( x_coor, self.correl_data.fit_correl ), 'b', linewidth = 3 )
            setp(axes.get_xticklabels(), position=(0, -.025))
            axes.set_xlabel('$x \, [\mathrm{mm}]$', fontsize=15)
            axes.set_ylabel('$\mathrm{correlation}$', fontsize=15)
            axes.set_ylim(-1, 1)
        self.data_changed = True

    traits_view = View(Group(
        Item('correl_data', show_label=False, style='custom'),
        HGroup(
            Item('cut_slider_on', label='Scatter'),
            Item('cut_slider',
                 show_label=False,
                 springy=True,
                 enabled_when='cut_slider_on == True'),
            Item('vcut_slider',
                 show_label=False,
                 springy=True,
                 enabled_when='cut_slider_on == True'),
        ),
        Group(Item('figure',
                   style='custom',
                   editor=MPLFigureEditor(),
                   show_label=False),
              id='figure.view'),
    ),
                       resizable=True,
                       )
class RunTable(SimDBClass):
    '''Manage the combinations of exec configurations and randomization
    patterns.

    Runs a SPIRRID benchmark for each (config, randomization) pair and
    plots the execution times, optionally comparing C against numpy.
    '''

    name = Str(simdb=True)
    memsize = Float(1e4, simdb=True)

    # SPIRRID instance configured for the current response function
    s = Property(Instance(SPIRRID), depends_on='rf')

    @cached_property
    def _get_s(self):
        return SPIRRID(rf=self.rf,
                       min_eps=0.00,
                       max_eps=1.0,
                       n_eps=20,
                       compiler_verbose=0)

    # response function under study
    rf = Instance(IRF, simdb=True)

    config_list = List(config=True)

    def _config_list_default(self):
        return ['I', 'IV']

    config_dict = Property(depends_on='config_list')

    @cached_property
    def _get_config_dict(self):
        # select the module-level config_dict entries named in config_list
        cd = {}
        for config_idx in self.config_list:
            cd[config_idx] = config_dict[config_idx]
        return cd

    rand_list = List(rand=True)

    # 2D array of SingleRun instances: configs x randomization patterns
    run_arr = Property(Array, depends_on='+rand,+config')

    @cached_property
    def _get_run_arr(self):
        # generate the runs to be performed
        run_table = [[
            SingleRun(run_table=self, config=config, rand_idx_arr=rand_idx_arr)
            for rand_idx_arr in self.rand_list
        ] for config in self.config_dict.items()]
        return array(run_table)

    exec_time_arr = Array
    n_int_arr = Array
    real_memsize_arr = Array

    calculate = Button()

    def _calculate_fired(self):
        '''Execute all runs, persist the results and redraw the plot.'''
        s = self.run_arr.shape
        self.exec_time_arr = array(
            [run.exec_time for run in self.run_arr.flatten()]).reshape(s)
        self.n_int_arr = array(
            [run.n_int for run in self.run_arr.flatten()]).reshape(s)
        self.real_memsize_arr = array(
            [run.real_memsize for run in self.run_arr.flatten()]).reshape(s)
        self.save()
        self._redraw_fired()

    clear = Button()

    def _clear_fired(self):
        figure = self.figure
        figure.clear()
        self.data_changed = True

    # transient: the figure is not persisted by SimDBClass
    figure = Instance(Figure, transient=True)

    def _figure_default(self):
        figure = Figure(facecolor='white')
        #figure.add_axes( [0.08, 0.13, 0.85, 0.74] )
        return figure

    data_changed = Event(True)

    normalized_numpy = Bool(True)
    c_code = Bool(False)

    redraw = Button()

    def _redraw_fired(self):
        figure = self.figure
        axes = figure.gca()
        self.plot(axes)
        self.data_changed = True

    redraw_in_window = Button()

    def _redraw_in_window_fired(self):
        figure = plt.figure(0)
        axes = figure.gca()
        self.plot(axes)
        plt.show()

    def plot(self, ax):
        '''Draw the execution-time bar chart into axes *ax*.

        With one result row a plain bar chart is drawn; with two rows
        (C and numpy) the bars are paired and a secondary axis shows the
        numpy/C speed ratio.
        '''
        exec_time_arr = self.exec_time_arr
        n_int_arr = self.n_int_arr[0, :]
        real_memsize_arr = self.real_memsize_arr[0, :]
        rand_arr = arange(len(self.rand_list)) + 1
        width = 0.45
        # BUGFIX: ax2/legend handling used to run unconditionally and raised
        # a NameError for the single-row case; the secondary axis now only
        # exists (and is styled) in the two-row comparison.
        ax2 = None
        if exec_time_arr.shape[0] == 1:
            shift = width / 2.0
            ax.bar(rand_arr - shift,
                   exec_time_arr[0, :],
                   width,
                   color='lightgrey')
        elif self.exec_time_arr.shape[0] == 2:
            max_exec_time = nmax(exec_time_arr)
            ax.set_ylabel('$\mathrm{execution \, time \, [sec]}$', size=20)
            ax.set_xlabel(
                '$n_{\mathrm{rnd}} \;-\; \mathrm{number \, of \, random \, parameters}$',
                size=20)
            ax.bar(rand_arr - width,
                   exec_time_arr[0, :],
                   width,
                   hatch='/',
                   color='white',
                   label='C')  # , color = 'lightgrey' )
            ax.bar(rand_arr,
                   exec_time_arr[1, :],
                   width,
                   color='lightgrey',
                   label='numpy')
            yscale = 1.25
            ax_xlim = rand_arr[-1] + 1
            ax_ylim = max_exec_time * yscale
            ax.set_xlim(0, ax_xlim)
            ax.set_ylim(0, ax_ylim)
            # secondary y-axis: numpy/C time ratio
            ax2 = ax.twinx()
            ydata = exec_time_arr[1, :] / exec_time_arr[0, :]
            ax2.plot(rand_arr,
                     ydata,
                     '-o',
                     color='black',
                     linewidth=1,
                     label='numpy/C')
            # horizontal reference line at ratio == 1
            ax2.plot([rand_arr[0] - 1, rand_arr[-1] + 1], [1, 1], '-')
            ax2.set_ylabel(
                '$\mathrm{time}( \mathsf{numpy} ) / \mathrm{ time }(\mathsf{C}) \; [-]$',
                size=20)
            ax2_ylim = nmax(ydata) * yscale
            ax2_xlim = rand_arr[-1] + 1
            ax2.set_ylim(0, ax2_ylim)
            ax2.set_xlim(0, ax2_xlim)
        ax.set_xticks(rand_arr)
        ax.set_xticklabels(rand_arr, size=14)
        xticks = ['%.2g' % n_int for n_int in n_int_arr]
        # top axis labelled with the number of integration points
        ax3 = ax.twiny()
        ax3.set_xlim(0, rand_arr[-1] + 1)
        ax3.set_xticks(rand_arr)
        ax3.set_xlabel('$n_{\mathrm{int}}$', size=20)
        ax3.set_xticklabels(xticks, rotation=30)
        # set the tick label size of the lower X axis
        X_lower_tick = 14
        xt = ax.get_xticklabels()
        for t in xt:
            t.set_fontsize(X_lower_tick)
        # set the tick label size of the upper X axis
        X_upper_tick = 12
        xt = ax3.get_xticklabels()
        for t in xt:
            t.set_fontsize(X_upper_tick)
        # set the tick label size of the Y axes
        Y_tick = 14
        yt = ax.get_yticklabels()
        if ax2 is not None:
            yt = yt + ax2.get_yticklabels()
        for t in yt:
            t.set_fontsize(Y_tick)
        # set the legend position and font size; only the two-row plot has
        # labelled artists, hence a legend to style
        leg_fontsize = 16
        if ax2 is not None:
            leg = ax.legend(loc=(0.02, 0.83))
            for t in leg.get_texts():
                t.set_fontsize(leg_fontsize)
            leg = ax2.legend(loc=(0.705, 0.90))
            for t in leg.get_texts():
                t.set_fontsize(leg_fontsize)

    traits_view = View(Item('name'),
                       Item('memsize'),
                       Item('rf'),
                       Item('config_dict'),
                       Item('rand_list'),
                       HGroup(
                           Item('calculate', show_label=False),
                           Item('redraw', show_label=False),
                           Item('clear', show_label=False),
                           Item('redraw_in_window', show_label=False),
                       ),
                       Item('figure',
                            editor=MPLFigureEditor(),
                            resizable=True,
                            show_label=False),
                       buttons=['OK', 'Cancel'])
class RandomVariable( HasTraits ):
    '''Class representing the definition and discretization of a random variable.

    A parameter is either deterministic (random == False, value taken from
    trait_value) or random with a probability distribution (pd) discretized
    by one of the schemes in discr_type.
    '''
    # value used when the variable is not randomized
    trait_value = Float
    # trait descriptor of the source parameter; its 'distr' metadata lists
    # the admissible distribution types
    source_trait = CTrait

    # name of the parameter
    name = Str

    # probability distribution of the variable
    pd = Instance( IPDistrib )
    def _pd_changed( self ):
        self.pd.n_segments = self._n_int

    changed = Event
    @on_trait_change( 'pd.changed,+changed' )
    def _set_changed( self ):
        self.changed = True

    # Switch the parameter random
    random = Bool( False, changed = True )

    def set_random( self, distribution = 'uniform', discr_type = 'T grid',
                    loc = 0., scale = 1., shape = 1., n_int = 30 ):
        '''Turn the variable random with the given distribution,
        discretization type and number of integration points.

        Raises AssertionError if the distribution type is not admissible
        for this parameter (according to source_trait.distr).
        '''
        possible_distr = self.source_trait.distr
        if distribution and distribution not in possible_distr:
            raise AssertionError, 'distribution type %s not allowed for parameter %s' \
                % ( distribution, self.name )
        self.pd = PDistrib( distr_choice = distribution, n_segments = n_int )
        self.pd.distr_type.set( scale = scale, shape = shape, loc = loc )
        self.n_int = n_int
        self.discr_type = discr_type
        self.random = True

    def unset_random( self ):
        '''Turn the variable deterministic again.'''
        self.random = False

    # Number of integration points (relevant only for grid based methods)
    _n_int = Int
    n_int = Property
    def _set_n_int( self, value ):
        if self.pd:
            self.pd.n_segments = value
        self._n_int = value
    def _get_n_int( self ):
        # NOTE(review): always delegates to pd (fails if pd is unset) and
        # ignores the shadow _n_int -- confirm this asymmetry is intended
        return self.pd.n_segments

    # type of the RandomVariable discretization
    discr_type = Enum( 'T grid', 'P grid', 'MC',
                       changed = True )
    def _discr_type_default( self ):
        return 'T grid'

    # discretized values of the random variable
    theta_arr = Property( Array( 'float_' ), depends_on = 'changed' )
    @cached_property
    def _get_theta_arr( self ):
        '''Get the discr_type of the pdistrib
        '''
        if not self.random:
            # deterministic: a single point
            return array( [ self.trait_value ], dtype = float )
        if self.discr_type == 'T grid':
            # Get the discr_type from pdistrib and shift it
            # such that the midpoints of the segments are used for the
            # integration.
            x_array = self.pd.x_array
            # Note assumption of T grid discr_type
            theta_array = x_array[:-1] + self.pd.dx / 2.0
        elif self.discr_type == 'P grid':
            # P grid disretization generated from the inverse cummulative
            # probability
            #
            distr = self.pd.distr_type.distr
            # Grid of constant probabilities
            pi_arr = linspace( 0.5 / self.n_int,
                               1. - 0.5 / self.n_int, self.n_int )
            theta_array = distr.ppf( pi_arr )
        return theta_array

    # integration weights associated with theta_arr
    dG_arr = Property( Array( 'float_' ), depends_on = 'changed' )
    @cached_property
    def _get_dG_arr( self ):
        if not self.random:
            # deterministic: unit weight
            return array( [ 1.0 ], dtype = float )
        if self.discr_type == 'T grid':
            # pdf value times the (constant) segment width
            d_theta = self.theta_arr[1] - self.theta_arr[0]
            return self.pd.get_pdf_array( self.theta_arr ) * d_theta
        elif self.discr_type == 'P grid':
            # P grid disretization generated from the inverse cummulative
            # probability
            #
            return array( [ 1.0 / float( self.n_int ) ], dtype = 'float_' )

    def get_rvs_theta_arr( self, n_samples ):
        '''Return n_samples Monte-Carlo realizations of the variable
        (or the deterministic value if not random).'''
        if self.random:
            return self.pd.get_rvs_array( n_samples )
        else:
            return array( [self.trait_value], float )
class SPIRRID( Randomization ):
    '''Estimator of the mean (and optionally variance) response curve of a
    response function with randomized parameters.

    The integration over the random-variable grid can be run either in
    pure numpy or through generated C code (scipy.weave), with the eps
    loop and/or the inner integration loop compiled.
    '''
    #---------------------------------------------------------------------------------------------
    # Range of the control process variable epsilon
    # Define particular control variable points with
    # the cv array or an equidistant range with (min, max, n)
    #---------------------------------------------------------------------------------------------
    cv = Array( eps_range = True )
    min_eps = Float( 0.0, eps_range = True )
    max_eps = Float( 0.0, eps_range = True )
    n_eps = Float( 80, eps_range = True )

    eps_arr = Property( depends_on = 'eps_change' )
    @cached_property
    def _get_eps_arr( self ):
        '''Discretization of the control variable: either the explicitly
        given cv array or an equidistant range.'''

        # @todo: !!!
        # This is a side-effect in a property - CRIME !! [rch]
        # No clear access interface points - naming inconsistent.
        # define a clean property with a getter and setter
        #
        # if the array of control variable points is not given
        if len( self.cv ) == 0:
            n_eps = self.n_eps
            min_eps = self.min_eps
            max_eps = self.max_eps
            return linspace( min_eps, max_eps, n_eps )
        else:
            return self.cv

    #------------------------------------------------------------------------------------
    # Configuration of the algorithm
    #------------------------------------------------------------------------------------
    #
    # cached_dG_grid:
    # If set to True, the cross product between the pdf values of all random variables
    # will be precalculated and stored in an n-dimensional grid
    # otherwise the product is performed for every epsilon in the inner loop anew
    #
    cached_dG = Bool( True, alg_option = True )

    # compiled_eps_loop:
    # If set True, the loop over the control variable epsilon is compiled
    # otherwise, python loop is used.
    compiled_eps_loop = Bool( False, alg_option = True )

    # compiled_QdG_loop:
    # If set True, the integration loop over the product between the response function
    # and the pdf . theta product is performed in c
    # otherwise the numpy arrays are used.
    compiled_QdG_loop = Bool( False, alg_option = True )
    def _compiled_QdG_loop_changed( self ):
        '''If the inner loop is not compiled, the outer loop
        must not be compiled as well.
        '''
        if self.compiled_QdG_loop == False:
            # BUGFIX: previously assigned the non-existent trait
            # 'compiled_eps', leaving the eps loop compiled even though the
            # inner loop was interpreted.
            self.compiled_eps_loop = False

    arg_list = Property( depends_on = 'rf_change, rand_change, conf_change' )
    @cached_property
    def _get_arg_list( self ):
        '''Names of the variables passed into the generated C code.'''
        arg_list = []
        # create argument string for inline function
        if self.compiled_eps_loop:
            arg_list += [ 'mu_q_arr', 'e_arr' ]
        else:
            arg_list.append( 'e' )

        arg_list += ['%s_flat' % name for name in self.rv_keys ]

        if self.cached_dG:
            arg_list += [ 'dG_grid' ]
        else:
            arg_list += [ '%s_pdf' % name for name in self.rv_keys ]

        return arg_list

    dG_C_code = Property( depends_on = 'rf_change, rand_change, conf_change' )
    @cached_property
    def _get_dG_C_code( self ):
        '''C snippet evaluating the pdf weight for the current grid index.'''
        if self.cached_dG: # q_g - blitz matrix used to store the grid
            code_str = '\tdouble pdf = dG_grid(' + \
                       ','.join( [ 'i_%s' % name
                                  for name in self.rv_keys ] ) + \
                       ');\n'
        else: # qg
            code_str = '\tdouble pdf = ' + \
                       '*'.join( [ ' *( %s_pdf + i_%s)' % ( name, name )
                                  for name in self.rv_keys ] ) + \
                       ';\n'
        return code_str

    #------------------------------------------------------------------------------------
    # Configurable generation of C-code for mean curve evaluation
    #------------------------------------------------------------------------------------
    C_code = Property( depends_on = 'rf_change, rand_change, conf_change, eps_change' )
    @cached_property
    def _get_C_code( self ):
        code_str = ''
        if self.compiled_eps_loop:
            # create code string for inline function
            #
            code_str += 'for( int i_eps = 0; i_eps < %g; i_eps++){\n' % self.n_eps
            if self.cached_dG:
                # multidimensional index needed for dG_grid
                # blitz arrays must be used also for other arrays
                #
                code_str += 'double eps = e_arr( i_eps );\n'
            else:
                # pointer access possible for single dimensional arrays
                # use the pointer arithmetics for accessing the pdfs
                code_str += '\tdouble eps = *( e_arr + i_eps );\n'
        else:
            # create code string for inline function
            #
            code_str += 'double eps = e;\n'

        code_str += 'double mu_q(0);\n'
        code_str += 'double q(0);\n'
        code_str += '#line 100\n'
        # create code for constant params
        for name, value in list(self.const_param_dict.items()):
            code_str += 'double %s = %g;\n' % ( name, value )

        # generate loops over random params
        for rv in self.rv_list:
            name = rv.name
            n_int = rv.n_int
            # create the loop over the random variable
            #
            code_str += 'for( int i_%s = 0; i_%s < %g; i_%s++){\n' % ( name, name, n_int, name )
            if self.cached_dG:
                # multidimensional index needed for pdf_grid - use blitz arrays
                #
                code_str += '\tdouble %s = %s_flat( i_%s );\n' % ( name, name, name )
            else:
                # pointer access possible for single dimensional arrays
                # use the pointer arithmetics for accessing the pdfs
                code_str += '\tdouble %s = *( %s_flat + i_%s );\n' % ( name, name, name )

        if len( self.rv_keys ) > 0:
            code_str += self.dG_C_code
            code_str += self.rf.C_code + \
                        '// Store the values in the grid\n' + \
                        '\tmu_q += q * pdf;\n'
        else:
            code_str += self.rf.C_code + \
                        '\tmu_q += q;\n'

        # close the random loops
        #
        for name in self.rv_keys:
            code_str += '};\n'

        if self.compiled_eps_loop:
            if self.cached_dG: # blitz matrix
                code_str += 'mu_q_arr(i_eps) = mu_q;\n'
            else:
                code_str += '*(mu_q_arr + i_eps) = mu_q;\n'
            code_str += '};\n'
        else:
            code_str += 'return_val = mu_q;'
        return code_str

    compiler_verbose = Int( 0 )
    compiler = Str( 'gcc' )

    # Option of eval that induces the calculation of variation
    # in parallel with the mean value so that interim values of Q_grid
    # are directly used for both.
    #
    # implicit_var_eval = Bool( False, alg_option = True )

    def _eval( self ):
        '''Evaluate the integral based on the configuration of algorithm.

        Returns (mu_q_arr, var_q_arr, duration); var_q_arr is None unless
        implicit_var_eval is set (numpy path only).
        '''
        if self.cached_dG == False and self.compiled_QdG_loop == False:
            raise NotImplementedError('Configuration for pure Python integration is too slow and is not implemented')

        self._set_compiler()
        # prepare the array of the control variable discretization
        #
        eps_arr = self.eps_arr
        mu_q_arr = zeros_like( eps_arr )

        # prepare the variable for the variance
        var_q_arr = None
        if self.implicit_var_eval:
            var_q_arr = zeros_like( eps_arr )

        # prepare the parameters for the compiled function in
        # a separate dictionary
        c_params = {}

        if self.compiled_eps_loop:
            # for compiled eps_loop the whole input and output array must be passed to c
            #
            c_params['e_arr'] = eps_arr
            c_params['mu_q_arr'] = mu_q_arr
            #c_params['n_eps' ] = n_eps

        if self.compiled_QdG_loop:
            # prepare the lengths of the arrays to set the iteration bounds
            #
            for rv in self.rv_list:
                c_params[ '%s_flat' % rv.name ] = rv.theta_arr

        if len( self.rv_list ) > 0:
            if self.cached_dG:
                c_params[ 'dG_grid' ] = self.dG_grid
            else:
                for rv in self.rv_list:
                    c_params['%s_pdf' % rv.name] = rv.dG_arr
        else:
            c_params[ 'dG_grid' ] = self.dG_grid

        # blitz converters are required for the multidimensional dG_grid
        if self.cached_dG:
            conv = converters.blitz
        else:
            conv = converters.default

        t = time.clock()

        if self.compiled_eps_loop:
            # C loop over eps, all inner loops must be compiled as well
            #
            if self.implicit_var_eval:
                raise NotImplementedError('calculation of variance not available in the compiled version')
            inline( self.C_code, self.arg_list, local_dict = c_params,
                    type_converters = conv, compiler = self.compiler,
                    verbose = self.compiler_verbose )
        else:
            # Python loop over eps
            #
            for idx, e in enumerate( eps_arr ):
                if self.compiled_QdG_loop:
                    if self.implicit_var_eval:
                        raise NotImplementedError('calculation of variance not available in the compiled version')
                    # C loop over random dimensions
                    #
                    c_params['e'] = e # prepare the parameter
                    mu_q = inline( self.C_code, self.arg_list,
                                   local_dict = c_params,
                                   type_converters = conv,
                                   compiler = self.compiler,
                                   verbose = self.compiler_verbose )
                else:
                    # Numpy loops over random dimensions
                    #
                    # get the rf grid for all combinations of
                    # parameter values
                    #
                    Q_grid = self.rf( e, **self.param_dict )
                    # multiply the response grid with the contributions
                    # of pdf distributions (weighted by the delta of the
                    # random variable disretization)
                    #
                    if not self.implicit_var_eval:
                        # only mean value needed, the multiplication can be done
                        # in-place
                        Q_grid *= self.dG_grid
                        # sum all the values to get the integral
                        mu_q = sum( Q_grid )
                    else:
                        # get the square values of the grid
                        Q_grid2 = Q_grid ** 2
                        # make an inplace product of Q_grid with the weights
                        Q_grid *= self.dG_grid
                        # make an inplace product of the squared Q_grid with the weights
                        Q_grid2 *= self.dG_grid
                        # sum all values to get the mean
                        mu_q = sum( Q_grid )
                        # sum all squared values to get the variance
                        var_q = sum( Q_grid2 ) - mu_q ** 2

                # add the value to the return array
                mu_q_arr[idx] = mu_q
                if self.implicit_var_eval:
                    var_q_arr[idx] = var_q

        duration = time.clock() - t
        return mu_q_arr, var_q_arr, duration

    def eval_i_dG_grid( self ):
        '''Get the integral of the pdf * theta grid.
        '''
        return sum( self.dG_grid )

    #---------------------------------------------------------------------------------------------
    # Output properties
    #---------------------------------------------------------------------------------------------

    # container for the data obtained in the integration
    #
    # This is not only the mean curve but also variance and
    # execution statistics. Such an implementation
    # concentrates the critical part of the algorithmic
    # evaluation and avoids duplication of code and
    # repeated calls. The results are cached in the tuple.
    # They are accessed by the convenience properties defined
    # below.
    #
    results = Property( depends_on = 'rand_change, conf_change, eps_change' )
    @cached_property
    def _get_results( self ):
        return self._eval()

    #---------------------------------------------------------------------------------------------
    # Output accessors
    #---------------------------------------------------------------------------------------------
    # the properties that access the cached results and give them a name
    mu_q_arr = Property()
    def _get_mu_q_arr( self ):
        '''mean of q at eps'''
        return self.results[0]

    var_q_arr = Property()
    def _get_var_q_arr( self ):
        '''variance of q at eps'''
        # switch on the implicit evaluation of variance
        # if it has not been the case so far
        if not self.implicit_var_eval:
            self.implicit_var_eval = True
        return self.results[1]

    exec_time = Property()
    def _get_exec_time( self ):
        '''Execution time of the last evaluation.
        '''
        return self.results[2]

    mean_curve = Property()
    def _get_mean_curve( self ):
        '''Mean response curve.
        '''
        return MFnLineArray( xdata = self.eps_arr, ydata = self.mu_q_arr )

    var_curve = Property()
    def _get_var_curve( self ):
        '''variance of q at eps'''
        return MFnLineArray( xdata = self.eps_arr, ydata = self.var_q_arr )

    #---------------------------------------------------------------------------------------------
    # Auxiliary methods
    #---------------------------------------------------------------------------------------------
    def _set_compiler( self ):
        '''Catch eventual mismatch between scipy.weave and compiler
        '''
        try:
            uname = os.uname()[3]
        except:
            # it is not Linux - just let it go and suffer
            return

        #if self.compiler == 'gcc':
            #os.environ['CC'] = 'gcc-4.1'
            #os.environ['CXX'] = 'g++-4.1'
            #os.environ['OPT'] = '-DNDEBUG -g -fwrapv -O3'

    traits_view = View( Item( 'rf@', show_label = False ),
                        width = 0.3, height = 0.3,
                        resizable = True,
                        scrollable = True,
                        )
class CrackBridgeShortFibersGf(HasTraits):
    '''Crack-bridge contribution of the short-fiber reinforcements,
    evaluated through cached SPIRRID interpolators over (x, w).
    '''

    short_reinf_lst = List(Instance(Reinforcement))
    # crack opening
    w = Float
    # composite and matrix stiffness
    E_c = Float
    E_m = Float
    # matrix softening strain contribution
    epsm_softening = Float
    damage_switch = Bool(True)

    # fiber volume fractions, one entry per short reinforcement
    sorted_V_f = Property(depends_on='short_reinf_lst+')

    @cached_property
    def _get_sorted_V_f(self):
        return np.array([reinf.V_f for reinf in self.short_reinf_lst])

    # fiber moduli, one entry per short reinforcement
    sorted_E_f = Property(depends_on='short_reinf_lst+')

    @cached_property
    def _get_sorted_E_f(self):
        return np.array([reinf.E_f for reinf in self.short_reinf_lst])

    # spatial discretization around the crack plane, spanning +- Lf/2 of
    # the longest fiber
    x_arr = Property(Array, depends_on='short_reinf_lst+')

    @cached_property
    def _get_x_arr(self):
        Lf_lst = []
        for reinf in self.short_reinf_lst:
            Lf_lst.append(reinf.Lf)
        max_Lf = np.max(np.array(Lf_lst))
        # !!! an even number has to be set as step for the zero position to be in the linspace !!!
        x_arr = np.linspace(-max_Lf / 2., max_Lf / 2., 61)
        return x_arr

    # one SPIRRID integrator per short reinforcement
    spirrid_lst = Property(List(Instance(SPIRRID)),
                           depends_on='short_reinf_lst+')

    @cached_property
    def _get_spirrid_lst(self):
        spirrid_epsm_list = []
        for reinf in self.short_reinf_lst:
            cb = CBShortFiberSP()
            spirrid = SPIRRID(q=cb,
                              sampling_type='LHS',
                              theta_vars=dict(
                                  epsm_softening=self.epsm_softening,
                                  tau=reinf.tau,
                                  E_f=reinf.E_f,
                                  r=reinf.r,
                                  xi=reinf.xi,
                                  snub=reinf.snub,
                                  le=reinf.le,
                                  phi=reinf.phi),
                              n_int=reinf.n_int)
            spirrid_epsm_list.append(spirrid)
        return spirrid_epsm_list

    # 2D interpolators sigma_f(x, w), one per reinforcement; the w range is
    # refined around the peak located with fminbound
    spirrid_evaluation_cached = Property(Array, depends_on='short_reinf_lst+')

    @cached_property
    def _get_spirrid_evaluation_cached(self):
        interpolators_lst = []
        for i, spirr in enumerate(self.spirrid_lst):
            Lfi = self.short_reinf_lst[i].Lf

            def minfunc_short_fibers(w):
                # negated mean response at the crack plane (x = 0) so that
                # fminbound finds the peak
                spirr.eps_vars = dict(w=np.array([w]), x=np.array([0.0]))
                return -spirr.mu_q_arr.flatten()

            w_maxi = fminbound(minfunc_short_fibers,
                               0.0,
                               Lfi / 3.,
                               maxfun=20,
                               disp=0)
            # dense sampling up to the peak, then up to Lf/2
            w_arri = np.hstack(
                (np.linspace(0.0, w_maxi, 15),
                 np.linspace(w_maxi + 1e-10, Lfi / 2., 15)))
            spirr.eps_vars = dict(w=w_arri, x=self.x_arr)
            interpolators_lst.append(
                interp2d(self.x_arr, w_arri, spirr.mu_q_arr, fill_value=0.0))
        return interpolators_lst

    # matrix strain profile along x for the current crack opening w
    epsm_arr = Property(Array,
                        depends_on='short_reinf_lst+,w,E_m,epsm_softening')

    @cached_property
    def _get_epsm_arr(self):
        epsm_x_arr = np.zeros(len(self.x_arr))
        for i, interpolator in enumerate(self.spirrid_evaluation_cached):
            sigf_x_i = interpolator(self.x_arr, self.w)
            Ff_x_i = sigf_x_i * self.sorted_V_f[i]
            Fmax_i = np.max(Ff_x_i)
            epsm_x_i = (Fmax_i - Ff_x_i) / self.E_c
            # NOTE(review): epsm_softening is added once per reinforcement
            # inside this loop, so with several short reinforcements it is
            # accumulated multiple times -- confirm this is intended
            epsm_x_arr += epsm_x_i.flatten() + self.epsm_softening
        return epsm_x_arr

    # fiber strains at the crack plane (x = 0), one per reinforcement
    epsf0_arr = Property(Array, depends_on='short_reinf_lst+,w')

    @cached_property
    def _get_epsf0_arr(self):
        return np.array([
            interpolator(0., self.w) / self.sorted_E_f[i]
            for i, interpolator in enumerate(self.spirrid_evaluation_cached)
        ]).flatten()