예제 #1
0
class GenDoc(HasTraits):
    '''
    Configuration of the document generation using sphinx.
    '''
    # modules whose example docs are rendered before the sphinx run
    component_modules = []

    # build either next to the sources ('local') or into BUILD_DIR ('global')
    build_mode = Enum('local', 'global')

    build_dir = Property(depends_on='build_mode')

    def _get_build_dir(self):
        # 'local' builds in place, 'global' into the shared BUILD_DIR
        return '.' if self.build_mode == 'local' else BUILD_DIR

    html_server = '[email protected]:/var/www/docs/oricrete'

    # maps a generation kind to the name of the method implementing it
    method_dispatcher = {'all': 'generate_examples'}

    def generate_html(self):
        '''Render the example docs, then run sphinx over DOCS_DIR.'''
        for module in self.component_modules:
            example_doc = GenExampleDoc(component_module=module)
            example_doc.generate_html()

        os.chdir(DOCS_DIR)
        cmd = 'sphinx-build -b html -E . %s' % self.build_dir
        os.system(cmd)

    def push_html(self):
        '''
        Push the documentation to the server.
        '''
        cmd = 'rsync -av --delete %s/ %s' % (self.build_dir, self.html_server)
        os.system(cmd)
예제 #2
0
class S(YMBSource):
    '''Source variant fixed to the synthetic '__TEST__' yarn type.'''

    # single-value enum; the 'changed_source' metadata presumably flags
    # edits as invalidating the source data -- TODO confirm trait semantics
    yarn_type = Enum('__TEST__',
                     changed_source=True,
                     editor=EnumEditor(values=['__TEST__']))
    # root directory of the data; '' -- assumed current directory, verify
    root_dir = Str('')

    view = View(Item('yarn_type'))
예제 #3
0
class ShortFibers(Reinforcement):
    '''implements short fiber reinforcement'''
    # inclination of short fibers to the crack plane normal; either a fixed
    # float value or a random variable (RV)
    phi = EitherType(
        klasses=[FloatType,
                 RV])
    Lf = Float  # length of short fibers
    bond_law = Enum('plastic', 'elasto-plastic')
    tau_fr = Float  # frictional bond at debonded interface
    k_tau = Float  # stiffness of the elastic adhesive bond
    beta = Float  # slip hardening coefficient
    snub = Float  # snubbing coefficient

    # embedded length of a randomly placed fiber: uniform over [0, Lf/2]
    le = Property(depends_on='Lf')

    @cached_property
    def _get_le(self):
        return RV('uniform', loc=0.0, scale=self.Lf / 2.)
예제 #4
0
class GenDoc(HasTraits):
    '''
    Configuration of the document generation using sphinx.
    '''
    # demo modules whose example docs are generated before the sphinx run
    demo_modules = [fiber_tt_2p, fiber_tt_5p, fiber_po_8p]

    # build either next to the sources ('local') or into BUILD_DIR ('global')
    build_mode = Enum('local', 'global')

    build_dir = Property(depends_on='build_mode')

    def _get_build_dir(self):
        # 'local' builds in place, 'global' into the shared BUILD_DIR
        return '.' if self.build_mode == 'local' else BUILD_DIR

    html_server = '[email protected]:/var/www/docs/spirrid'

    # maps a generation kind to the name of the method implementing it
    method_dispatcher = {
        'all': 'generate_examples',
        'sampling_structure': 'generate_examples_sampling_structure',
        'sampling_efficiency': 'generate_examples_sampling_efficiency',
        'language_efficiency': 'generate_examples_language_efficiency',
    }

    def generate_examples(self, kind='all'):
        '''Run the example-generation method selected by *kind*.'''
        generator_name = self.method_dispatcher[kind]
        for module in self.demo_modules:
            doc_gen = GenExampleDoc(demo_module=module)
            getattr(doc_gen, generator_name)()

    def generate_html(self):
        '''Render the demo docs, then run sphinx over DOCS_DIR.'''
        for module in self.demo_modules:
            doc_gen = GenExampleDoc(demo_module=module)
            doc_gen.generate_html()

        os.chdir(DOCS_DIR)
        cmd = 'sphinx-build -b html -E . %s' % self.build_dir
        os.system(cmd)

    def push_html(self):
        '''
        Push the documentation to the server.
        '''
        cmd = 'rsync -av --delete %s/ %s' % (self.build_dir, self.html_server)
        os.system(cmd)
예제 #5
0
class MushRoofModelNonLin(MRquarter):
    '''Overload the nonlinear model.
    '''

    #-----------------
    # composite cross section unit cell:
    #-----------------
    #
    # key selecting the cross-section record from the CCSUnitCell database
    ccs_unit_cell_key = Enum(CCSUnitCell.db.keys(),
                             simdb=True,
                             input=True,
                             auto_set=False,
                             enter_set=True)

    # database record resolved from the selected key
    ccs_unit_cell_ref = Property(Instance(SimDBClass),
                                 depends_on='ccs_unit_cell_key')

    @cached_property
    def _get_ccs_unit_cell_ref(self):
        return CCSUnitCell.db[self.ccs_unit_cell_key]

    # vary the failure strain in PhiFnGeneralExtended:
    factor_eps_fail = Float(1.0, input=True, ps_levels=(1.0, 1.2, 3))

    #-----------------
    # damage function:
    #-----------------
    #
    material_model = Str(input=True)

    def _material_model_default(self):
        # return the material model key of the first DamageFunctionEntry
        # This is necessary to avoid an ValueError at setup
        return self.ccs_unit_cell_ref.damage_function_list[0].material_model

    calibration_test = Str(input=True)

    def _calibration_test_default(self):
        # return the calibration test key of the first DamageFunctionEntry
        # This is necessary to avoid an ValueError at setup
        return self.ccs_unit_cell_ref.damage_function_list[0].calibration_test

    # damage function looked up in the unit-cell record for the selected
    # material model / calibration test pair
    damage_function = Property(Instance(MFnLineArray),
                               depends_on='input_change')

    @cached_property
    def _get_damage_function(self):
        return self.ccs_unit_cell_ref.get_param(self.material_model,
                                                self.calibration_test)

    #-----------------
    # phi function extended:
    #-----------------
    #
    phi_fn = Property(Instance(PhiFnGeneralExtended),
                      depends_on='input_change,+ps_levels')

    @cached_property
    def _get_phi_fn(self):
        return PhiFnGeneralExtended(mfn=self.damage_function,
                                    factor_eps_fail=self.factor_eps_fail)

    #----------------------------------------------------------------------------------
    # mats_eval
    #----------------------------------------------------------------------------------

    # age of the plate at the time of testing
    # NOTE: that the same phi-function is used independent of age. This assumes a
    # an afine/proportional damage evolution for different ages.
    #
    age = Int(
        28,  #input = True
    )

    # composite E-modulus
    #
    E_c = Property(Float, depends_on='input_change')

    @cached_property
    def _get_E_c(self):
        # E-modulus evaluated for the specified concrete age
        return self.ccs_unit_cell_ref.get_E_c_time(self.age)

    # Poisson's ratio
    #
    nu = Property(Float, depends_on='input_change')

    @cached_property
    def _get_nu(self):
        return self.ccs_unit_cell_ref.nu

    # @todo: for mats_eval the information of the unit cell should be used
    # in order to use the same number of microplanes and model version etc...
    #
    mats = Property(Instance(MATS2D5MicroplaneDamage),
                    depends_on='input_change')

    @cached_property
    def _get_mats(self):
        # microplane damage model parameterized by the derived elastic
        # constants and the extended phi function
        mats = MATS2D5MicroplaneDamage(E=self.E_c,
                                       nu=self.nu,
                                       n_mp=30,
                                       symmetrization='sum-type',
                                       model_version='compliance',
                                       phi_fn=self.phi_fn)

        return mats

    # pseudo time line of the nonlinear computation (8 equal steps)
    tline = Instance(TLine)

    def _tline_default(self):
        return TLine(min=0.0, step=1.0, max=8.0)

    # response traces recorded during the computation
    rtrace_list = List

    def _rtrace_list_default(self):
        return [
            #                                 RTraceDomainListField( name = 'Displacement' ,
            #                                                var = 'u', idx = 0, warp = True ),
            #                                 RTraceDomainListField( name = 'Stress' ,
            #                                                var = 'sig_app', idx = 0, warp = True,
            #                                                record_on = 'update', ),
            #                    self.max_princ_stress,
            RTraceDomainListField(name='Damage',
                                  var='omega_mtx',
                                  idx=0,
                                  warp=True,
                                  record_on='update'),
        ]
예제 #6
0
class LCCTable(HasTraits):
    '''Loading Case Manager.
    Generates and sorts the loading case combinations
    of all specified loading cases.
    '''

    # define ls
    # limit state to evaluate; the key selects the corresponding LS class
    #
    ls = Trait('ULS', {'ULS': ULS, 'SLS': SLS})

    # lcc-instance for the view
    #
    lcc = Instance(LCC)

    #-------------------------------
    # Define loading cases:
    #-------------------------------

    # path to the directory containing the state data files
    #
    data_dir = Directory

    # list of load cases
    #
    lc_list_ = List(Instance(LC))

    # property facade over 'lc_list_' (getter/setter below); the
    # 'depends_on' metadata filter '+filter' -- TODO confirm intent
    lc_list = Property(List, depends_on='+filter')

    def _set_lc_list(self, value):
        # setter of the 'lc_list' property: store into the shadow trait
        self.lc_list_ = value

    def _get_lc_list(self):
        # getter of the 'lc_list' property
        #        for lc in self.lc_list_:
        #            if lc.data_filter != self.data_filter:
        #                lc.data_filter = self.data_filter
        return self.lc_list_

    # column layout of the combination table (one factor column per LC)
    lcc_table_columns = Property(depends_on='lc_list_, +filter')

    def _get_lcc_table_columns(self):
        '''Columns of the combination table: the combination id, one
        factor column per loading case, and the assessment value.
        '''
        # the original iterated with enumerate() but never used the index
        factor_columns = [ObjectColumn(label=lc.name, name=lc.name)
                          for lc in self.lc_list]
        return ([ObjectColumn(label='Id', name='lcc_id')] +
                factor_columns +
                [ObjectColumn(label='assess_value', name='assess_value')])

    # ordered names of the geometry columns (taken from the first LC)
    geo_columns = Property(List(Str), depends_on='lc_list_, +filter')

    def _get_geo_columns(self):
        '''derive the order of the geo columns
        from the first element in 'lc_list'. The internal
        consistency is checked separately in the
        'check_consistency' method.
        '''
        return self.lc_list[0].geo_columns

    # ordered names of the stress resultant columns (from the first LC)
    sr_columns = Property(List(Str), depends_on='lc_list_, +filter')

    def _get_sr_columns(self):
        '''derive the order of the stress resultants
        from the first element in 'lc_list'. The internal
        consistency is checked separately in the
        'check_consistency' method.
        '''
        return self.lc_list[0].sr_columns

    #-------------------------------
    # check consistency
    #-------------------------------

    def _check_for_consistency(self):
        ''' check input files for consistency:

        NOTE: currently a stub -- always reports success.
        '''
        return True

    #-------------------------------
    # lc_arr
    #-------------------------------

    # stacked stress resultants of all LCs, shape ( n_lc, n_elems, n_sr )
    lc_arr = Property(Array)

    def _get_lc_arr(self):
        '''stack stress resultants arrays of all loading cases together.
        This yields an array of shape ( n_lc, n_elems, n_sr )
        '''
        return array([lc.sr_arr for lc in self.lc_list])

    #-------------------------------
    # Array dimensions:
    #-------------------------------

    # number of stress resultants
    n_sr = Property(Int)

    def _get_n_sr(self):
        return len(self.sr_columns)

    # number of loading cases
    n_lc = Property(Int)

    def _get_n_lc(self):
        return len(self.lc_list)

    # number of loading case combinations (rows of 'combi_arr')
    n_lcc = Property(Int)

    def _get_n_lcc(self):
        return self.combi_arr.shape[0]

    # number of elements (rows of the first LC's stress resultant array)
    n_elems = Property(Int)

    def _get_n_elems(self):
        return self.lc_list[0].sr_arr.shape[0]

    #-------------------------------
    # auxilary method for get_combi_arr
    #-------------------------------

    def _product(self, args):
        """
        Get all possible permutations of the security factors
        without changing the order of the loading cases.

        Equivalent to the built-in 'itertools.product' called with the
        unpacked argument list, but takes a list of lists and returns a
        fully materialized list of lists instead of a generator of tuples
        (callers index and vstack the result).
        """
        from itertools import product

        # convert each tuple to a list to preserve the historic return type
        return [list(combination) for combination in product(*args)]

    # ------------------------------------------------------------
    # 'combi_arr' - array containing indices of all loading case combinations:
    # ------------------------------------------------------------

    # list of indices of the position of the imposed loads in 'lc_list'
    #
#    imposed_idx_list = Property( List, depends_on = 'lc_list_, lc_list_.+input' )
    imposed_idx_list = Property(List, depends_on='lc_list_')

    @cached_property
    def _get_imposed_idx_list(self):
        '''indices (into 'lc_list') of all imposed-load loading cases
        '''
        # comprehension replaces the original append loop
        return [i_lc for i_lc, lc in enumerate(self.lc_list)
                if lc.category == 'imposed-load']

    # array containing the psi with name 'psi_key' for the specified
    # loading cases defined in 'lc_list'. For dead-loads no value for
    # psi exists. In this case a value of 1.0 is defined.
    # This yields an array of shape ( n_lc, )
    #
    def _get_psi_arr(self, psi_key):
        '''psi_key must be defined as:
        'psi_0', 'psi_1', or 'psi_2'
        Returns an 1d-array of shape ( n_lc, )
        '''
        # default factor 1.0 applies to all non-imposed (e.g. dead) loads
        #
        psi_list = [1] * len(self.lc_list)

        # overwrite ones with psi-values in case of imposed-loads:
        #
        for imposed_idx in self.imposed_idx_list:
            psi_list[imposed_idx] = getattr(self.lc_list[imposed_idx],
                                            psi_key)

        # dtype=float replaces the legacy alias 'float_' (removed in
        # NumPy 2.0); both denote float64
        return array(psi_list, dtype=float)

    # list containing names of the loading cases
    #
    lc_name_list = Property(List, depends_on='lc_list_')

    @cached_property
    def _get_lc_name_list(self):
        '''list of names of all loading cases
        '''
        return [lc.name for lc in self.lc_list]

    # if True, an identity matrix is prepended to 'combi_arr' so that the
    # characteristic value of each single loading case is also evaluated
    show_lc_characteristic = Bool(True)

    # combination array:
    #
    combi_arr = Property(Array, depends_on='lc_list_, combination_SLS')

    @cached_property
    def _get_combi_arr(self):
        '''array containing the security and combination factors
        corresponding to the specified loading cases.
        This yields an array of shape ( n_lcc, n_lc )

        Properties defined in the subclasses 'LCCTableULS', 'LCCTableSLS':
        - 'gamma_list' = list of security factors (gamma)
        - 'psi_lead' = combination factors (psi) of the leading imposed load
        - 'psi_non_lead' = combination factors (psi) of the non-leading imposed loads
        '''
        # printouts:
        #
        if self.ls == 'ULS':
            print '*** load case combinations for limit state ULS ***'
        else:
            print '*** load case combinations for limit state SLS ***'
            print '*** SLS combination used: % s ***' % (self.combination_SLS)

        #---------------------------------------------------------------
        # get permutations of safety factors ('gamma')
        #---------------------------------------------------------------
        #
        permutation_list = self._product(self.gamma_list)

        combi_arr = array(permutation_list)

        # check if imposed loads are defined
        # if not no further processing of 'combi_arr' is necessary:
        #
        if self.imposed_idx_list == []:

            # if option is set to 'True' the loading case combination table
            # is enlarged with an identity matrix in order to see the
            # characteristic values of each loading case.
            #
            if self.show_lc_characteristic:
                combi_arr = vstack([identity(self.n_lc), combi_arr])

            return combi_arr

        #---------------------------------------------------------------
        # get leading and non leading combination factors ('psi')
        #---------------------------------------------------------------
        # go through all possible cases of leading imposed loads
        # For the currently investigated imposed loading case the
        # psi value is taken from 'psi_leading_arr' for all other
        # imposed loads the psi value is taken from 'psi_non_lead_arr'

        # Properties are defined in the subclasses
        #
        psi_lead_arr = self.psi_lead_arr
        psi_non_lead_arr = self.psi_non_lead_arr

        # for SLS limit state case 'rare' all imposed loads are multiplied
        # with 'psi_2'. In this case no distinction between leading or
        # non-leading imposed loads needs to be performed.
        #
        if all(psi_lead_arr == psi_non_lead_arr):
            combi_arr_psi = combi_arr * psi_lead_arr

        # generate a list or arrays obtained by multiplication
        # with the psi-factors.
        # This yields a list of length = number of imposed-loads.
        #
        else:
            combi_arr_psi_list = []
            for imposed_idx in self.imposed_idx_list:
                # copy in order to preserve initial state of the array
                # and avoid in place modification
                psi_arr = copy(psi_non_lead_arr)
                psi_arr[imposed_idx] = psi_lead_arr[imposed_idx]
                combi_arr_lead_i = combi_arr[where(
                    combi_arr[:, imposed_idx] != 0)] * psi_arr
                combi_arr_psi_list.append(combi_arr_lead_i)

            combi_arr_psi_no_0 = vstack(combi_arr_psi_list)

            # missing cases without any dead load have to be added
            # get combinations with all!! imposed = 0
            #
            lcc_all_imposed_zero = where(
                (combi_arr[:, self.imposed_idx_list] == 0).all(axis=1))

            # add to combinations
            #
            combi_arr_psi = vstack(
                (combi_arr[lcc_all_imposed_zero], combi_arr_psi_no_0))

        #---------------------------------------------------------------
        # get exclusive loading cases ('exclusive_to')
        #---------------------------------------------------------------

        # get a list of lists containing the indices of the loading cases
        # that are defined exclusive to each other.
        # The list still contains duplicates, e.g. [1,2] and [2,1]
        #
        exclusive_list = []
        for i_lc, lc in enumerate(self.lc_list):

            # get related load case number
            #
            for exclusive_name in lc.exclusive_to:
                if exclusive_name in self.lc_name_list:
                    exclusive_idx = self.lc_name_list.index(exclusive_name)
                    exclusive_list.append([i_lc, exclusive_idx])

        # eliminate the duplicates in 'exclusive_list'
        #
        exclusive_list_unique = []
        for exclusive_list_entry in exclusive_list:
            if sorted(exclusive_list_entry) not in exclusive_list_unique:
                exclusive_list_unique.append(sorted(exclusive_list_entry))

        # delete the rows in combination array that contain
        # loading case combinations with imposed-loads that have been defined
        # as exclusive to each other.
        #
        combi_arr_psi_exclusive = combi_arr_psi
        #        print 'combi_arr_psi_exclusive', combi_arr_psi_exclusive
        for exclusive_list_entry in exclusive_list_unique:
            # check where maximum one value of the exclusive load cases is unequal to one
            #              LC1  LC2  LC3  (all LCs are defined as exclusive to each other)
            #
            # e.g.         1.5  0.9  0.8  (example of 'combi_arr_psi')
            #              1.5  0.0  0.0
            #              0.0  0.0  0.0  (combination with all imposed loads = 0 after multiplication wit psi and gamma)
            #              ...  ...  ...
            #
            # this would yield the following mask_arr (containing ones or zeros):
            # e.g.         1.0  1.0  1.0  --> sum = 3 --> true combi --> accepted combination
            #              1.0  0.0  0.0  --> sum = 1 --> false combi --> no accepted combination
            # e.g.         0.0  0.0  0.0  --> sum = 0 --> true combi --> accepted combination (only body-loads)
            #              ...  ...  ...
            #
            mask_arr = where(
                combi_arr_psi_exclusive[:, exclusive_list_entry] != 0, 1.0,
                0.0)
            #            print 'mask_arr', mask_arr
            true_combi = where(sum(mask_arr, axis=1) <= 1.0)
            #            print 'true_combi', true_combi
            combi_arr_psi_exclusive = combi_arr_psi_exclusive[true_combi]

        #---------------------------------------------------------------
        # create array with only unique load case combinations
        #---------------------------------------------------------------
        # If the psi values of an imposed-load are defined as zero this
        # may led to zero entries in 'combi_arr'. This would yield rows
        # in 'combi_arr' which are duplicates. Those rows are removed.

        # Add first row in 'combi_arr_psi_exclusive' to '_unique' array
        # This array must have shape (1, n_lc) in order to use 'axis'-option
        #
        combi_arr_psi_exclusive_unique = combi_arr_psi_exclusive[0][None, :]

        for row in combi_arr_psi_exclusive:
            # Check if all factors in one row are equal to the rows in 'unique' array.
            # If this is not the case for any row the combination is added to 'unique'.
            # Broadcasting is used for the bool evaluation:
            #
            if (row == combi_arr_psi_exclusive_unique).all(
                    axis=1.0).any() == False:
                combi_arr_psi_exclusive_unique = vstack(
                    (combi_arr_psi_exclusive_unique, row))

        # if option is set to 'True' the loading case combination table
        # is enlarged with an identity matrix in order to see the
        # characteristic values of each loading case.
        #
#        if self.show_lc_characteristic:
#            combi_arr_psi_exclusive_unique = vstack( [ identity( self.n_lc ), combi_arr_psi_exclusive_unique ] )

        return combi_arr_psi_exclusive_unique

    #-------------------------------
    # lcc_arr
    #-------------------------------

    lcc_arr = Property(Array, depends_on='lc_list_')

    @cached_property
    def _get_lcc_arr(self):
        '''Array of all loading case combinations following the
        loading cases defined in 'lc_list' and the combinations
        defined in 'combi_arr'.
        This yields an array of shape ( n_lcc, n_elems, n_sr )
        '''
        # NOTE: the consistency check is currently a stub (returns True)
        self._check_for_consistency()

        combi_arr = self.combi_arr

        # 'combi_arr' is of shape ( n_lcc, n_lc )
        # 'lc_arr' is of shape ( n_lc, n_elems, n_sr )
        #
        lc_arr = self.lc_arr

        # Broadcasting is used to generate array containing the multiplied lc's
        # yielding an array of shape ( n_lcc, n_lc, n_elems, n_sr )
        #

        lc_combi_arr = lc_arr[None, :, :, :] * combi_arr[:, :, None, None]

        # Then the sum over index 'n_lc' is evaluated yielding
        # an array of all loading case combinations.
        # This yields an array of shape ( n_lcc, n_elem, n_sr )
        #
        lcc_arr = sum(lc_combi_arr, axis=1)

        return lcc_arr

    #-------------------------------
    # lcc_lists
    #-------------------------------

    lcc_list = Property(List, depends_on='lc_list_')

    @cached_property
    def _get_lcc_list(self):
        '''list of loading case combinations (instances of LCC)
        '''
        combi_arr = self.combi_arr
        lcc_arr = self.lcc_arr
        sr_columns = self.sr_columns
        geo_columns = self.geo_columns

        n_lcc = self.n_lcc

        # return a dictionary of the stress resultants
        # this is used by LSTable to determine the stress
        # resultants of the current limit state
        #
        lcc_list = []
        for i_lcc in range(n_lcc):

            state_data_dict = {}
            for i_sr, name in enumerate(sr_columns):
                # column vector of shape ( n_elems, 1 ) per stress resultant
                state_data_dict[name] = lcc_arr[i_lcc, :, i_sr][:, None]

            geo_data_dict = self.geo_data_dict

            lcc = LCC(  # lcc_table = self,
                factors=combi_arr[i_lcc, :],
                lcc_id=i_lcc,
                ls_table=LSTable(geo_data=geo_data_dict,
                                 state_data=state_data_dict,
                                 ls=self.ls))

            # one trait per loading case carrying its factor in this
            # combination; NOTE(review): factors are floats but are stored
            # as Int traits -- confirm whether truncation is intended
            for idx, lc in enumerate(self.lc_list):
                lcc.add_trait(lc.name, Int(combi_arr[i_lcc, idx]))

            lcc_list.append(lcc)

        return lcc_list

    #-------------------------------
    # geo_arr
    #-------------------------------

    geo_data_dict = Property(Dict, depends_on='lc_list_')

    @cached_property
    def _get_geo_data_dict(self):
        '''Array of global coords derived from the first loading case defined in lc_list.
        Coords are identical for all LC's.
        '''
        return self.lc_list[0].geo_data_dict

    #-------------------------------
    # min/max-values
    #-------------------------------

    def get_min_max_state_data(self):
        ''' get the surrounding curve of all 'lcc' values
        '''
        lcc_arr = self.lcc_arr
        # NOTE(review): 'ndmin'/'ndmax' are not numpy names -- presumably
        # project-level aliases for min/max over the combination axis; verify
        min_arr = ndmin(lcc_arr, axis=0)
        max_arr = ndmax(lcc_arr, axis=0)
        return min_arr, max_arr

    #--------------------------------------
    # use for case 'max N*' nach ZiE
    # Fall 'maximale Normalkraft' nach ZiE
    #--------------------------------------
#    max_sr_grouped_dict = Property( Dict )
#    @cached_property
#    def _get_max_sr_grouped_dict( self ):
#        ''' get the surrounding curve for each stress resultant
#            shape lcc_array ( n_lcc, n_elems, n_sr )
#        '''
#        sr_columns = self.sr_columns
#        lcc_arr = self.lcc_arr
#        dict = {}
#        for i, sr in enumerate( self.sr_columns ):
#            idx_1 = argmax( abs( lcc_arr[:, :, i] ), axis = 0 )
#            idx_2 = arange( 0, idx_1.shape[0], 1 )
#            dict[sr] = lcc_arr[idx_1, idx_2, :]
#        return dict

#--------------------------------------
# use for case 'max eta' nach ZiE
# Fall max Ausnutzungsgrad nach ZiE
#--------------------------------------
    max_sr_grouped_dict = Property(Dict)

    @cached_property
    def _get_max_sr_grouped_dict(self):
        '''evaluate eta and prepare plot

        Returns a dict mapping each stress resultant name to the hinge
        forces of the governing (max eta) combination per hinge.
        '''
        lcc_arr = self.lcc_arr
        # ## N_s6cm_d results from 'V_op_d'*1.5
        # assume a distribution of stresses as for a simple
        # supported beam with cantilever corresponding
        # to the distance of the screws to each other and to the edge
        # of the TRC shell (33cm/17cm)
        #
        N_s6cm_d = lcc_arr[:, :, 2] * (17. + 33. + 1.) / 33.

        # ## V_s6cm_d results from 'N_ip_d'/2
        # assume an equal distribution (50% each) of the
        # normal forces to each screw
        #
        V_s6cm_d = lcc_arr[:, :, 0] * 0.56

        # resistance as characteristic value obtained from the
        # experiment and EN DIN 1990
        #
        N_ck = 28.3
        V_ck = 63.8

        gamma_s = 1.5

        # interaction utilization ratio per (combination, hinge)
        eta_N = N_s6cm_d / (N_ck / gamma_s)
        eta_V = abs(V_s6cm_d / (V_ck / gamma_s))
        eta_inter = eta_N + eta_V

        # index of the governing load case combination for every hinge
        idx_max_hinge = eta_inter.argmax(axis=0)

        # renamed from 'dict' (shadowed the builtin); the element index
        # array is loop-invariant and hoisted out of the loop
        grouped = {}
        elem_idx = arange(0, idx_max_hinge.shape[0], 1)
        for sr in self.sr_columns:
            grouped[sr] = lcc_arr[idx_max_hinge, elem_idx, :]
        return grouped

    def export_hf_max_grouped(self, filename):
        """exports the hinge forces as consistent pairs for the two case
        'max_eta' or 'max_N*'

        Saves one bar-plot image per stress resultant and cut (symmetry
        axis and r0/r1 edge) using 'filename' as the path prefix.
        NOTE(review): 'self.length_xy_quarter' and 'self.link_type' are
        not defined in this class -- presumably supplied by a subclass.
        """
        from matplotlib import pyplot
        sr_columns = self.sr_columns
        dict = self.max_sr_grouped_dict
        length_xy_quarter = self.length_xy_quarter

        # helper: render one bar plot and save it to 'filename'
        def save_bar_plot(x,
                          y,
                          filename='bla',
                          title='Title',
                          xlabel='xlabel',
                          ylabel='ylavel',
                          width=0.1,
                          xmin=0,
                          xmax=1000,
                          ymin=-1000,
                          ymax=1000,
                          figsize=[10, 5]):
            fig = pyplot.figure(facecolor="white", figsize=figsize)
            ax1 = fig.add_subplot(1, 1, 1)
            ax1.bar(x, y, width=width, align='center', color='green')
            ax1.set_xlim(xmin, xmax)
            ax1.set_ylim(ymin, ymax)
            ax1.set_xlabel(xlabel, fontsize=22)
            ax1.set_ylabel(ylabel, fontsize=22)
            # German plot titles for the ZiE report cases
            if title == 'N_ip max':
                title = 'Fall max $\eta$'
#                title = 'Fall max $N^{*}$'

            if title == 'V_ip max':
                title = 'max $V_{ip}$'
            if title == 'V_op max':
                title = 'Fall max $V^{*}$'
            ax1.set_title(title)
            fig.savefig(filename, orientation='portrait', bbox_inches='tight')
            pyplot.clf()

        X = array(self.geo_data_dict['X_hf'])
        Y = array(self.geo_data_dict['Y_hf'])

        # symmetric axes
        #
        idx_sym = where(abs(Y[:, 0] - 2.0 * length_xy_quarter) <= 0.0001)
        X_sym = X[idx_sym].reshape(-1)
        idx_r0_r1 = where(abs(X[:, 0] - 2.0 * length_xy_quarter) <= 0.0001)
        X_r0_r1 = Y[idx_r0_r1].reshape(-1)

        for sr in sr_columns:
            F_int = dict[sr]  # first row N_ip, second V_ip third V_op
            F_sym = F_int[idx_sym, :].reshape(-1, len(sr_columns))
            F_r0_r1 = F_int[idx_r0_r1, :].reshape(-1, len(sr_columns))

            save_bar_plot(X_sym,
                          F_sym[:, 0].reshape(-1),
                          xlabel='$X$ [m]',
                          ylabel='$N^{*}_{Ed}$ [kN]',
                          filename=filename + 'N_ip' + '_sym_' + sr + '_max',
                          title=sr + ' max',
                          xmin=0.0,
                          xmax=3.5 * length_xy_quarter,
                          figsize=[10, 5],
                          ymin=-30,
                          ymax=+30)
            if self.link_type == 'inc_V_ip':
                save_bar_plot(X_sym,
                              F_sym[:, 1].reshape(-1),
                              xlabel='$X$ [m]',
                              ylabel='$V_{ip}$ [kN]',
                              filename=filename + 'V_ip' + '_sym_' + sr +
                              '_max',
                              title=sr + ' max',
                              xmin=0.0,
                              xmax=3.5 * length_xy_quarter,
                              figsize=[10, 5],
                              ymin=-30,
                              ymax=+30)

            save_bar_plot(X_sym,
                          F_sym[:, 2].reshape(-1),
                          xlabel='$X$ [m]',
                          ylabel='$V^{*}_{Ed}$ [kN]',
                          filename=filename + 'V_op' + '_sym_' + sr + '_max',
                          title=sr + ' max',
                          xmin=0.0,
                          xmax=3.5 * length_xy_quarter,
                          figsize=[10, 5],
                          ymin=-10,
                          ymax=+10)

            # r0_r1
            #
            save_bar_plot(X_r0_r1,
                          F_r0_r1[:, 0].reshape(-1),
                          xlabel='$Y$ [m]',
                          ylabel='$N^{*}_{Ed}$ [kN]',
                          filename=filename + 'N_ip' + '_r0_r1_' + sr + '_max',
                          title=sr + ' max',
                          xmin=0.0,
                          xmax=2.0 * length_xy_quarter,
                          figsize=[5, 5],
                          ymin=-30,
                          ymax=+30)
            if self.link_type == 'inc_V_ip':
                save_bar_plot(X_r0_r1,
                              F_r0_r1[:, 1].reshape(-1),
                              xlabel='$Y$ [m]',
                              ylabel='$V_{ip}$ [kN]',
                              filename=filename + 'V_ip' + '_r0_r1_' + sr +
                              '_max',
                              title=sr + ' max',
                              xmin=0.0,
                              xmax=2.0 * length_xy_quarter,
                              figsize=[5, 5],
                              ymin=-30,
                              ymax=+30)
            save_bar_plot(X_r0_r1,
                          F_r0_r1[:, 2].reshape(-1),
                          xlabel='$Y$ [m]',
                          ylabel='$V^{*}_{Ed}$ [kN]',
                          filename=filename + 'V_op' + '_r0_r1_' + sr + '_max',
                          title=sr + ' max',
                          xmin=0.0,
                          xmax=2.0 * length_xy_quarter,
                          figsize=[5, 5],
                          ymin=-10,
                          ymax=+10)

    def plot_interaction_s6cm(self):
        """Plot the utilization interaction diagram of the screw connection.

        Takes the consistent pairs of tension (N) and shear (V) hinge
        forces for all loading case combinations from 'lcc_arr',
        converts them to design actions on the screws, and plots the
        utilization ratios eta_t vs. eta_V1 via 'interaction_plot'.
        """

        lcc_arr = self.lcc_arr

        # F_Edt results from 'V_op_d' scaled by the lever-arm ratio:
        # a stress distribution as for a simply supported beam with
        # cantilever is assumed, corresponding to the distance of the
        # screws to each other and to the edge of the TRC shell
        # (33 cm / 17 cm)
        #
        F_Edt = lcc_arr[:, :, 2] * (17. + 33. + 1.) / 33.

        # F_EdV1 results from 'N_ip_d': a (roughly equal) distribution
        # of the normal force to each screw is assumed
        #
        F_EdV1 = lcc_arr[:, :, 0] * 0.56

        # resistances as characteristic values obtained from the
        # experiment and EN DIN 1990
        #
        F_Rkt = 28.3
        F_RkV1 = 63.8

        # partial safety factor for the resistance
        #
        gamma_M = 1.5

        # utilization ratios (design action / design resistance)
        #
        eta_t = abs(F_Edt / (F_Rkt / gamma_M))
        eta_V1 = abs(F_EdV1 / (F_RkV1 / gamma_M))

        # single-argument print works identically under Python 2 and 3
        # (the original used Python-2-only print statements)
        print('eta_t.shape %s' % (eta_t.shape,))
        print('eta_V1.shape %s' % (eta_V1.shape,))

        self.interaction_plot(eta_t, eta_V1)


#        eta_inter = ( eta_N ) + ( eta_V )
#
#        idx_max_hinge = eta_inter.argmax( axis = 0 )
#        idx_hinge = arange( 0, len( idx_max_hinge ), 1 )
#        plot_eta_N = eta_N[idx_max_hinge, idx_hinge]
#        plot_eta_V = eta_V[idx_max_hinge, idx_hinge]
#        self.interaction_plot( plot_eta_N, plot_eta_V )

    def interaction_plot(self, eta_N, eta_V):
        """Scatter the utilization pairs (eta_V, eta_N) in a unit interaction diagram.

        Opens a matplotlib window showing the points within the
        [0, 1] x [0, 1] square and clears the figure afterwards.
        """
        from matplotlib import font_manager
        serif_font = font_manager.FontProperties(family='Times',
                                                 style='normal',
                                                 size=18,
                                                 weight='normal',
                                                 stretch='normal')

        from matplotlib import pyplot
        fig = pyplot.figure(facecolor="white", figsize=[10, 10])
        axes = fig.add_subplot(1, 1, 1)

        # axis labels: design action over design resistance
        axes.set_xlabel('$F_\mathrm{Ed,V1}/F_\mathrm{Rd,V1}$', fontsize=24)
        axes.set_ylabel('$F_\mathrm{Ed,t}/F_\mathrm{Rd,t}$', fontsize=24)

        # white circle markers for the utilization pairs
        axes.plot(eta_V, eta_N, 'wo', markersize=3)

        # enlarge and serif-style all tick labels on both axes
        for tick_label in list(axes.get_xticklabels()) + list(axes.get_yticklabels()):
            tick_label.set_fontsize(24)
            tick_label.set_family('serif')

        axes.set_xlim(0, 1.0)
        axes.set_ylim(0, 1.0)
        axes.legend()
        pyplot.show()
        pyplot.clf()

    # choose the linking type of the hinges: 'exc_V_ip' excludes the
    # in-plane shear dof (blocked), 'inc_V_ip' includes it
    #
    link_type = Enum('exc_V_ip', 'inc_V_ip')

    # length of one quarter of the shell [m]
    # (needed to scale the hinge-force bar plots correctly)
    #
    length_xy_quarter = 3.5  # m

    def export_hf_lc(self):
        """Export bar plots of the hinge forces for each loading case separately.

        For every loading case the in-plane normal force (N_ip), the
        in-plane shear force (V_ip) and the out-of-plane shear force
        (V_op) are plotted along the symmetry axis and along the r0-r1
        edge of the shell and saved to files derived from the loading
        case's 'plt_export' prefix.
        """

        from matplotlib import pyplot
        sr_columns = self.sr_columns
        length_xy_quarter = self.length_xy_quarter

        def save_bar_plot(x,
                          y,
                          filename='bla',
                          xlabel='xlabel',
                          ylabel='ylabel',
                          ymin=-10,
                          ymax=10,
                          width=0.1,
                          xmin=0,
                          xmax=1000,
                          figsize=[10, 5]):
            # helper: save a bar plot of the hinge forces 'y' at the
            # hinge positions 'x' to 'filename'
            fig = pyplot.figure(facecolor="white", figsize=figsize)
            ax1 = fig.add_subplot(1, 1, 1)
            ax1.bar(x, y, width=width, align='center', color='blue')
            ax1.set_xlim(xmin, xmax)
            ax1.set_ylim(ymin, ymax)
            ax1.set_xlabel(xlabel, fontsize=22)
            ax1.set_ylabel(ylabel, fontsize=22)
            fig.savefig(filename, orientation='portrait', bbox_inches='tight')
            pyplot.clf()

        X = array(self.geo_data_dict['X_hf'])
        Y = array(self.geo_data_dict['Y_hf'])

        # hinges along the symmetry axis
        #
        idx_sym = where(abs(Y[:, 0] - 2.0 * length_xy_quarter) <= 0.0001)
        X_sym = X[idx_sym].reshape(-1)

        # hinges along the r0-r1 edge
        #
        idx_r0_r1 = where(abs(X[:, 0] - 2.0 * length_xy_quarter) <= 0.0001)
        X_r0_r1 = Y[idx_r0_r1].reshape(-1)
        F_int = self.lc_arr

        for i, lc_name in enumerate(self.lc_name_list):
            filename = self.lc_list[i].plt_export

            # plot limits: round the maximum of each force component
            # up to the next integer, at least 1 kN
            #
            max_N_ip = max(int(ndmax(F_int[i, :, 0], axis=0)) + 1, 1)
            max_V_ip = max(int(ndmax(F_int[i, :, 1], axis=0)) + 1, 1)
            max_V_op = max(int(ndmax(F_int[i, :, 2], axis=0)) + 1, 1)

            F_int_lc = F_int[i, :, :]  # columns: N_ip, V_ip, V_op
            F_sym = F_int_lc[idx_sym, :].reshape(-1, len(sr_columns))
            F_r0_r1 = F_int_lc[idx_r0_r1, :].reshape(-1, len(sr_columns))

            # symmetry axis
            #
            save_bar_plot(X_sym,
                          F_sym[:, 0].reshape(-1),
                          xlabel='$X$ [m]',
                          ylabel='$N^{*}$ [kN]',
                          filename=filename + 'N_ip' + '_sym',
                          xmin=0.0,
                          xmax=3.5 * length_xy_quarter,
                          ymin=-max_N_ip,
                          ymax=max_N_ip,
                          figsize=[10, 5])

            save_bar_plot(X_sym,
                          F_sym[:, 1].reshape(-1),
                          xlabel='$X$ [m]',
                          ylabel='$V_{ip}$ [kN]',
                          filename=filename + 'V_ip' + '_sym',
                          xmin=0.0,
                          xmax=3.5 * length_xy_quarter,
                          ymin=-max_V_ip,
                          ymax=max_V_ip,
                          figsize=[10, 5])

            save_bar_plot(X_sym,
                          F_sym[:, 2].reshape(-1),
                          xlabel='$X$ [m]',
                          ylabel='$V^{*}$ [kN]',
                          filename=filename + 'V_op' + '_sym',
                          xmin=0.0,
                          xmax=3.5 * length_xy_quarter,
                          ymin=-max_V_op,
                          ymax=max_V_op,
                          figsize=[10, 5])

            # r0_r1
            #
            save_bar_plot(X_r0_r1,
                          F_r0_r1[:, 0].reshape(-1),
                          xlabel='$Y$ [m]',
                          ylabel='$N^{*}$ [kN]',
                          filename=filename + 'N_ip' + '_r0_r1',
                          xmin=0.0,
                          xmax=2.0 * length_xy_quarter,
                          ymin=-max_N_ip,
                          ymax=max_N_ip,
                          figsize=[5, 5])
            save_bar_plot(X_r0_r1,
                          F_r0_r1[:, 1].reshape(-1),
                          xlabel='$Y$ [m]',
                          ylabel='$V_{ip}$ [kN]',
                          filename=filename + 'V_ip' + '_r0_r1',
                          xmin=0.0,
                          xmax=2.0 * length_xy_quarter,
                          ymin=-max_V_ip,
                          ymax=max_V_ip,
                          figsize=[5, 5])
            save_bar_plot(X_r0_r1,
                          F_r0_r1[:, 2].reshape(-1),
                          xlabel='$Y$ [m]',
                          ylabel='$V^{*}$ [kN]',
                          filename=filename + 'V_op' + '_r0_r1',
                          xmin=0.0,
                          xmax=2.0 * length_xy_quarter,
                          ymin=-max_V_op,
                          ymax=max_V_op,
                          figsize=[5, 5])

    # ------------------------------------------------------------
    # View
    # ------------------------------------------------------------

    # TraitsUI view: table of loading case combinations on top, the
    # currently selected combination ('lcc') shown below
    #
    traits_view = View(VGroup(
        VSplit(
            Item('lcc_list', editor=lcc_list_editor, show_label=False),
            Item('lcc@', show_label=False),
        ), ),
                       resizable=True,
                       scrollable=True,
                       height=1.0,
                       width=1.0)
예제 #7
0
class MushRoofModel(IBVModel):
    '''Basis class for mushroof models (mr_quarter, mr_one, mr_one_free,
    mr_two, mr_four).

    Collects material models, finite element definitions, geometric
    dimensions, discretization parameters and response tracers shared
    by the concrete mushroof model subclasses.
    '''

    #===========================================================================
    # initial strain
    #===========================================================================

    # switch the temperature-induced initial strain on for the roof /
    # the column
    #
    initial_strain_roof = False
    initial_strain_col = False

    # coefficient of thermal expansion [1/K]
    alpha = Float(1.3e-5)
    # temperature change at the upper / lower surface [K]
    t_up = Float(-100.)
    t_lo = Float(-100.)

    def temperature_strain_z(self, X_pnt, x_pnt):
        '''Return the initial volumetric strain tensor (3x3 diagonal)
        caused by a temperature gradient over the thickness.

        The temperature change is interpolated linearly between 't_lo'
        and 't_up' using the local z-coordinate 'x_pnt[2]'
        (assumes x_pnt[2] in [0, 1] -- TODO confirm).
        '''
        alpha, t_up, t_lo = self.alpha, self.t_up, self.t_lo
        delta_t = t_lo + (t_up - t_lo) * x_pnt[2]
        epsilon_0 = alpha * delta_t
        # return the initial volumetric strain tensor with n_dims
        return diag([epsilon_0 for i in range(3)])

    #===========================================================================
    # material model
    #===========================================================================

    E_roof = Float(28700)  # [MN/m^2]
    E_column = Float(32800)  # [MN/m^2] E_cm for concrete C45/55
    E_plate = Float(210000)  # [MN/m^2] steel plate
    nu = Float(0.2)  # [-]

    # elastic material of the roof shell; includes the temperature
    # induced initial strain if 'initial_strain_roof' is set
    #
    mats_roof = Property(Instance(MATS3DElastic), depends_on='+input')

    @cached_property
    def _get_mats_roof(self):
        if self.initial_strain_roof:
            return MATS3DElastic(E=self.E_roof,
                                 nu=self.nu,
                                 initial_strain=self.temperature_strain_z)
        else:
            return MATS3DElastic(E=self.E_roof, nu=self.nu)

    # elastic material of the column; includes the temperature induced
    # initial strain if 'initial_strain_col' is set
    #
    mats_column = Property(Instance(MATS3DElastic), depends_on='+input')

    @cached_property
    def _get_mats_column(self):
        if self.initial_strain_col:
            return MATS3DElastic(E=self.E_column,
                                 nu=self.nu,
                                 initial_strain=self.temperature_strain_z)
        else:
            return MATS3DElastic(E=self.E_column, nu=self.nu)

    # elastic material of the steel plate between shell and column
    #
    mats_plate = Property(Instance(MATS3DElastic), depends_on='+input')

    @cached_property
    def _get_mats_plate(self):
        return MATS3DElastic(E=self.E_plate, nu=self.nu)

    #===========================================================================
    # finite elements
    #===========================================================================

    # linear 8-node hexahedron for the roof
    #
    fe_linear_roof = Property(Instance(FETSEval, transient=True),
                              depends_on='+input')

    def _get_fe_linear_roof(self):
        return FETS3D8H(mats_eval=self.mats_roof)

    # quadratic serendipity (20-node) hexahedron for the roof
    #
    fe_quad_serendipity_roof = Property(Instance(FETSEval, transient=True),
                                        depends_on='+input')

    def _get_fe_quad_serendipity_roof(self):
        return FETS3D8H20U(mats_eval=self.mats_roof)

    # quadratic serendipity (20-node) hexahedron for the column
    #
    fe_quad_serendipity_column = Property(Instance(FETSEval, transient=True),
                                          depends_on='+input')

    def _get_fe_quad_serendipity_column(self):
        return FETS3D8H20U(mats_eval=self.mats_column)

    # linear 8-node hexahedron for the steel plate
    #
    fe_linear_plate = Property(Instance(FETSEval, transient=True),
                               depends_on='+input')

    def _get_fe_linear_plate(self):
        return FETS3D8H(mats_eval=self.mats_plate)

    # quadratic serendipity (20-node) hexahedron for the steel plate
    #
    fe_quad_serendipity_plate = Property(Instance(FETSEval, transient=True),
                                         depends_on='+input')

    def _get_fe_quad_serendipity_plate(self):
        return FETS3D8H20U(mats_eval=self.mats_plate)

    #===========================================================================
    # geometric dimensions
    #===========================================================================

    # dimensions of one quarter of the shell structure [m]
    #
    length_xy_quarter = Float(3.5, input=True)  # , ps_levels = [4, 16, 5] )
    length_z = Float(0.927, input=True)  # , ps_levels = [1, 2, 1] )

    # shell thickness is used only by option 'const_reinf_layer_elem'
    #
    t_shell = Float(0.06, input=True)

    # dimensions of the steel plate
    #
    t_plate = Float(0.03, unit='m', input=True)

    # dimensions of the column
    # NOTE: width of column at top is used also by option 'shift_elem' of 'HPShell'
    #
    width_top_col = Float(0.45, unit='m', input=True)
    width_bottom_col = Float(0.35, unit='m', input=True)

    # column length (from lowest point of the shell to the upper edge of the foundation)
    #
    h_col = Float(3.60, unit='m', input=True)

    #    r_pipe = Float( 0.1, unit = 'm', input = True )

    # scaling factors for parametric studies [-]
    #
    scalefactor_delta_h = Float(1.00, input=True)  # [-]
    scalefactor_length_xy = Float(1.00, input=True)  # [-]

    #-----------------------------------------------------------------
    # specify the relation of the total structure (in the 'mushroof'-model)
    # with respect to a quarter of one shell defined in 'HPShell'
    #-----------------------------------------------------------------

    # choose model and discretization for roof shell
    #
    mushroof_part = Enum('one', 'quarter', 'four', input=True)

    def _mushroof_part_default(self):
        return 'one'

    # origin of the model coordinate system
    # @todo: add comment! this is no input!
    #
    X0 = List([0, 0, 0], input=True)

    #-----------------------------------------------------------------
    # discretization
    #-----------------------------------------------------------------

    # shell discretization:
    #
    n_elems_xy_quarter = Int(10, input=True)
    n_elems_z = Int(1, input=True)

    # mapping of 'mushroof_part' to the number of elements in xy-direction
    # @todo: remove "+input", use mapped traits instead!
    #
    n_elems_xy_dict = Property(Dict, depends_on='+ps_levels, +input')

    def _get_n_elems_xy_dict(self):
        return {
            'quarter': self.n_elems_xy_quarter,
            'one': self.n_elems_xy_quarter * 2,
            # @todo: include "scale_size" parameter used by HPShell!
            'detail': self.n_elems_xy_quarter * 2
        }

    n_elems_xy = Property(Int, depends_on='+ps_levels, +input')

    def _get_n_elems_xy(self):
        # @todo: mushroof_part == "four" is not supported! (see HPShell)
        return int(self.n_elems_xy_dict[self.mushroof_part])

    # column discretization:
    #
    n_elems_col_xy = Int(2, input=True)  # , ps_levels = [5, 20, 3 ] )

    #-----------------------------------------------------------------
    # option 'shift_elem'
    #-----------------------------------------------------------------

    # optional parameters
    #
    shift_elems = Bool(True, input=True)

    # set fixed points for shell discretization
    # NOTE: method is overwritten in subclass, e.g. 'MRtwo'
    #
    shift_array = Array
    #    def _get_shift_array( self ):
    #        return array( [[self.width_top_col / 2 ** 0.5,
    #                        self.width_top_col / 2 ** 0.5,
    #                        self.n_elems_col_xy / 2], ] )

    #-----------------------------------------------------------------
    # option 'const_reinf_layer_elem'
    #-----------------------------------------------------------------

    # element thickness defined (e.g to 3 cm) for bottom and top layer of the roof
    # needed to simulate reinforced area of the shell for non-linear simulation
    # @todo: remove "+input"
    #
    const_reinf_layer_elem = Bool(False, input=True)

    #-----------------------------------------------------------------
    # geometric transformations
    #-----------------------------------------------------------------

    # @todo: remove! geo_transforme is performed in the subclass 'MRTwo'

    #===========================================================================
    # evaluation
    #===========================================================================

    # time line of the (quasi-static) simulation
    #
    tline = Instance(TLine)

    def _tline_default(self):
        return TLine(min=0.0, step=1.0, max=1.0)

    # tracer for the maximum principal stress field
    #
    max_princ_stress = Instance(RTraceDomainListField)

    def _max_princ_stress_default(self):
        return RTraceDomainListField(
            name='max principal stress',
            idx=0,
            var='max_principle_sig',
            warp=True,
            #                                      position = 'int_pnts',
            record_on='update',
        )

    # dof index used by the 'f_dof' response tracer
    #
    dof = Int(1)
    f_dof = Property(Instance(RTraceGraph), depends_on='+ps_levels, +input')

    def _get_f_dof(self):
        return RTraceGraph(name='Fi,right over u_right (iteration)',
                           var_y='F_int',
                           idx_y=self.dof,
                           var_x='U_k',
                           idx_x=self.dof + 2,
                           record_on='update')

    # tracer for the applied stress field
    #
    sig_app = Property(Instance(RTraceDomainListField),
                       depends_on='+ps_levels, +input')

    @cached_property
    def _get_sig_app(self):
        return RTraceDomainListField(
            name='sig_app',
            # position='int_pnts',
            var='sig_app',
            record_on='update',
        )

    # tracer for the applied strain field
    #
    eps_app = Property(Instance(RTraceDomainListField),
                       depends_on='+ps_levels, +input')

    @cached_property
    def _get_eps_app(self):
        return RTraceDomainListField(
            name='eps_app',
            #                                      position = 'int_pnts',
            var='eps_app',
            record_on='update',
        )

    # tracer for the displacement field (with warp)
    #
    u = Property(Instance(RTraceDomainListField),
                 depends_on='+ps_levels, +input')

    @cached_property
    def _get_u(self):
        return RTraceDomainListField(
            name='displacement',
            var='u',
            warp=True,
            record_on='update',
        )

    # tracer for the damage tensor field
    #
    damage = Property(Instance(RTraceDomainListField),
                      depends_on='+ps_levels, +input')

    @cached_property
    def _get_damage(self):
        return RTraceDomainListField(name='damage',
                                     var='omega_mtx',
                                     idx=0,
                                     warp=False,
                                     record_on='update')

    # tracer for the principal damage field
    #
    phi_pdc = Property(Instance(RTraceDomainListField),
                       depends_on='+ps_levels, +input')

    @cached_property
    def _get_phi_pdc(self):
        return RTraceDomainListField(
            name='principal damage',
            #                                      position = 'int_pnts',
            var='phi_pdc',
            record_on='update',
        )

    # tracer for the maximum damage component
    #
    max_omega_i = Property(Instance(RTraceDomainListField),
                           depends_on='+ps_levels, +input')

    @cached_property
    def _get_max_omega_i(self):
        return RTraceDomainListField(
            name='max_omega_i',
            #                                      position = 'int_pnts',
            var='max_omega_i',
            record_on='update',
        )

    # tracer for the fracture energy field
    #
    fracture_energy = Property(Instance(RTraceDomainListField),
                               depends_on='+ps_levels, +input')

    @cached_property
    def _get_fracture_energy(self):
        return RTraceDomainListField(
            name='Fracture energy',
            #                                      position = 'int_pnts',
            var='fracture_energy',
            record_on='update',
        )

    # sorting force of slice by number of dofs internal forces
    #
    def sort_by_dofs(self, dofs, unsorted):
        """Sort slice values into global dof order.

        For dof slices, the slice for one edge results in a
        multi-dimensional array of form (a, b, c)
        where    a = elements connected to slice
                 b = nodes of each element
                 c = degree of freedom at each node
        however the degrees of freedom are no longer ordered globally,
        but on the local element basis. Therefore sum([:,:-1,:])
        for the hinge force is not possible; 'sort_by_dofs' solves the
        problem by sorting the values with respect to the dof number,
        which is ordered globally.
        """
        if size(unsorted) != size(dofs):
            # FIX: the original raised a plain string, which is a
            # TypeError at runtime instead of the intended message
            raise ValueError("--- array must have same size ---")
        # order of dofs (argsort along the node axis)
        order = argsort(dofs, axis=1)[:, :, 0]
        # 'sorted_arr' avoids shadowing the builtin 'sorted'
        sorted_arr = zeros_like(unsorted)
        for i, elem in enumerate(order):
            sorted_arr[i] = unsorted[i][elem]
        return sorted_arr
예제 #8
0
class PDistrib(HasTraits):
    '''Configurable probability distribution wrapping scipy.stats.

    Provides a selectable continuous distribution ('distr_choice'),
    access to its statistical moments, and discretized pdf/cdf arrays
    over a quantile-bounded range for visualization.
    '''

    # NOTE(review): this assigns the interface class as a plain class
    # attribute; presumably 'implements(IPDistrib)' (a call) was
    # intended -- confirm before changing
    implements = IPDistrib

    # puts all chosen continuous distributions defined in the
    # scipy.stats.distributions module as a list of strings
    # into the Enum trait
    distr_choice = Enum(distr_enum)
    distr_dict = Dict(distr_dict)

    # instantiated distribution object corresponding to 'distr_choice'
    distr_type = Property(Instance(Distribution), depends_on='distr_choice')

    @cached_property
    def _get_distr_type(self):
        return Distribution(self.distr_dict[self.distr_choice])

    # change monitor - accumulate the changes in a single event trait
    changed = Event

    @on_trait_change('distr_choice, distr_type.changed, quantile, n_segments')
    def _set_changed(self):
        self.changed = True

    # ------------------------------------------------------------------------
    # Methods setting the statistical moments
    # ------------------------------------------------------------------------
    mean = Property

    def _get_mean(self):
        return self.distr_type.mean

    def _set_mean(self, value):
        self.distr_type.mean = value

    variance = Property

    def _get_variance(self):
        # FIX: originally delegated to '.mean' (copy-paste error for a
        # property named 'variance'); assumes the Distribution adapter
        # exposes a 'variance' attribute analogous to 'mean' -- TODO confirm
        return self.distr_type.variance

    def _set_variance(self, value):
        self.distr_type.variance = value

    # ------------------------------------------------------------------------
    # Methods preparing visualization
    # ------------------------------------------------------------------------

    # tail probability cut off on each side when discretizing the domain
    quantile = Float(0.00001, auto_set=False, enter_set=True)
    range = Property(Tuple(Float), depends_on='distr_type.changed, quantile')

    @cached_property
    def _get_range(self):
        # domain bounds via the inverse cdf at the quantile tails
        return (self.distr_type.distr.ppf(self.quantile),
                self.distr_type.distr.ppf(1 - self.quantile))

    # number of discretization segments over 'range'
    n_segments = Int(500, auto_set=False, enter_set=True)

    dx = Property(Float, depends_on='distr_type.changed, quantile, n_segments')

    @cached_property
    def _get_dx(self):
        range_length = self.range[1] - self.range[0]
        return range_length / self.n_segments

    # -------------------------------------------------------------------------
    # Discretization of the distribution domain
    # -------------------------------------------------------------------------
    x_array = Property(Array('float_'),
                       depends_on='distr_type.changed,'
                       'quantile, n_segments')

    @cached_property
    def _get_x_array(self):
        '''Get the intrinsic discretization of the distribution
        respecting its bounds.
        '''
        return linspace(self.range[0], self.range[1], self.n_segments + 1)

    # ===========================================================================
    # Access function to the scipy distribution
    # ===========================================================================
    def pdf(self, x):
        return self.distr_type.distr.pdf(x)

    def cdf(self, x):
        return self.distr_type.distr.cdf(x)

    def rvs(self, n):
        return self.distr_type.distr.rvs(n)

    def ppf(self, e):
        return self.distr_type.distr.ppf(e)

    # ===========================================================================
    # PDF - permanent array
    # ===========================================================================

    pdf_array = Property(Array('float_'),
                         depends_on='distr_type.changed,'
                         'quantile, n_segments')

    @cached_property
    def _get_pdf_array(self):
        '''Get pdf values in intrinsic positions'''
        return self.distr_type.distr.pdf(self.x_array)

    def get_pdf_array(self, x_array):
        '''Get pdf values in externally specified positions'''
        return self.distr_type.distr.pdf(x_array)

    # ===========================================================================
    # CDF permanent array
    # ===========================================================================
    cdf_array = Property(Array('float_'),
                         depends_on='distr_type.changed,'
                         'quantile, n_segments')

    @cached_property
    def _get_cdf_array(self):
        '''Get cdf values in intrinsic positions'''
        return self.distr_type.distr.cdf(self.x_array)

    def get_cdf_array(self, x_array):
        '''Get cdf values in externally specified positions'''
        return self.distr_type.distr.cdf(x_array)

    # -------------------------------------------------------------------------
    # Randomization
    # -------------------------------------------------------------------------
    def get_rvs_array(self, n_samples):
        return self.distr_type.distr.rvs(n_samples)
예제 #9
0
class SimBT3PT(IBVModel):
    '''Simulation: Bending Test Three Point

    Quarter model of a three-point bending test exploiting the double
    symmetry of the setup. The load is introduced either through an
    elastomer pad (elstmr_flag) or directly as a line displacement at
    the specimen center; a tapered steel support can optionally be
    modeled (supprt_flag). Lengths are given in [m], forces in [MN].
    '''

    # event fired on any change of a '+input' trait or of the unit cell;
    # cached properties depending on 'input_change' are invalidated by it
    input_change = Event

    @on_trait_change('+input,ccs_unit_cell.input_change')
    def _set_input_change(self):
        self.input_change = True

    implements(ISimModel)

    #-----------------
    # discretization:
    #-----------------
    #
    # discretization in x-direction (longitudinal):
    shape_x = Int(10, input=True, ps_levels=(4, 12, 3))

    # discretization in x-direction (longitudinal):
    mid_shape_x = Int(1, input=True, ps_levels=(1, 4, 1))

    # discretization in y-direction (width):
    shape_y = Int(3, input=True, ps_levels=(1, 4, 2))

    # discretization in z-direction:
    shape_z = Int(2, input=True, ps_levels=(1, 3, 3))

    # support discretization in xy-direction:
    # NOTE: chose '2' for 2x2-grid or '4' for 4x4-grid
    #
    shape_supprt_x = Int(2, input=True, ps_levels=(2, 4, 2))

    #-----------------
    # geometry:
    #-----------------
    #
    # edge length of the bending specimen (beam) (entire length without symmetry)
    length = Float(1.15, input=True)
    elstmr_length = Float(0.05, input=True)
    elstmr_thickness = Float(0.005, input=True, enter_set=True, auto_set=False)
    width = Float(0.20, input=True)
    thickness = Float(0.06, input=True, ps_levels=(0.054, 0.060, 0.066))

    # thickness of the tappered support
    #
    thickness_supprt = Float(0.02, input=True)
    #-----------------
    # derived geometric parameters
    #-----------------
    #
    # half the length of the elastomer (load introduction
    # with included symmetry)
    #
    sym_specmn_length = Property

    def _get_sym_specmn_length(self):
        return self.length / 2.

    # half the length of the elastomer (load introduction
    # with included symmetry
    #
    sym_elstmr_length = Property

    def _get_sym_elstmr_length(self):
        return (self.elstmr_length) / 2.

    # half the specimen width
    #
    sym_width = Property

    def _get_sym_width(self):
        return self.width / 2.

    #-----------------
    # specify the refinement of the idealization
    #-----------------

    # specify weather elastomer is to be modeled for load introduction
    #
    elstmr_flag = True

    # specify weather steel support is to be modeled
    #
    supprt_flag = False

    #-----------------
    # 'geo_transform'
    #-----------------

    # geometry transformation for four sided tappered support with reduced stiffness towards the edges (if modeled)
    #
    geo_supprt = Property(Instance(GeoSUPPRT), depends_on='+ps_levels, +input')

    @cached_property
    def _get_geo_supprt(self):
        # element length within the range of the slab without the area of the
        # load introduction plate
        #
        elem_size = self.sym_specmn_length / self.shape_x
        width_supprt = self.shape_supprt_x * elem_size
        print 'width_supprt = ', width_supprt
        return GeoSUPPRT(thickness_supprt=self.thickness_supprt,
                         width_supprt=width_supprt,
                         xyoffset=0.,
                         zoffset=-self.thickness_supprt)

    #----------------------------------------------------------------------------------
    # mats_eval
    #----------------------------------------------------------------------------------

    # age of the specimen at the time of testing
    # determines the E-modulus and nu based on the time dependent function stored
    # in 'CCSUniteCell' if params are not explicitly set
    #
    age = Int(28, input=True)

    # composite E-modulus / Poisson's ratio
    # NOTE 1: value are retrieved from the database
    #         same values are used for calibration (as taken from tensile test) and for slab test
    # NOTE 2: alternatively the same phi-function could be used independent of age. This would assume
    #         an afine/proportional damage evolution for different ages, i.e. a different E would be
    #         used in the simulation of the slab test and within the calibration procedure.

    # time stepping params
    #
    tstep = Float(0.05, auto_set=False, enter_set=True, input=True)
    tmax = Float(1.0, auto_set=False, enter_set=True, input=True)
    tolerance = Float(0.001, auto_set=False, enter_set=True, input=True)

    # specify type of 'linalg.norm'
    # default value 'None' sets norm to 2-norm,
    # i.e "norm = sqrt(sum(x_i**2))
    #
    # set 'ord=np.inf' to switch norm to
    # "norm = max(abs(x_i))"
    #
    ord = Enum(None, np.inf)

    # number of microplanes
    # NOTE(review): float default (30.) in an Int trait; the material model
    # below hardcodes n_mp=30 anyway -- confirm whether this trait is used
    #
    n_mp = Int(30., auto_set=False, enter_set=True, input=True)

    #----------------------------------------------------------------------------------
    # mats_eval
    #----------------------------------------------------------------------------------

    # @todo: for mats_eval the information of the unit cell should be used
    # in order to use the same number of microplanes and model version etc...
    #
    specmn_mats = Property(Instance(MATS2D5MicroplaneDamage),
                           depends_on='input_change')

    @cached_property
    def _get_specmn_mats(self):
        return MATS2D5MicroplaneDamage(
            E=self.E_c,
            #                                 E=self.E_m,  # relevant for compressive behavior/used for calibration of phi_fn
            nu=self.nu,
            # corresponding to settings in "MatsCalib"
            n_mp=30,
            symmetrization='sum-type',
            model_version='compliance',
            phi_fn=self.phi_fn)

    E_elstmr = Float(3000., input=True)

    elstmr_mats = Property(Instance(MATS3DElastic), depends_on='input_change')

    @cached_property
    def _get_elstmr_mats(self):
        E_elstmr = self.E_elstmr
        print 'effective elastomer E_modulus', E_elstmr
        return MATS3DElastic(E=E_elstmr, nu=0.4)

    # E-modulus and nu of steel support
    E_s = Float(210000., auto_set=False, enter_set=True, input=True)
    nu_s = Float(0.20, auto_set=False, enter_set=True, input=True)

    supprt_mats = Property(Instance(MATS3DElastic), depends_on='input_change')

    @cached_property
    def _get_supprt_mats(self):
        return MATS3DElastic(E=self.E_s, nu=self.nu_s)

    #-----------------
    # fets:
    #-----------------

    # specify element shrink factor in plot of fe-model
    #
    vtk_r = Float(0.95)

    # use quadratic serendipity elements
    # NOTE: 2D5 elements behave linear elastic in out of plane direction!
    #
    specmn_fets = Property(Instance(FETSEval), depends_on='input_change')

    @cached_property
    def _get_specmn_fets(self):
        fets = FETS2D58H20U(mats_eval=self.specmn_mats)
        fets.vtk_r *= self.vtk_r
        return fets

    # use quadratic serendipity elements
    #
    elstmr_fets = Property(Instance(FETSEval), depends_on='input_change')

    @cached_property
    def _get_elstmr_fets(self):
        fets = FETS2D58H20U(mats_eval=self.elstmr_mats)
        fets.vtk_r *= self.vtk_r
        return fets

    supprt_fets = Property(Instance(FETSEval), depends_on='input_change')

    @cached_property
    def _get_supprt_fets(self):
        # linear-elastic behavior quadratic serendipity elements
        fets = FETS3D8H20U(mats_eval=self.supprt_mats)
        fets.vtk_r *= self.vtk_r
        return fets

    #-----------------
    # fe_grid:
    #-----------------

    fe_domain = Property(depends_on='+ps_levels, +input')

    @cached_property
    def _get_fe_domain(self):
        return FEDomain()

    mid_specmn_fe_level = Property(depends_on='+ps_levels, +input')

    @cached_property
    def _get_mid_specmn_fe_level(self):
        return FERefinementGrid(name='middle specimen patch',
                                fets_eval=self.specmn_fets,
                                domain=self.fe_domain)

    mid_specmn_fe_grid = Property(Instance(FEGrid),
                                  depends_on='+ps_levels, +input')

    @cached_property
    def _get_mid_specmn_fe_grid(self):
        # only a quarter of the beam is simulated due to symmetry:
        fe_grid = FEGrid(coord_min=(0., 0., 0.),
                         coord_max=(self.sym_elstmr_length, self.width / 2,
                                    self.thickness),
                         shape=(self.mid_shape_x, self.shape_y, self.shape_z),
                         level=self.mid_specmn_fe_level,
                         fets_eval=self.specmn_fets)
        return fe_grid

    specmn_fe_level = Property(depends_on='+ps_levels, +input')

    @cached_property
    def _get_specmn_fe_level(self):
        return FERefinementGrid(name='specimen patch',
                                fets_eval=self.specmn_fets,
                                domain=self.fe_domain)

    specmn_fe_grid = Property(Instance(FEGrid),
                              depends_on='+ps_levels, +input')

    @cached_property
    def _get_specmn_fe_grid(self):
        # only a quarter of the beam is simulated due to symmetry:
        fe_grid = FEGrid(coord_min=(self.sym_elstmr_length, 0., 0.),
                         coord_max=(self.sym_specmn_length, self.sym_width,
                                    self.thickness),
                         shape=(self.shape_x, self.shape_y, self.shape_z),
                         level=self.specmn_fe_level,
                         fets_eval=self.specmn_fets)
        return fe_grid

#    if elstmr_flag:

    elstmr_fe_level = Property(depends_on='+ps_levels, +input')

    @cached_property
    def _get_elstmr_fe_level(self):
        return FERefinementGrid(name='elastomer patch',
                                fets_eval=self.elstmr_fets,
                                domain=self.fe_domain)

    elstmr_fe_grid = Property(Instance(FEGrid),
                              depends_on='+ps_levels, +input')

    @cached_property
    def _get_elstmr_fe_grid(self):
        x_max = self.sym_elstmr_length
        y_max = self.width / 2.
        z_max = self.thickness + self.elstmr_thickness
        fe_grid = FEGrid(coord_min=(0, 0, self.thickness),
                         coord_max=(x_max, y_max, z_max),
                         level=self.elstmr_fe_level,
                         shape=(self.mid_shape_x, self.shape_y, 1),
                         fets_eval=self.elstmr_fets)
        return fe_grid

    # NOTE: class-level conditional -- the support traits only exist when the
    # class attribute 'supprt_flag' is True at class-definition time
    if supprt_flag:
        supprt_fe_level = Property(depends_on='+ps_levels, +input')

        @cached_property
        def _get_supprt_fe_level(self):
            return FERefinementGrid(name='elastomer patch',
                                    fets_eval=self.supprt_fets,
                                    domain=self.fe_domain)

        supprt_fe_grid = Property(Instance(FEGrid),
                                  depends_on='+ps_levels, +input')

        @cached_property
        def _get_supprt_fe_grid(self):
            return FEGrid(
                coord_min=(0, 0, 0),
                coord_max=(1, 1, 1),
                level=self.supprt_fe_level,
                # use shape (2,2) in order to place support in the center of the steel support
                # corresponding to 4 elements of the slab mesh
                #
                shape=(self.shape_supprt_x, self.shape_supprt_x, 1),
                geo_transform=self.geo_supprt,
                fets_eval=self.supprt_fets)

    #===========================================================================
    # Boundary conditions
    #===========================================================================

    # w_max = center displacement:
    #
    w_max = Float(-0.010, input=True)  # [m]

    bc_list = Property(depends_on='+ps_levels, +input')

    @cached_property
    def _get_bc_list(self):
        specmn = self.specmn_fe_grid
        mid_specmn = self.mid_specmn_fe_grid
        if self.elstmr_flag:
            elstmr = self.elstmr_fe_grid

        #--------------------------------------------------------------
        # boundary conditions for the symmetry
        #--------------------------------------------------------------
        # the x-axis corresponds to the axis of symmetry along the longitudinal axis of the beam:
        bc_symplane_xz = BCSlice(var='u',
                                 value=0.,
                                 dims=[1],
                                 slice=specmn[:, 0, :, :, 0, :])

        bc_mid_symplane_xz = BCSlice(var='u',
                                     value=0.,
                                     dims=[1],
                                     slice=mid_specmn[:, 0, :, :, 0, :])

        bc_mid_symplane_yz = BCSlice(var='u',
                                     value=0.,
                                     dims=[0],
                                     slice=mid_specmn[0, :, :, 0, :, :])

        if self.elstmr_flag:
            bc_el_symplane_xz = BCSlice(var='u',
                                        value=0.,
                                        dims=[1],
                                        slice=elstmr[:, 0, :, :, 0, :])
            bc_el_symplane_yz = BCSlice(var='u',
                                        value=0.,
                                        dims=[0],
                                        slice=elstmr[0, :, :, 0, :, :])

        #--------------------------------------------------------------
        # boundary conditions for the support
        #--------------------------------------------------------------
        bc_support_0y0 = BCSlice(var='u',
                                 value=0.,
                                 dims=[2],
                                 slice=specmn[-1, :, 0, -1, :, 0])

        #--------------------------------------------------------------
        # link domains
        #--------------------------------------------------------------
        link_msp_sp = BCDofGroup(var='u',
                                 value=0.,
                                 dims=[0, 1, 2],
                                 get_dof_method=mid_specmn.get_right_dofs,
                                 get_link_dof_method=specmn.get_left_dofs,
                                 link_coeffs=[1.])

        #        link_msp_sp_xyz = BCSlice(var = 'u', value = 0., dims = [0, 1, 2],
        #                             slice = specmn[0, :, :, 0, :, :],
        #                             link_slice = mid_specmn[-1 :, :, -1, :, :],
        #                             link_dims = [0, 1, 2],
        #                             link_coeffs = [1.])

        #        link_msp_sp_y = BCSlice(var = 'u', value = 0., dims = [1],
        #                             slice = specmn[0, :, :, 0, :, :],
        #                             link_slice = mid_specmn[-1 :, :, -1, :, :],
        #                             link_dims = [1],
        #                             link_coeffs = [1.])
        #
        #        link_msp_sp_z = BCSlice(var = 'u', value = 0., dims = [2],
        #                             slice = specmn[0, :, :, 0, :, :],
        #                             link_slice = mid_specmn[-1 :, :, -1, :, :],
        #                             link_dims = [2],
        #                             link_coeffs = [1.])

        #        link_msp_sp = [ link_msp_sp_xyz ]

        if self.elstmr_flag:
            link_el_sp = BCDofGroup(
                var='u',
                value=0.,
                dims=[2],
                get_dof_method=elstmr.get_back_dofs,
                get_link_dof_method=mid_specmn.get_front_dofs,
                link_coeffs=[1.])

        #--------------------------------------------------------------
        # loading
        #--------------------------------------------------------------
        w_max = self.w_max
        #        f_max = -0.010 / 0.10 # [MN/m]

        if self.elstmr_flag:
            # apply displacement at all top node (surface load)
            #
            bc_w = BCSlice(var='u',
                           value=w_max,
                           dims=[2],
                           slice=elstmr[:, :, -1, :, :, -1])
            # apply a single force at the center of the beam (system origin at top of the elastomer
            # and us elastomer-domain as load distribution plate with a high stiffness (e.g. steel)
#            F_max = -0.010 #[MN]
#            bc_F = BCSlice(var = 'f', value = F_max, dims = [2],
#                           slice = elstmr[0, 0, -1, 0, 0, -1])
        else:
            # center top nodes (line load)
            #
            bc_w = BCSlice(var='u',
                           value=w_max,
                           dims=[2],
                           slice=mid_specmn[0, :, -1, 0, :, -1])
            # NOTE: the entire symmetry axis (yz)-plane is moved downwards
            # in order to avoid large indentations at the top nodes
            #


#          bc_center_w = BCSlice( var = 'w', value = w_max, dims = [2], slice = mid_specmn[0, :, :, 0, :, :] )

        bc_list = [
            bc_symplane_xz, bc_mid_symplane_xz, bc_mid_symplane_yz,
            bc_support_0y0, link_msp_sp, bc_w
        ]

        if self.elstmr_flag:
            bc_list_elstmr = [link_el_sp, bc_el_symplane_xz, bc_el_symplane_yz]
            bc_list += bc_list_elstmr

        return bc_list

    tloop = Property(depends_on='input_change')

    @cached_property
    def _get_tloop(self):

        #--------------------------------------------------------------
        # ts
        #--------------------------------------------------------------

        specmn = self.specmn_fe_grid
        mid_specmn = self.mid_specmn_fe_grid

        if self.supprt_flag:
            supprt = self.supprt_fe_grid
            supprt_dofs_z = np.unique(supprt[self.shape_supprt_x / 2,
                                             self.shape_y / 2, 0, 0, 0,
                                             0].dofs[:, :, 2].flatten())
        else:
            supprt_dofs_z = np.unique(specmn[-1, :, 0, -1, :,
                                             0].dofs[:, :, 2].flatten())
        print 'supprt_dofs_z (unique)', supprt_dofs_z

        if self.elstmr_flag:
            elstmr = self.elstmr_fe_grid
            load_dofs_z = np.unique(elstmr[:, :, -1, :, :,
                                           -1].dofs[:, :, 2].flatten())
        else:
            # center_top_line_dofs
            #
            load_dofs_z = np.unique(mid_specmn[0, :, -1, 0, :,
                                               -1].dofs[:, :, 2].flatten())
        print 'load_dofs_z used for integration of force: ', load_dofs_z

        # center top z-dof
        #
        center_top_dof_z = mid_specmn[0, 0, 0, 0, 0, 0].dofs[0, 0, 2]
        print 'center_top_dof used for displacement tracing: ', center_top_dof_z

        # force-displacement-diagram (LOAD)
        # (surface load on the elstmr or line load at specimen center)
        #
        self.f_w_diagram_center = RTraceGraph(
            name='displacement (center) - reaction 2',
            var_x='U_k',
            idx_x=center_top_dof_z,
            var_y='F_int',
            idx_y_arr=load_dofs_z,
            record_on='update',
            transform_x='-x * 1000',  # %g * x' % ( fabs( w_max ),),
            # due to symmetry the total force sums up from four parts of the beam (2 symmetry axis):
            #
            transform_y='-4000. * y')

        # force-displacement-diagram (SUPPORT)
        # (dofs at support line of the specmn used to integrate the force)
        #
        self.f_w_diagram_supprt = RTraceGraph(
            name='displacement (center) - reaction 2',
            var_x='U_k',
            idx_x=center_top_dof_z,
            var_y='F_int',
            idx_y_arr=supprt_dofs_z,
            record_on='update',
            transform_x='-x * 1000',  # %g * x' % ( fabs( w_max ),),
            # due to symmetry the total force sums up from four parts of the beam (2 symmetry axis):
            #
            transform_y='4000. * y')

        ts = TS(
            sdomain=self.fe_domain,
            bcond_list=self.bc_list,
            rtrace_list=[
                self.f_w_diagram_center,
                self.f_w_diagram_supprt,
                RTraceDomainListField(name='Displacement',
                                      var='u',
                                      idx=0,
                                      warp=True),
                #                             RTraceDomainListField(name = 'Stress' ,
                #                                            var = 'sig_app', idx = 0, warp = True,
                #                                            record_on = 'update'),
                #                             RTraceDomainListField(name = 'Strain' ,
                #                                        var = 'eps_app', idx = 0, warp = True,
                #                                        record_on = 'update'),
                #                             RTraceDomainListField(name = 'Damage' ,
                #                                        var = 'omega_mtx', idx = 0, warp = True,
                #                                        record_on = 'update'),
                RTraceDomainListField(name='max_omega_i',
                                      warp=True,
                                      var='max_omega_i',
                                      idx=0,
                                      record_on='update'),
                #                             RTraceDomainListField(name = 'IStress' ,
                #                                            position = 'int_pnts',
                #                                            var = 'sig_app', idx = 0,
                #                                            record_on = 'update'),
                #                             RTraceDomainListField(name = 'IStrain' ,
                #                                            position = 'int_pnts',
                #                                            var = 'eps_app', idx = 0,
                #                                            record_on = 'update')
            ])

        # Add the time-loop control
        tloop = TLoop(tstepper=ts,
                      KMAX=50,
                      tolerance=self.tolerance,
                      RESETMAX=0,
                      tline=TLine(min=0.0, step=self.tstep, max=self.tmax))

        return tloop

    #--------------------------------------------------------------
    # prepare pstudy
    #--------------------------------------------------------------

    def peval(self):
        '''
        Evaluate the model and return the array of results specified
        in the method get_sim_outputs.
        '''
        U = self.tloop.eval()

        self.f_w_diagram_center.refresh()
        F_max = max(self.f_w_diagram_center.trace.ydata)

        # NOTE(review): 'center_top_dofs' is not defined within this class --
        # presumably provided by a base class; verify before running a pstudy.
        u_center_top_z = U[self.center_top_dofs][0, 0, 2]
        return array([u_center_top_z, F_max], dtype='float_')

    def get_sim_outputs(self):
        '''
        Specifies the results and their order returned by the model
        evaluation.
        '''
        return [
            SimOut(name='u_center_top_z', unit='m'),
            SimOut(name='F_max', unit='kN')
        ]
예제 #10
0
class MFnLineArray(HasTraits):

    # Public Traits
    xdata = Array(float, value = [0.0, 1.0])
    def _xdata_default(self):
        '''
        convenience default - when xdata not defined created automatically as
        an array of integers with the same shape as ydata
        '''
        return np.arange(self.ydata.shape[0])

    ydata = Array(float, value = [0.0, 1.0])

    extrapolate = Enum('constant', 'exception', 'diff', 'zero')

    # alternative vectorized interpolation using scipy.interpolate   
    def get_values(self, x, k = 1):
        '''
        vectorized interpolation, k is the spline order, default set to 1 (linear)
        '''
        tck = ip.splrep(self.xdata, self.ydata, s = 0, k = k)

        x = np.array([x]).flatten()

        if self.extrapolate == 'diff':
            values = ip.splev(x, tck, der = 0)
        elif self.extrapolate == 'exception':
            if x.all() < self.xdata[0] and x.all() > self.xdata[-1]:
                values = values = ip.splev(x, tck, der = 0)
            else:
                raise ValueError('value(s) outside interpolation range')

        elif self.extrapolate == 'constant':
            mask = x >= self.xdata[0]
            mask *= x <= self.xdata[-1]
            l_mask = x < self.xdata[0]
            r_mask = x > self.xdata[-1]
            extrapol_left = np.repeat(ip.splev(self.xdata[0], tck, der = 0), len(x)) * l_mask
            extrapol_right = np.repeat(ip.splev(self.xdata[-1], tck, der = 0), len(x)) * r_mask
            extrapol = extrapol_left + extrapol_right
            values = ip.splev(x, tck, der = 0) * mask + extrapol
        elif self.extrapolate == 'zero':
            mask = x >= self.xdata[0]
            mask *= x <= self.xdata[-1]
            mask_extrapol = mask == False
            extrapol = np.zeros(len(x)) * mask_extrapol
            values = ip.splev(x, tck, der = 0) * mask + extrapol
        return values

    def get_value(self, x):
        x2idx = self.xdata.searchsorted(x)
        if x2idx == len(self.xdata):
            x2idx -= 1
        x1idx = x2idx - 1
        x1 = self.xdata[ x1idx ]
        x2 = self.xdata[ x2idx ]
        dx = x2 - x1
        y1 = self.ydata[ x1idx ]
        y2 = self.ydata[ x2idx ]
        dy = y2 - y1
        y = y1 + dy / dx * (x - x1)
        return y

    data_changed = Event

    def get_diffs(self, x, k = 1, der = 1):
        '''
        vectorized interpolation, der is the nth derivative, default set to 1;
        k is the spline order of the data inetrpolation, default set to 1 (linear)
        '''
        xdata = np.sort(np.hstack((self.xdata, x)))
        idx = np.argwhere(np.diff(xdata) == 0).flatten()
        xdata = np.delete(xdata, idx)
        tck = ip.splrep(xdata, self.get_values(xdata, k = k), s = 0, k = k)
        return ip.splev(x, tck, der = der)

    def get_diff(self, x):
        x2idx = self.xdata.searchsorted(x)
        if x2idx == len(self.xdata):
            x2idx -= 1
        x1idx = x2idx - 1
        x1 = self.xdata[ x1idx ]
        x2 = self.xdata[ x2idx ]
        dx = x2 - x1
        y1 = self.ydata[ x1idx ]
        y2 = self.ydata[ x2idx ]
        dy = y2 - y1
        return dy / dx

    dump_button = ToolbarButton('Print data',
                                style = 'toolbar')
    @on_trait_change('dump_button')
    def print_data(self, event = None):
        print 'x = ', repr(self.xdata)
        print 'y = ', repr(self.ydata)

    integ_value = Property(Float(), depends_on = 'ydata')
    @cached_property
    def _get_integ_value(self):
        _xdata = self.xdata
        _ydata = self.ydata
        # integral under the stress strain curve
        E_t = np.trapz(_ydata, _xdata)
        # area of the stored elastic energy  
        U_t = 0.0
        if len(_xdata) != 0:
            U_t = 0.5 * _ydata[-1] * _xdata[-1]
        return E_t - U_t

    def clear(self):
        self.xdata = np.array([])
        self.ydata = np.array([])

    def plot(self, axes, *args, **kw):
        self.mpl_plot(axes, *args, **kw)

    def mpl_plot(self, axes, *args, **kw):
        '''plot within matplotlib window'''
        axes.plot(self.xdata, self.ydata, *args, **kw)
예제 #11
0
class SPIRRIDLAB(HasTraits):
    '''Class used for elementary parametric studies of spirrid.

    Bundles a SPIRRID integrator instance ``s`` and provides batch studies
    comparing sampling strategies (TGrid / PGrid / MCS / LHS) and code
    generators (C / Cython / NumPy), plotting response curves, errors and
    execution times.  Figures are written into ``fig_output_dir``.

    NOTE(review): Python 2 code (print statements, ``except E, msg``);
    relies on a module-level pylab/pyplot alias ``p`` -- confirm before
    porting.
    '''

    # the spirrid integrator under study
    s = Instance(SPIRRID)

    evars = DelegatesTo('s')

    tvars = DelegatesTo('s')

    # response function delegated from the spirrid instance
    q = DelegatesTo('s')

    # reference (analytical) solution used for error evaluation; empty
    # array disables the error studies
    exact_arr = Array('float')

    # resolution used for all saved figures
    dpi = Int

    plot_mode = Enum(['subplots', 'figures'])

    fig_output_dir = Directory('fig')

    @on_trait_change('fig_output_dir')
    def _check_dir(self):
        '''Create the figure output directory if it does not exist yet.'''
        if os.access(self.fig_output_dir, os.F_OK) == False:
            os.mkdir(self.fig_output_dir)

    # control variable array (first evar of the spirrid instance)
    e_arr = Property

    def _get_e_arr(self):
        return self.s.evar_lst[0]

    hostname = Property

    def _get_hostname(self):
        return gethostname()

    qname = Str

    def get_qname(self):
        '''Return the explicit qname trait or derive it from q's name.'''
        if self.qname == '':
            if isinstance(self.q, types.FunctionType):
                qname = self.q.__name__
            else:  # if isinstance(self.q, types.ClassType):
                qname = self.q.__class__.__name__
        else:
            qname = self.qname
        return qname

    show_output = False

    save_output = True

    # indices of the two random variables displayed in the sampling scatter
    plot_sampling_idx = Array(value=[0, 1], dtype=int)

    # NOTE(review): default p=p binds the module-level plotting alias at
    # class-definition time -- confirm p is the pylab module.
    def _plot_sampling(self,
                       i,
                       n_col,
                       sampling_type,
                       p=p,
                       ylim=None,
                       xlim=None):
        '''Construct a spirrid object, run the calculation
        plot the mu_q / e curve and save it in the subdirectory.
        '''

        s = self.s
        s.sampling_type = sampling_type

        plot_idx = self.plot_sampling_idx

        qname = self.get_qname()

        # get n randomly selected realizations from the sampling
        theta = s.sampling.get_samples(500)
        tvar_x = s.tvar_lst[plot_idx[0]]
        tvar_y = s.tvar_lst[plot_idx[1]]
        min_x, max_x, d_x = s.sampling.get_theta_range(tvar_x)
        min_y, max_y, d_y = s.sampling.get_theta_range(tvar_y)
        # for vectorized execution add a dimension for control variable
        theta_args = [t[:, np.newaxis] for t in theta]
        q_arr = s.q(self.e_arr[None, :], *theta_args)
        if self.plot_mode == 'figures':
            f = p.figure(figsize=(7., 6.))
            f.subplots_adjust(left=0.15, right=0.97, bottom=0.15, top=0.92)
        if self.plot_mode == 'subplots':
            if i == 0:
                f = p.figure()
            # NOTE(review): string subplot spec '2<cols><idx>' only works
            # for single-digit column counts / indices
            p.subplot('2%i%i' % (n_col, (i + 1)))

        p.plot(theta[plot_idx[0]], theta[plot_idx[1]], 'o', color='grey')
        p.xlabel('$\lambda$')
        p.ylabel('$\\xi$')

        p.xlim(min_x, max_x)
        p.ylim(min_y, max_y)
        p.title(s.sampling_type)

        if self.save_output:
            fname = os.path.join(
                self.fig_output_dir,
                qname + '_sampling_' + s.sampling_type + '.png')
            p.savefig(fname, dpi=self.dpi)

        if self.plot_mode == 'figures':
            f = p.figure(figsize=(7., 5))
            f.subplots_adjust(left=0.15, right=0.97, bottom=0.18, top=0.91)
        elif self.plot_mode == 'subplots':
            p.subplot('2%i%i' % (n_col, (i + 5)))

        # grey cloud of individual realizations q(e; theta)
        p.plot(self.e_arr, q_arr.T, color='grey')

        if len(self.exact_arr) > 0:
            p.plot(self.e_arr,
                   self.exact_arr,
                   label='exact solution',
                   color='black',
                   linestyle='--',
                   linewidth=2)

        # numerically obtained result
        p.plot(self.e_arr,
               s.mu_q_arr,
               label='numerical integration',
               linewidth=3,
               color='black')
        p.title(s.sampling_type)
        p.xlabel('$\\varepsilon$ [-]')
        p.ylabel(r'$q(\varepsilon;\, \lambda,\, \xi)$')
        if ylim:
            p.ylim(0.0, ylim)
        if xlim:
            p.xlim(0.0, xlim)
        p.xticks(position=(0, -.015))
        p.legend(loc=2)

        if self.save_output:
            fname = os.path.join(self.fig_output_dir,
                                 qname + '_' + s.sampling_type + '.png')
            p.savefig(fname, dpi=self.dpi)

    sampling_structure_btn = Button(label='compare sampling structure')

    @on_trait_change('sampling_structure_btn')
    def sampling_structure(self, **kw):
        '''Plot the response into the file in the fig subdirectory.
        '''
        if self.plot_mode == 'subplots':
            p.rcdefaults()
        else:
            # large fonts for stand-alone figures
            fsize = 28
            p.rcParams['font.size'] = fsize
            rc('legend', fontsize=fsize - 8)
            rc('axes', titlesize=fsize)
            rc('axes', labelsize=fsize + 6)
            rc('xtick', labelsize=fsize - 8)
            rc('ytick', labelsize=fsize - 8)
            rc('xtick.major', pad=8)

        s_lst = ['TGrid', 'PGrid', 'MCS', 'LHS']

        for i, s in enumerate(s_lst):
            self._plot_sampling(i, len(s_lst), sampling_type=s, **kw)

        if self.show_output:
            p.show()

    # n_int values scanned by the sampling-efficiency study
    n_int_range = Array()

    #===========================================================================
    # Output file names for sampling efficiency
    #===========================================================================
    fname_sampling_efficiency_time_nint = Property

    def _get_fname_sampling_efficiency_time_nint(self):
        return self.get_qname(
        ) + '_' + '%s' % self.hostname + '_time_nint' + '.png'

    fname_sampling_efficiency_error_nint = Property

    def _get_fname_sampling_efficiency_error_nint(self):
        return self.get_qname(
        ) + '_' + '%s' % self.hostname + '_error_nint' + '.png'

    fname_sampling_efficiency_error_time = Property

    def _get_fname_sampling_efficiency_error_time(self):
        return self.get_qname(
        ) + '_' + '%s' % self.hostname + '_error_time' + '.png'

    fnames_sampling_efficiency = Property

    def _get_fnames_sampling_efficiency(self):
        # error plots only make sense when an exact solution is available
        fnames = [self.fname_sampling_efficiency_time_nint]
        if len(self.exact_arr) > 0:
            fnames += [
                self.fname_sampling_efficiency_error_nint,
                self.fname_sampling_efficiency_error_time
            ]
        return fnames

    #===========================================================================
    # Run sampling efficiency studies
    #===========================================================================

    sampling_types = Array(value=['TGrid', 'PGrid', 'MCS', 'LHS'], dtype=str)

    sampling_efficiency_btn = Button(label='compare sampling efficiency')

    @on_trait_change('sampling_efficiency_btn')
    def sampling_efficiency(self):
        '''
        Run the code for all available sampling types.
        Plot the results.
        '''
        def run_estimation(n_int, sampling_type):
            # instantiate spirrid with samplingetization methods
            print 'running', sampling_type, n_int
            self.s.set(n_int=n_int, sampling_type=sampling_type)
            n_sim = self.s.sampling.n_sim
            exec_time = np.sum(self.s.exec_time)
            return self.s.mu_q_arr, exec_time, n_sim

        # vectorize the estimation to accept arrays
        # (second argument is the otypes list: object, float, int)
        run_estimation_vct = np.vectorize(run_estimation, [object, float, int])

        #===========================================================================
        # Generate the inspected domain of input parameters using broadcasting
        #===========================================================================

        # warm-up call to exclude one-off setup costs from the timings
        run_estimation_vct([5], ['PGrid'])

        sampling_types = self.sampling_types
        sampling_colors = np.array(
            ['grey', 'black', 'grey', 'black'],
            dtype=str)  # 'blue', 'green', 'red', 'magenta'
        sampling_linestyle = np.array(['--', '--', '-', '-'], dtype=str)

        # run the estimation on all combinations of n_int and sampling_types
        mu_q, exec_time, n_sim_range = run_estimation_vct(
            self.n_int_range[:, None], sampling_types[None, :])

        p.rcdefaults()
        f = p.figure(figsize=(12, 6))
        f.subplots_adjust(left=0.06, right=0.94)

        #===========================================================================
        # Plot the results
        #===========================================================================
        p.subplot(1, 2, 1)
        p.title('response for %d $n_\mathrm{sim}$' % n_sim_range[-1, -1])
        for i, (sampling, color, linestyle) in enumerate(
                zip(sampling_types, sampling_colors, sampling_linestyle)):
            p.plot(self.e_arr,
                   mu_q[-1, i],
                   color=color,
                   label=sampling,
                   linestyle=linestyle)

        if len(self.exact_arr) > 0:
            p.plot(self.e_arr,
                   self.exact_arr,
                   color='black',
                   label='Exact solution')

        p.legend(loc=1)
        p.xlabel('e', fontsize=18)
        p.ylabel('q', fontsize=18)

        # @todo: get n_sim - x-axis
        p.subplot(1, 2, 2)
        for i, (sampling, color, linestyle) in enumerate(
                zip(sampling_types, sampling_colors, sampling_linestyle)):
            p.loglog(n_sim_range[:, i],
                     exec_time[:, i],
                     color=color,
                     label=sampling,
                     linestyle=linestyle)

        p.legend(loc=2)
        p.xlabel('$n_\mathrm{sim}$', fontsize=18)
        p.ylabel('$t$ [s]', fontsize=18)

        if self.save_output:
            basename = self.fname_sampling_efficiency_time_nint
            fname = os.path.join(self.fig_output_dir, basename)
            p.savefig(fname, dpi=self.dpi)

        #===========================================================================
        # Evaluate the error
        #===========================================================================

        if len(self.exact_arr) > 0:
            er = ErrorEval(exact_arr=self.exact_arr)

            def eval_error(mu_q, error_measure):
                return error_measure(mu_q)

            eval_error_vct = np.vectorize(eval_error)

            # error_table axes: [n_int, sampling_type, error_measure]
            error_measures = np.array(
                [er.eval_error_max, er.eval_error_energy, er.eval_error_rms])
            error_table = eval_error_vct(mu_q[:, :, None],
                                         error_measures[None, None, :])

            f = p.figure(figsize=(14, 6))
            f.subplots_adjust(left=0.07, right=0.97, wspace=0.26)

            p.subplot(1, 2, 1)
            p.title('max rel. lack of fit')
            for i, (sampling, color, linestyle) in enumerate(
                    zip(sampling_types, sampling_colors, sampling_linestyle)):
                p.loglog(n_sim_range[:, i],
                         error_table[:, i, 0],
                         color=color,
                         label=sampling,
                         linestyle=linestyle)

            #p.ylim( 0, 10 )
            p.legend()
            p.xlabel('$n_\mathrm{sim}$', fontsize=18)
            p.ylabel('$\mathrm{e}_{\max}$ [-]', fontsize=18)

            p.subplot(1, 2, 2)
            p.title('rel. root mean square error')
            for i, (sampling, color, linestyle) in enumerate(
                    zip(sampling_types, sampling_colors, sampling_linestyle)):
                p.loglog(n_sim_range[:, i],
                         error_table[:, i, 2],
                         color=color,
                         label=sampling,
                         linestyle=linestyle)
            p.legend()
            p.xlabel('$n_{\mathrm{sim}}$', fontsize=18)
            p.ylabel('$\mathrm{e}_{\mathrm{rms}}$ [-]', fontsize=18)

            if self.save_output:
                basename = self.fname_sampling_efficiency_error_nint
                fname = os.path.join(self.fig_output_dir, basename)
                p.savefig(fname, dpi=self.dpi)

            f = p.figure(figsize=(14, 6))
            f.subplots_adjust(left=0.07, right=0.97, wspace=0.26)

            p.subplot(1, 2, 1)
            p.title('rel. max lack of fit')
            for i, (sampling, color, linestyle) in enumerate(
                    zip(sampling_types, sampling_colors, sampling_linestyle)):
                p.loglog(exec_time[:, i],
                         error_table[:, i, 0],
                         color=color,
                         label=sampling,
                         linestyle=linestyle)
            p.legend()
            p.xlabel('time [s]', fontsize=18)
            p.ylabel('$\mathrm{e}_{\max}$ [-]', fontsize=18)

            p.subplot(1, 2, 2)
            p.title('rel. root mean square error')
            for i, (sampling, color, linestyle) in enumerate(
                    zip(sampling_types, sampling_colors, sampling_linestyle)):
                p.loglog(exec_time[:, i],
                         error_table[:, i, 2],
                         color=color,
                         label=sampling,
                         linestyle=linestyle)
            p.legend()
            p.xlabel('time [s]', fontsize=18)
            p.ylabel('$\mathrm{e}_{\mathrm{rms}}$ [-]', fontsize=18)

            if self.save_output:
                basename = self.fname_sampling_efficiency_error_time
                fname = os.path.join(self.fig_output_dir, basename)
                p.savefig(fname, dpi=self.dpi)

        if self.show_output:
            p.show()

    #===========================================================================
    # Efficiency of numpy versus C code
    #===========================================================================
    run_lst_detailed_config = Property(List)

    def _get_run_lst_detailed_config(self):
        '''Assemble (codegen, options, plot style, legend) run tuples.'''
        run_lst = []
        if hasattr(self.q, 'c_code'):
            run_lst += [
                #                ('c',
                #                 {'cached_dG'         : True,
                #                  'compiled_eps_loop' : True },
                #                  'go-',
                #                  '$\mathsf{C}_{\\varepsilon} \{\, \mathsf{C}_{\\theta} \{\,  q(\\varepsilon,\\theta) \cdot G[\\theta] \,\}\,\} $ - %4.2f sec',
                #                  ),
                #                ('c',
                #                 {'cached_dG'         : True,
                #                  'compiled_eps_loop' : False },
                #                 'r-2',
                #                 '$\mathsf{Python} _{\\varepsilon} \{\, \mathsf{C}_{\\theta} \{\,  q(\\varepsilon,\\theta) \cdot G[\\theta] \,\}\,\} $ - %4.2f sec'
                #                 ),
                #                ('c',
                #                 {'cached_dG'         : False,
                #                  'compiled_eps_loop' : True },
                #                 'r-2',
                #                 '$\mathsf{C}_{\\varepsilon} \{\, \mathsf{C}_{\\theta} \{\, q(\\varepsilon,\\theta) \cdot g[\\theta_1] \cdot \ldots \cdot g[\\theta_m] \,\}\,\} $ - %4.2f sec'
                #                 ),
                (
                    'c',
                    {
                        'cached_dG': False,
                        'compiled_eps_loop': False
                    },
                    'bx-',
                    '$\mathsf{Python} _{\\varepsilon} \{\, \mathsf{C}_{\\theta}  \{\, q(\\varepsilon,\\theta) \cdot g[\\theta_1] \cdot \ldots \cdot g[\\theta_m] \,\} \,\} $ - %4.2f sec',
                )
            ]
        if hasattr(self.q, 'cython_code'):
            # NOTE(review): all cython variants are currently commented out,
            # so this branch contributes nothing
            run_lst += [
                #                ('cython',
                #                 {'cached_dG'         : True,
                #                  'compiled_eps_loop' : True },
                #                  'go-',
                #                  '$\mathsf{Cython}_{\\varepsilon} \{\, \mathsf{Cython}_{\\theta} \{\,  q(\\varepsilon,\\theta) \cdot G[\\theta] \,\}\,\} $ - %4.2f sec',
                #                  ),
                #                ('cython',
                #                 {'cached_dG'         : True,
                #                  'compiled_eps_loop' : False },
                #                 'r-2',
                #                 '$\mathsf{Python} _{\\varepsilon} \{\, \mathsf{Cython}_{\\theta} \{\,  q(\\varepsilon,\\theta) \cdot G[\\theta] \,\}\,\} $ - %4.2f sec'
                #                 ),
                #                ('cython',
                #                 {'cached_dG'         : False,
                #                  'compiled_eps_loop' : True },
                #                 'r-2',
                #                 '$\mathsf{Cython}_{\\varepsilon} \{\, \mathsf{Cython}_{\\theta} \{\, q(\\varepsilon,\\theta) \cdot g[\\theta_1] \cdot \ldots \cdot g[\\theta_m] \,\}\,\} $ - %4.2f sec'
                #                 ),
                #                ('cython',
                #                 {'cached_dG'         : False,
                #                  'compiled_eps_loop' : False },
                #                  'bx-',
                #                  '$\mathsf{Python} _{\\varepsilon} \{\, \mathsf{Cython}_{\\theta}  \{\, q(\\varepsilon,\\theta) \cdot g[\\theta_1] \cdot \ldots \cdot g[\\theta_m] \,\} \,\} $ - %4.2f sec',
                #                 )
            ]
        if hasattr(self.q, '__call__'):
            run_lst += [
                #                ('numpy',
                #                 {},
                #                 'y--',
                #                 '$\mathsf{Python}_{\\varepsilon} \{\,  \mathsf{Numpy}_{\\theta} \{\,  q(\\varepsilon,\\theta) \cdot G[\\theta] \,\} \,\} $ - %4.2f sec'
                #                 )
            ]
        return run_lst

    # number of recalculations to get new time.
    n_recalc = Int(2)

    def codegen_efficiency(self):
        '''Compare execution times of the configured code generators.

        Returns the list of figure basenames that were written.
        '''
        # define a tables with the run configurations to start in a batch

        basenames = []

        qname = self.get_qname()

        s = self.s

        legend = []
        legend_lst = []
        time_lst = []
        p.figure()

        for idx, run in enumerate(self.run_lst_detailed_config):
            code, run_options, plot_options, legend_string = run
            s.codegen_type = code
            s.codegen.set(**run_options)
            print 'run', idx, run_options

            for i in range(self.n_recalc):
                s.recalc = True  # automatically proagated within spirrid
                print 'execution time', s.exec_time

            p.plot(s.evar_lst[0], s.mu_q_arr, plot_options)

            # @todo: this is not portable!!
            #legend.append(legend_string % s.exec_time)
            #legend_lst.append(legend_string[:-12])
            time_lst.append(s.exec_time)

        p.xlabel('strain [-]')
        p.ylabel('stress')
        #p.legend(legend, loc = 2)
        p.title(qname)

        if self.save_output:
            print 'saving codegen_efficiency'
            basename = qname + '_' + 'codegen_efficiency' + '.png'
            basenames.append(basename)
            fname = os.path.join(self.fig_output_dir, basename)
            p.savefig(fname, dpi=self.dpi)

        self._bar_plot(legend_lst, time_lst)
        p.title('%s' % s.sampling_type)
        if self.save_output:
            basename = qname + '_' + 'codegen_efficiency_%s' % s.sampling_type + '.png'
            basenames.append(basename)
            fname = os.path.join(self.fig_output_dir, basename)
            p.savefig(fname, dpi=self.dpi)

        if self.show_output:
            p.show()

        return basenames

    #===========================================================================
    # Efficiency of numpy versus C code
    #===========================================================================
    run_lst_language_config = Property(List)

    def _get_run_lst_language_config(self):
        '''One run tuple per available backend (c, cython, numpy).'''
        run_lst = []
        if hasattr(self.q, 'c_code'):
            run_lst += [(
                'c',
                {
                    'cached_dG': False,
                    'compiled_eps_loop': False
                },
                'bx-',
                '$\mathsf{Python} _{\\varepsilon} \{\, \mathsf{C}_{\\theta}  \{\, q(\\varepsilon,\\theta) \cdot g[\\theta_1] \cdot \ldots \cdot g[\\theta_m] \,\} \,\} $ - %4.2f sec',
            )]
        if hasattr(self.q, 'cython_code'):
            run_lst += [(
                'cython',
                {
                    'cached_dG': False,
                    'compiled_eps_loop': False
                },
                'bx-',
                '$\mathsf{Python} _{\\varepsilon} \{\, \mathsf{Cython}_{\\theta}  \{\, q(\\varepsilon,\\theta) \cdot g[\\theta_1] \cdot \ldots \cdot g[\\theta_m] \,\} \,\} $ - %4.2f sec',
            )]
        if hasattr(self.q, '__call__'):
            run_lst += [(
                'numpy', {}, 'y--',
                '$\mathsf{Python}_{\\varepsilon} \{\,  \mathsf{Numpy}_{\\theta} \{\,  q(\\varepsilon,\\theta) \cdot G[\\theta] \,\} \,\} $ - %4.2f sec'
            )]
        return run_lst

    # pass extra optimization flags to the C compiler in the 'c' runs
    extra_compiler_args = Bool(True)
    # sampling types and matching n_int values for the language study
    le_sampling_lst = List(['LHS', 'PGrid'])
    le_n_int_lst = List([440, 5000])

    #===========================================================================
    # Output file names for language efficiency
    #===========================================================================
    fnames_language_efficiency = Property

    def _get_fnames_language_efficiency(self):
        # NOTE(review): uses the raw qname trait (may be empty) instead of
        # get_qname() as the other fname properties do -- confirm intent
        return [
            '%s_codegen_efficiency_%s_extra_%s.png' %
            (self.qname, self.hostname, extra)
            for extra in [self.extra_compiler_args]
        ]

    language_efficiency_btn = Button(label='compare language efficiency')

    @on_trait_change('language_efficiency_btn')
    def codegen_language_efficiency(self):
        '''Benchmark the c/cython/numpy backends over the configured
        sampling methods and produce a multi-bar timing plot.'''
        # define a tables with the run configurations to start in a batch

        home_dir = expanduser("~")

        #        pyxbld_dir = os.path.join(home_dir, '.pyxbld')
        #        if os.path.exists(pyxbld_dir):
        #            shutil.rmtree(pyxbld_dir)

        # wipe the weave cache so compilation time is measured from scratch
        python_compiled_dir = os.path.join(home_dir, '.python27_compiled')
        if os.path.exists(python_compiled_dir):
            shutil.rmtree(python_compiled_dir)

        for extra, fname in zip([self.extra_compiler_args],
                                self.fnames_language_efficiency):
            print 'extra compilation args:', extra
            legend_lst = []
            error_lst = []
            n_sim_lst = []
            exec_times_sampling = []

            meth_lst = zip(self.le_sampling_lst, self.le_n_int_lst)
            for item, n_int in meth_lst:
                print 'sampling method:', item
                s = self.s
                s.exec_time  # eliminate first load time delay (first column)
                s.n_int = n_int
                s.sampling_type = item
                exec_times_lang = []

                for idx, run in enumerate(self.run_lst_language_config):
                    code, run_options, plot_options, legend_string = run

                    #os.system('rm -fr ~/.python27_compiled')

                    s.codegen_type = code
                    s.codegen.set(**run_options)
                    if s.codegen_type == 'c':
                        s.codegen.set(**dict(use_extra=extra))
                    print 'run', idx, run_options

                    exec_times_run = []
                    for i in range(self.n_recalc):
                        s.recalc = True  # automatically propagated
                        exec_times_run.append(s.exec_time)
                        print 'execution time', s.exec_time

                    legend_lst.append(legend_string[:-12])
                    if s.codegen_type == 'c':
                        # load weave.inline time from tmp file and fix values in time_arr
                        #@todo - does not work on windows
                        import tempfile
                        tdir = tempfile.gettempdir()
                        f = open(os.path.join(tdir, 'w_time'), 'r')
                        value_t = float(f.read())
                        f.close()
                        # move the measured weave compile time into slot 1
                        # and subtract it from the integration slot 2
                        exec_times_run[0][1] = value_t
                        exec_times_run[0][2] -= value_t
                        exec_times_lang.append(exec_times_run)
                    else:
                        exec_times_lang.append(exec_times_run)

                print 'legend_lst', legend_lst
                n_sim_lst.append(s.sampling.n_sim)
                exec_times_sampling.append(exec_times_lang)
                #===========================================================================
                # Evaluate the error
                #===========================================================================
                if len(self.exact_arr) > 0:
                    er = ErrorEval(exact_arr=self.exact_arr)
                    error_lst.append((er.eval_error_rms(s.mu_q_arr),
                                      er.eval_error_max(s.mu_q_arr)))

            times_arr = np.array(exec_times_sampling, dtype='d')
            self._multi_bar_plot(meth_lst, legend_lst, times_arr, error_lst,
                                 n_sim_lst)
            if self.save_output:
                fname_path = os.path.join(self.fig_output_dir, fname)
                p.savefig(fname_path, dpi=self.dpi)

        if self.show_output:
            p.show()

    def combination_efficiency(self, tvars_det, tvars_rand):
        '''
        Run the code for all available random parameter combinations.
        Plot the results.
        '''
        qname = self.get_qname()

        s = self.s
        s.set(sampling_type='TGrid')

        # list of all combinations of response function parameters
        rv_comb_lst = list(powerset(s.tvars.keys()))

        p.figure()
        exec_time_lst = []

        # NOTE(review): hard-coded slice [163:219] restricts the study to a
        # fixed sub-range of combinations -- confirm this is intentional
        for id, rv_comb in enumerate(rv_comb_lst[163:219]):  # [1:-1]
            s.tvars = tvars_det
            print 'Combination', rv_comb

            for rv in rv_comb:
                s.tvars[rv] = tvars_rand[rv]

            #legend = []
            #p.figure()
            time_lst = []
            # NOTE(review): self.run_lst is not defined in this class --
            # possibly a stale reference to run_lst_detailed_config
            for idx, run in enumerate(self.run_lst):
                code, run_options, plot_options, legend_string = run
                print 'run', idx, run_options
                s.codegen_type = code
                s.codegen.set(**run_options)

                #p.plot(s.evar_lst[0], s.mu_q_arr, plot_options)

                #print 'integral of the pdf theta', s.eval_i_dG_grid()
                print 'execution time', s.exec_time
                time_lst.append(s.exec_time)
                #legend.append(legend_string % s.exec_time)
            exec_time_lst.append(time_lst)
        p.plot(np.array((1, 2, 3, 4)), np.array(exec_time_lst).T)
        p.xlabel('method')
        p.ylabel('time')

        if self.save_output:
            print 'saving codegen_efficiency'
            fname = os.path.join(
                self.fig_output_dir,
                qname + '_' + 'combination_efficiency' + '.png')
            p.savefig(fname, dpi=self.dpi)

        if self.show_output:
            p.title(s.q.title)
            p.show()

    def _bar_plot(self, legend_lst, time_lst):
        '''Horizontal bar chart of execution times normalized by the
        second entry of time_lst.'''
        rc('font', size=15)
        #rc('font', family = 'serif', style = 'normal', variant = 'normal', stretch = 'normal', size = 15)
        fig = p.figure(figsize=(10, 5))

        n_tests = len(time_lst)
        times = np.array(time_lst)
        # normalize by the second run (index 1)
        x_norm = times[1]
        xmax = times.max()
        rel_xmax = xmax / x_norm
        rel_times = times / x_norm
        m = int(rel_xmax % 10)

        # round the axis limit up to a convenient multiple
        if m < 5:
            x_max_plt = int(rel_xmax) - m + 10
        else:
            x_max_plt = int(rel_xmax) - m + 15

        ax1 = fig.add_subplot(111)
        p.subplots_adjust(left=0.45, right=0.88)
        #fig.canvas.set_window_title('window title')
        pos = np.arange(n_tests) + 0.5
        rects = ax1.barh(pos,
                         rel_times,
                         align='center',
                         height=0.5,
                         color='w',
                         edgecolor='k')

        ax1.set_xlabel('normalized execution time [-]')
        ax1.axis([0, x_max_plt, 0, n_tests])
        ax1.set_yticks(pos)
        ax1.set_yticklabels(legend_lst)

        # annotate every bar with its normalized value
        for rect, t in zip(rects, rel_times):
            width = rect.get_width()

            xloc = width + (0.03 * rel_xmax)
            clr = 'black'
            align = 'left'

            yloc = rect.get_y() + rect.get_height() / 2.0
            ax1.text(xloc,
                     yloc,
                     '%4.2f' % t,
                     horizontalalignment=align,
                     verticalalignment='center',
                     color=clr)  #, weight = 'bold')

        # secondary axis showing the absolute times in seconds
        ax2 = ax1.twinx()
        ax1.plot([1, 1], [0, n_tests], 'k--')
        ax2.set_yticks([0] + list(pos) + [n_tests])
        ax2.set_yticklabels([''] + ['%4.2f s' % s for s in list(times)] + [''])
        ax2.set_xticks([0, 1] + range(5, x_max_plt + 1, 5))
        ax2.set_xticklabels(
            ['%i' % s for s in ([0, 1] + range(5, x_max_plt + 1, 5))])

    def _multi_bar_plot(self, title_lst, legend_lst, time_arr, error_lst,
                        n_sim_lst):
        '''Plot the results if the code efficiency. 
        '''
        p.rcdefaults()
        fsize = 14
        fig = p.figure(figsize=(15, 3))
        rc('font', size=fsize)
        rc('legend', fontsize=fsize - 2)
        legend_lst = ['weave', 'cython', 'numpy']

        # times are stored in 3d array - dimensions are:
        # [sampling method, language, run, time component]
        n_sampling, n_lang, n_run, n_times = time_arr.shape
        print 'arr', time_arr.shape
        # NOTE(review): axis=n_times only addresses the last axis when the
        # time-component dimension happens to have size 3 -- looks like it
        # should be axis=3; confirm
        times_sum = np.sum(time_arr, axis=n_times)

        p.subplots_adjust(left=0.1,
                          right=0.95,
                          wspace=0.1,
                          bottom=0.15,
                          top=0.8)

        for meth_i in range(n_sampling):

            ax1 = fig.add_subplot(1, n_sampling, meth_i + 1)
            ax1.set_xlabel('execution time [s]')
            ytick_pos = np.arange(n_lang) + 1

            #        ax1.axis([0, x_max_plt, 0, n_lang])
            # todo: **2 n_vars
            if len(self.exact_arr) > 0:
                ax1.set_title(
                    '%s: $ n_\mathrm{sim} = %s, \mathrm{e}_\mathrm{rms}=%s, \mathrm{e}_\mathrm{max}=%s$'
                    % (title_lst[meth_i][0],
                       self._formatSciNotation('%.2e' % n_sim_lst[meth_i]),
                       self._formatSciNotation('%.2e' % error_lst[meth_i][0]),
                       self._formatSciNotation('%.2e' % error_lst[meth_i][1])))
            else:
                ax1.set_title(
                    '%s: $ n_\mathrm{sim} = %s$' %
                    (title_lst[meth_i][0],
                     self._formatSciNotation('%.2e' % n_sim_lst[meth_i])))
            ax1.set_yticks(ytick_pos)
            if meth_i == 0:
                ax1.set_yticklabels(legend_lst, fontsize=fsize + 2)
            else:
                ax1.set_yticklabels([])

            ax1.set_xlim(0, 1.2 * np.max(times_sum[meth_i]))

            distance = 0.2
            height = 1.0 / n_run - distance
            offset = height / 2.0

            colors = ['w', 'w', 'w', 'r', 'y', 'b', 'g', 'm']
            hatches = ['/', '\\', 'x', '-', '+', '|', 'o', 'O', '.', '*']
            label_lst = ['sampling', 'compilation', 'integration']

            # stacked horizontal bars: one bar per run, one stack segment
            # per time component
            for i in range(n_run):
                pos = np.arange(n_lang) + 1 - offset + i * height
                end_bar_pos = np.zeros((n_lang, ), dtype='d')
                for j in range(n_times):
                    if i > 0:
                        label = label_lst[j]
                    else:
                        label = None
                    bar_lengths = time_arr[meth_i, :, i, j]
                    rects = ax1.barh(pos,
                                     bar_lengths,
                                     align='center',
                                     height=height,
                                     left=end_bar_pos,
                                     color=colors[j],
                                     edgecolor='k',
                                     hatch=hatches[j],
                                     label=label)
                    end_bar_pos += bar_lengths
                for k in range(n_lang):
                    x_val = times_sum[meth_i, k,
                                      i] + 0.01 * np.max(times_sum[meth_i])
                    ax1.text(x_val,
                             pos[k],
                             '$%4.2f\,$s' % x_val,
                             horizontalalignment='left',
                             verticalalignment='center',
                             color='black')  #, weight = 'bold')
                    if meth_i == 0:
                        ax1.text(0.02 * np.max(times_sum[0]),
                                 pos[k],
                                 '$%i.$' % (i + 1),
                                 horizontalalignment='left',
                                 verticalalignment='center',
                                 color='black',
                                 bbox=dict(pad=0., ec="w", fc="w"))
            p.legend(loc=0)

    def _formatSciNotation(self, s):
        # transform 1e+004 into 1e4, for example
        tup = s.split('e')
        try:
            significand = tup[0].rstrip('0').rstrip('.')
            sign = tup[1][0].replace('+', '')
            exponent = tup[1][1:].lstrip('0')
            if significand == '1':
                # reformat 1x10^y as 10^y
                significand = ''
            if exponent:
                exponent = '10^{%s%s}' % (sign, exponent)
            if significand and exponent:
                return r'%s{\cdot}%s' % (significand, exponent)
            else:
                return r'%s%s' % (significand, exponent)

        # Python 2 exception syntax; falls back to the raw string when the
        # input has no exponent part
        except IndexError, msg:
            return s
예제 #12
0
class SimBT4PT(IBVModel):
    '''Simulation: four point bending test.
    '''

    input_change = Event

    @on_trait_change('+input,ccs_unit_cell.input_change')
    def _set_input_change(self):
        self.input_change = True

    implements(ISimModel)

    #-----------------
    # discretization:
    #-----------------

    # specify weather the elastomer is to be modeled or if the load is
    # introduced as line load
    #
    elstmr_flag = Bool(True)

    # discretization in x-direction (longitudinal):
    #
    outer_zone_shape_x = Int(6, input=True, ps_levels=(4, 12, 3))

    # discretization in x-direction (longitudinal):
    #
    load_zone_shape_x = Int(2, input=True, ps_levels=(1, 4, 1))

    # middle part discretization in x-direction (longitudinal):
    #
    mid_zone_shape_x = Int(3, input=True, ps_levels=(1, 4, 1))

    # discretization in y-direction (width):
    #
    shape_y = Int(2, input=True, ps_levels=(1, 4, 2))

    # discretization in z-direction:
    #
    shape_z = Int(2, input=True, ps_levels=(1, 3, 3))

    #-----------------
    # geometry:
    #-----------------
    #
    # edge length of the bending specimen (beam) (entire length without symmetry)
    #
    length = Float(1.50, input=True)

    elstmr_length = Float(0.05, input=True)
    mid_zone_length = Float(0.50, input=True)
    elstmr_thickness = Float(0.005, input=True)
    width = Float(0.20, input=True)
    thickness = Float(0.06, input=True)

    #-----------------
    # derived geometric parameters
    #-----------------
    #
    # half the length of the elastomer (load introduction
    # with included symmetry)
    #
    sym_specmn_length = Property

    def _get_sym_specmn_length(self):
        return self.length / 2.

    sym_mid_zone_specmn_length = Property

    def _get_sym_mid_zone_specmn_length(self):
        return self.mid_zone_length / 2.

    # half the length of the elastomer (load introduction
    # with included symmetry
    #
    sym_elstmr_length = Property

    def _get_sym_elstmr_length(self):
        return self.elstmr_length / 2.

    # half the specimen width
    #
    sym_width = Property

    def _get_sym_width(self):
        return self.width / 2.

    #----------------------------------------------------------------------------------
    # mats_eval
    #----------------------------------------------------------------------------------

    # age of the plate at the time of testing
    # NOTE: that the same phi-function is used independent of age. This assumes a
    # an afine/proportional damage evolution for different ages.
    #
    age = Int(28, input=True)

    # time stepping params
    #
    tstep = Float(0.05, auto_set=False, enter_set=True, input=True)
    tmax = Float(1.0, auto_set=False, enter_set=True, input=True)
    tolerance = Float(0.001, auto_set=False, enter_set=True, input=True)

    # specify type of 'linalg.norm'
    # default value 'None' sets norm to 2-norm,
    # i.e "norm = sqrt(sum(x_i**2))
    #
    # set 'ord=np.inf' to switch norm to
    # "norm = max(abs(x_i))"
    #
    ord = Enum(np.inf, None)

    n_mp = Int(30, input=True)

    # @todo: for mats_eval the information of the unit cell should be used
    # in order to use the same number of microplanes and model version etc...
    #
    specmn_mats = Property(Instance(MATS2D5MicroplaneDamage),
                           depends_on='input_change')

    @cached_property
    def _get_specmn_mats(self):
        return MATS2D5MicroplaneDamage(
            E=self.E_c,
            #                                 E=self.E_m,
            nu=self.nu,
            # corresponding to settings in "MatsCalib"
            n_mp=self.n_mp,
            symmetrization='sum-type',
            model_version='compliance',
            phi_fn=self.phi_fn)

    if elstmr_flag:
        elstmr_mats = Property(Instance(MATS3DElastic),
                               depends_on='input_change')

        @cached_property
        def _get_elstmr_mats(self):
            # specify a small elastomer stiffness (approximation)
            E_elast = self.E_c / 10.
            print 'effective elastomer E_modulus', E_elast
            return MATS3DElastic(E=E_elast, nu=0.4)

    #-----------------
    # fets:
    #-----------------

    # specify element shrink factor in plot of fe-model
    #
    vtk_r = Float(0.95)

    # use quadratic serendipity elements
    #
    specmn_fets = Property(Instance(FETSEval), depends_on='input_change')

    @cached_property
    def _get_specmn_fets(self):
        fets = FETS2D58H20U(mats_eval=self.specmn_mats)
        fets.vtk_r *= self.vtk_r
        return fets

    # use quadratic serendipity elements
    #
    elstmr_fets = Property(Instance(FETSEval), depends_on='input_change')

    @cached_property
    def _get_elstmr_fets(self):
        fets = FETS2D58H20U(mats_eval=self.elstmr_mats)
        fets.vtk_r *= self.vtk_r
        return fets

    fe_domain = Property(depends_on='+ps_levels, +input')

    @cached_property
    def _get_fe_domain(self):
        return FEDomain()

    #===========================================================================
    # fe level
    #===========================================================================

    outer_zone_specmn_fe_level = Property(depends_on='+ps_levels, +input')

    def _get_outer_zone_specmn_fe_level(self):
        return FERefinementGrid(name='outer zone specimen patch',
                                fets_eval=self.specmn_fets,
                                domain=self.fe_domain)

    load_zone_specmn_fe_level = Property(depends_on='+ps_levels, +input')

    def _get_load_zone_specmn_fe_level(self):
        return FERefinementGrid(name='load zone specimen patch',
                                fets_eval=self.specmn_fets,
                                domain=self.fe_domain)

    mid_zone_specmn_fe_level = Property(depends_on='+ps_levels, +input')

    def _get_mid_zone_specmn_fe_level(self):
        return FERefinementGrid(name='mid zone specimen patch',
                                fets_eval=self.specmn_fets,
                                domain=self.fe_domain)

    elstmr_fe_level = Property(depends_on='+ps_levels, +input')

    def _get_elstmr_fe_level(self):
        return FERefinementGrid(name='elastomer patch',
                                fets_eval=self.elstmr_fets,
                                domain=self.fe_domain)

    #===========================================================================
    # Grid definition
    #===========================================================================

    mid_zone_specmn_fe_grid = Property(Instance(FEGrid),
                                       depends_on='+ps_levels, +input')

    @cached_property
    def _get_mid_zone_specmn_fe_grid(self):
        # only a quarter of the beam is simulated due to symmetry:
        fe_grid = FEGrid(coord_min=(0., 0., 0.),
                         coord_max=(self.sym_mid_zone_specmn_length -
                                    self.sym_elstmr_length, self.sym_width,
                                    self.thickness),
                         shape=(self.mid_zone_shape_x, self.shape_y,
                                self.shape_z),
                         level=self.mid_zone_specmn_fe_level,
                         fets_eval=self.specmn_fets)
        return fe_grid

    load_zone_specmn_fe_grid = Property(Instance(FEGrid),
                                        depends_on='+ps_levels, +input')

    @cached_property
    def _get_load_zone_specmn_fe_grid(self):
        # only a quarter of the beam is simulated due to symmetry:
        fe_grid = FEGrid(coord_min=(self.sym_mid_zone_specmn_length -
                                    self.sym_elstmr_length, 0., 0.),
                         coord_max=(self.sym_mid_zone_specmn_length +
                                    self.sym_elstmr_length, self.sym_width,
                                    self.thickness),
                         shape=(self.load_zone_shape_x, self.shape_y,
                                self.shape_z),
                         level=self.load_zone_specmn_fe_level,
                         fets_eval=self.specmn_fets)
        return fe_grid

#    if elstmr_flag:

    elstmr_fe_grid = Property(Instance(FEGrid),
                              depends_on='+ps_levels, +input')

    @cached_property
    def _get_elstmr_fe_grid(self):
        fe_grid = FEGrid(coord_min=(self.sym_mid_zone_specmn_length -
                                    self.sym_elstmr_length, 0.,
                                    self.thickness),
                         coord_max=(self.sym_mid_zone_specmn_length +
                                    self.sym_elstmr_length, self.sym_width,
                                    self.thickness + self.elstmr_thickness),
                         level=self.elstmr_fe_level,
                         shape=(self.load_zone_shape_x, self.shape_y, 1),
                         fets_eval=self.elstmr_fets)
        return fe_grid

    outer_zone_specmn_fe_grid = Property(Instance(FEGrid),
                                         depends_on='+ps_levels, +input')

    @cached_property
    def _get_outer_zone_specmn_fe_grid(self):
        # only a quarter of the plate is simulated due to symmetry:
        fe_grid = FEGrid(coord_min=(self.sym_mid_zone_specmn_length +
                                    self.sym_elstmr_length, 0., 0.),
                         coord_max=(self.sym_specmn_length, self.sym_width,
                                    self.thickness),
                         shape=(self.outer_zone_shape_x, self.shape_y,
                                self.shape_z),
                         level=self.outer_zone_specmn_fe_level,
                         fets_eval=self.specmn_fets)
        return fe_grid

    #===========================================================================
    # Boundary conditions
    #===========================================================================
    w_max = Float(-0.030, input=True)  # [m]

    w_max = Float(-0.030, input=True)  # [m]

    bc_list = Property(depends_on='+ps_levels, +input')

    @cached_property
    def _get_bc_list(self):
        mid_zone_specimen = self.mid_zone_specmn_fe_grid
        load_zone_specimen = self.load_zone_specmn_fe_grid
        outer_zone_specimen = self.outer_zone_specmn_fe_grid

        if self.elstmr_flag:
            elastomer = self.elstmr_fe_grid

        #--------------------------------------------------------------
        # boundary conditions for the symmetry
        #--------------------------------------------------------------
        # symmetry in the xz-plane
        # (Note: the x-axis corresponds to the axis of symmetry along the longitudinal axis of the beam)
        #
        bc_outer_zone_symplane_xz = BCSlice(var='u',
                                            value=0.,
                                            dims=[1],
                                            slice=outer_zone_specimen[:,
                                                                      0, :, :,
                                                                      0, :])
        bc_load_zone_symplane_xz = BCSlice(var='u',
                                           value=0.,
                                           dims=[1],
                                           slice=load_zone_specimen[:, 0, :, :,
                                                                    0, :])
        bc_mid_zone_symplane_xz = BCSlice(var='u',
                                          value=0.,
                                          dims=[1],
                                          slice=mid_zone_specimen[:, 0, :, :,
                                                                  0, :])

        if self.elstmr_flag:
            bc_el_symplane_xz = BCSlice(var='u',
                                        value=0.,
                                        dims=[1],
                                        slice=elastomer[:, 0, :, :, 0, :])
        # symmetry in the yz-plane
        #
        bc_mid_zone_symplane_yz = BCSlice(var='u',
                                          value=0.,
                                          dims=[0],
                                          slice=mid_zone_specimen[0, :, :,
                                                                  0, :, :])

        #--------------------------------------------------------------
        # boundary conditions for the support
        #--------------------------------------------------------------
        bc_support_0y0 = BCSlice(var='u',
                                 value=0.,
                                 dims=[2],
                                 slice=outer_zone_specimen[-1, :, 0, -1, :, 0])

        #--------------------------------------------------------------
        # connect all grids
        #--------------------------------------------------------------
        link_loadzn_outerzn = BCDofGroup(
            var='u',
            value=0.,
            dims=[0, 1, 2],
            get_dof_method=load_zone_specimen.get_right_dofs,
            get_link_dof_method=outer_zone_specimen.get_left_dofs,
            link_coeffs=[1.])
        link_midzn_loadzn = BCDofGroup(
            var='u',
            value=0.,
            dims=[0, 1, 2],
            get_dof_method=mid_zone_specimen.get_right_dofs,
            get_link_dof_method=load_zone_specimen.get_left_dofs,
            link_coeffs=[1.])

        if self.elstmr_flag:
            link_elstmr_loadzn_z = BCDofGroup(
                var='u',
                value=0.,
                dims=[2],
                get_dof_method=elastomer.get_back_dofs,
                get_link_dof_method=load_zone_specimen.get_front_dofs,
                link_coeffs=[1.])

            # hold elastomer in a single point in order to avoid kinematic movement yielding singular K_mtx
            #
            bc_elstmr_fix = BCSlice(var='u',
                                    value=0.,
                                    dims=[0],
                                    slice=elastomer[0, 0, 0, 0, 0, 0])

        #--------------------------------------------------------------
        # loading
        #--------------------------------------------------------------
        # w_max = center displacement:
        w_max = self.w_max

        if self.elstmr_flag:
            # apply displacement at all top nodes of the elastomer (surface load)
            #
            bc_w = BCSlice(var='u',
                           value=w_max,
                           dims=[2],
                           slice=elastomer[:, :, -1, :, :, -1])
        else:
            bc_w = BCSlice(
                var='u',
                value=w_max,
                dims=[2],
                # slice is only exactly in the center of the loading zone for 'load_zone_shape_x' = 2
                # center line of the load zone
                slice=load_zone_specimen[0, :, -1, -1, :, -1])


#        f_max = 0.010 / 4. / self.sym_width
#        bc_line_f = BCSlice(var = 'f', value = f_max, dims = [2],
#                            # slice is only valid for 'load_zone_shape_x' = 2
#                            # center line of the load zone
#                            slice = load_zone_specimen[0, :, -1, -1, :, -1])

        bc_list = [
            bc_outer_zone_symplane_xz,
            bc_load_zone_symplane_xz,
            bc_mid_zone_symplane_xz,
            bc_mid_zone_symplane_yz,
            #
            link_midzn_loadzn,
            link_loadzn_outerzn,
            bc_support_0y0,
            #
            bc_w,
        ]

        if self.elstmr_flag:
            bc_list += [bc_el_symplane_xz, link_elstmr_loadzn_z, bc_elstmr_fix]

        return bc_list

    #----------------------
    # tloop
    #----------------------

    tloop = Property(depends_on='input_change')

    @cached_property
    def _get_tloop(self):

        #--------------------------------------------------------------
        # ts
        #--------------------------------------------------------------

        mid_zone_spec = self.mid_zone_specmn_fe_grid
        load_zone_spec = self.load_zone_specmn_fe_grid
        outer_zone_spec = self.outer_zone_specmn_fe_grid

        if self.elstmr_flag:
            # ELSTRMR TOP SURFACE
            # dofs at elastomer top surface (used to integrate the force)
            #
            elastomer = self.elstmr_fe_grid
            elstmr_top_dofs_z = elastomer[:, :, -1, :, :,
                                          -1].dofs[:, :, 2].flatten()
            load_dofs_z = np.unique(elstmr_top_dofs_z)
            print 'load_dofs_z', load_dofs_z
        else:
            # LINE LOAD TOP OF LOAD ZONE
            # dofs at center line of the specmn load zone (used to integrate the force)
            # note slice index in x-direction is only valid for load_zone_shape_x = 2 !
            #
            load_zone_spec_topline_dofs_z = load_zone_spec[
                0, :, -1, -1, :, -1].dofs[:, :, 2].flatten()
            load_dofs_z = np.unique(load_zone_spec_topline_dofs_z)
            print 'load_dofs_z', load_dofs_z

        # SUPPRT LINE
        # dofs at support line of the specmn (used to integrate the force)
        #
        outer_zone_spec_supprtline_dofs_z = outer_zone_spec[
            -1, :, 0, -1, :, 0].dofs[:, :, 2].flatten()
        supprt_dofs_z = np.unique(outer_zone_spec_supprtline_dofs_z)
        print 'supprt_dofs_z', supprt_dofs_z

        # CENTER DOF (used for tracing of the displacement)
        #
        center_bottom_dof = mid_zone_spec[0, 0, 0, 0, 0, 0].dofs[0, 0, 2]
        print 'center_bottom_dof', center_bottom_dof

        # THIRDPOINT DOF (used for tracing of the displacement)
        # dofs at center middle of the laod zone at the bottom side
        #
        # NOTE: slice index in x-direction is only valid for load_zone_shape_x = 2 !
        thirdpoint_bottom_dof = load_zone_spec[0, 0, 0, -1, 0, 0].dofs[0, 0, 2]
        print 'thirdpoint_bottom_dof', thirdpoint_bottom_dof

        # force-displacement-diagram (CENTER)
        #
        self.f_w_diagram_center = RTraceGraph(
            name='displacement_elasttop (center) - force',
            var_x='U_k',
            idx_x=center_bottom_dof,
            var_y='F_int',
            idx_y_arr=load_dofs_z,
            record_on='update',
            transform_x='-x * 1000',  # %g * x' % ( fabs( w_max ),),
            # due to symmetry the total force sums up from four parts of the beam (2 symmetry axis):
            #
            transform_y='-4000. * y')

        # force-displacement-diagram_supprt (SUPPRT)
        #
        self.f_w_diagram_supprt = RTraceGraph(
            name='displacement_supprtline (center) - force',
            var_x='U_k',
            idx_x=center_bottom_dof,
            var_y='F_int',
            idx_y_arr=supprt_dofs_z,
            record_on='update',
            transform_x='-x * 1000',  # %g * x' % ( fabs( w_max ),),
            # due to symmetry the total force sums up from four parts of the beam (2 symmetry axis):
            #
            transform_y='4000. * y')

        # force-displacement-diagram (THIRDPOINT)
        #
        self.f_w_diagram_thirdpoint = RTraceGraph(
            name='displacement_elasttop (thirdpoint) - force',
            var_x='U_k',
            idx_x=thirdpoint_bottom_dof,
            var_y='F_int',
            idx_y_arr=load_dofs_z,
            record_on='update',
            transform_x='-x * 1000',  # %g * x' % ( fabs( w_max ),),
            # due to symmetry the total force sums up from four parts of the beam (2 symmetry axis):
            #
            transform_y='-4000. * y')

        ts = TS(
            sdomain=self.fe_domain,
            bcond_list=self.bc_list,
            rtrace_list=[
                self.f_w_diagram_center,
                self.f_w_diagram_thirdpoint,
                self.f_w_diagram_supprt,
                RTraceDomainListField(name='Displacement',
                                      var='u',
                                      idx=0,
                                      warp=True),
                #                             RTraceDomainListField(name = 'Stress' ,
                #                                            var = 'sig_app', idx = 0, warp = True,
                #                                            record_on = 'update'),
                #                             RTraceDomainListField(name = 'Strain' ,
                #                                        var = 'eps_app', idx = 0, warp = True,
                #                                        record_on = 'update'),
                #                             RTraceDomainListField(name = 'Damage' ,
                #                                        var = 'omega_mtx', idx = 0, warp = True,
                #                                        record_on = 'update'),
                RTraceDomainListField(name='max_omega_i',
                                      warp=True,
                                      var='max_omega_i',
                                      idx=0,
                                      record_on='update'),
                #                             RTraceDomainListField(name = 'IStress' ,
                #                                            position = 'int_pnts',
                #                                            var = 'sig_app', idx = 0,
                #                                            record_on = 'update'),
                #                             RTraceDomainListField(name = 'IStrain' ,
                #                                            position = 'int_pnts',
                #                                            var = 'eps_app', idx = 0,
                #                                            record_on = 'update'),
            ])

        # Add the time-loop control
        tloop = TLoop(tstepper=ts,
                      KMAX=50,
                      tolerance=self.tolerance,
                      RESETMAX=0,
                      tline=TLine(min=0.0, step=self.tstep, max=self.tmax),
                      ord=self.ord)

        return tloop

    def peval(self):
        '''
        Evaluate the model and return the array of results specified
        in the method get_sim_outputs.
        '''
        U = self.tloop.eval()

        self.f_w_diagram_center.refresh()
        F_max = max(self.f_w_diagram_center.trace.ydata)

        u_center_top_z = U[self.center_top_dofs][0, 0, 2]
        return array([u_center_top_z, F_max], dtype='float_')

    def get_sim_outputs(self):
        '''
        Specifies the results and their order returned by the model
        evaluation.
        '''
        return [
            SimOut(name='u_center_top_z', unit='m'),
            SimOut(name='F_max', unit='kN')
        ]
예제 #13
0
class SPIRRIDModelView(ModelView):
    '''
    Size effect depending on the yarn length.

    View/controller around a SPIRRID integration model: lets the user
    pick a response function, randomize its parameters, run the mean
    curve computation and plot samples of the response.
    '''

    # the SPIRRID integration model driven by this view
    model = Instance(SPIRRID)

    def _model_changed(self):
        self.model.rf = self.rf

    # response functions available for selection
    rf_values = List(IRF)

    def _rf_values_default(self):
        return [Filament()]

    # currently selected response function
    rf = Enum(values='rf_values')

    def _rf_default(self):
        return self.rf_values[0]

    def _rf_changed(self):
        # reset the rf in the spirrid model and in the rf_modelview
        self.model.rf = self.rf
        self.rf_model_view = RFModelView(model=self.rf)
        # actually, the view should be reusable but the model switch
        # did not work for whatever reason
        # the binding of the view generated by edit_traits does not
        # react to the change in the 'model' attribute.
        #
        # Remember - we are implementing a handler here
        # that has an associated view.
        #
        # self.rf.model_view.model = self.rf

    rf_model_view = Instance(RFModelView)

    def _rf_model_view_default(self):
        return RFModelView(model=self.rf)

    # one RIDVariable wrapper per parameter of the current response function
    rv_list = Property(List(RIDVariable), depends_on='rf')

    @cached_property
    def _get_rv_list(self):
        return [RIDVariable(spirrid=self.model, rf=self.rf,
                            varname=nm, trait_value=st)
                for nm, st in zip(self.rf.param_keys, self.rf.param_values)]

    selected_var = Instance(RIDVariable)

    def _selected_var_default(self):
        return self.rv_list[0]

    run = Button(desc='Run the computation')

    def _run_fired(self):
        self._redraw()

    sample = Button(desc='Show samples')

    def _sample_fired(self):
        '''Plot n_samples realizations of the response function with
        randomly drawn parameter combinations.'''
        n_samples = 50

        # BUGFIX: honor the 'min_eps' trait instead of a hard-coded 0.00
        # (the trait default is 0.0, so default behavior is unchanged,
        # but the 'Simulation range' UI field now actually takes effect)
        self.model.set(
            min_eps=self.min_eps, max_eps=self.max_eps, n_eps=self.n_eps,
        )

        # get the parameter combinations for plotting
        rvs_theta_arr = self.model.get_rvs_theta_arr(n_samples)

        eps_arr = self.model.eps_arr

        figure = self.figure
        axes = figure.gca()

        for theta_arr in rvs_theta_arr.T:
            q_arr = self.rf(eps_arr, *theta_arr)
            axes.plot(eps_arr, q_arr, color='grey')

        self.data_changed = True

    run_legend = Str('',
                     desc='Legend to be added to the plot of the results')

    clear = Button

    def _clear_fired(self):
        axes = self.figure.axes[0]
        axes.clear()
        self.data_changed = True

    min_eps = Float(0.0,
                    desc='minimum value of the control variable')

    max_eps = Float(1.0,
                    desc='maximum value of the control variable')

    n_eps = Int(100,
                desc='resolution of the control variable')

    label_eps = Str('epsilon',
                    desc='label of the horizontal axis')

    label_sig = Str('sigma',
                    desc='label of the vertical axis')

    figure = Instance(Figure)

    def _figure_default(self):
        figure = Figure(facecolor='white')
        #figure.add_axes( [0.08, 0.13, 0.85, 0.74] )
        return figure

    # fired to trigger a redraw in the embedded matplotlib editor
    data_changed = Event(True)

    def _redraw(self):
        '''Recompute the mean curve over the configured eps range and
        plot it with the current run legend.'''
        figure = self.figure
        axes = figure.gca()

        # BUGFIX: honor the 'min_eps' trait instead of a hard-coded 0.00
        self.model.set(
            min_eps=self.min_eps, max_eps=self.max_eps, n_eps=self.n_eps,
        )

        mc = self.model.mean_curve

        axes.plot(mc.xdata, mc.ydata,
                  linewidth=2, label=self.run_legend)

        axes.set_xlabel(self.label_eps)
        axes.set_ylabel(self.label_sig)
        axes.legend(loc='best')

        self.data_changed = True

    traits_view_tabbed = View(
                              VGroup(
                              HGroup(
                                    Item('run_legend', resizable = False, label = 'Run label',
                                          width = 80, springy = False),
                                    Item('run', show_label = False, resizable = False),
                                    Item('sample', show_label = False, resizable = False),
                                    Item('clear', show_label = False,
                                      resizable = False, springy = False)
                                ),
                               Tabbed(
                               VGroup(
                                     Item('rf', show_label = False),
                                     Item('rf_model_view@', show_label = False, resizable = True),
                                     label = 'Deterministic model',
                                     id = 'spirrid.tview.model',
                                     ),
                                Group(
                                    Item('rv_list', editor = rv_list_editor, show_label = False),
                                    id = 'spirrid.tview.randomization.rv',
                                    label = 'Model variables',
                                ),
                                Group(
                                    Item('selected_var@', show_label = False, resizable = True),
                                    id = 'spirrid.tview.randomization.distr',
                                    label = 'Distribution',
                                ),
                                VGroup(
                                       Item('model.cached_dG' , label = 'Cached weight factors',
                                             resizable = False,
                                             springy = False),
                                       Item('model.compiled_QdG_loop' , label = 'Compiled loop over the integration product',
                                             springy = False),
                                       Item('model.compiled_eps_loop' ,
                                             enabled_when = 'model.compiled_QdG_loop',
                                             label = 'Compiled loop over the control variable',
                                             springy = False),
                                        scrollable = True,
                                       label = 'Execution configuration',
                                       id = 'spirrid.tview.exec_params',
                                       dock = 'tab',
                                     ),
                                VGroup(
                                          HGroup(
                                                 Item('min_eps' , label = 'Min',
                                                       springy = False, resizable = False),
                                                 Item('max_eps' , label = 'Max',
                                                       springy = False, resizable = False),
                                                 Item('n_eps' , label = 'N',
                                                       springy = False, resizable = False),
                                                 label = 'Simulation range',
                                                 show_border = True
                                                 ),
                                          VGroup(
                                                 Item('label_eps' , label = 'x', resizable = False,
                                                 springy = False),
                                                 Item('label_sig' , label = 'y', resizable = False,
                                                 springy = False),
                                                 label = 'Axes labels',
                                                 show_border = True,
                                                 scrollable = True,
                                                 ),
                                           label = 'Execution control',
                                           id = 'spirrid.tview.view_params',
                                           dock = 'tab',
                                 ),
                                VGroup(
                                        Item('figure', editor = MPLFigureEditor(),
                                             resizable = True, show_label = False),
                                        label = 'Plot sheet',
                                        id = 'spirrid.tview.figure_window',
                                        dock = 'tab',
                                        scrollable = True,
                                ),
                                scrollable = True,
                                id = 'spirrid.tview.tabs',
                                dock = 'tab',
                        ),
                        ),
                        title = 'SPIRRID',
                        id = 'spirrid.viewmodel',
                        dock = 'tab',
                        resizable = True,
                        height = 1.0, width = 1.0
                        )
예제 #14
0
class MRquarterDB(MRquarter):
    '''get 'phi_fn' as calibrated by the fitter and stored in the DB

    Variant of MRquarter that does not define the damage function
    directly but looks it up in the CCSUnitCell simulation database
    using 'ccs_unit_cell_key', 'material_model' and 'calibration_test'.
    '''

    # vary the failure strain in PhiFnGeneralExtended:
    factor_eps_fail = Float(1.4, input=True, ps_levels=(1.0, 1.2, 3))

    #-----------------
    # composite cross section unit cell:
    #-----------------
    #
    # key of the composite cross section unit cell in the simulation
    # database; all available DB keys are offered as alternatives
    ccs_unit_cell_key = Enum('FIL-10-09_2D-05-11_0.00462_all0',
                             CCSUnitCell.db.keys(),
                             simdb=True,
                             input=True,
                             auto_set=False,
                             enter_set=True)

    # DB record resolved from the selected key
    ccs_unit_cell_ref = Property(Instance(SimDBClass),
                                 depends_on='ccs_unit_cell_key')

    @cached_property
    def _get_ccs_unit_cell_ref(self):
        return CCSUnitCell.db[self.ccs_unit_cell_key]

    #-----------------
    # damage function:
    #-----------------
    #
    material_model = Str(input=True)

    def _material_model_default(self):
        # return the material model key of the first DamageFunctionEntry
        # This is necessary to avoid an ValueError at setup
        return self.ccs_unit_cell_ref.damage_function_list[0].material_model

    calibration_test = Str(input=True)

    def _calibration_test_default(self):
        # return the calibration test key of the first DamageFunctionEntry
        # This is necessary to avoid an ValueError at setup
        return self.ccs_unit_cell_ref.damage_function_list[0].calibration_test

    # calibrated damage function fetched from the DB record
    damage_function = Property(Instance(MFnLineArray), depends_on='+input')

    @cached_property
    def _get_damage_function(self):
        print 'getting damage function'
        return self.ccs_unit_cell_ref.get_param(self.material_model,
                                                self.calibration_test)

    #-----------------
    # phi function extended:
    #-----------------
    #
    # NOTE(review): declared as Instance(PhiFnGeneralExtended) while the
    # getter returns a PhiFnGeneralExtendedExp -- confirm the Exp variant
    # is a subclass, otherwise trait validation would reject the value.
    phi_fn = Property(Instance(PhiFnGeneralExtended),
                      depends_on='+input,+ps_levels')

    @cached_property
    def _get_phi_fn(self):
        return PhiFnGeneralExtendedExp(mfn=self.damage_function,
                                       Dfp=0.01,
                                       Efp_frac=0.007)


#        return PhiFnGeneralExtended( mfn = self.damage_function,
#                                     factor_eps_fail = self.factor_eps_fail )

#----------------------------------------------------------------------------------
# mats_eval
#----------------------------------------------------------------------------------

# age of the plate at the time of testing
# NOTE: that the same phi-function is used independent of age. This assumes a
# an afine/proportional damage evolution for different ages.
#

    age = Int(
        28,  # input = True
    )

    # composite E-modulus
    #
    E_c = Property(Float, depends_on='+input')

    @cached_property
    def _get_E_c(self):
        return self.ccs_unit_cell_ref.get_E_c_time(self.age)

    # Poisson's ratio
    #
    nu = Property(Float, depends_on='+input')

    @cached_property
    def _get_nu(self):
        return self.ccs_unit_cell_ref.nu
예제 #15
0
class HPShell(HasTraits):
    '''Geometry definition.

    Maps normalized grid coordinates (values in [0, 1]) onto the shell
    surface interpolated (via radial basis functions) between the lower
    and upper vertex surfaces read from .robj geometry files.
    '''

    #===========================================================================
    # geometric variables and values
    #===========================================================================

    # part of mushroof
    #
    # @todo: "four" is not supported by "n_elems_xy_dict"in mushroff_model
    mushroof_part = Enum('detail', 'quarter', 'one', 'four')

    # origin
    # @todo: define as "_default"
    #
    X0 = Array(float, value=[0., 0., 0.])

    # element properties of grid
    #
    n_elems_xy = Int(6)
    n_elems_z = Int(3)

    # number of elements in x/y for one quarter substructure
    n_elems_xy_quarter = Property(Int)

    @cached_property
    def _get_n_elems_xy_quarter(self):
        return self.n_elems_xy / self.scale_size

    # standard array for column shift
    # shift array shifts the elements towards the defined coordinates
    # [x_shift,y_shift,element_between]
    # array needs to have the right shape (:,3)!!
    #
    # @todo: define as "_default"
    #
    shift_array = Array(float,
                        shape=(None, 3),
                        value=[[0.45 / 2**0.5, 0.45 / 2**0.5, 1]])

    # dimensions of the shell for one quarter of mush_roof
    # @todo: add "_quarter"
    #
    length_x = Float(4.0)  # [m]
    length_y = Float(4.0)  # [m]
    length_z = Float(1.062)  # [m]
    t_shell = Float(0.06)  # [m]
    width_column = Float(0.45)  # [m]

    length_x_detail = Float(1.5)  # [m]
    length_y_detail = Float(1.5)  # [m]

    # scaling of the 'detail' part relative to a half quarter in x
    scale_size_detail_x = Property(Float)

    def _get_scale_size_detail_x(self):
        return self.length_x_detail / self.length_x * 2.

    # scaling of the 'detail' part relative to a half quarter in y
    scale_size_detail_y = Property(Float)

    def _get_scale_size_detail_y(self):
        return self.length_y_detail / self.length_y * 2.

    # scale factor for different mushroof parts
    # Defines the proportion between the lenght of the model
    # with respect to the length of a quarter shell as the
    # basic substructure of which the model consists of.
    # @todo: add "depend_on" or remove "cached_property"
    # @todo: move to class definition of "mushroof_model" not in "HPShell"
    #        (also see there "n_elems_dict" with implicit "scale_factor")
    #
    scale_size = Property(Float)

    #    @cached_property
    def _get_scale_size(self):
        #        scale_size_detail = self.lenght_x_detail / self.length_x
        scale_dict = {
            'detail': self.scale_size_detail_x,
            'quarter': 1.0,
            'one': 2.0,
            'four': 4.0
        }
        return scale_dict[self.mushroof_part]

    # factor to scale delta_h (inclination of the shell)
    # The thickness remains unchanged by this scaling, e.g. self.thickness = 0.06 [m]
    #
    delta_h_scalefactor = Float(1.00)  # [-]

    # shift of column elements
    #
    shift_elems = Bool(True)

    # const_edge element operator
    # (for non-linear analysis only, where an element layer of constant
    # thickness is needed to simulate the reinforced behaviour of the
    # concrete.
    #
    const_edge_elem = Bool(False)
    t_edge = Float(0.03)  # [m]
    n_elems_edge = Int(1)  #number of dofs used for edge refinement

    #===========================================================================
    # reading options
    #===========================================================================

    # "lowerface_cut_off" - option replaces constant height for the coordinates
    # which connect to the column (this cuts of the shell geometry horizontally
    # at the bottom of the lower face of the shell geometry.
    # Option should be used for the robj-file with 4x4m geometry
    #
    cut_off_lowerface = Bool(True)

    # corresponds to the delta in the geometry .obj-file with name '4x4m' as a cut off
    #
    delta_h = Float(1.00)  # [m]

    # choose geometric file (obj-data file)
    #
    geo_input_name = Enum('4x4m', '02')

    # filter for '4x4m' file needs to be done to have regular grid
    # in order to rbf-function leading to stable solution without oscilation
    #
    geo_filter = Dict({'4x4m': delete_second_rows})

    def _read_arr(self, side='lowerface_'):
        # Read the vertex array for the given face ('lowerface_' or
        # 'upperface_') from the .robj file, applying the optional
        # per-geometry filter registered in 'geo_filter'.
        file_name = side + self.geo_input_name + '.robj'
        file_path = join('geometry_files', file_name)
        v_arr = read_rsurface(file_path)
        # NOTE: 'filter' shadows the builtin of the same name (local only)
        filter = self.geo_filter.get(self.geo_input_name, None)
        if filter != None:
            v_arr = filter(v_arr)
        return v_arr

    # array of the vertex positions in global
    # x,y,z-coordinates defining the lower surface of the shell
    vl_arr = Property(Array(float))

    @cached_property
    def _get_vl_arr(self):
        vl_arr = self._read_arr('lowerface_')
        if self.cut_off_lowerface == True:
            print '--- lower face z-coords cut off ---'

            # z-values of the coords from the lower face are cut off.
            # From the highest z-coordinate of the lower face the vertical
            # distance is 1 m (=delta h). At this limit the lower face is
            # cut off. Global z coordinate is assumed to point up.
            #
            vl_z_max = max(vl_arr[:, 2])
            vl_z_min = vl_z_max - self.delta_h
            vl_arr_z = where(vl_arr[:, 2] < vl_z_min, vl_z_min, vl_arr[:, 2])
            vl_arr = c_[vl_arr[:, 0:2], vl_arr_z]
        return vl_arr

    # array of the vertex positions in global
    # x,y,z-coordinates defining the upper surface of the shell
    vu_arr = Property(Array(float))

    @cached_property
    def _get_vu_arr(self):
        return self._read_arr('upperface_')

    #------------------------------------------------------------------------------
    # hp_shell geometric transformation
    #------------------------------------------------------------------------------

    def __call__(self, points):
        '''Return the global coordinates of the supplied local points.
        '''

        # number of local grid points for each coordinate direction
        # values must range between 0 and 1
        #
        xi, yi, zi = points[:, 0], points[:, 1], points[:, 2]
        print "xi", xi
        print "xi.shape", xi.shape

        # size of total structure
        #
        # @todo: move to class definition of "mushroof_model" and send to "__call__"
        scale_size = self.scale_size
        print "scale_size", scale_size

        # @todo: add "_quarter" (see above)
        length_x_tot = self.length_x * scale_size
        length_y_tot = self.length_y * scale_size
        n_elems_xy_quarter = self.n_elems_xy_quarter

        # distance from origin for each mushroof_part
        #
        # NOTE(review): only the 'detail' branch is active; for
        # 'quarter'/'one'/'four' this function falls through and
        # returns None -- confirm those parts are handled elsewhere.
        def d_origin_fn(self, coords):
            #            if self.mushroof_part == 'quarter':
            #                return coords
            #            if self.mushroof_part == 'one':
            #                return abs( 2.0 * coords - 1.0 )

            if self.mushroof_part == 'detail':
                print 'in d_origin_fn'
                return abs(1.0 * coords - 0.5) * scale_size


#            # @todo: corresponding "scale_factor" needs to be added
#            #        in order for this to work
#            if self.mushroof_part == 'four':
#                return  where( coords < 0.5, abs( 4 * coords - 1 ), abs( -4 * coords + 3 ) )

# values are used to calculate the z-coordinate using RBF-function of the quarter
# (= values of the distance to the origin as absolute value)
#

        xi_rbf = d_origin_fn(self, xi)
        print 'xi_rbf', xi_rbf
        yi_rbf = d_origin_fn(self, yi)

        # normalized coordinates of the vertices for lower- and upperface
        # NOTE: the underline character indicates a normalized value
        #
        vl_arr_, vu_arr_ = normalize_rsurfaces(self.vl_arr, self.vu_arr)

        # use a radial basis function approximation (rbf) (i.e. interpolation of
        # scattered data) based on the normalized vertex points of the lower face
        #
        x_ = vl_arr_[:, 0]

        # flip the orientation of the local coordinate system in the
        # corresponding y-direction depending on the data file
        #
        geo_input_name = self.geo_input_name
        if geo_input_name == '4x4m':
            y_ = vl_arr_[:, 1]
        else:
            y_ = 1 - vl_arr_[:, 1]

        z_ = vl_arr_[:, 2]
        rbf = Rbf(x_, y_, z_, function='cubic')

        # get the z-value at the supplied local grid points
        # of the lower face
        #
        zi_lower_ = rbf(xi_rbf, yi_rbf)

        # use a radial basis function approximation (rbf) (i.e. interpolation of
        # scattered data) based on the normalized vertex points of the upper face
        #
        x_ = vu_arr_[:, 0]

        # flip the orientation of the local coordinate system in the
        # corresponding y-direction depending on the data file
        #
        geo_input_name = self.geo_input_name
        if geo_input_name == '4x4m':
            y_ = vu_arr_[:, 1]
        else:
            y_ = 1 - vu_arr_[:, 1]

        z_ = vu_arr_[:, 2]
        rbf = Rbf(x_, y_, z_, function='cubic')

        # get the z-value at the supplied local grid points
        # of the upper face
        #
        # note that zi_upper_ is a normalized coordinate!
        #
        zi_upper_ = rbf(xi_rbf, yi_rbf)

        # thickness is multiplied by the supplied zi coordinate
        #
        z_ = (zi_lower_ + (zi_upper_ - zi_lower_) * zi /
              self.delta_h_scalefactor) * self.delta_h_scalefactor

        # coordinates of origin
        #
        X, Y, Z = self.X0

        print '--- geometric transformation done ---'

        # multiply the local grid points with the real dimensions in order to obtain the
        # global coordinates of the mushroof_part:
        #
        return c_[X + xi * length_x_tot, Y + yi * length_y_tot,
                  Z + z_ * self.length_z]
예제 #16
0
class RandomField(HasStrictTraits):
    '''Class for generating a 1D random field by scaling a standardized
    normally distributed random field. The random field array is stored
    in the property random_field. Gaussian or Weibull local distributions
    are available.
    The parameters of the Weibull random field are related to the minimum
    extreme along the whole length of the field.
    '''

    # Parameters to be set
    lacor = Float(1., auto_set=False, enter_set=True,
                   desc='autocorrelation length', modified=True)
    nsim = Int(1, auto_set=False, enter_set=True,
                desc='No of fields to be simulated', modified=True)
    mean = Float(0, auto_set=False, enter_set=True,
                  desc='mean value', modified=True)
    stdev = Float(1., auto_set=False, enter_set=True,
                   desc='standard deviation', modified=True)
    shape = Float(10., auto_set=False, enter_set=True,
                  desc='shape for Weibull distr', modified=True)
    scale = Float(5., auto_set=False, enter_set=True,
                   desc='scale for Weibull distr. corresp. to a length < lacor', modified=True)
    loc = Float(auto_set=False, enter_set=True,
                   desc='location for 3 params weibull', modified=True)
    length = Float(1000., auto_set=False, enter_set=True,
                   desc='length of the random field', modified=True)
    nx = Int(200, auto_set=False, enter_set=True,
             desc='number of discretization points', modified=True)
    # if True, _get_random_field raises on any negative field value
    non_negative_check = False
    # fire to force re-evaluation of the cached random_field
    reevaluate = Event
    # if True, use a fixed numpy seed for reproducible fields
    seed = Bool(False)
    distr_type = Enum('Weibull', 'Gauss', modified=True)

    xgrid = Property(Array, depends_on='length,nx')
    @cached_property
    def _get_xgrid(self):
        '''get the discretized grid for the random field'''
        return np.linspace(0, self.length, self.nx)

    gridpoint_scale = Property(depends_on='scale,shape,length,nx,lacor')
    @cached_property
    def _get_gridpoint_scale(self):
        '''Scaling of the defined distribution to the distribution of a single
        grid point. This option is only available for Weibull random field'''
        delta_x = self.xgrid[1] - self.xgrid[0]
        return self.scale * (self.lacor / (delta_x + self.lacor)) ** (-1. / self.shape)

    def acor(self, dx, lcorr):
        '''autocorrelation function'''
        # squared-exponential correlation with length lcorr
        return e ** (-(dx / lcorr) ** 2)

    eigenvalues = Property(depends_on='lacor,length,nx')
    @cached_property
    def _get_eigenvalues(self):
        '''evaluates the eigenvalues and eigenvectors of the autocorrelation matrix'''
        # creating a symm. toeplitz matrix with (xgrid, xgrid) data points
        Rdist = toeplitz(self.xgrid, self.xgrid)
        # apply the autocorrelation func. to get the correlation matrix
        R = self.acor(Rdist, self.lacor)
        # evaluate the eigenvalues and eigenvectors of the autocorrelation matrix
        print 'evaluating eigenvalues for random field...'
        eigenvalues = eig(R)
        print 'complete'
        return eigenvalues

    random_field = Property(Array, depends_on='+modified, reevaluate')
    @cached_property
    def _get_random_field(self):
        if self.seed == True:
            np.random.seed(101)
        # NOTE(review): the string below is a statement, not a docstring,
        # because it does not open the function body
        '''simulates the Gaussian random field'''
        # evaluate the eigenvalues and eigenvectors of the autocorrelation matrix
        _lambda, phi = self.eigenvalues
        # simulation points from 0 to 1 with an equidistant step for the LHS
        randsim = linspace(0, 1, len(self.xgrid) + 1) - 0.5 / (len(self.xgrid))
        randsim = randsim[1:]
        # shuffling points for the simulation
        shuffle(randsim)
        # matrix containing standard Gauss distributed random numbers
        xi = transpose(ones((self.nsim, len(self.xgrid))) * array([ norm().ppf(randsim) ]))
        # eigenvalue matrix
        LAMBDA = eye(len(self.xgrid)) * _lambda
        # cutting out the real part
        ydata = dot(dot(phi, (LAMBDA) ** 0.5), xi).real
        if self.distr_type == 'Gauss':
            # scaling the std. distribution
            scaled_ydata = ydata * self.stdev + self.mean
        elif self.distr_type == 'Weibull':
            # setting Weibull params
            Pf = norm().cdf(ydata)
            scaled_ydata = weibull_min(self.shape, scale=self.scale, loc=self.loc).ppf(Pf)
        self.reevaluate = False
        # NOTE(review): this reshape assumes nsim == 1; for nsim > 1 the
        # flattening to a single (nx,) field would fail -- confirm intent
        rf = reshape(scaled_ydata, len(self.xgrid))
        if self.non_negative_check == True:
            if (rf < 0).any():
                raise ValueError, 'negative value(s) in random field'
        return rf

    view_traits = View(Item('lacor'),
                       Item('nsim'),
                       Item('shape'),
                       Item('scale'),
                       Item('length'),
                       Item('nx'),
                       Item('distr_type'),
                       )
예제 #17
0
class RandomField(HasTraits):
    '''
    This class implements a 3D random field on a regular grid
    and allows for interpolation using the EOLE method
    '''
    lacor_arr = Array(Float, modified=True) #(nD,1) array of autocorrelation lengths
    nDgrid = List(Array, modified=True) # list of nD entries: each entry is an array of points in the part. dimension
    # fire to force regeneration of the random vector
    reevaluate = Event
    # if True, use a fixed numpy seed for reproducible fields
    seed = Bool(False)
    distr_type = Enum('Gauss', 'Weibull', modified=True)
    stdev = Float(1.0, modified=True)
    mean = Float(0.0, modified=True)
    shape = Float(5.0, modified=True)
    scale = Float(1.0, modified=True)
    loc = Float(0.0, modified=True)

    def acor(self, dx, lacor):
        '''autocorrelation function'''
        # squared-exponential correlation with length lacor
        C = e ** (-(dx / lacor) ** 2)
        return C

    eigenvalues = Property(depends_on='+modified')
    @cached_property
    def _get_eigenvalues(self):
        '''evaluates the eigenvalues and eigenvectors of the covariance matrix'''
        # creating distances from the first coordinate
        # NOTE(review): this mutates self.nDgrid in place, shifting every
        # grid to start at 0 as a side effect of evaluating the property
        for i, grid_i in enumerate(self.nDgrid):
            self.nDgrid[i] -= grid_i[0]
        # creating a symm. toeplitz matrix with (xgrid, xgrid) data points
        coords_lst = [toeplitz(grid_i) for grid_i in self.nDgrid]
        # apply the autocorrelation func. on the coord matrices to obtain the covariance matrices
        C_matrices = [self.acor(coords_i, self.lacor_arr[i]) for i, coords_i in enumerate(coords_lst)]
        # evaluate the eigenvalues and eigenvectors of the autocorrelation matrices
        eigen_lst = []
        for i, C_i in enumerate(C_matrices):
            print 'evaluating eigenvalues for dimension ' + str(i+1)
            lambda_i, Phi_i = eigh(C_i)
            # truncate the eigenvalues at 99% of tr(C)
            truncation_limit = 0.99 * np.trace(C_i)
            argsort = np.argsort(lambda_i)
            cum_sum_lambda = np.cumsum(np.sort(lambda_i)[::-1])
            idx_trunc = int(np.sum(cum_sum_lambda < truncation_limit))
            eigen_lst.append([lambda_i[argsort[::-1]][:idx_trunc], Phi_i[:, argsort[::-1]][:,:idx_trunc]])
        print 'complete'
        # assemble the nD eigen decomposition as Kronecker products of
        # the per-dimension eigenvalue/eigenvector factors
        Lambda_C = 1.0
        Phi_C = 1.0
        for lambda_i, Phi_i in eigen_lst:
            Lambda_i = np.diag(lambda_i)
            Lambda_C = np.kron(Lambda_C, Lambda_i)
            Phi_C = np.kron(Phi_C, Phi_i)

        return Lambda_C, Phi_C

    generated_random_vector = Property(Array, depends_on='reevaluate')
    @cached_property
    def _get_generated_random_vector(self):
        # standard Gaussian random vector, one entry per retained eigenvalue
        if self.seed == True:
            np.random.seed(141)
        # points between 0 to 1 with an equidistant step for the LHS
        # No. of points = No. of truncated eigenvalues
        npts = self.eigenvalues[0].shape[0]
        randsim = np.linspace(0.5/npts, 1 - 0.5/npts, npts)
        # shuffling points for the simulation
        np.random.shuffle(randsim)
        # matrix containing standard Gauss distributed random numbers
        xi = norm().ppf(randsim)
        return xi

    random_field = Property(Array, depends_on='+modified')
    @cached_property
    def _get_random_field(self):
        '''simulates the Gaussian random field'''
        # evaluate the eigenvalues and eigenvectors of the autocorrelation matrix
        Lambda_C_sorted, Phi_C_sorted = self.eigenvalues
        # generate the RF with standardized Gaussian distribution
        ydata = np.dot(np.dot(Phi_C_sorted, (Lambda_C_sorted) ** 0.5), self.generated_random_vector)
        # transform the standardized Gaussian distribution
        if self.distr_type == 'Gauss':
            # scaling the std. distribution
            scaled_ydata = ydata * self.stdev + self.mean
        elif self.distr_type == 'Weibull':
            # setting Weibull params
            Pf = norm().cdf(ydata)
            scaled_ydata = weibull_min(self.shape, scale=self.scale, loc=self.loc).ppf(Pf)
        # reshape the flat vector back onto the nD grid
        shape = tuple([len(grid_i) for grid_i in self.nDgrid])
        rf = np.reshape(scaled_ydata, shape)
        return rf

    def interpolate_rf(self, coords):
        '''interpolate RF values using the EOLE method
        coords = list of 1d arrays of coordinates'''
        # check consistency of dimensions
        if len(coords) != len(self.nDgrid):
            raise ValueError('point dimension differs from random field dimension')
        # create the covariance matrix
        C_matrices = [self.acor(coords_i.reshape(1, len(coords_i)) - self.nDgrid[i].reshape(len(self.nDgrid[i]),1), self.lacor_arr[i]) for i, coords_i in enumerate(coords)]

        # build the joint covariance between the query points and the grid
        # dimension by dimension (outer products flattened to 2D)
        C_u = 1.0
        for i, C_ui in enumerate(C_matrices):
            if i == 0:
                C_u *= C_ui
            else:
                C_u = C_u.reshape(C_u.shape[0], 1, C_u.shape[1]) * C_ui
            grid_size = 1.0
            for j in np.arange(i+1):
                grid_size *= len(self.nDgrid[j])
            C_u = C_u.reshape(grid_size,len(coords[0]))

        Lambda_Cx, Phi_Cx = self.eigenvalues
        # values interpolated in the standardized Gaussian rf 
        u = np.sum(self.generated_random_vector / np.diag(Lambda_Cx) ** 0.5 * np.dot(C_u.T, Phi_Cx), axis=1)
        if self.distr_type == 'Gauss':
            scaled_u = u * self.stdev + self.mean
        elif self.distr_type == 'Weibull':
            Pf = norm().cdf(u)
            scaled_u = weibull_min(self.shape, scale=self.scale, loc=self.loc).ppf(Pf)
        return scaled_u
예제 #18
0
class ImageProcessing(HasTraits):
    def __init__(self, **kw):
        super(ImageProcessing, self).__init__(**kw)
        self.on_trait_change(self.refresh, '+params')
        self.refresh()

    image_path = Str

    def rgb2gray(self, rgb):
        return np.dot(rgb[..., :3], [0.299, 0.587, 0.144])

    filter = Bool(False, params=True)
    block_size = Range(1, 100, params=True)
    offset = Range(1, 20, params=True)
    denoise = Bool(False, params=True)
    denoise_spatial = Range(1, 100, params=True)

    processed_image = Property

    def _get_processed_image(self):
        # read image
        image = mpimg.imread(self.image_path)
        mask = image[:, :, 1] > 150.
        image[mask] = 255.
        #plt.imshow(image)
        #plt.show()
        # convert to grayscale
        image = self.rgb2gray(image)
        # crop image
        image = image[100:1000, 200:1100]
        mask = mask[100:1000, 200:1100]
        image = image - np.min(image)
        image[mask] *= 255. / np.max(image[mask])
        if self.filter == True:
            image = denoise_bilateral(image,
                                      sigma_spatial=self.denoise_spatial)
        if self.denoise == True:
            image = threshold_adaptive(image,
                                       self.block_size,
                                       offset=self.offset)
        return image, mask

    edge_detection_method = Enum('canny', 'sobel', 'roberts', params=True)
    canny_sigma = Range(2.832, 5, params=True)
    canny_low = Range(5.92, 100, params=True)
    canny_high = Range(0.1, 100, params=True)

    edges = Property

    def _get_edges(self):
        img_edg, mask = self.processed_image
        if self.edge_detection_method == 'canny':
            img_edg = canny(img_edg,
                            sigma=self.canny_sigma,
                            low_threshold=self.canny_low,
                            high_threshold=self.canny_high)
        elif self.edge_detection_method == 'roberts':
            img_edg = roberts(img_edg)
        elif self.edge_detection_method == 'sobel':
            img_edg = sobel(img_edg)
        img_edg = img_edg > 0.0
        return img_edg

    radii = Int(80, params=True)
    radius_low = Int(40, params=True)
    radius_high = Int(120, params=True)
    step = Int(2, params=True)

    hough_circles = Property

    def _get_hough_circles(self):
        hough_radii = np.arange(self.radius_low, self.radius_high,
                                self.step)[::-1]
        hough_res = hough_circle(self.edges, hough_radii)
        centers = []
        accums = []
        radii = []  # For each radius, extract num_peaks circles
        num_peaks = 3
        for radius, h in zip(hough_radii, hough_res):
            peaks = peak_local_max(h, num_peaks=num_peaks)
            centers.extend(peaks)
            print 'circle centers = ', peaks
            accums.extend(h[peaks[:, 0], peaks[:, 1]])
            radii.extend([radius] * num_peaks)

        im = mpimg.imread(self.image_path)
        # crop image
        im = im[100:1000, 200:1100]
        for idx in np.arange(len(centers)):
            center_x, center_y = centers[idx]
            radius = radii[idx]
            cx, cy = circle_perimeter(center_y, center_x, radius)
            mask = (cx < im.shape[0]) * (cy < im.shape[1])
            im[cy[mask], cx[mask]] = (220., 20., 20.)
        return im

    eval_edges = Button

    def _eval_edges_fired(self):
        edges = self.figure_edges
        edges.clear()
        axes_edges = edges.gca()
        axes_edges.imshow(self.edges, plt.gray())
        self.data_changed = True

    eval_circles = Button

    def _eval_circles_fired(self):
        circles = self.figure_circles
        circles.clear()
        axes_circles = circles.gca()
        axes_circles.imshow(self.hough_circles, plt.gray())
        self.data_changed = True

    figure = Instance(Figure)

    def _figure_default(self):
        figure = Figure(facecolor='white')
        return figure

    figure_edges = Instance(Figure)

    def _figure_edges_default(self):
        figure = Figure(facecolor='white')
        return figure

    figure_circles = Instance(Figure)

    def _figure_circles_default(self):
        figure = Figure(facecolor='white')
        return figure

    data_changed = Event

    def plot(self, fig, fig2):
        figure = fig
        figure.clear()
        axes = figure.gca()
        img, mask = self.processed_image
        axes.imshow(img, plt.gray())

    def refresh(self):
        self.plot(self.figure, self.figure_edges)
        self.data_changed = True

    traits_view = View(HGroup(
        Group(Item('filter', label='filter'),
              Item('block_size'),
              Item('offset'),
              Item('denoise', label='denoise'),
              Item('denoise_spatial'),
              label='Filters'),
        Group(
            Item('figure',
                 editor=MPLFigureEditor(),
                 show_label=False,
                 resizable=True),
            scrollable=True,
            label='Plot',
        ),
    ),
                       Tabbed(
                           VGroup(Item('edge_detection_method'),
                                  Item('canny_sigma'),
                                  Item('canny_low'),
                                  Item('canny_high'),
                                  Item('eval_edges', label='Evaluate'),
                                  Item('figure_edges',
                                       editor=MPLFigureEditor(),
                                       show_label=False,
                                       resizable=True),
                                  scrollable=True,
                                  label='Plot_edges'), ),
                       Tabbed(
                           VGroup(Item('radii'),
                                  Item('radius_low'),
                                  Item('radius_high'),
                                  Item('step'),
                                  Item('eval_circles'),
                                  Item('figure_circles',
                                       editor=MPLFigureEditor(),
                                       show_label=False,
                                       resizable=True),
                                  scrollable=True,
                                  label='Plot_circles'), ),
                       id='imview',
                       dock='tab',
                       title='Image processing',
                       scrollable=True,
                       resizable=True,
                       width=600,
                       height=400)
예제 #19
0
class LS(HasTraits):
    '''Limit state class.

    Wraps one limit-state sheet of a linked ``ls_table``: exposes the
    geometry, state and stress-resultant columns of the table as traits,
    assembles the selected columns into a sortable array for display in a
    TabularEditor, and plots a chosen column over the shell geometry with
    mayavi.
    '''

    # backward link to the info shell to access the
    # input data when calculating
    # the limit-state-specific values
    #
    ls_table = WeakRef

    # parameters of the limit state
    #
    dir = Enum(DIRLIST)
    stress_res = Enum(SRLIST)

    #-------------------------------
    # ls columns
    #-------------------------------
    # defined in the subclasses
    #
    ls_columns = List
    show_ls_columns = Bool(True)

    #-------------------------------
    # sr columns
    #-------------------------------

    # stress resultant columns - for ULS this is defined in the subclasses
    #
    sr_columns = List(['m', 'n'])
    show_sr_columns = Bool(True)

    # stress resultant columns - generated from the parameter combination
    # dir and stress_res - one of MX, NX, MY, NY
    #
    m_varname = Property(Str)

    def _get_m_varname(self):
        # e.g. mx_N
        appendix = self.dir + '_' + self.stress_res
        return 'm' + appendix

    n_varname = Property(Str)

    def _get_n_varname(self):
        # e.g. nx_N
        appendix = self.dir + '_' + self.stress_res
        return 'n' + appendix

    n = Property(Float)

    def _get_n(self):
        return getattr(self.ls_table, self.n_varname)

    m = Property(Float)

    def _get_m(self):
        return getattr(self.ls_table, self.m_varname)

    #-------------------------------
    # geo columns form info shell
    #-------------------------------

    geo_columns = List(['elem_no', 'X', 'Y', 'Z', 'D_elem'])
    show_geo_columns = Bool(True)

    elem_no = Property(Float)

    def _get_elem_no(self):
        return self.ls_table.elem_no

    X = Property(Float)

    def _get_X(self):
        return self.ls_table.X

    Y = Property(Float)

    def _get_Y(self):
        return self.ls_table.Y

    Z = Property(Float)

    def _get_Z(self):
        return self.ls_table.Z

    D_elem = Property(Float)

    def _get_D_elem(self):
        return self.ls_table.D_elem

    #-------------------------------
    # state columns form info shell
    #-------------------------------


#    state_columns = List( ['mx', 'my', 'mxy', 'nx', 'ny', 'nxy' ] )
    state_columns = List([
        'mx',
        'my',
        'mxy',
        'nx',
        'ny',
        'nxy',
        'sigx_lo',
        'sigy_lo',
        'sigxy_lo',
        'sig1_lo',
        'sig2_lo',
        'alpha_sig_lo',
        'sigx_up',
        'sigy_up',
        'sigxy_up',
        'sig1_up',
        'sig2_up',
        'alpha_sig_up',
    ])

    show_state_columns = Bool(True)

    mx = Property(Float)

    def _get_mx(self):
        return self.ls_table.mx

    my = Property(Float)

    def _get_my(self):
        return self.ls_table.my

    mxy = Property(Float)

    def _get_mxy(self):
        return self.ls_table.mxy

    nx = Property(Float)

    def _get_nx(self):
        return self.ls_table.nx

    ny = Property(Float)

    def _get_ny(self):
        return self.ls_table.ny

    nxy = Property(Float)

    def _get_nxy(self):
        return self.ls_table.nxy

    # evaluate principal stresses
    # upper face:
    #
    sigx_up = Property(Float)

    def _get_sigx_up(self):
        return self.ls_table.sigx_up

    sigy_up = Property(Float)

    def _get_sigy_up(self):
        return self.ls_table.sigy_up

    sigxy_up = Property(Float)

    def _get_sigxy_up(self):
        return self.ls_table.sigxy_up

    sig1_up = Property(Float)

    def _get_sig1_up(self):
        return self.ls_table.sig1_up

    sig2_up = Property(Float)

    def _get_sig2_up(self):
        return self.ls_table.sig2_up

    alpha_sig_up = Property(Float)

    def _get_alpha_sig_up(self):
        return self.ls_table.alpha_sig_up

    # lower face:
    #
    sigx_lo = Property(Float)

    def _get_sigx_lo(self):
        return self.ls_table.sigx_lo

    sigy_lo = Property(Float)

    def _get_sigy_lo(self):
        return self.ls_table.sigy_lo

    sigxy_lo = Property(Float)

    def _get_sigxy_lo(self):
        return self.ls_table.sigxy_lo

    sig1_lo = Property(Float)

    def _get_sig1_lo(self):
        return self.ls_table.sig1_lo

    sig2_lo = Property(Float)

    def _get_sig2_lo(self):
        return self.ls_table.sig2_lo

    alpha_sig_lo = Property(Float)

    def _get_alpha_sig_lo(self):
        return self.ls_table.alpha_sig_lo

    #-------------------------------
    # ls table
    #-------------------------------

    # all columns associated with the limit state including the corresponding
    # stress resultants
    #
    columns = Property(List,
                       depends_on='show_geo_columns, show_state_columns,\
                                             show_sr_columns, show_ls_columns')

    @cached_property
    def _get_columns(self):
        '''Concatenate the column groups that are currently switched on.'''
        columns = []

        if self.show_geo_columns:
            columns += self.geo_columns

        if self.show_state_columns:
            columns += self.state_columns

        if self.show_sr_columns:
            columns += self.sr_columns

        if self.show_ls_columns:
            columns += self.ls_columns

        return columns

    # select column used for sorting the data in selected sorting order
    #
    sort_column = Enum(values='columns')

    def _sort_column_default(self):
        return self.columns[-1]

    sort_order = Enum('descending', 'ascending', 'unsorted')

    #-------------------------------------------------------
    # get the maximum value of the selected variable
    # 'max_in_column' of the current sheet (only one sheet)
    #-------------------------------------------------------

    # get the maximum value of the chosen column
    #
    max_in_column = Enum(values='columns')

    def _max_in_column_default(self):
        return self.columns[-1]

    max_value = Property(depends_on='max_in_column')

    def _get_max_value(self):
        col = getattr(self, self.max_in_column)[:, 0]
        return max(col)

    #-------------------------------------------------------
    # get the maximum value and the corresponding case of
    # the selected variable 'max_in_column' in all (!) sheets
    #-------------------------------------------------------

    max_value_all = Property(depends_on='max_in_column')

    def _get_max_value_all(self):
        return self.ls_table.max_value_and_case[
            self.max_in_column]['max_value']

    max_case = Property(depends_on='max_in_column')

    def _get_max_case(self):
        return self.ls_table.max_value_and_case[self.max_in_column]['max_case']

    #-------------------------------------------------------
    # get ls_table for View
    #-------------------------------------------------------

    # stack columns together for table used by TabularEditor
    #
    ls_array = Property(
        Array,
        depends_on='sort_column, sort_order, show_geo_columns, \
                                              show_state_columns, show_sr_columns, show_ls_columns'
    )

    @cached_property
    def _get_ls_array(self):
        '''Stack the selected columns and sort the rows by 'sort_column'
        in the order requested by 'sort_order'.
        '''
        arr_list = [getattr(self, col) for col in self.columns]

        # get the array currently selected by the sort_column enumeration
        #
        sort_arr = getattr(self, self.sort_column)[:, 0]
        sort_idx = argsort(sort_arr)
        ls_array = hstack(arr_list)

        if self.sort_order == 'descending':
            return ls_array[sort_idx[::-1]]
        if self.sort_order == 'ascending':
            return ls_array[sort_idx]
        if self.sort_order == 'unsorted':
            return ls_array

    #---------------------------------
    # plot outputs in mlab-window
    #---------------------------------

    plot_column = Enum(values='columns')
    plot = Button

    def _plot_fired(self):
        '''Plot the currently selected column over the shell geometry.'''
        X = self.ls_table.X[:, 0]
        Y = self.ls_table.Y[:, 0]
        Z = self.ls_table.Z[:, 0]
        plot_col = getattr(self, self.plot_column)[:, 0]

        # clip negative reinforcement values to zero for the plot
        if self.plot_column == 'n_tex':
            plot_col = where(plot_col < 0, 0, plot_col)

        mlab.figure(figure="SFB532Demo",
                    bgcolor=(1.0, 1.0, 1.0),
                    fgcolor=(0.0, 0.0, 0.0))

        mlab.points3d(
            X,
            Y,
            (-1.0) * Z,
            plot_col,
            #                       colormap = "gist_rainbow",
            #                       colormap = "Reds",
            colormap="YlOrBr",
            mode="cube",
            scale_factor=0.15)

        mlab.scalarbar(title=self.plot_column, orientation='vertical')

        # BUGFIX: 'mlab.show' was referenced without being called, so the
        # interactive mayavi event loop was never started.
        mlab.show()

    # name of the trait that is used to assess the evaluated design
    #
    assess_name = Str('')

    #-------------------------------
    # ls group
    #-------------------------------

    # @todo: the dynamic selection of the columns to be displayed
    # does not work in connection with the LSArrayAdapter
    ls_group = VGroup(
        HGroup(  #Item( 'assess_name' ),
            Item('max_in_column'),
            Item('max_value', style='readonly', format_str='%6.2f'),
            Item('max_value_all', style='readonly', format_str='%6.2f'),
            Item('max_case', style='readonly', label='found in case: '),
        ),
        HGroup(
            Item('sort_column'),
            Item('sort_order'),
            Item('show_geo_columns', label='show geo'),
            Item('show_state_columns', label='show state'),
            Item('show_sr_columns', label='show sr'),
            Item('plot_column'),
            Item('plot'),
        ),
    )
예제 #20
0
class HPShell(HasTraits):
    '''Geometry definition.
    '''
    # dimensions of the shell structure [m]
    # (only one quart of the shell structure)
    #
    # NOTE: lenth_z = 1.0 m + 0.062 m = 1.062
    # NOTE: lenth_z = 0.865 m + 0.062 m = 0.927
    length_x = Float(3.50)
    length_y = Float(3.50)
    length_z = Float(0.927)

    # corresponds to the delta in the geometry obj file '4x4m'
    delta_h = Float(0.865)  # [m]

    # factor to scale height of lower surface
    # thickness remains unchanged as 0.06 m
    #
    delta_h_scalefactor = Float(1.00)  # [-]

    # cut of the z-coordinates of the lowerface if set to True
    #
    cut_off_lowerface = Bool(True)

    geo_input_name = Enum('350x350cm')
    geo_filter = Dict({'4x4m': delete_second_rows})

    def _read_arr(self, side='lowerface_'):
        file_name = side + self.geo_input_name + '.robj'
        file_path = join('geometry_files', file_name)
        v_arr = read_rsurface(file_path)

        filter = self.geo_filter.get(self.geo_input_name, None)
        if filter != None:
            v_arr = filter(v_arr)
        return v_arr

    # array of the vertex positions in global
    # x,y,z-coordinates defining the lower surface of the shell
    vl_arr = Property(Array(float))

    @cached_property
    def _get_vl_arr(self):
        vl_arr = self._read_arr('lowerface_')
        if self.cut_off_lowerface == True:
            print '--- lower face z-coords cut off ---'

            # z-values of the coords from the lower face are cut off.
            # From the highest z-coordinate of the lower face the vertical
            # distance is 1 m (=delta h). At this limit the lower face is
            # cut off. Global z coordinate is assumed to point up.
            #
            vl_z_max = max(vl_arr[:, 2])
            if self.geo_input_name == '4x4m':
                # NOTE: the global z-coordinates are given in the geo data file in [m]
                # no conversion of unites necessary (self.delta_h is given in [m])
                delta_h = self.delta_h
            elif self.geo_input_name == '350x350cm':
                # NOTE: the global z-coordinates are given in the geo data file in [cm]
                # convert delta_h from [m] to [cm]
                #
                delta_h = self.delta_h * 100.
            vl_z_min = vl_z_max - self.delta_h
            vl_arr_z = where(vl_arr[:, 2] < vl_z_min, vl_z_min, vl_arr[:, 2])
            vl_arr = c_[vl_arr[:, 0:2], vl_arr_z]
        return vl_arr

    # array of the vertex positions in global
    # x,y,z-coordinates defining the upper surface of the shell
    vu_arr = Property(Array(float))

    @cached_property
    def _get_vu_arr(self):
        return self._read_arr('upperface_')

    def get_mid_surface_and_thickness(self, points, perpendicular_t=True):
        '''Return the global coordinates of the supplied local points.
        '''
        print '*** get mid surface and thickness ***'

        #-----------------------------------------------
        # get the global coordinates as defined in the
        # input file and transform them to the coordinate
        # system of the master quarter
        #-----------------------------------------------

        #
        if self.geo_input_name == '350x350cm':
            X0 = [3.50, 3.50, 0.]
        else:
            X0 = [0., 0., 0.]

        # number of global grid points for each coordinate direction
        #
        xi, yi = points[:, 0] - X0[0], points[:, 1] - X0[1]

        # NOTE:
        # -- The available rbf-function is only defined for a quarter of one shell.
        # in order to get z and t values for an entire shell the abs-function
        # is used. The coordinate system of the quarter must be defined in the
        # lower left corner; the coordinate systemn of the entire one shell must
        # be defined in the center of the shell so that the coordinate system
        # for the master quarter remains unchanged.
        # -- The transformation is performed based on the defined class attributes
        # of hp_shell_stb: length_x, length_y, length_z, delta_h, delta_h_scalefactor
        # characterizing the properties of the master quarter

        # number of local grid points for each coordinate direction
        # values must range between 0 and 1
        #
        points_tilde_list = []
        for i_row in range(points.shape[0]):
            # get the x, y coordinate pair defined in the input
            # file in global coordinates
            #
            x = xi[i_row]
            y = yi[i_row]

            # transform values to local coordinate system,
            # i.e. move point to the 'master roof' containing the
            # global coordinate system:
            #
            if x <= self.length_x and y <= self.length_y:
                # point lays in first (master) roof
                #
                x_tilde = x
                y_tilde = y

            elif x >= self.length_x and y <= self.length_y:
                # point lays in second roof:
                #
                # roof length = 2* length of the master quarter
                # (e.g. 2*4,0m = 8,00m for obj-file "4x4m")
                x_tilde = x - 2 * self.length_x
                y_tilde = y

            elif x <= self.length_x and y >= self.length_y:
                # point lays in third roof:
                #
                x_tilde = x
                y_tilde = y - 2 * self.length_y

            elif x >= self.length_x and y >= self.length_y:
                # point lays in fourth roof:
                #
                x_tilde = x - 2 * self.length_x
                y_tilde = y - 2 * self.length_y

            points_tilde_list.append([x_tilde, y_tilde])

        points_tilde_arr = array(points_tilde_list, dtype='float_')
        xi_tilde = points_tilde_arr[:, 0]
        yi_tilde = points_tilde_arr[:, 1]
        #        print 'points_tilde_arr', points_tilde_arr

        xi_ = abs(xi_tilde) / self.length_x
        yi_ = abs(yi_tilde) / self.length_y

        #-----------------------------------------------
        # get the normalized rbf-function for the upper
        # and lower face of the master quarter
        #-----------------------------------------------
        # NOTE: the underline character indicates a normalized value

        # normalized coordinates of the vertices for lower- and upper-face
        #
        vl_arr_, vu_arr_ = normalize_rsurfaces(self.vl_arr, self.vu_arr)

        # use a radial basis function approximation (rbf) (i.e. interpolation of
        # scattered data) based on the normalized vertex points of the lower face
        #
        x_ = vl_arr_[:, 0]
        y_ = vl_arr_[:, 1]

        if self.geo_input_name == '350x350cm':
            x_ = 1 - vl_arr_[:, 0]

        z_l_ = vl_arr_[:, 2]
        rbf_l = Rbf(x_, y_, z_l_, function='cubic')

        # get the z-value at the supplied local grid points
        # of the lower face
        #
        zi_lower_ = rbf_l(xi_, yi_)

        # use a radial basis function approximation (rbf) (i.e. interpolation of
        # scattered data) based on the normalized vertex points of the upper face
        #
        x_ = vu_arr_[:, 0]
        y_ = vu_arr_[:, 1]

        if self.geo_input_name == '350x350cm':
            x_ = 1 - vu_arr_[:, 0]

        z_u_ = vu_arr_[:, 2]
        rbf_u = Rbf(x_, y_, z_u_, function='cubic')

        # get the z-value at the supplied local grid points
        # of the upper face
        #
        zi_upper_ = rbf_u(xi_, yi_)

        # approach of the slope to get thickness perpendicular to slope
        #
        # thickness is multiplied by the supplied zi coordinate
        # and z value of mid plane
        #
        t_ = zi_upper_ - zi_lower_

        z_middle_ = (zi_lower_ + (zi_upper_ - zi_lower_) * 0.5 /
                     self.delta_h_scalefactor) * self.delta_h_scalefactor

        if perpendicular_t == True:
            # delta shift of x and y for estimation of slope will be done in 4 direction
            # 0, 45, 90 and 135 degrees
            print "--- perpendicular ---"
            delta = 0.000001

            # shift in x

            dz_x_p_ = (rbf_u(xi_ + delta, yi_) + rbf_l(xi_ + delta, yi_)) / 2.0
            dz_x_m_ = (rbf_u(xi_ - delta, yi_) + rbf_l(xi_ - delta, yi_)) / 2.0

            slope_x_ = (dz_x_p_ - dz_x_m_) / (2.0 * delta)
            angle_x = arctan(slope_x_ * self.length_z / self.length_x)
            f_1 = cos(angle_x)

            # shift in y

            dz_y_p_ = (rbf_u(xi_, yi_ + delta) + rbf_l(xi_, yi_ + delta)) / 2.0
            dz_y_m_ = (rbf_u(xi_, yi_ - delta) + rbf_l(xi_, yi_ - delta)) / 2.0

            slope_y_ = (dz_y_p_ - dz_y_m_) / (2.0 * delta)
            angle_y = arctan(slope_y_ * self.length_z / self.length_x)
            f_2 = cos(angle_y)

            #shift +x +y; -x -y

            dz_x_p_y_p_ = (rbf_u(xi_ + delta, yi_ + delta) +
                           rbf_l(xi_ + delta, yi_ + delta)) / 2.0
            dz_x_m_y_m_ = (rbf_u(xi_ - delta, yi_ - delta) +
                           rbf_l(xi_ - delta, yi_ - delta)) / 2.0

            slope_x_p_y_p_ = (dz_x_p_y_p_ - dz_x_m_y_m_) / (2.0 * sqrt(2) *
                                                            delta)
            angle_x_p_y_p = arctan(slope_x_p_y_p_ * self.length_z /
                                   (self.length_x**2 + self.length_y**2)**0.5)
            f_3 = cos(angle_x_p_y_p)

            # shift in +x,-y ; -x and +y

            dz_x_p_y_m_ = (rbf_u(xi_ + delta, yi_ - delta) +
                           rbf_l(xi_ + delta, yi_ - delta)) / 2.0
            dz_x_m_y_p_ = (rbf_u(xi_ - delta, yi_ + delta) +
                           rbf_l(xi_ - delta, yi_ + delta)) / 2.0

            slope_x_p_y_m_ = (dz_x_p_y_m_ - dz_x_m_y_p_) / (sqrt(2) * 2.0 *
                                                            delta)
            angle_x_p_y_m = arctan(slope_x_p_y_m_ * self.length_z /
                                   (self.length_x**2 + self.length_y**2)**0.5)
            f_4 = cos(angle_x_p_y_m)

            # obtain minimum factor for good estimate of maximum slope

            factor = min([f_1, f_2, f_3, f_4], axis=0)
            t_ = t_ * factor

        return xi, yi, z_middle_ * self.length_z, t_ * self.length_z

    def _read_thickness_data(self, file_name):
        '''to read the stb - X and Y coordinates ( m ) save the xls - worksheet
        to a csv - file using ';' as filed delimiter and ' ' ( blank )
        as text delimiter.
        Stb Data needs to have same range of values in X and Y direction and same unit [m],
        as defined as length_x and length_y
        '''
        print '*** reading thickness data from file: ', file_name, ' ***'

        # get the column headings defined in the second row
        # of the csv thickness input file
        # "Nr.;X;Y;Z;[mm]"
        #
        file = open(file_name, 'r')
        lines = file.readlines()
        column_headings = array(lines[1].split(';'))
        elem_no_idx = where('Nr.' == column_headings)[0]
        X_idx = where('X' == column_headings)[0]
        Y_idx = where('Y' == column_headings)[0]
        Z_idx = where('Z' == column_headings)[0]
        thickness_idx = where('[mm]\n' == column_headings)[0]

        input_arr = loadtxt(file_name, delimiter=';', skiprows=2)

        # elem number:
        #
        elem_no = input_arr[:, elem_no_idx]

        # coordinates [m]:
        #
        X = input_arr[:, X_idx][:, 0]
        Y = input_arr[:, Y_idx][:, 0]

        #        print 'thickness_idx', thickness_idx
        if thickness_idx != []:
            thickness_stb = input_arr[:, thickness_idx][:, 0] / 1000.
            return elem_no, X, Y, thickness_stb
        else:
            thickness_stb = ones_like(elem_no)
            return elem_no, X, Y, thickness_stb

    def _read_elem_coords(self, file_name):
        '''x,y -coordinates must be read from old file
        '''
        input_arr = loadtxt(file_name, delimiter=';', skiprows=2)

        elem_no = input_arr[:, 0]
        X = input_arr[:, 2]
        Y = input_arr[:, 3]

        return elem_no, X, Y

    def _read_nodal_coords(self, file_name):
        '''read the nodal coordinates of the mid - surface
        defined in a csv - file. To export the excel sheet
        to csv use ";" as a field delimiter and "" ( none )
        as a text delimiter.
        Note that some lines do not contain values !
        '''
        print '*** reading nodal coordinates from file: ', file_name, ' ***'

        file = open(file_name, 'r')

        # read the column headings (first two lines)
        #
        first_line = file.readline()
        second_line = file.readline()
        column_headings = second_line.split(';')
        # remove '\n' from last string element in list
        column_headings[-1] = column_headings[-1][:-1]
        column_headings_arr = array(column_headings)

        # check in which column the node number and the
        # carthesian coordinates can be found
        #
        elem_no_idx = where('Nr.' == column_headings_arr)[0]
        X_idx = where('X [m]' == column_headings_arr)[0]
        Y_idx = where('Y [m]' == column_headings_arr)[0]
        Z_idx = where('Z [m]' == column_headings_arr)[0]

        lines = file.readlines()

        lines_list = [line.split(';') for line in lines]

        empty_lines_idx = []
        ll = []
        for i_line, line in enumerate(lines_list):

            # check if line contains values or only a node number!
            #
            if line[1] == 'Standard':
                ll.append(
                    [line[elem_no_idx], line[X_idx], line[Y_idx], line[Z_idx]])
            else:
                # NOTE: current number in file starts with 1, index in loop starts with 0
                # therefore add 1 in the index list
                #
                empty_lines_idx.append(i_line + 1)

        input_arr = array(ll, dtype='float_')

        node_no = input_arr[:, 0]
        X = input_arr[:, 1]
        Y = input_arr[:, 2]
        Z = input_arr[:, 2]

        return node_no, X, Y, Z, empty_lines_idx

    def compare_thickness_values(self, thickness, thickness_stb):
        '''get relative difference between the calucated thickness
        read in from the obj file, cut of and projected with respect to
        the approximated data given from stb.
        '''
        thickness = thickness.reshape(shape(thickness_stb))
        error = abs(1 - thickness / thickness_stb) * 100
        return error

    def export_midsurface_data(self, node_no, x, y, z_middle, file_name,
                               empty_lines_idx):
        '''exports data to csv - worksheet
        '''
        print '*** writing middle surface data to file,', file_name, ' ***'

        data = c_[node_no, x, y, z_middle]
        file = open(file_name, 'w')
        writer = csv.writer(file, delimiter=";", lineterminator="\n")
        writer.writerow(['node_number', 'x[m]', 'y[m]', 'z[m]'])
        writer.writerows(data)

        file = file.close()

        # if file contains empty lines add them at the positions
        # defined in 'empty_lines_idx'
        #
        if len(empty_lines_idx) != 0:

            print '--- file contains ', len(
                empty_lines_idx), ' empty_lines ---'

            # file without empty lines
            #
            file = open(file_name, 'r')
            lines = file.readlines()

            # overwrite file including empty lines
            #
            file = open(file_name, 'w')

            # index 'n' runs in the array without empty lines
            # index 'i' runs in the array with empty lines
            #
            n = 0
            for i in range(data.shape[0] + len(empty_lines_idx)):

                if i in empty_lines_idx:
                    file.writelines(str(i) + ";;;;\n")
                else:
                    file.writelines(lines[n])
                    n += 1

            # add last line:
            #
            file.writelines(lines[-1])

            file.close()
            print '--- empty lines added to file ---'

        return

    def export_thickness_data(self, elem_no, x, y, t, file_name):
        '''exports data to csv - worksheet
        '''
        print '*** writing thickness data to file,', file_name, ' ***'

        data = c_[elem_no, x, y, t * 1000]
        print shape(data)
        writer = csv.writer(open(file_name, 'w'),
                            delimiter=";",
                            lineterminator="\n")
        writer.writerow(['element_number', 'x[m]', 'y[m]', 't[mm]'])
        writer.writerows(data)
        return

    @show
    def show(self, x, y, z_middle, displayed_value):
        """Test contour_surf on regularly spaced co-ordinates like MayaVi.
        """
        print '*** plotting data***'
        s = points3d(X,
                     Y,
                     z_middle,
                     displayed_value,
                     colormap="gist_rainbow",
                     mode="cube",
                     scale_factor=0.3)

        sb = colorbar(s)
        # Recorded script from Mayavi2
        #try:
        #    engine = mayavi.engine
        #except NameError:
        #    from etsproxy.mayavi.api import Engine
        #    engine = Engine()
        #    engine.start()
        #if len(engine.scenes) == 0:
        #    engine.new_scene()
        # -------------------------------------------
        glyph = s  #.pipeline.scenes[0].children[0].children[0].children[0]
        glyph.glyph.glyph_source.glyph_source.center = array([0., 0., 0.])
        glyph.glyph.glyph_source.glyph_source.progress = 1.0
        glyph.glyph.glyph_source.glyph_source.x_length = 0.6
        glyph.glyph.glyph_source.glyph_source.y_length = 0.6
        sb.scalar_bar.title = 'thickness [m]'
        #print s.pipeline
        #s.scene.background = (1.0, 1.0, 1.0)

        return s
예제 #21
0
class LC(HasTraits):
    '''Loading case class
    '''
    # path to the directory containing the state data files
    #
    data_dir = Directory

    # name of the file containing the hinge forces
    # or the displacements
    #
    file_name = Str(input=True)

    plt_export = Property

    def _get_plt_export(self):
        basename = 'hf_' + self.name + '_'
        return os.path.join(data_dir, basename)

    # data filter (used to hide unwanted values, e.g. high sigularities etc.)
    #
    data_filter = Callable(input=True)

    def _data_filter_default(self):
        return lambda x: x  #  - do nothing by default

    # name of the loading case
    #
    name = Str(input=True)

    # category of the loading case
    #
    category = Enum('dead-load',
                    'additional dead-load',
                    'imposed-load',
                    input=True)

    # list of keys specifying the names of the loading cases
    # that can not exist at the same time, i.e. which are exclusive to each other
    #
    exclusive_to = List(Str, input=True)

    def _exclusive_to_default(self):
        return []

    # combination factors (need to be defined in case of imposed loads)
    #
    psi_0 = Float(input=True)
    psi_1 = Float(input=True)
    psi_2 = Float(input=True)

    # security factors ULS
    #
    gamma_fav = Float(input=True)

    def _gamma_fav_default(self):
        if self.category == 'dead-load':
            return 1.00
        if self.category == 'additional dead-load':
            return 0.00
        if self.category == 'imposed-load':
            return 0.00

    gamma_unf = Float(input=True)

    def _gamma_unf_default(self):
        if self.category == 'dead-load':
            return 1.35
        if self.category == 'additional dead-load':
            return 1.35
        if self.category == 'imposed-load':
            return 1.50

    # security factors SLS:
    # (used to distinguish combinations where imposed-loads
    # or additional-dead-loads are favorable or unfavorable.)
    #
    gamma_fav_SLS = Float(input=True)

    def _gamma_fav_SLS_default(self):
        if self.category == 'dead-load':
            return 1.00
        elif self.category == 'additional dead-load' or \
             self.category == 'imposed-load':
            return 0.00

    gamma_unf_SLS = Float(input=True)

    def _gamma_unf_SLS_default(self):
        return 1.00

    def _read_data(self, file_name):
        '''read state data and geo data from csv-file using ';' as filed delimiter and ' ' (blank)
        as text delimiter.
        '''
        print '*** read state data from file: %s ***' % (file_name)

        # hinge forces N,V in global coordinates (as obtained from FE-modell)
        #
        #        input_arr = loadtxt(file_name , delimiter=';', skiprows=2, usecols=(2, 3, 4, 5, 6))

        #        # hinge forces N*,V* in local (hinge) coordinate system (transformed by inclination angle of the hinges, i.e. shell geometry)
        #        #
        input_arr = loadtxt(file_name,
                            delimiter=';',
                            skiprows=2,
                            usecols=(2, 3, 4, 8, 9))

        # hinge position [m]
        #
        X_hf = input_arr[:, 0][:, None]
        Y_hf = input_arr[:, 1][:, None]
        Z_hf = input_arr[:, 2][:, None]

        # INPUT Matthias - RFEM
        X_hf += 3.5
        Y_hf += 3.5

        # local hinge forces [kN]
        #
        N_ip = input_arr[:, 3][:, None]
        V_op = input_arr[:, 4][:, None]
        V_ip = 0. * V_op

        return {
            'X_hf': X_hf,
            'Y_hf': Y_hf,
            'Z_hf': Z_hf,
            'N_ip': N_ip,
            'V_ip': V_ip,
            'V_op': V_op,
        }

    # original data (before filtering)
    #
    data_orig = Property(Dict, depends_on='file_name')

    @cached_property
    def _get_data_orig(self):
        return self._read_data(self.file_name)

    # data (after filtering)
    #
    data_dict = Property(Dict, depends_on='file_name, data_filter')

    @cached_property
    def _get_data_dict(self):
        d = {}
        for k, arr in self.data_orig.items():
            d[k] = self.data_filter(arr)
        return d

    sr_columns = List(['N_ip', 'V_ip', 'V_op'])
    geo_columns = List(['X_hf', 'Y_hf', 'Z_hf'])

    sr_arr = Property(Array)

    def _get_sr_arr(self):
        '''return the stress resultants of the loading case
        as stack of all sr-column arrays.
        '''
        data_dict = self.data_dict
        return hstack([data_dict[sr_key] for sr_key in self.sr_columns])

    geo_data_dict = Property(Array)

    def _get_geo_data_dict(self):
        '''Dict of coords as sub-Dict of read in data dict
        '''
        data_dict = self.data_dict
        geo_data_dict = {}
        for geo_key in self.geo_columns:
            geo_data_dict[geo_key] = data_dict[geo_key]
        return geo_data_dict

    def _read_data_u(self, file_name_u):
        '''used to read RFEM files
        read state data and geo date from csv-file using ';' as filed delimiter and ' ' (blank)
        as text delimiter for displacement.
        '''
        print '*** read state data from file: %s ***' % (file_name_u)

        # get the column headings defined in the first row
        # of the csv displacement input file
        #
        file = open(file_name_u, 'r')
        lines = file.readlines()
        column_headings = lines[1].split(';')
        # remove '\n' from last string element in list
        #
        column_headings[-1] = column_headings[-1][:-1]
        column_headings_arr = array(column_headings)

        # skip the first two columns for 'loadtxt';
        # therefore subtract index by 2

        # geo_data:
        #
        X_idx = where('jX' == column_headings_arr)[0] - 2
        Y_idx = where('jY' == column_headings_arr)[0] - 2
        Z_idx = where('jZ' == column_headings_arr)[0] - 2

        # state_data:
        #
        U_x_idx = where('uX' == column_headings_arr)[0] - 2
        U_y_idx = where('uY' == column_headings_arr)[0] - 2
        U_z_idx = where('uZ' == column_headings_arr)[0] - 2

        file.close()
        input_arr = loadtxt(file_name_u,
                            delimiter=';',
                            skiprows=2,
                            usecols=(2, 3, 4, 5, 6, 7))

        # global coords [m]
        #
        self.X_u = input_arr[:, X_idx] + 3.5
        self.Y_u = input_arr[:, Y_idx] + 3.5
        self.Z_u = input_arr[:, Z_idx]

        print 'self.X_u', self.X_u
        print 'self.Y_u', self.Y_u

        # hinge forces [kN]
        #
        self.U_x = input_arr[:, U_x_idx]
        self.U_y = input_arr[:, U_y_idx]
        self.U_z = input_arr[:, U_z_idx]
예제 #22
0
class HPShell(HasTraits):
    '''Geometry definition of a hyperbolic parabolid shell.

    Maps normalized local grid points (coordinates in [0, 1]) to global
    shell coordinates via '__call__', using RBF approximations of the
    lower and upper shell surfaces read from '.robj' geometry files.
    '''

    #-----------------------------------------------------------------
    # geometric parameters of the shell
    #-----------------------------------------------------------------

    # dimensions of the shell for one quarter of mush_roof
    #
    length_xy_quarter = Float(3.5, input=True)  # [m]
    length_z = Float(0.927, input=True)  # [m]

    # corresponds to the delta in the geometry .obj-file with name '4x4m' as a cut off
    #
    delta_h = Float(0.865, input=True)  # [m]

    # scale factors for geometric dimensions
    # NOTE: parameters can be scaled separately, i.e. scaling of 'delta_h' (inclination of the shell)
    # does not effect the scaling of the thickness
    #
    scalefactor_delta_h = Float(1.00, input=True)  # [-]
    scalefactor_length_xy = Float(1.00, input=True)  # [-]

    # thickness of the shell
    # NOTE: only used by the option 'const_reinf_layer'
    #
    t_shell = Float(0.06, input=True)  # [m]

    width_top_col = Float(0.45, input=True)  # [m]

    # factor shifting the z coordinates at the middle nodes of the edges upwards
    # in order to include the effect of imperfection in the formwork
    z_imperfection_factor = Float(0.0)

    #-----------------------------------------------------------------
    # specify the relation of the total structure (in the 'mushroof'-model)
    # with respect to a quarter of one shell defined in 'HPShell'
    #-----------------------------------------------------------------

    # @todo: "four" is not supported by "n_elems_xy_dict"in mushroff_model
    mushroof_part = Enum('one', 'quarter', 'four', input=True)

    # 'scale_size' parameter is used as scale factor for different mushroof parts
    # Defines the proportion between the length of the total model
    # with respect to the length of a quarter shell as the
    # basic substructure of which the model consists of.
    # @todo: add "depends_on" or remove "cached_property"
    # @todo: move to class definition of "mushroof_model" not in "HPShell"
    #        (also see there "n_elems_dict" with implicit "scale_factor")
    #
    scale_size = Property(Float, depends_on='mushroof_part')

    @cached_property
    def _get_scale_size(self):
        # quarter is the basic substructure (factor 1); 'one' shell spans
        # 2x2 quarters in plan, 'four' shells span 4x4
        scale_dict = {'quarter': 1.0, 'one': 2.0, 'four': 4.0}
        return scale_dict[self.mushroof_part]

    # origin of the shell
    #
    X0 = Array(float, input=True)

    def _X0_default(self):
        # default: shell origin coincides with the global origin
        return array([0., 0., 0.])

    #-----------------------------------------------------------------
    # discretisation
    #-----------------------------------------------------------------

    # number of element used for the discretisation ( dimensions of the entire model)
    #
    n_elems_xy_quarter = Int(5, input=True)
    n_elems_z = Int(3, input=True)

    n_elems_xy = Property(Int, depends_on='n_elems_xy_quarter, +input')

    @cached_property
    def _get_n_elems_xy(self):
        # total number of elements in plan = quarter discretisation
        # scaled up by the number of quarters per edge
        return self.n_elems_xy_quarter * self.scale_size

    #-----------------------------------------------------------------
    # option: 'shift_elems'
    #-----------------------------------------------------------------

    # shift of column elements
    # if set to "True" (default) the information defined in 'shift_array' is used.
    #
    shift_elems = Bool(True, input=True)

    # 'shift_array' is used to place element corners at a defined global
    # position in order to connect the shell with the corner nodes of the column.
    # [x_shift, y_shift, number of element s between the coordinate position]
    # NOTE: 'shift_array' needs to have shape (:,3)!
    #
    shift_array = Array(float, input=True)

    def _shift_array_default(self):
        # default: single fix point at the column corner, i.e. at
        # width_top_col / sqrt(2) in both plan directions, 1 element in between
        return array(
            [[self.width_top_col / 2**0.5, self.width_top_col / 2**0.5, 1]])

    #-----------------------------------------------------------------
    # option: 'const_reinf_layer'
    #-----------------------------------------------------------------
    # 'const_reinf_layer' - parameter is used only for the non-linear analysis,
    # where an element layer with a constant thickness is needed to simulate the
    # reinforced concrete at the top and bottom of the TRC-shell.
    #
    const_reinf_layer_elem = Bool(False, input=True)
    t_reinf_layer = Float(0.03, input=True)  # [m]
    n_elems_reinf_layer = Int(
        1, input=True)  # number of dofs used for edge refinement

    #-----------------------------------------------------------------
    # read vertice points of the shell and derive a normalized
    # RBF-function for the shell approximation
    #-----------------------------------------------------------------

    # "lowerface_cut_off" - option replaces constant height for the coordinates
    # which connect to the column (this cuts of the shell geometry horizontally
    # at the bottom of the lower face of the shell geometry.
    #
    cut_off_lowerface = Bool(True, input=True)

    # choose geometric file (obj-data file)
    #
    geo_input_name = Enum('350x350cm', '4x4m', '02', input=True)

    # filter for '4x4m' file needs to be done to have regular grid
    # in order to rbf-function leading to stable solution without oscilation
    #
    geo_filter = Dict({'4x4m': delete_second_rows})

    #                        ,'350x350cm' : delete_second_rows} )

    def _read_arr(self, side='lowerface_'):
        '''Read the '.robj' geometry file for the given shell face
        ('lowerface_' or 'upperface_') from the subdirectory
        'geometry_files' and return the vertex coordinate array,
        optionally filtered by the entry in 'geo_filter'.
        '''
        file_name = side + self.geo_input_name + '.robj'
        file_path = join('geometry_files', file_name)
        # get an array with the vertice coordinates
        #
        v_arr = read_rsurface(file_path)
        #        print 'v_arr before filtering \n', v_arr
        #        print 'v_arr.shape before filtering \n', v_arr.shape
        filter = self.geo_filter.get(self.geo_input_name, None)
        if filter != None:
            v_arr = filter(v_arr)


#        print 'v_arr after filtering \n', v_arr
#        print 'v_arr.shape after filtering \n', v_arr.shape
        return v_arr

    # array of the vertex positions in global
    # x,y,z-coordinates defining the lower surface of the shell
    #
    vl_arr = Property(Array(float), depends_on='geo_input_name')

    @cached_property
    def _get_vl_arr(self):
        vl_arr = self._read_arr('lowerface_')
        if self.cut_off_lowerface == True:
            print '--- lower face z-coords cut off ---'

            # z-values of the coords from the lower face are cut off.
            # From the highest z-coordinate of the lower face the vertical
            # distance is 'delta h (for 4x4m: delta_h = 1.0m).
            # At this limit the lower face is cut off.
            # NOTE: the global z coordinate is assumed to point up
            # and must be given in the same unite as 'delta_h', i.e. in [m].
            #
            vl_z_max = max(vl_arr[:, 2])
            if self.geo_input_name == '4x4m':
                # NOTE: the global z-coordinates are given in the geo data file in [m]
                # no conversion of unites necessary (self.delta_h is given in [m])
                delta_h = self.delta_h
            elif self.geo_input_name == '350x350cm':
                # NOTE: the global z-coordinates are given in the geo data file in [cm]
                # convert delta_h from [m] to [cm]
                #
                delta_h = self.delta_h * 100.
            vl_z_min = vl_z_max - delta_h
            # clamp all z-values below the cut-off level to the cut-off level
            vl_arr_z = where(vl_arr[:, 2] < vl_z_min, vl_z_min, vl_arr[:, 2])
            vl_arr = c_[vl_arr[:, 0:2], vl_arr_z]

        return vl_arr

    # array of the vertex positions in global
    # x,y,z-coordinates defining the upper surface of the shell
    #
    vu_arr = Property(Array(float), depends_on='geo_input_name')

    @cached_property
    def _get_vu_arr(self):
        return self._read_arr('upperface_')

    # normalized coordinates of the vertices for the lowerface
    # NOTE: the underline character indicates a normalized value
    # @todo: 'normalize_rsurfaces' is called twice for 'vl_arr_' and 'vu_arr_'
    #
    vl_arr_ = Property(Array(float), depends_on='geo_input_name')

    @cached_property
    def _get_vl_arr_(self):
        vl_arr_, vu_arr_ = normalize_rsurfaces(self.vl_arr, self.vu_arr)
        return vl_arr_

    # normalized coordinates of the vertices for the lowerface
    # NOTE: the underline character indicates a normalized value
    #
    vu_arr_ = Property(Array(float), depends_on='geo_input_name')

    @cached_property
    def _get_vu_arr_(self):
        vl_arr_, vu_arr_ = normalize_rsurfaces(self.vl_arr, self.vu_arr)
        return vu_arr_

    rbf_l_ = Property(Instance(Rbf), depends_on='geo_input_name')

    @cached_property
    def _get_rbf_l_(self):
        # use a radial basis function approximation (rbf) (i.e. interpolation of
        # scattered data) based on the normalized vertex points of the lower face
        #
        xl_ = self.vl_arr_[:, 0]
        yl_ = self.vl_arr_[:, 1]
        zl_ = self.vl_arr_[:, 2]

        # flip the orientation of the local coordinate axis
        # depending on the geometry file used
        #
        if self.geo_input_name == '350x350cm':
            xl_ = 1 - self.vl_arr_[:, 0]
        if self.geo_input_name == '4x4m':
            yl_ = 1 - self.vl_arr_[:, 1]

        rbf_l_ = Rbf(xl_, yl_, zl_, function='cubic')
        #        rbf_l_ = Rbf( xl_, yl_, zl_, function = 'linear' )
        return rbf_l_

    rbf_u_ = Property(Instance(Rbf), depends_on='geo_input_name')

    @cached_property
    def _get_rbf_u_(self):
        # use a radial basis function approximation (rbf) (i.e. interpolation of
        # scattered data) based on the normalized vertex points of the upper face
        #
        xu_ = self.vu_arr_[:, 0]
        yu_ = self.vu_arr_[:, 1]
        zu_ = self.vu_arr_[:, 2]

        # flip the orientation of the local coordinate axis
        # depending on the geometry file used
        #
        if self.geo_input_name == '350x350cm':
            xu_ = 1 - self.vu_arr_[:, 0]
        if self.geo_input_name == '4x4m':
            yu_ = 1 - self.vu_arr_[:, 1]

        rbf_u_ = Rbf(xu_, yu_, zu_, function='cubic')
        #        rbf_u_ = Rbf( xu_, yu_, zu_, function = 'linear' )
        return rbf_u_

    #------------------------------------------------------------------------------
    # hp_shell geometric transformation
    # NOTE: returns the global coordinates of the shell based on the supplied local
    #       grid points
    #------------------------------------------------------------------------------

    def __call__(self, points):
        '''Return the global coordinates of the supplied local points.

        'points' is an (n, 3) array of normalized local coordinates
        (values between 0 and 1).
        NOTE(review): 'zi_ += ...' below mutates the z-column of the
        supplied 'points' array in place - callers should not rely on
        'points' being unchanged after the call.
        '''

        # number of local grid points for each coordinate direction
        # NOTE: values must range between 0 and 1
        #
        xi_, yi_, zi_ = points[:, 0], points[:, 1], points[:, 2]

        # insert imperfection (shift the middle node of the shell upwards)
        imp = self.z_imperfection_factor
        zi_ += imp * xi_ + imp * yi_ - 2 * imp * xi_ * yi_

        # size of total structure
        #
        # @todo: move to class definition of "mushroof_model" and send to "__call__"
        scale_size = self.scale_size
        # @todo: add "_quarter" (see above)
        length_xy_tot = self.length_xy_quarter * scale_size
        n_elems_xy_quarter = self.n_elems_xy_quarter

        #        print 'HPShell n_elems_xy_quarter', n_elems_xy_quarter
        # distance from origin for each mushroof_part
        #
        def d_origin_fn(self, coords):
            if self.mushroof_part == 'quarter':
                return coords
            if self.mushroof_part == 'one':
                return abs(2.0 * coords - 1.0)
            # @todo: corresponding "scale_factor" needs to be added
            #        in order for this to work
            if self.mushroof_part == 'four':
                return where(coords < 0.5, abs(4 * coords - 1),
                             abs(-4 * coords + 3))

        # element at column shift
        #
        if self.shift_elems == True:

            # define the origin for each model part
            # NOTE(review): the 'one' branch closes over 'xi_' instead of
            # using the 'coords' argument; this works only because 'xi_'
            # and 'coords' always have the same shape here - confirm.
            #
            def origin_fn(self, coords):
                if self.mushroof_part == 'quarter':
                    return zeros_like(coords)
                if self.mushroof_part == 'one':
                    return ones_like(xi_) * 0.5
                if self.mushroof_part == 'four':
                    return where(coords < 0.5, 0.25, 0.75)

            def piecewise_linear_fn(x, x_fix_arr_, y_fix_arr_):
                '''creates a piecewise linear_fn going through the fix_points
                values need to be normed running between 0..1
                and values have to be unique'''
                x_fix_arr_ = hstack((0, x_fix_arr_, 1))
                y_fix_arr_ = hstack((0, y_fix_arr_, 1))
                rbf_fn_ = Rbf(x_fix_arr_, y_fix_arr_,
                              function='linear')  #rbf has to be linear
                return rbf_fn_(x)

            # define origin for quarter
            #
            xi_origin_arr_ = origin_fn(self, xi_)
            yi_origin_arr_ = origin_fn(self, yi_)
            #            print 'xi_origin_arr_', xi_origin_arr_

            # delta towards origin
            #
            xi_delta_arr_ = (xi_ - xi_origin_arr_) * scale_size
            yi_delta_arr_ = (yi_ - yi_origin_arr_) * scale_size
            #            print 'xi_delta_arr_', xi_delta_arr

            # define sign
            #
            xi_sign_arr = where(xi_delta_arr_ == 0., 0.,
                                xi_delta_arr_ / abs(xi_delta_arr_))
            yi_sign_arr = where(yi_delta_arr_ == 0., 0.,
                                yi_delta_arr_ / abs(yi_delta_arr_))
            #            print 'xi_sign_arr', xi_sign_arr

            # fix points defined in shift array as normelized values
            #
            x_fix_ = self.shift_array[:, 0] / self.length_xy_quarter
            #            print 'x_fix_', x_fix_

            y_fix_ = self.shift_array[:, 1] / self.length_xy_quarter
            n_fix_ = add.accumulate(self.shift_array[:,
                                                     2]) / n_elems_xy_quarter

            #            print 'add.accumulate( self.shift_array[:, 2] )', add.accumulate( self.shift_array[:, 2] )
            #            print 'n_fix_', n_fix_
            #            print 'piecewise_linear_fn', piecewise_linear_fn( abs( xi_delta_arr_ ),
            #                                                                   n_fix_,
            #                                                                   x_fix_ ) / scale_size

            # new xi_
            #
            xi_ = xi_origin_arr_ + xi_sign_arr * piecewise_linear_fn(
                abs(xi_delta_arr_), n_fix_, x_fix_) / scale_size

            #            print 'xi_new', xi_

            # new yi
            #
            yi_ = yi_origin_arr_ + yi_sign_arr * piecewise_linear_fn(
                abs(yi_delta_arr_), n_fix_, y_fix_) / scale_size

            #-------------------------------------------------------------------------------------

        # values are used to calculate the z-coordinate using RBF-function of the quarter
        # (= values of the distance to the origin as absolute value)
        #
        xi_rbf_ = d_origin_fn(self, xi_)
        #        print 'xi_rbf_', xi_rbf_
        yi_rbf_ = d_origin_fn(self, yi_)

        # get the z-value at the supplied local grid points
        # of the lower face
        #
        zi_lower_ = self.rbf_l_(xi_rbf_, yi_rbf_)

        # get the z-value at the supplied local grid points
        # of the upper face
        #
        zi_upper_ = self.rbf_u_(xi_rbf_, yi_rbf_)

        # constant edge element transformation
        #
        if self.const_reinf_layer_elem == True:
            # arrange and check data
            #
            if self.t_reinf_layer > self.t_shell / 2. or self.n_elems_z < 3:
                print '--- constant edge element transformation canceled ---'
                print 'the following condition needs to be fullfilled: \n'
                print 'self.t_reinf_layer <= self.t_shell/2 and self.n_elems_z >= 3'
            else:
                n_elems_z = float(self.n_elems_z)
                # normed thickness will evaluate as t_reinf_layer at each element
                t_reinf_layer_ = self.t_reinf_layer / self.length_z / (
                    zi_upper_ - zi_lower_)

                # zi_old set off from top which needs to be shifted
                delta_ = self.n_elems_reinf_layer / n_elems_z

                # get upper, lower and internal coordinates, that need to be shifted
                zi_lower = where(zi_ <= delta_)
                zi_upper = where(abs(1 - zi_) <= delta_ + 1e-10)
                zi_inter = where(abs(zi_ - 0.5) < 0.5 - (delta_ + 1e-10))

                # narrowing of coordinates
                zi_[zi_lower] = zi_[zi_lower] * t_reinf_layer_[
                    zi_lower] / delta_
                zi_[zi_upper] = 1 - (
                    1 - zi_[zi_upper]) * t_reinf_layer_[zi_upper] / delta_
                zi_[zi_inter] = t_reinf_layer_[zi_inter] + \
                                (zi_[zi_inter] - delta_) / (1 - 2 * delta_)\
                                 * (1 - 2 * t_reinf_layer_[zi_inter])
                print '--- constant edge elements transformation done ---'

        # thickness is multiplied by the supplied zi coordinate
        #
        z_ = (zi_lower_ * self.scalefactor_delta_h +
              (zi_upper_ - zi_lower_) * zi_)

        # coordinates of origin
        #
        X_0, Y_0, Z_0 = self.X0

        print '--- geometric transformation done ---'

        # multiply the local grid points with the real dimensions in order to obtain the
        # global coordinates of the mushroof_part:
        #
        return c_[X_0 + (xi_ * length_xy_tot) * self.scalefactor_length_xy,
                  Y_0 + (yi_ * length_xy_tot) * self.scalefactor_length_xy,
                  Z_0 + z_ * self.length_z]
예제 #23
0
class PDistrib(HasTraits):
    '''Configurable probability distribution with discretization and plotting.

    Wraps a scipy.stats-style continuous distribution (selected via
    'distr_choice') in a 'Distribution' instance and exposes
    - statistical moments ('mean', 'variance'),
    - a discretized domain ('range', 'x_array') with pdf/cdf arrays,
    - random sampling ('rvs', 'get_rvs_array'),
    - a matplotlib visualization ('plot', 'refresh').
    '''

    # declare conformance to the probability-distribution interface
    implements = IPDistrib

    def __init__(self, **kw):
        super(PDistrib, self).__init__(**kw)
        # keep the plot up to date with any change of the distribution
        # parameters or of the discretization controls
        self.on_trait_change(self.refresh,
                             'distr_type.changed,quantile,n_segments')
        self.refresh()

    # puts all chosen continuous distributions distributions defined
    # in the scipy.stats.distributions module as a list of strings
    # into the Enum trait
    # distr_choice = Enum(distr_enum)

    distr_choice = Enum('sin2x', 'weibull_min', 'sin_distr', 'uniform', 'norm',
                        'piecewise_uniform', 'gamma')
    # mapping from the Enum keys to the actual distribution objects
    distr_dict = {
        'sin2x': sin2x,
        'uniform': uniform,
        'norm': norm,
        'weibull_min': weibull_min,
        'sin_distr': sin_distr,
        'piecewise_uniform': piecewise_uniform,
        'gamma': gamma
    }

    # instantiating the continuous distributions
    distr_type = Property(Instance(Distribution), depends_on='distr_choice')

    @cached_property
    def _get_distr_type(self):
        return Distribution(self.distr_dict[self.distr_choice])

    # change monitor - accumulate the changes in a single event trait
    changed = Event

    @on_trait_change('distr_choice, distr_type.changed, quantile, n_segments')
    def _set_changed(self):
        self.changed = True

    #------------------------------------------------------------------------
    # Methods setting the statistical moments
    #------------------------------------------------------------------------
    mean = Property

    def _get_mean(self):
        return self.distr_type.mean

    def _set_mean(self, value):
        self.distr_type.mean = value

    variance = Property

    # NOTE(review): the original code accessed 'self.distr_type.mean' in
    # both variance accessors - an apparent copy-paste bug from the 'mean'
    # property above. Fixed to delegate to 'variance'; confirm that
    # 'Distribution' exposes an attribute of that name.
    def _get_variance(self):
        return self.distr_type.variance

    def _set_variance(self, value):
        self.distr_type.variance = value

    #------------------------------------------------------------------------
    # Methods preparing visualization
    #------------------------------------------------------------------------

    # tail probability cut off at both ends of the plotted domain
    quantile = Float(1e-14, auto_set=False, enter_set=True)
    range = Property(Tuple(Float), depends_on=\
                      'distr_type.changed, quantile')

    @cached_property
    def _get_range(self):
        # domain bounds obtained by inverting the cdf at the tail quantiles
        return (self.distr_type.distr.ppf(self.quantile),
                self.distr_type.distr.ppf(1 - self.quantile))

    # number of segments used for the domain discretization
    n_segments = Int(500, auto_set=False, enter_set=True)

    dx = Property(Float, depends_on=\
                      'distr_type.changed, quantile, n_segments')

    @cached_property
    def _get_dx(self):
        # uniform segment width of the discretized domain
        range_length = self.range[1] - self.range[0]
        return range_length / self.n_segments

    #-------------------------------------------------------------------------
    # Discretization of the distribution domain
    #-------------------------------------------------------------------------
    x_array = Property(Array('float_'), depends_on=\
                        'distr_type.changed,'\
                        'quantile, n_segments')

    @cached_property
    def _get_x_array(self):
        '''Get the intrinsic discretization of the distribution
        respecting its bounds.
        '''
        return linspace(self.range[0], self.range[1], self.n_segments + 1)

    #===========================================================================
    # Access function to the scipy distribution
    #===========================================================================
    def pdf(self, x):
        '''Probability density at x.'''
        return self.distr_type.distr.pdf(x)

    def cdf(self, x):
        '''Cumulative probability at x.'''
        return self.distr_type.distr.cdf(x)

    def rvs(self, n):
        '''Draw n random samples.'''
        return self.distr_type.distr.rvs(n)

    def ppf(self, e):
        '''Inverse cdf (percent point function) at probability e.'''
        return self.distr_type.distr.ppf(e)

    #===========================================================================
    # PDF - permanent array
    #===========================================================================

    pdf_array = Property(Array('float_'), depends_on=\
                                    'distr_type.changed,'\
                                     'quantile, n_segments')

    @cached_property
    def _get_pdf_array(self):
        '''Get pdf values in intrinsic positions'''
        return self.distr_type.distr.pdf(self.x_array)

    def get_pdf_array(self, x_array):
        '''Get pdf values in externally specified positions'''
        return self.distr_type.distr.pdf(x_array)

    #===========================================================================
    # CDF permanent array
    #===========================================================================
    cdf_array = Property(Array('float_'), depends_on=\
                                    'distr_type.changed,'\
                                     'quantile, n_segments')

    @cached_property
    def _get_cdf_array(self):
        '''Get cdf values in intrinsic positions'''
        return self.distr_type.distr.cdf(self.x_array)

    def get_cdf_array(self, x_array):
        '''Get cdf values in externally specified positions'''
        return self.distr_type.distr.cdf(x_array)

    #-------------------------------------------------------------------------
    # Randomization
    #-------------------------------------------------------------------------
    def get_rvs_array(self, n_samples):
        '''Draw n_samples random samples from the distribution.'''
        return self.distr_type.distr.rvs(n_samples)

    figure = Instance(Figure)

    def _figure_default(self):
        figure = Figure(facecolor='white')
        return figure

    data_changed = Event

    def plot(self, fig):
        '''Render pdf, cdf, mean and +/- stdev markers into the figure.'''
        figure = fig
        figure.clear()
        axes = figure.gca()
        # plot PDF
        axes.plot(self.x_array, self.pdf_array, lw=1.0, color='blue', \
                  label='PDF')
        axes2 = axes.twinx()
        # plot CDF on a separate axis (tick labels right)
        axes2.plot(self.x_array, self.cdf_array, lw=2, color='red', \
                  label='CDF')
        # fill the unity area given by integrating PDF along the X-axis
        axes.fill_between(self.x_array,
                          0,
                          self.pdf_array,
                          color='lightblue',
                          alpha=0.8,
                          linewidth=2)
        # plot mean
        mean = self.distr_type.distr.stats('m')
        axes.plot([mean, mean], [0.0, self.distr_type.distr.pdf(mean)],
                  lw=1.5,
                  color='black',
                  linestyle='-')
        # plot stdev
        stdev = sqrt(self.distr_type.distr.stats('v'))
        axes.plot([mean - stdev, mean - stdev],
                  [0.0, self.distr_type.distr.pdf(mean - stdev)],
                  lw=1.5,
                  color='black',
                  linestyle='--')
        axes.plot([mean + stdev, mean + stdev],
                  [0.0, self.distr_type.distr.pdf(mean + stdev)],
                  lw=1.5,
                  color='black',
                  linestyle='--')

        axes.legend(loc='center left')
        axes2.legend(loc='center right')
        axes.ticklabel_format(scilimits=(-3., 4.))
        axes2.ticklabel_format(scilimits=(-3., 4.))

        # plot limits on X and Y axes with a 5% margin on both sides
        axes.set_ylim(0.0, max(self.pdf_array) * 1.15)
        axes2.set_ylim(0.0, 1.15)
        range = self.range[1] - self.range[0]
        axes.set_xlim(self.x_array[0] - 0.05 * range,
                      self.x_array[-1] + 0.05 * range)
        axes2.set_xlim(self.x_array[0] - 0.05 * range,
                       self.x_array[-1] + 0.05 * range)

    def refresh(self):
        '''Redraw the figure and notify listeners via 'data_changed'.'''
        self.plot(self.figure)
        self.data_changed = True

    icon = Property(Instance(ImageResource),
                    depends_on='distr_type.changed,quantile,n_segments')

    @cached_property
    def _get_icon(self):
        # render a small thumbnail of the current distribution to a
        # temporary png file and wrap it as an ImageResource
        fig = plt.figure(figsize=(4, 4), facecolor='white')
        self.plot(fig)
        tf_handle, tf_name = tempfile.mkstemp('.png')
        fig.savefig(tf_name, dpi=35)
        return ImageResource(name=tf_name)

    traits_view = View(HSplit(VGroup(
        Group(
            Item('distr_choice', show_label=False),
            Item('@distr_type', show_label=False),
        ),
        id='pdistrib.distr_type.pltctrls',
        label='Distribution parameters',
        scrollable=True,
    ),
                              Tabbed(
                                  Group(
                                      Item('figure',
                                           editor=MPLFigureEditor(),
                                           show_label=False,
                                           resizable=True),
                                      scrollable=True,
                                      label='Plot',
                                  ),
                                  Group(Item('quantile', label='quantile'),
                                        Item('n_segments',
                                             label='plot points'),
                                        label='Plot parameters'),
                                  label='Plot',
                                  id='pdistrib.figure.params',
                                  dock='tab',
                              ),
                              dock='tab',
                              id='pdistrib.figure.view'),
                       id='pdistrib.view',
                       dock='tab',
                       title='Statistical distribution',
                       buttons=['Ok', 'Cancel'],
                       scrollable=True,
                       resizable=True,
                       width=600,
                       height=400)
예제 #24
0
class SimBT4PTDB(SimBT4PT):
    '''Four-point bending test simulation parameterized from the
    simulation database: the composite cross section unit cell
    ('ccs_unit_cell_key') supplies the damage function and the
    material parameters (E_m, E_c, nu).
    '''

    # vary the failure strain in PhiFnGeneralExtended:
    factor_eps_fail = Float(1.0, input=True, ps_levels=(1.0, 1.2, 3))

    #-----------------
    # composite cross section unit cell:
    #-----------------
    #
    ccs_unit_cell_key = Enum('FIL-10-09_2D-05-11_0.00462_all0',
                             CCSUnitCell.db.keys(),
                             simdb=True,
                             input=True,
                             auto_set=False,
                             enter_set=True)

    # database record selected by 'ccs_unit_cell_key'
    ccs_unit_cell_ref = Property(Instance(SimDBClass),
                                 depends_on='ccs_unit_cell_key')

    @cached_property
    def _get_ccs_unit_cell_ref(self):
        return CCSUnitCell.db[self.ccs_unit_cell_key]

    #-----------------
    # damage function:
    #-----------------
    #
    material_model = Str(input=True)

    def _material_model_default(self):
        # return the material model key of the first DamageFunctionEntry
        # This is necessary to avoid an ValueError at setup
        return self.ccs_unit_cell_ref.damage_function_list[0].material_model

    calibration_test = Str(input=True)

    def _calibration_test_default(self):
        # return the calibration test key of the first DamageFunctionEntry
        # This is necessary to avoid an ValueError at setup
        return self.ccs_unit_cell_ref.damage_function_list[0].calibration_test

    # damage function looked up in the unit cell record for the selected
    # material model / calibration test combination
    damage_function = Property(Instance(MFnLineArray),
                               depends_on='input_change')

    @cached_property
    def _get_damage_function(self):
        return self.ccs_unit_cell_ref.get_param(self.material_model,
                                                self.calibration_test)

    #-----------------
    # phi function extended:
    #-----------------
    #


#    phi_fn = Property(Instance(PhiFnGeneralExtended),
#                       depends_on = 'input_change,+ps_levels')
#    @cached_property
#    def _get_phi_fn(self):
#        return PhiFnGeneralExtended(mfn = self.damage_function,
#                                     factor_eps_fail = self.factor_eps_fail)

#-----------------
# phi function General:
#-----------------
#
# definition of 'phi_fn' in order to run scripting
# @todo: how can the implementation be changed in order to allow for parameter passing, e.g. 'factor_eps_fail', etc.
#
    phi_fn_class = Enum(
        PhiFnGeneral,
        [PhiFnGeneral, PhiFnGeneralExtended, PhiFnGeneralExtendedExp],
        input=True,
    )
    phi_fn = Property(Instance(phi_fn_class),
                      depends_on='phi_fn_class, input_change, +ps_levels')

    @cached_property
    def _get_phi_fn(self):
        # instantiate the selected phi-function class with the damage
        # function obtained from the unit cell record
        return self.phi_fn_class(mfn=self.damage_function)

    #----------------------------------------------------------------------------------
    # material properties stored in 'ccs'
    #----------------------------------------------------------------------------------

    # concrete matrix E-modulus (taken from 'ccs_unit_cell')
    #
    E_m = Property(Float, depends_on='input_change')

    @cached_property
    def _get_E_m(self):
        # age-dependent matrix stiffness from the database record
        E_m = self.ccs_unit_cell_ref.get_E_m_time(self.age)
        print 'E_m (from ccs)', E_m
        return E_m

    # composite E-modulus (taken from 'ccs_unit_cell')
    #
    E_c = Property(Float, depends_on='input_change')

    @cached_property
    def _get_E_c(self):
        # age-dependent composite stiffness from the database record
        E_c = self.ccs_unit_cell_ref.get_E_c_time(self.age)
        print 'E_c (from ccs)', E_c
        return E_c

    # Poisson's ratio
    #
    nu = Property(Float, depends_on='input_change')

    @cached_property
    def _get_nu(self):
        nu = self.ccs_unit_cell_ref.nu
        print 'nu (from ccs)', nu
        # set nu explicitly corresponding to settings in 'mats_calib_damage_fn'
        # (the database value read above is deliberately overridden)
        #
        print 'nu set explicitly to 0.20'
        nu = 0.2
        return nu
예제 #25
0
class RandomVariable( HasTraits ):
    '''Class representing the definition and discretization of a random variable.

    The variable is either deterministic ('random' is False, in which case
    'trait_value' is used directly) or randomized through the attached
    probability distribution 'pd'.
    '''
    # deterministic value used when the variable is not randomized
    trait_value = Float

    # trait declaration this variable originates from; its 'distr' metadata
    # lists the admissible distribution types (checked in set_random)
    source_trait = CTrait

    # name of the parameter
    name = Str

    # probability distribution object delivering the discretization
    pd = Instance( IPDistrib )
    def _pd_changed( self ):
        '''Keep the distribution's segmentation in sync with n_int.'''
        self.pd.n_segments = self._n_int

    # event fired upon any change of the distribution or of '+changed'
    # traits; serves as the invalidation source for the cached arrays below
    changed = Event
    @on_trait_change( 'pd.changed,+changed' )
    def _set_changed( self ):
        self.changed = True

    # Switch the parameter random 
    random = Bool( False, changed = True )

    def set_random( self, distribution = 'uniform', discr_type = 'T grid',
                    loc = 0., scale = 1., shape = 1., n_int = 30 ):
        '''Attach a probability distribution and mark the variable random.

        Raises AssertionError if 'distribution' is not among the types
        allowed by the 'distr' metadata of the source trait.
        '''
        possible_distr = self.source_trait.distr
        if distribution and distribution not in possible_distr:
            # Python 2 raise syntax (file-wide convention)
            raise AssertionError, 'distribution type %s not allowed for parameter %s' \
                % ( distribution, self.name )

        self.pd = PDistrib( distr_choice = distribution, n_segments = n_int )
        self.pd.distr_type.set( scale = scale, shape = shape, loc = loc )

        self.n_int = n_int
        self.discr_type = discr_type
        self.random = True

    def unset_random( self ):
        '''Switch back to the deterministic 'trait_value'.'''
        self.random = False

    # Number of integration points (relevant only for grid based methods)
    _n_int = Int
    n_int = Property
    def _set_n_int( self, value ):
        if self.pd:
            self.pd.n_segments = value
        self._n_int = value
    def _get_n_int( self ):
        # NOTE(review): raises AttributeError when no 'pd' is set yet
        return self.pd.n_segments

    # type of the RandomVariable discretization 
    discr_type = Enum( 'T grid', 'P grid', 'MC',
                        changed = True )
    def _discr_type_default( self ):
        return 'T grid'

    # discretized sampling points of the variable (cached until 'changed')
    theta_arr = Property( Array( 'float_' ), depends_on = 'changed' )
    @cached_property
    def _get_theta_arr( self ):
        '''Get the discr_type of the pdistrib
        '''
        if not self.random:
            # deterministic variable: single-value array
            return array( [ self.trait_value ], dtype = float )

        if self.discr_type == 'T grid':

            # Get the discr_type from pdistrib and shift it
            # such that the midpoints of the segments are used for the
            # integration.

            x_array = self.pd.x_array
            # Note assumption of T grid discr_type
            theta_array = x_array[:-1] + self.pd.dx / 2.0

        elif self.discr_type == 'P grid':

            # P grid disretization generated from the inverse cummulative
            # probability
            #
            distr = self.pd.distr_type.distr
            # Grid of constant probabilities
            pi_arr = linspace( 0.5 / self.n_int, 1. - 0.5 / self.n_int, self.n_int )
            theta_array = distr.ppf( pi_arr )

        # NOTE(review): no branch assigns 'theta_array' for
        # discr_type == 'MC', so that case raises UnboundLocalError here --
        # confirm whether MC sampling is meant to go via get_rvs_theta_arr
        return theta_array

    # integration weights associated with 'theta_arr'
    dG_arr = Property( Array( 'float_' ), depends_on = 'changed' )
    @cached_property
    def _get_dG_arr( self ):
        '''Integration weights for the discretization in 'theta_arr'.'''

        if not self.random:
            # deterministic variable carries the full weight 1.0
            return array( [ 1.0 ], dtype = float )

        if self.discr_type == 'T grid':

            # pdf values at the midpoints times the (equidistant) step width
            d_theta = self.theta_arr[1] - self.theta_arr[0]
            return self.pd.get_pdf_array( self.theta_arr ) * d_theta

        elif self.discr_type == 'P grid':

            # P grid disretization generated from the inverse cummulative
            # probability
            #
            # equal probability weight per point, returned as a length-1
            # array -- presumably relying on broadcasting against
            # 'theta_arr'; 'MC' falls through and returns None
            return array( [ 1.0 / float( self.n_int ) ], dtype = 'float_' )

    def get_rvs_theta_arr( self, n_samples ):
        '''Return n_samples Monte-Carlo realizations of the variable
        (or the single deterministic value if it is not random).'''
        if self.random:
            return self.pd.get_rvs_array( n_samples )
        else:
            return array( [self.trait_value], float )
# 예제 #26 (Example #26 -- scraped-page separator; the stray '0' was a vote count)
class RV( HasTraits ):
    '''Class representing the definition and discretization of a random variable.

    Always random: the attached probability distribution 'pd' supplies
    the discretization points and integration weights.
    '''
    # name of the parameter
    name = Str

    # probability distribution object delivering the discretization
    pd = Instance( IPDistrib )
    def _pd_changed( self ):
        '''Keep the distribution's segmentation in sync with n_int.'''
        self.pd.n_segments = self._n_int

    # event fired upon any change of the distribution or of '+changed'
    # traits; serves as the invalidation source for the cached arrays below
    changed = Event
    @on_trait_change( 'pd.changed,+changed' )
    def _set_changed( self ):
        self.changed = True

    # number of integration points, forwarded to the distribution
    _n_int = Int
    n_int = Property
    def _set_n_int( self, value ):
        if self.pd:
            self.pd.n_segments = value
        self._n_int = value
    def _get_n_int( self ):
        # NOTE(review): raises AttributeError when no 'pd' is set yet
        return self.pd.n_segments

    # index within the randomization
    idx = Int( 0 )

    # type of the RV discretization 
    discr_type = Enum( 'T grid', 'P grid', 'MC',
                        changed = True )
    def _discr_type_default( self ):
        return 'T grid'

    # discretized sampling points of the variable (cached until 'changed')
    theta_arr = Property( Array( 'float_' ), depends_on = 'changed' )
    @cached_property
    def _get_theta_arr( self ):
        '''Get the discr_type of the pdistrib
        '''
        if self.discr_type == 'T grid':

            # Get the discr_type from pdistrib and shift it
            # such that the midpoints of the segments are used for the
            # integration.

            x_array = self.pd.x_array
            # Note assumption of T grid discr_type
            theta_array = x_array[:-1] + self.pd.dx / 2.0

        elif self.discr_type == 'P grid':

            # P grid disretization generated from the inverse cummulative
            # probability
            #
            distr = self.pd.distr_type.distr
            # Grid of constant probabilities
            pi_arr = linspace( 0.5 / self.n_int, 1. - 0.5 / self.n_int, self.n_int )
            theta_array = distr.ppf( pi_arr )

        # NOTE(review): no branch assigns 'theta_array' for
        # discr_type == 'MC', so that case raises UnboundLocalError here --
        # confirm whether MC sampling is meant to go via get_rvs_theta_arr
        return theta_array

    # integration weights associated with 'theta_arr'
    dG_arr = Property( Array( 'float_' ), depends_on = 'changed' )
    @cached_property
    def _get_dG_arr( self ):
        '''Integration weights for the discretization in 'theta_arr'.'''

        if self.discr_type == 'T grid':

            # pdf values at the midpoints times the (equidistant) step width
            d_theta = self.theta_arr[1] - self.theta_arr[0]
            return self.pd.get_pdf_array( self.theta_arr ) * d_theta

        elif self.discr_type == 'P grid':

            # P grid disretization generated from the inverse cummulative
            # probability
            #
            # equal probability weight per point, returned as a length-1
            # array -- presumably relying on broadcasting against
            # 'theta_arr'; 'MC' falls through and returns None
            return array( [ 1.0 / float( self.n_int ) ], dtype = 'float_' )

    def get_rvs_theta_arr( self, n_samples ):
        '''Return n_samples Monte-Carlo realizations of the variable.'''
        return self.pd.get_rvs_array( n_samples )
# 예제 #27 (Example #27 -- scraped-page separator; the stray '0' was a vote count)
class MKPullOut(HasTraits):
    '''Yarn pull-out study: combines a symmetric double-pullout response
    function with parameter distributions selected from the
    'MKPullOutParamDistribs' database, and exposes the assembly in a
    traits UI view.
    '''

    # response function of a single filament (fixed default parameters)
    rf = Instance(DoublePulloutSym)

    def _rf_default(self):
        '''Default filament response function.'''
        return DoublePulloutSym(tau_fr=3.14,
                                l=0.0,
                                d=25.5e-3,
                                E_mod=70.0e3,
                                theta=0.0,
                                xi=0.0179,
                                phi=1.,
                                L=30.0)

    #--------------------------------------------------------------------------------
    # select the concrete mixture from the concrete database:
    #--------------------------------------------------------------------------------

    # key into the parameter-distribution database; the Enum values are the
    # database keys available at class-definition time
    param_distribs_key = Enum(MKPullOutParamDistribs.db.keys(),
                              simdb=True,
                              input=True,
                              auto_set=False,
                              enter_set=True)

    # database record selected by 'param_distribs_key'
    param_distribs_ref = Property(Instance(SimDBClass),
                                  depends_on='param_distribs_key')

    @cached_property
    def _get_param_distribs_ref(self):
        '''Look up the selected parameter-distribution record.'''
        return MKPullOutParamDistribs.db[self.param_distribs_key]

    # pull-out model assembled from the response function and the selected
    # parameter distributions (rebuilt when the key changes)
    po = Property(Instance(YarnPullOut), depends_on='param_distribs_key')

    @cached_property
    def _get_po(self):
        '''Construct the yarn pull-out model for the selected distributions.'''
        pd = self.param_distribs_ref
        # NOTE(review): keyword 'pdf_l' is paired with switch 'pdf_ell_on'
        # (l vs ell) -- verify against the YarnPullOut signature
        return YarnPullOut(rf=self.rf,
                           pdf_phi=pd.phi,
                           pdf_phi_on=True,
                           pdf_theta=pd.theta,
                           pdf_theta_on=True,
                           pdf_l=pd.ell,
                           pdf_ell_on=True,
                           n_f=1743,
                           w_max=1.2)

    #--------------------------------------------------------------------------------
    # view
    #--------------------------------------------------------------------------------

    traits_view = View(
        VSplit(
            VGroup(
                Spring(),
                Item('param_distribs_key', label='parameters'),
                Spring(),
                Item('param_distribs_ref@', show_label=False),
                Spring(),
                id='mkpo.split.pd',
                label='parameter distributions',
                dock='tab',
            ),
            HSplit(
                Item('po@', show_label=False),
                label='Pull Out',
                id='mkpo.split.pu',
                scrollable=True,
                dock='tab',
            ),
            dock='tab',
            id='mkpo.split',
            orientation='vertical',
        ),
        dock='tab',
        id='mkpo',
        scrollable=True,
        resizable=True,
        height=0.4,
        width=0.5,
        buttons=['OK', 'Cancel'],
    )